s3fs-fuse-1.86/.clang-tidy

Checks: ' -*, bugprone-*, -bugprone-branch-clone, -bugprone-macro-parentheses, google-*, -google-build-using-namespace, -google-readability-casting, -google-readability-function-size, -google-readability-todo, -google-runtime-int, -google-runtime-references, misc-*, -misc-redundant-expression, -misc-unused-parameters, modernize-*, -modernize-avoid-c-arrays, -modernize-deprecated-headers, -modernize-loop-convert, -modernize-use-auto, -modernize-use-nullptr, -modernize-use-trailing-return-type, -modernize-use-using, performance-*, portability-*, readability-*, -readability-else-after-return, -readability-function-size, -readability-implicit-bool-conversion, -readability-isolate-declaration, -readability-magic-numbers, -readability-named-parameter, -readability-simplify-boolean-expr'

s3fs-fuse-1.86/.gitattributes

# # s3fs - FUSE-based file system backed by Amazon S3 # # Copyright(C) 2007 Randy Rizun # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # * text eol=lf *.png binary # # Local variables: # tab-width: 4 # c-basic-offset: 4 # End: # vim600: noet sw=4 ts=4 fdm=marker # vim<600: noet sw=4 ts=4 #

s3fs-fuse-1.86/.github/ISSUE_TEMPLATE.md

### Additional Information

_The following information is very important in order to help us help you.
If you omit these details, your support request may be delayed or receive no attention at all._

_Keep in mind that the commands we provide to retrieve information are oriented toward GNU/Linux distributions, so you may need to use other commands if you use s3fs on macOS or BSD._

#### Version of s3fs being used (s3fs --version)

_example: 1.00_

#### Version of fuse being used (pkg-config --modversion fuse, rpm -qi fuse, dpkg -s fuse)

_example: 2.9.4_

#### Kernel information (uname -r)

_command result: uname -r_

#### GNU/Linux Distribution, if applicable (cat /etc/os-release)

_command result: cat /etc/os-release_

#### s3fs command line used, if applicable

```
```

#### /etc/fstab entry, if applicable

```
```

#### s3fs syslog messages (grep s3fs /var/log/syslog, journalctl | grep s3fs, or s3fs outputs)

_If you run s3fs with the dbglevel and curldbg options, you can get detailed debug messages._

```
```

### Details about issue

s3fs-fuse-1.86/.github/PULL_REQUEST_TEMPLATE.md

### Relevant Issue (if applicable)

_If there are issues related to this pull request, please list them._

### Details

_Please describe the details of the pull request._

s3fs-fuse-1.86/.gitignore

# # s3fs - FUSE-based file system backed by Amazon S3 # # Copyright(C) 2007 Randy Rizun # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # # Compiled Object files # *.slo *.lo *.o *.Po *.Plo # # autotools/automake # aclocal.m4 autom4te.cache autoscan.log config.guess config.h config.h.in config.h.in~ config.log config.status config.sub configure configure.scan depcomp install-sh libtool ltmain.sh m4 m4/* missing stamp-h1 Makefile Makefile.in test-driver compile missing # # object directories # .deps .libs */.deps */.deps/* */.libs */.libs/* # # each directories # *.log *.trs default_commit_hash src/s3fs src/test_* test/s3proxy-* # # Local variables: # tab-width: 4 # c-basic-offset: 4 # End: # vim600: noet sw=4 ts=4 fdm=marker # vim<600: noet sw=4 ts=4 #

s3fs-fuse-1.86/.mailmap

Adrian Petrescu Adrian Petrescu Ben Lemasurier Dan Moore Randy Rizun Randy Rizun Takeshi Nakatani

s3fs-fuse-1.86/.travis.yml

# # s3fs - FUSE-based file system backed by Amazon S3 # # Copyright(C) 2007 Randy Rizun # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version.
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # language: cpp matrix: include: - os: linux sudo: required dist: trusty cache: apt before_install: - sudo apt-get update -qq - sudo apt-get install -qq attr cppcheck libfuse-dev openjdk-7-jdk - sudo update-alternatives --set java /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - sudo pip install --upgrade awscli script: - ./autogen.sh - ./configure CPPFLAGS='-I/usr/local/opt/openssl/include' CXXFLAGS='-std=c++03 -DS3FS_PTHREAD_ERRORCHECK=1' - make - make cppcheck - make check -C src - modprobe fuse - make check -C test - test/filter-suite-log.sh test/test-suite.log - os: osx osx_image: xcode9.2 cache: directories: - $HOME/Library/Caches/Homebrew - /usr/local/Homebrew - $HOME/.osx_cache before_cache: - brew cleanup - cd /usr/local/Homebrew; find . \! -regex ".+\.git.+" -delete - mkdir -p $HOME/.osx_cache; touch $HOME/.osx_cache/cached before_install: - TAPS="$(brew --repository)/Library/Taps"; if [ -e "$TAPS/caskroom/homebrew-cask" ]; then rm -rf "$TAPS/caskroom/homebrew-cask"; fi; if [ ! -f $HOME/.osx_cache/cached ]; then brew tap homebrew/homebrew-cask; else HOMEBREW_NO_AUTO_UPDATE=1 brew tap homebrew/homebrew-cask; fi - HOMEBREW_NO_AUTO_UPDATE=1 brew cask install osxfuse - S3FS_BREW_PACKAGES='awscli cppcheck truncate'; for s3fs_brew_pkg in ${S3FS_BREW_PACKAGES}; do brew list | grep -q ${s3fs_brew_pkg}; if [ $? 
-eq 0 ]; then brew outdated | grep -q ${s3fs_brew_pkg} && HOMEBREW_NO_AUTO_UPDATE=1 brew upgrade ${s3fs_brew_pkg}; else HOMEBREW_NO_AUTO_UPDATE=1 brew install ${s3fs_brew_pkg}; fi; done - if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then sudo chmod +s /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then sudo chmod +s /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse; fi - sudo ln -s /usr/local/opt/coreutils/bin/gstdbuf /usr/local/bin/stdbuf script: - ./autogen.sh - PKG_CONFIG_PATH=/usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/openssl/lib/pkgconfig ./configure CXXFLAGS='-std=c++03 -DS3FS_PTHREAD_ERRORCHECK=1' - make - make cppcheck - make check -C src - if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ; fi - make check -C test - test/filter-suite-log.sh test/test-suite.log - os: linux-ppc64le sudo: required dist: trusty cache: apt before_install: - sudo add-apt-repository -y ppa:openjdk-r/ppa - sudo apt-get update -qq - sudo apt-get install -qq attr cppcheck libfuse-dev openjdk-7-jdk - sudo update-alternatives --set java /usr/lib/jvm/java-7-openjdk-ppc64el/jre/bin/java - sudo pip install --upgrade awscli script: - ./autogen.sh - ./configure CPPFLAGS='-I/usr/local/opt/openssl/include' CXXFLAGS='-std=c++03 -DS3FS_PTHREAD_ERRORCHECK=1' - make - make cppcheck - make check -C src - modprobe fuse - make check -C test - test/filter-suite-log.sh test/test-suite.log # # Local variables: # tab-width: 4 # c-basic-offset: 4 # End: # vim600: noet sw=4 ts=4 fdm=marker # vim<600: noet sw=4 ts=4 #

s3fs-fuse-1.86/AUTHORS

1. Randy Rizun Wrote from scratch the initial version of S3FS.
2. Dan Moore Patches and improvements.
3. Adrian Petrescu Converted the project to be autotools-based.
4. Ben LeMasurier Bugfixes, performance and other improvements.
5. Takeshi Nakatani Bugfixes, performance and other improvements.
6. Andrew Gaul Bugfixes, performance and other improvements.

s3fs-fuse-1.86/COMPILATION.md

# Compilation from source code

These generic instructions should work on almost any GNU/Linux, macOS, BSD, or similar system. If you want specific instructions for some distributions, check the [wiki](https://github.com/s3fs-fuse/s3fs-fuse/wiki/Installation-Notes). Keep in mind that pre-built packages are available for many platforms; prefer them when possible.

1. Ensure your system satisfies build and runtime dependencies for:

   * fuse >= 2.8.4
   * automake
   * gcc-c++
   * make
   * libcurl
   * libxml2
   * openssl
   * pkg-config (or your OS equivalent)

2. Then compile from master via the following commands:

```
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
cd s3fs-fuse
./autogen.sh
./configure
make
sudo make install
```

s3fs-fuse-1.86/COPYING

GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). 
Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. 
If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. <signature of Ty Coon>, 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs.
If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License.

s3fs-fuse-1.86/ChangeLog

ChangeLog for S3FS ------------------ Version 1.86 -- 04 Feb, 2020 (major changes only) #965 - enable various optimizations when using modern curl #1002 - allow SSE-C keys to have NUL bytes #1008 - add session token support #1039 - allow large files on 32-bit systems like Raspberry Pi #1049 - fix data corruption when external modification changes a cached object #1063 - fix data corruption when opening a second fd to an unflushed file #1066 - fix clock skew errors when writing large files #1081 - allow concurrent metadata queries during data operations #1098 - use server-side copy for partially modified files #1107 - #1108 - fix multiple concurrency issues #1199 - add requester_pays support #1209 - add symlink cache #1224 - add intelligent_ia storage tier Version 1.85 -- 11 Mar, 2019 #804 - add Backblaze B2 #812 - Fix typo s/mutliple/multiple/ #819 - #691: Made instructions for creating password file more obvious. #820 - Enable big writes if capable #826 - For RPM distributions fuse-libs is enough #831 - Add support for storage class ONEZONE_IA. #832 - Simplify hex conversion #833 - New installation instructions for Fedora >= 27 and CentOS7 #834 - Improve template for issues #835 - Make the compilation instructions generic #840 - Replace all mentions to MacOS X to macOS #849 - Correct typo #851 - Correctly compare list_object_max_keys #852 - Allow credentials from ${HOME}/.aws/credentials #853 - Replace ~ with ${HOME} in examples #855 - Include StackOverflow in FAQs #856 - Add icon for s3fs #859 - Upload S3 parts without batching #861 - Add 'profile' option to command line help.
#865 - fix multihead warning check #866 - Multi-arch support for ppc64le #870 - Correct typos in command-line parsing #874 - Address cppcheck 1.86 errors #877 - Check arguments and environment before .aws/creds #882 - [curl] Assume long encryption keys are base64 encoded #885 - Update s3fs_util.cpp for correspondence of Nextcloud contype #888 - Add Server Fault to FAQs #892 - Repair xattr tests #893 - Store and retrieve file change time #894 - Default uid/gid/mode when object lacks permissions #895 - Emit more friendly error for buckets with dots #898 - Flush file before renaming #899 - Tighten up HTTP response code check #900 - Plug memory leak #901 - Plug memory leaks #902 - Avoid pass-by-value when not necessary #903 - Prefer find(char) over find(const char *) #904 - Remove unnecessary calls to std::string::c_str #905 - Fix comparison in s3fs_strtoofft #906 - Prefer HTTPS links where possible #908 - Added an error message when HTTP 301 status #909 - Ignore after period character of floating point in x-amz-meta-mtime #910 - Added a missing extension to .gitignore, and formatted dot files #911 - Added detail error message when HTTP 301/307 status #912 - Automatic region change made possible other than us-east-1(default) #913 - Prefer abort over assert(false) #914 - Issue readdir HEAD requests without batching #917 - Reference better-known AWS CLI for compatibility #918 - Load tail range during overwrite #919 - Add test for mv non-empty directory #920 - Remove unnecessary string copies #921 - Remove redundant string initializations #923 - Reverted automatic region change and changed messages #924 - Prefer empty over size checks #925 - Remove redundant null checks before delete #926 - Accept paths with : in them #930 - Correct enable_content_md5 docs #931 - Correct sigv2 typo #932 - Prefer AutoLock for synchronization #933 - Remove mirror path when deleting cache #934 - Checked and corrected all typo #937 - Disable malloc_trim #938 - Remove unneeded void parameter #939 - Prefer specific [io]stringstream where possible #940 - Copy parts in parallel #942 - Ensure s3fs compiles with C++03 #943 - Return not supported when hard linking #944 - Repair utility mode #946 - Simplify async request completion code #948 - Add logging for too many parts #949 - Implement exponential backoff for 503 #950 - Added S3FS_MALLOC_TRIM build switch #951 - Added a non-interactive option to utility mode #952 - Automatically abort failed multipart requests #953 - Update s3ql link #954 - Clear containers instead of individual erases #955 - Address miscellaneous clang-tidy warnings #957 - Upgrade to S3Proxy 1.6.1 #958 - Document lack of inotify support #959 - Fixed code for latest cppcheck error on OSX #960 - Wtf8 #961 - Work around cppcheck warnings #965 - Improvement of curl session pool for multipart #967 - Increase FdEntity reference count when returning #969 - Fix lazy typo #970 - Remove from file from stat cache during rename #972 - Add instructions for Amazon Linux #974 - Changed the description order of man page options #975 - Fixed ref-count when error occurred. 
#977 - Make macOS instructions consistent with others Version 1.84 -- Jul 8, 2018 #704 - Update README.md with details about .passwd-s3fs #710 - add disk space reservation #712 - Added Cygwin build options #714 - reduce lock contention on file open #724 - don't fail multirequest on single thread error #726 - add an instance_name option for logging #727 - Fixed Travis CI error about cppcheck - #713 #729 - FreeBSD build fixes #733 - More useful error message for dupe entries in passwd file #739 - cleanup curl handle state on retries #745 - don't fail mkdir when directory exists #753 - fix xpath selector in bucket listing #754 - Validate the URL format for http/https #755 - Added reset curl handle when returning to handle pool #756 - Optimize defaults #761 - Simplify installation for Ubuntu 16.04 #762 - Upgrade to S3Proxy 1.6.0 #763 - cleanup curl handles before curl share #764 - Remove false multihead warnings #765 - Add Debian installation instructions #766 - Remove s3fs-python #768 - Fixed memory leak #769 - Revert "enable FUSE read_sync by default" #774 - Option for IAM authentication endpoint #780 - gnutls_auth: initialize libgcrypt #781 - Fixed an error by cppcheck on OSX #786 - Log messages for 5xx and 4xx HTTP response code #789 - Instructions for SUSE and openSUSE prebuilt packages #793 - Added list_object_max_keys option based on #783 PR Version 1.83 -- Dec 17, 2017 #606 - Add Homebrew instructions #608 - Fix chown_nocopy losing existing uid/gid if unspecified #609 - Group permission checks sometimes fail with large number of groups #611 - Fixed clock_gettime build failure on macOS 10.12 Sierra - #600 #621 - Upgrade to S3Proxy 1.5.3 #627 - Update README.md #630 - Added travis test on osx for #601 #631 - Merged macosx branch into master branch #601 #636 - Fix intermittent upload failures on macOS #637 - Add blurb about non-Amazon S3 implementations #638 - Minor fixes to README #639 - Update Homebrew instructions #642 - Fixed potential atomic violation in S3fsCurl::AddUserAgent - #633 #644 - Fixed with unnecessary equal in POST uploads url argument - #643 #645 - Configure S3Proxy for SSL #646 - Simplify S3Proxy PID handling #652 - Fix s3fs_init message #659 - Do not fail updating directory when removing old-style object(ref #658) #660 - Refixed s3fs_init message(ref #652) #663 - Lock FdEntity when mutating orgmeta #664 - auth headers insertion refactoring #668 - Changed .travis.yml for fixing not found gpg2 on osx #669 - add IBM IAM authentication support #670 - Fixed a bug in S3fsCurl::LocateBundle #671 - Add support for ECS metadata endpoint #675 - Reduce use of preprocessor #676 - Move str definition from header to implementation #677 - Add s3proxy to .gitignore #679 - README.md Addition #681 - Changed functions about reading passwd file #684 - Correct signedness warning #686 - remove use of jsoncpp #688 - Improved use of temporary files - #678 #690 - Added option ecs description to man page #692 - Updated template md files for issue and pr #695 - fix condition for parallel download #697 - Fixing race condition in FdEntity::GetStats #699 - Fix dbglevel usage Version 1.82 -- May 13, 2017 #597 - Not fallback to HTTP - #596 #598 - Updated ChangeLog and configure.ac for release 1.82 Version 1.81 -- May 13, 2017 #426 - Updated to correct ChangeLog #431 - fix typo s/controll/control/ #432 - Include location constraint when creating bucket #433 - Correct search and replace typo #440 - Handled all curl error without exiting process - #437 #443 - Fix for leaks during stat cache entry expiry 
/ truncation (#340) #444 - Add mirror file logic for removing cache file #447 - added fuse package for mounting via /etc/fstab, fixes #417 #449 - Accept mount options compatible with mtab #451 - Correct path in README #454 - Changed for accepting mount options compatible with mtab - #449 #466 - Fixed a bug about could not copy file mode from org file #471 - Added use_xattr option for #467 and #460 #477 - OS-specific correspondence of the extended attribute header #483 - Trim symbolic link original path in file #487 - Split header debugging onto multiple lines for easier reading #488 - Fixed searching Content-Length without case sensitive - #480 #489 - Changed headers_t map using nocase compare function - #488 #494 - Fix typo s/destroied/destroyed/ #495 - Fix invalid V4 signature on multipart copy requests #498 - Upgrade to S3Proxy 1.5.1 #502 - Fixed issue#435 branch codes for remaining bugs(2) #503 - Add missing call to mtime test #504 - Use describe helper function #505 - Correct typos #509 - Use server-provided ETag during complete upload #511 - Fixed a bug about uploading NULL to some part of the file contents #512 - Changed clock_gettime func to s3fs_clock_gettime for homebrew - #468 #513 - Added issue and PR templates. #517 - Update s3fs.1 - removed duplicated word #520 - Added links for eventual consistency in README.md - #515 #539 - Upgrade to S3Proxy 1.5.2 #540 - Address cppcheck 1.77 warnings #545 - Changed base cached time of stat_cache_expire option - #523 #546 - Fixed double initialization of SSL library at foreground #550 - Add umount instruction for unprivileged user #551 - Updated stat_cache_expire option description - #545 #552 - switch S3fsMultiCurl to use foreground threads #553 - add TLS cipher suites customization #554 - cleanup cache directory when running out of disk space #555 - don't sign empty headers (as they are discarded #556 - fix multipart upload handling without cache #557 - Added check_cache_dir_exist option(refixed #347) - #538 #558 - Fixed a bug in logic about truncating stat cache #560 - Fixed about multipart uploading at no free space related to #509 #567 - Do not send ACL unless overridden #576 - Added option for complementing lack of stat mode #578 - Refactored the get_object_attribute function #579 - Added notsup_compat_dir option #580 - Enhanced bucket/path parameter check #582 - Check errors returned in 200 OK responses for put header request #583 - Updated limit object size in s3fs man page #585 - Fixed failure to upload/copy with SSE_C and SSE_KMS #587 - Changed copyright year format for debian pkg #588 - Default transport to HTTPS #590 - Updated man page for default_acl option - #567 #593 - Backward compatible for changing default transport to HTTPS #594 - Check bucket at public bucket and add nocopyapi option automatically #595 - Updated ChangeLog and configure.ac for release 1.81 Version 1.80 -- May 29, 2016 #213 - Parse ETag from copy multipart correctly #215 - Fix mem leak in openssl_auth.cpp:s3fs_sha256hexsum #217 - Override install, so that the make install does not install rename_before_close under /test #219 - Address Coverity errors #220 - Test removing a non-empty directory #221 - Compare idiomatically #222 - Annotate constructors as explicit #224 - Configure cppcheck #229 - Convert rename_before_close to a shell script #231 - Rewrite AutoLock #232 - Always hold stat_cache_lock when using stat_cache #233 - Remove IntToStr #234 - Update README #235 - Plug leak during complete multipart upload #237 - Refactor tests into individual 
functions #238 - Enable all cppcheck rules #239 - Update stale Google Code reference in --help #240 - Enable Content-MD5 during multipart upload part #243 - Run cppcheck during Travis builds #245 - Elide duplicate lookups of std::map via iterators #246 - Unlock during early return in TruncateCache #247 - Base64 cleanup #248 - Enable integration tests for Travis #249 - Silence wget #250 - s3fs can print version with short commit hash - #228 #251 - Skip xattr tests if utilities are missing #252 - This fixes an issue with caching when the creation of a subdirectory … #253 - Added checking cache dir perms at starting. #256 - Add no atomic rename to limitations #257 - Update README.md: Bugfix password file permissions errors #258 - Update README.md to better explain mount upon boot #260 - Wrap help text at 80 characters #261 - Correct help timeouts #263 - Allow integration testing against Amazon S3 #265 - Fix integration tests #266 - Cleanup from PR #265 #267 - Added the _netdev option to the fstab example. #268 - Use 127.0.0.1 not localhost in s3proxy wait loop #271 - Add support for standard_ia storage class #274 - Modified man page for storage_class option(#271) #275 - Changed and cleaned the logic for debug message. #278 - Supported for SSE KMS(#270) #280 - Supported a object which is larger than free disk space #285 - Add test for symlink #288 - Fixed a bug about head request(copy) for SSE - issue#286 #289 - Print source file in log messages #291 - File opened with O_TRUNC is not flushed - Issue #290 #293 - Fix a small spelling issue. #295 - File opened with O_TRUNC is not flushed - changed #291 #300 - Update integration-test-main.sh #302 - Fix syslog level used by S3FS_PRN_EXIT() #304 - Fixed a bug about mtime - #299 #306 - Fix read concurrency to work in parallel count #307 - Fix pthread portability problem #308 - Changed ensure free disk space as additional change for #306 #309 - Check pthread portability in configure as additional change for #307 #310 - Update integration-test-main.sh as additional change for #300 #311 - Change error log to debug log in s3fs_read() #313 - fix gitignore #319 - Clean up mount point on errors in s3fs_init() #321 - delete stat cache entry in s3fs_fsync so st_size is refreshed - #320 #323 - Add goofys to references #328 - Fix v4 signature with use_path_request_style #329 - Correct multiple issues with GET and v4 signing #330 - Pass by const reference where possible #331 - Address various clang warnings #334 - Bucket host should include port and not path #336 - update README.md for fstab #338 - Fixed a bug about IAMCRED type could not be retried. #339 - Updated README.md for fstab example. #341 - Fix the memory leak issue in fdcache. #346 - Fix empty directory check against AWS S3 #348 - Integration test summary, continue on error #350 - Changed cache out logic for stat - #340 #351 - Check cache directory path and attributes - #347 #352 - Remove stat file cache dir if specified del_cache - #337 #354 - Supported regex type for additional header format - #343 #355 - Fixed codes about clock_gettime for osx #356 - Fixed codes about clock_gettime for osx(2) #357 - Fixed codes about clock_gettime for osx(3) #359 - Remove optional parameter from Content-Type header - #358 #360 - Fix clock_gettime autotools detection on Linux #364 - Checked content-type by no case-sensitivity - #363 #371 - Always set stats cache for opened file #372 - Fixed a bug about etag comparison in stats cache, etc. 
#376 - Test for writing after an lseek past end of file #379 - Fixed a bug about writing sparsed file - #375 #385 - fix typo in curl.cpp: s/returing/returning/ #391 - Update s3fs.1 #394 - Revert "Fixed a bug about writing sparsed file - #375" #395 - Fixed writing sparsed file - #375,#379,#394 #397 - Supported User-Agent header - #383 #403 - Fix a bug of truncating empty file #404 - Add curl handler pool to reuse connections #409 - Fixed 'load_sse_c' option not working - #388 #410 - Allow duplicate key in ahbe_conf - #386 #411 - loading IAM role name automatically(iam_role option) - #387 #415 - Fixed a bug about stat_cache_expire - #382 #420 - Skip early credential checks when iam_role=auto #422 - Fixes for iam_role=auto #424 - Added travis CI badge in README.md #425 - Updated ChangeLog and configure.ac for release 1.80 Version 1.79 -- Jul 19, 2015 issue #60 - Emit user-friendly log messages on failed CheckBucket requests issue #62 - Remove stray chars from source files issue #63 - Fix spelling errors issue #68 - FreeBSD issue issue #69 - Address clang always true warnings issue #73 - Small gitignore fixes issue #74 - url: handle scheme omission issue #83 - Changed option processing to use strtol() to get a umask issue #93 - Add simple unit tests for trim functions issue #100 - CURL handles not properly initialized to use DNS or SSL session caching issue #101 - Optimized function "bool directory_empty()" issue #103 - Remove prefix option in s3fs man page - issue#87 issue #104 - fix rename before close issue #116 - Supported signature version 4 issue #119 - Added new mp_umask option about issue#107, pr#110 issue #124 - Fallback to v2 signatures correctly. issue #130 - refactor integration tests create/cleanup file issue #131 - Test ls issue #132 - Use S3Proxy to run integration tests issue #134 - Include Content-Type in complete MPU V2 signature issue #135 - Correct V4 signature for initiate multipart upload issue #136 - Small fixes to integration tests issue #137 - Add test for multi-part upload issue #138 - Fixed bugs, not turn use_cache off and ty to load to end - issue#97 issue #143 - Fixed a bug no use_cache case about fixed #138 - issue#141 issue #144 - Add Travis configuration issue #146 - add exit handler to cleanup on failures issue #147 - Use S3Proxy 1.4.0-SNAPSHOT issue #150 - Fixed a bug not handling fsync - #145 issue #154 - Fixed url-encoding for ampersand etc on sigv4 - Improvement/#149 issue #155 - Fixed a bug: unable to mount bucket subdirectory issue #156 - Fixed a bug about ssl session sharing with libcurl older 7.23.0 - issue#126 issue #159 - Upgrade to S3Proxy 1.4.0 issue #164 - send the correct Host header when using -o url issue #165 - Auth v4 refactor issue #167 - Increased default connecting/reading/writing timeout value issue #168 - switch to use region specific endpoints to compute correct v4 signature issue #170 - Reviewed and fixed response codes print in curl.cpp - #157 issue #171 - Support buckets with mixed-case names issue #173 - Run integration tests via Travis issue #176 - configure.ac: detect target, if target is darwin (OSX), then #176 issue #177 - Add .mailmap issue #178 - Update .gitignore issue #184 - Add usage information for multipart_size issue #185 - Correct obvious typos in usage and README issue #190 - Add a no_check_certificate option. 
issue #194 - Tilda in a file-name breaks things (EPERM) issue #198 - Disable integration tests for Travis issue #199 - Supported extended attributes(retry) issue #200 - fixed fallback to sigv2 for bucket create and GCS issue #202 - Specialize {set,get}xattr for OS X issue #204 - Add integration test for xattr issue #207 - Fixed a few small spelling issues. Version 1.78 -- Sep 15, 2014 issue #29 - Possible to create Debian/Ubuntu packages?(googlecode issue 109) issue 417(googlecode) - Password file with DOS format is not handled properly issue #41 - Failed making signature issue #40 - Moving a directory containing more than 1000 files truncates the directory issue #49 - use_sse is ignored when creating new files issue #39 - Support for SSE-C issue #50 - Cannot find pkg-config when configured with any SSL backend except openssl Version 1.77 -- Apr 19, 2014 issue 405(googlecode) - enable_content_md5 Input/output error issue #14 - s3fs -u should return 0 if there are no lost multiparts issue #16 - empty file is written to s3 issue #18 - s3fs crashes with segfault issue #22 - Fix typos in docs for max_stat_cache_size issue #23 - curl ssl problems issue #28 - Address signedness warning in FdCache::Init Version 1.76 -- Jan 21, 2014 issue #5 - du shows incorrect usage stats issue #8 - version in configure.ac is 1.74 for release 1.75 Version 1.75 -- Jan 6, 2014 issue #1 - Using %20 instead of the plus (+) sign for encoding spaces issue #3 - Fixed local timezone was incorrectly being applied to IAM and Last-Modified dates. issue #4 - Fix compilation error on MacOSX with missing const Version 1.74 -- Nov 24, 2013 This version is initial version on Github, same as on GoogleCodes(s3fs). https://github.com/s3fs-fuse/s3fs-fuse/releases/tag/v1.74 see more detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.74.tar.gz Version 1.73 -- Aug 23, 2013 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.73.tar.gz Version 1.72 -- Aug 10, 2013 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.72.tar.gz Version 1.71 -- Jun 15, 2013 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.71.tar.gz Version 1.70 -- Jun 01, 2013 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.70.tar.gz Version 1.69 -- May 15, 2013 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.69.tar.gz Version 1.68 -- Apr 30, 2013 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.68.tar.gz Version 1.67 -- Apr 13, 2013 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.67.tar.gz Version 1.66 -- Apr 06, 2013 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.66.tar.gz Version 1.65 -- Mar 30, 2013 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.65.tar.gz Version 1.64 -- Mar 23, 2013 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.64.tar.gz Version 1.63 -- Feb 24, 2013 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.63.tar.gz Version 1.62 -- Jan 27, 2013 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.62.tar.gz Version 1.61 -- Aug 30, 2011 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.61.tar.gz Version 1.60 -- Aug 29, 2011 see detail on 
googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.60.tar.gz Version 1.59 -- Jul 28, 2011 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.59.tar.gz Version 1.58 -- Jul 19, 2011 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.58.tar.gz Version 1.57 -- Jul 07, 2011 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.57.tar.gz Version 1.56 -- Jul 07, 2011 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.56.tar.gz Version 1.55 -- Jul 02, 2011 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.55.tar.gz Version 1.54 -- Jun 25, 2011 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.54.tar.gz Version 1.53 -- Jun 22, 2011 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.53.tar.gz Version 1.40 -- Feb 11, 2011 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.40.tar.gz Version 1.33 -- Dec 30, 2010 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.33.tar.gz Version 1.25 -- Dec 16, 2010 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.25.tar.gz Version 1.19 -- Dec 2, 2010 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.19.tar.gz Version 1.16 -- Nov 22, 2010 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.16.tar.gz Version 1.10 -- Nov 6, 2010 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.10.tar.gz Version 1.02 -- Oct 29, 2010 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.02.tar.gz Version 1.01 -- Oct 28, 2010 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.01.tar.gz Version 1.0 -- Oct 24, 2010 see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.0.tar.gz ------ Version 1.1 -- Mon Oct 18 2010 Dan Moore reopens the project and fixes various issues that had accumulated in the tracker. Adrian Petrescu converts the project to autotools and posts it to GitHub. Version 1.0 -- 2008 Randy Rizun releases a basic version of S3FS on Google Code.

s3fs-fuse-1.86/INSTALL

Installation Instructions
*************************

Copyright (C) 1994, 1995, 1996, 1999, 2000, 2001, 2002, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc. Copying and distribution of this file, with or without modification, are permitted in any medium without royalty provided the copyright notice and this notice are preserved. This file is offered as-is, without warranty of any kind.

Basic Installation
==================

Briefly, the shell commands `./configure; make; make install' should configure, build, and install this package. The following more-detailed instructions are generic; see the `README' file for instructions specific to this package. Some packages provide this `INSTALL' file but do not implement all of the features documented below. The lack of an optional feature in a given package is not necessarily a bug. More recommendations for GNU packages can be found in *note Makefile Conventions: (standards)Makefile Conventions.
The `configure' shell script attempts to guess correct values for various system-dependent variables used during compilation. It uses those values to create a `Makefile' in each directory of the package. It may also create one or more `.h' files containing system-dependent definitions. Finally, it creates a shell script `config.status' that you can run in the future to recreate the current configuration, and a file `config.log' containing compiler output (useful mainly for debugging `configure').

It can also use an optional file (typically called `config.cache' and enabled with `--cache-file=config.cache' or simply `-C') that saves the results of its tests to speed up reconfiguring. Caching is disabled by default to prevent problems with accidental use of stale cache files.

If you need to do unusual things to compile the package, please try to figure out how `configure' could check whether to do them, and mail diffs or instructions to the address given in the `README' so they can be considered for the next release. If you are using the cache, and at some point `config.cache' contains results you don't want to keep, you may remove or edit it.

The file `configure.ac' (or `configure.in') is used to create `configure' by a program called `autoconf'. You need `configure.ac' if you want to change it or regenerate `configure' using a newer version of `autoconf'.

The simplest way to compile this package is:

1. `cd' to the directory containing the package's source code and type `./configure' to configure the package for your system. Running `configure' might take a while. While running, it prints some messages telling which features it is checking for.

2. Type `make' to compile the package.

3. Optionally, type `make check' to run any self-tests that come with the package, generally using the just-built uninstalled binaries.

4. Type `make install' to install the programs and any data files and documentation. When installing into a prefix owned by root, it is recommended that the package be configured and built as a regular user, and only the `make install' phase executed with root privileges.

5. Optionally, type `make installcheck' to repeat any self-tests, but this time using the binaries in their final installed location. This target does not install anything. Running this target as a regular user, particularly if the prior `make install' required root privileges, verifies that the installation completed correctly.

6. You can remove the program binaries and object files from the source code directory by typing `make clean'. To also remove the files that `configure' created (so you can compile the package for a different kind of computer), type `make distclean'. There is also a `make maintainer-clean' target, but that is intended mainly for the package's developers. If you use it, you may have to get all sorts of other programs in order to regenerate files that came with the distribution.

7. Often, you can also type `make uninstall' to remove the installed files again. In practice, not all packages have tested that uninstallation works correctly, even though it is required by the GNU Coding Standards.

8. Some packages, particularly those that use Automake, provide `make distcheck', which can be used by developers to test that all other targets like `make install' and `make uninstall' work correctly. This target is generally not run by end users.
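As a concrete illustration, the whole cycle above for a typical build from an unpacked source tree might look like the following shell session (the use of `sudo' to gain root privileges for the install phase is only an example, and the optional targets can be skipped; not every package implements all of them):

     ./configure
     make
     make check          # optional self-tests
     sudo make install
     make installcheck   # optional check of the installed programs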
Compilers and Options
=====================

   Some systems require unusual options for compilation or linking that
the `configure' script does not know about.  Run `./configure --help'
for details on some of the pertinent environment variables.

   You can give `configure' initial values for configuration parameters
by setting variables in the command line or in the environment.  Here
is an example:

     ./configure CC=c99 CFLAGS=-g LIBS=-lposix

   *Note Defining Variables::, for more details.

Compiling For Multiple Architectures
====================================

   You can compile the package for more than one kind of computer at the
same time, by placing the object files for each architecture in their
own directory.  To do this, you can use GNU `make'.  `cd' to the
directory where you want the object files and executables to go and run
the `configure' script.  `configure' automatically checks for the
source code in the directory that `configure' is in and in `..'.  This
is known as a "VPATH" build.

   With a non-GNU `make', it is safer to compile the package for one
architecture at a time in the source code directory.  After you have
installed the package for one architecture, use `make distclean' before
reconfiguring for another architecture.

   On macOS 10.5 and later systems, you can create libraries and
executables that work on multiple system types--known as "fat" or
"universal" binaries--by specifying multiple `-arch' options to the
compiler but only a single `-arch' option to the preprocessor.  Like
this:

     ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \
                 CXX="g++ -arch i386 -arch x86_64 -arch ppc -arch ppc64" \
                 CPP="gcc -E" CXXCPP="g++ -E"

   This is not guaranteed to produce working output in all cases; you
may have to build one architecture at a time and combine the results
using the `lipo' tool if you have problems.

Installation Names
==================

   By default, `make install' installs the package's commands under
`/usr/local/bin', include files under `/usr/local/include', etc.  You
can specify an installation prefix other than `/usr/local' by giving
`configure' the option `--prefix=PREFIX', where PREFIX must be an
absolute file name.

   You can specify separate installation prefixes for
architecture-specific files and architecture-independent files.  If you
pass the option `--exec-prefix=PREFIX' to `configure', the package uses
PREFIX as the prefix for installing programs and libraries.
Documentation and other data files still use the regular prefix.

   In addition, if you use an unusual directory layout you can give
options like `--bindir=DIR' to specify different values for particular
kinds of files.  Run `configure --help' for a list of the directories
you can set and what kinds of files go in them.  In general, the
default for these options is expressed in terms of `${prefix}', so that
specifying just `--prefix' will affect all of the other directory
specifications that were not explicitly provided.

   The most portable way to affect installation locations is to pass the
correct locations to `configure'; however, many packages provide one or
both of the following shortcuts of passing variable assignments to the
`make install' command line to change installation locations without
having to reconfigure or recompile.

   The first method involves providing an override variable for each
affected directory.  For example, `make install
prefix=/alternate/directory' will choose an alternate location for all
directory configuration variables that were expressed in terms of
`${prefix}'.
Any directories that were specified during `configure', but not in
terms of `${prefix}', must each be overridden at install time for the
entire installation to be relocated.  The approach of makefile variable
overrides for each directory variable is required by the GNU Coding
Standards, and ideally causes no recompilation.  However, some
platforms have known limitations with the semantics of shared libraries
that end up requiring recompilation when using this method,
particularly noticeable in packages that use GNU Libtool.

   The second method involves providing the `DESTDIR' variable.  For
example, `make install DESTDIR=/alternate/directory' will prepend
`/alternate/directory' before all installation names.  The approach of
`DESTDIR' overrides is not required by the GNU Coding Standards, and
does not work on platforms that have drive letters.  On the other hand,
it does better at avoiding recompilation issues, and works well even
when some directory options were not specified in terms of `${prefix}'
at `configure' time.

Optional Features
=================

   If the package supports it, you can cause programs to be installed
with an extra prefix or suffix on their names by giving `configure' the
option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'.

   Some packages pay attention to `--enable-FEATURE' options to
`configure', where FEATURE indicates an optional part of the package.
They may also pay attention to `--with-PACKAGE' options, where PACKAGE
is something like `gnu-as' or `x' (for the X Window System).  The
`README' should mention any `--enable-' and `--with-' options that the
package recognizes.

   For packages that use the X Window System, `configure' can usually
find the X include and library files automatically, but if it doesn't,
you can use the `configure' options `--x-includes=DIR' and
`--x-libraries=DIR' to specify their locations.

   Some packages offer the ability to configure how verbose the
execution of `make' will be.  For these packages, running `./configure
--enable-silent-rules' sets the default to minimal output, which can be
overridden with `make V=1'; while running `./configure
--disable-silent-rules' sets the default to verbose, which can be
overridden with `make V=0'.

Particular systems
==================

   On HP-UX, the default C compiler is not ANSI C compatible.  If GNU
CC is not installed, it is recommended to use the following options in
order to use an ANSI C compiler:

     ./configure CC="cc -Ae -D_XOPEN_SOURCE=500"

and if that doesn't work, install pre-built binaries of GCC for HP-UX.

   On OSF/1 a.k.a. Tru64, some versions of the default C compiler cannot
parse its `<wchar.h>' header file.  The option `-nodtk' can be used as
a workaround.  If GNU CC is not installed, it is therefore recommended
to try

     ./configure CC="cc"

and if that doesn't work, try

     ./configure CC="cc -nodtk"

   On Solaris, don't put `/usr/ucb' early in your `PATH'.  This
directory contains several dysfunctional programs; working variants of
these programs are available in `/usr/bin'.  So, if you need `/usr/ucb'
in your `PATH', put it _after_ `/usr/bin'.

   On Haiku, software installed for all users goes in `/boot/common',
not `/usr/local'.  It is recommended to use the following options:

     ./configure --prefix=/boot/common

Specifying the System Type
==========================

   There may be some features `configure' cannot figure out
automatically, but needs to determine by the type of machine the package
will run on.
Usually, assuming the package is built to be run on the _same_
architectures, `configure' can figure that out, but if it prints a
message saying it cannot guess the machine type, give it the
`--build=TYPE' option.  TYPE can either be a short name for the system
type, such as `sun4', or a canonical name which has the form:

     CPU-COMPANY-SYSTEM

where SYSTEM can have one of these forms:

     OS
     KERNEL-OS

   See the file `config.sub' for the possible values of each field.  If
`config.sub' isn't included in this package, then this package doesn't
need to know the machine type.

   If you are _building_ compiler tools for cross-compiling, you should
use the option `--target=TYPE' to select the type of system they will
produce code for.

   If you want to _use_ a cross compiler, that generates code for a
platform different from the build platform, you should specify the
"host" platform (i.e., that on which the generated programs will
eventually be run) with `--host=TYPE'.

Sharing Defaults
================

   If you want to set default values for `configure' scripts to share,
you can create a site shell script called `config.site' that gives
default values for variables like `CC', `cache_file', and `prefix'.
`configure' looks for `PREFIX/share/config.site' if it exists, then
`PREFIX/etc/config.site' if it exists.  Or, you can set the
`CONFIG_SITE' environment variable to the location of the site script.
A warning: not all `configure' scripts look for a site script.

Defining Variables
==================

   Variables not defined in a site shell script can be set in the
environment passed to `configure'.  However, some packages may run
configure again during the build, and the customized values of these
variables may be lost.  In order to avoid this problem, you should set
them in the `configure' command line, using `VAR=value'.  For example:

     ./configure CC=/usr/local2/bin/gcc

causes the specified `gcc' to be used as the C compiler (unless it is
overridden in the site shell script).

Unfortunately, this technique does not work for `CONFIG_SHELL' due to an
Autoconf bug.  Until the bug is fixed you can use this workaround:

     CONFIG_SHELL=/bin/bash /bin/bash ./configure CONFIG_SHELL=/bin/bash

`configure' Invocation
======================

   `configure' recognizes the following options to control how it
operates.

`--help'
`-h'
     Print a summary of all of the options to `configure', and exit.

`--help=short'
`--help=recursive'
     Print a summary of the options unique to this package's
     `configure', and exit.  The `short' variant lists options used
     only in the top level, while the `recursive' variant lists options
     also present in any nested packages.

`--version'
`-V'
     Print the version of Autoconf used to generate the `configure'
     script, and exit.

`--cache-file=FILE'
     Enable the cache: use and save the results of the tests in FILE,
     traditionally `config.cache'.  FILE defaults to `/dev/null' to
     disable caching.

`--config-cache'
`-C'
     Alias for `--cache-file=config.cache'.

`--quiet'
`--silent'
`-q'
     Do not print messages saying which checks are being made.  To
     suppress all normal output, redirect it to `/dev/null' (any error
     messages will still be shown).

`--srcdir=DIR'
     Look for the package's source code in directory DIR.  Usually
     `configure' can determine that directory automatically.

`--prefix=DIR'
     Use DIR as the installation prefix.  *note Installation Names::
     for more details, including other options available for fine-tuning
     the installation locations.

`--no-create'
`-n'
     Run the configure checks, but stop before creating any output
     files.
`configure' also accepts some other, not widely useful, options.  Run
`configure --help' for more details.
s3fs-fuse-1.86/Makefile.am000066400000000000000000000032551361654130000153260ustar00rootroot00000000000000######################################################################
# s3fs - FUSE-based file system backed by Amazon S3
#
# Copyright 2007-2008 Randy Rizun
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
######################################################################
SUBDIRS=src test doc

EXTRA_DIST=doc default_commit_hash

dist-hook:
	rm -rf `find $(distdir)/doc -type d -name .svn`
	rm -f `find $(distdir)/doc -type f -name Makefile`

release : dist ../utils/release.sh
	../utils/release.sh $(DIST_ARCHIVES)

cppcheck:
	cppcheck --quiet --error-exitcode=1 \
		--inline-suppr \
		--std=c++03 \
		--xml \
		-D HAVE_ATTR_XATTR_H \
		-D HAVE_SYS_EXTATTR_H \
		-D HAVE_MALLOC_TRIM \
		-U CURLE_PEER_FAILED_VERIFICATION \
		-U P_tmpdir \
		-U ENOATTR \
		--enable=warning,style,information,missingInclude \
		--suppress=missingIncludeSystem \
		--suppress=unmatchedSuppression \
		src/ test/
s3fs-fuse-1.86/README.md000066400000000000000000000121571361654130000145520ustar00rootroot00000000000000# s3fs

s3fs allows Linux and macOS to mount an S3 bucket via FUSE.
s3fs preserves the native object format for files, allowing use of other tools like [AWS CLI](https://github.com/aws/aws-cli).

[![Build Status](https://travis-ci.org/s3fs-fuse/s3fs-fuse.svg?branch=master)](https://travis-ci.org/s3fs-fuse/s3fs-fuse)

## Features

* large subset of POSIX including reading/writing files, directories, symlinks, mode, uid/gid, and extended attributes
* compatible with Amazon S3, Google Cloud Storage, and other S3-based object stores
* large files via multi-part upload
* renames via server-side copy
* optional server-side encryption
* data integrity via MD5 hashes
* in-memory metadata caching
* local disk data caching
* user-specified regions, including Amazon GovCloud
* authenticate via v2 or v4 signatures

## Installation

Many systems provide pre-built packages:

* Amazon Linux via EPEL:

  ```
  sudo amazon-linux-extras install epel
  sudo yum install s3fs-fuse
  ```

* Debian 9 and Ubuntu 16.04 or newer:

  ```
  sudo apt install s3fs
  ```

* Fedora 27 or newer:

  ```
  sudo dnf install s3fs-fuse
  ```

* Gentoo:

  ```
  sudo emerge net-fs/s3fs
  ```

* RHEL and CentOS 7 or newer via EPEL:

  ```
  sudo yum install epel-release
  sudo yum install s3fs-fuse
  ```

* SUSE 12 and openSUSE 42.1 or newer:

  ```
  sudo zypper install s3fs
  ```

* macOS via [Homebrew](https://brew.sh/):

  ```
  brew cask install osxfuse
  brew install s3fs
  ```

Otherwise consult the [compilation instructions](COMPILATION.md).

## Examples

s3fs supports the standard [AWS credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-config-files.html) stored in `${HOME}/.aws/credentials`.  Alternatively, s3fs supports a custom passwd file.
The s3fs password file can be created in either of two default locations:

* a `.passwd-s3fs` file in the user's home directory (i.e. `${HOME}/.passwd-s3fs`)
* the system-wide `/etc/passwd-s3fs` file

Enter your credentials in a file `${HOME}/.passwd-s3fs` and set owner-only permissions:

```
echo ACCESS_KEY_ID:SECRET_ACCESS_KEY > ${HOME}/.passwd-s3fs
chmod 600 ${HOME}/.passwd-s3fs
```

Run s3fs with an existing bucket `mybucket` and directory `/path/to/mountpoint`:

```
s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs
```

If you encounter any errors, enable debug output:

```
s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs -o dbglevel=info -f -o curldbg
```

You can also mount on boot by adding the following line to `/etc/fstab`:

```
s3fs#mybucket /path/to/mountpoint fuse _netdev,allow_other 0 0
```

or

```
mybucket /path/to/mountpoint fuse.s3fs _netdev,allow_other 0 0
```

If you use s3fs with a non-Amazon S3 implementation, specify the URL and path-style requests:

```
s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs -o url=https://url.to.s3/ -o use_path_request_style
```

or (fstab)

```
s3fs#mybucket /path/to/mountpoint fuse _netdev,allow_other,use_path_request_style,url=https://url.to.s3/ 0 0
```

To use IBM IAM Authentication, use the `-o ibm_iam_auth` option, and specify the Service Instance ID and API Key in your credentials file:

```
echo SERVICEINSTANCEID:APIKEY > /path/to/passwd
```

The Service Instance ID is only required when using the `-o create_bucket` option.

Note: You may also want to create the global credential file first:

```
echo ACCESS_KEY_ID:SECRET_ACCESS_KEY > /etc/passwd-s3fs
chmod 600 /etc/passwd-s3fs
```

Note2: You may also need to make sure the `netfs` service is started on boot.

## Limitations

Generally S3 cannot offer the same performance or semantics as a local file system.  More specifically:

* random writes or appends to files require rewriting the entire file
* metadata operations such as listing directories have poor performance due to network latency
* [eventual consistency](https://en.wikipedia.org/wiki/Eventual_consistency) can temporarily yield stale data ([Amazon S3 Data Consistency Model](https://docs.aws.amazon.com/AmazonS3/latest/dev/Introduction.html#ConsistencyModel))
* no atomic renames of files or directories
* no coordination between multiple clients mounting the same bucket
* no hard links
* inotify detects only local modifications, not external ones by other clients or tools

## References

* [goofys](https://github.com/kahing/goofys) - similar to s3fs but has better performance and less POSIX compatibility
* [s3backer](https://github.com/archiecobbs/s3backer) - mount an S3 bucket as a single file
* [S3Proxy](https://github.com/gaul/s3proxy) - combine with s3fs to mount Backblaze B2, EMC Atmos, Microsoft Azure, and OpenStack Swift buckets
* [s3ql](https://github.com/s3ql/s3ql/) - similar to s3fs but uses its own object format
* [YAS3FS](https://github.com/danilop/yas3fs) - similar to s3fs but uses SNS to allow multiple clients to mount a bucket

## Frequently Asked Questions

* [FAQ wiki page](https://github.com/s3fs-fuse/s3fs-fuse/wiki/FAQ)
* [s3fs on Stack Overflow](https://stackoverflow.com/questions/tagged/s3fs)
* [s3fs on Server Fault](https://serverfault.com/questions/tagged/s3fs)

## License

Copyright (C) 2010 Randy Rizun

Licensed under the GNU GPL version 2
s3fs-fuse-1.86/autogen.sh000077500000000000000000000023111361654130000152630ustar00rootroot00000000000000#! /bin/sh
# This file is part of S3FS.
#
# Copyright 2009, 2010 Free Software Foundation, Inc.
#
# S3FS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# S3FS is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see http://www.gnu.org/licenses/.
#
# See the file ChangeLog for a revision history.

echo "--- Make commit hash file -------"

SHORTHASH="unknown"
type git > /dev/null 2>&1
if [ $? -eq 0 -a -d .git ]; then
	RESULT=`git rev-parse --short HEAD`
	if [ $? -eq 0 ]; then
		SHORTHASH=${RESULT}
	fi
fi
echo ${SHORTHASH} > default_commit_hash

echo "--- Finished commit hash file ---"

echo "--- Start autotools -------------"

aclocal \
&& autoheader \
&& automake --add-missing \
&& autoconf

echo "--- Finished autotools ----------"

exit 0
s3fs-fuse-1.86/configure.ac000066400000000000000000000240161361654130000155560ustar00rootroot00000000000000######################################################################
# s3fs - FUSE-based file system backed by Amazon S3
#
# Copyright 2007-2008 Randy Rizun
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
######################################################################
dnl Process this file with autoconf to produce a configure script.
AC_PREREQ(2.59)
AC_INIT(s3fs, 1.86)
AC_CONFIG_HEADER([config.h])

AC_CANONICAL_SYSTEM
AM_INIT_AUTOMAKE([foreign])

AC_PROG_CXX
AC_PROG_CC

AC_CHECK_HEADERS([sys/xattr.h])
AC_CHECK_HEADERS([attr/xattr.h])
AC_CHECK_HEADERS([sys/extattr.h])

CXXFLAGS="$CXXFLAGS -Wall -D_FILE_OFFSET_BITS=64 -D_FORTIFY_SOURCE=2"

dnl ----------------------------------------------
dnl For macOS
dnl ----------------------------------------------
case "$target" in
   *-cygwin* )
      # Do something specific for windows using winfsp
      CXXFLAGS="$CXXFLAGS -D_GNU_SOURCE=1"
      min_fuse_version=2.8
      ;;
   *-darwin* )
      # Do something specific for mac
      min_fuse_version=2.7.3
      ;;
   *)
      # Default Case
      # assume other supported linux system
      min_fuse_version=2.8.4
      ;;
esac

dnl ----------------------------------------------
dnl Choice SSL library
dnl ----------------------------------------------
auth_lib=na
nettle_lib=no

dnl
dnl nettle library
dnl
AC_MSG_CHECKING([s3fs build with nettle(GnuTLS)])
AC_ARG_WITH(
   nettle,
   [AS_HELP_STRING([--with-nettle], [s3fs build with nettle in GnuTLS(default no)])],
   [
      case "${withval}" in
         yes)
            AC_MSG_RESULT(yes)
            nettle_lib=yes
            ;;
         *)
            AC_MSG_RESULT(no)
            ;;
      esac
   ],
   [
      AC_MSG_RESULT(no)
   ])

dnl
dnl use openssl library for ssl
dnl
AC_MSG_CHECKING([s3fs build with OpenSSL])
AC_ARG_WITH(
   openssl,
   [AS_HELP_STRING([--with-openssl], [s3fs build with OpenSSL(default is no)])],
   [
      case "${withval}" in
         yes)
            AC_MSG_RESULT(yes)
            AS_IF(
               [test $nettle_lib = no],
               [auth_lib=openssl],
               [AC_MSG_ERROR([could not set openssl with nettle, nettle is only for gnutls library])])
            ;;
         *)
            AC_MSG_RESULT(no)
            ;;
      esac
   ],
   [
      AC_MSG_RESULT(no)
   ])

dnl
dnl use GnuTLS library for ssl
dnl
AC_MSG_CHECKING([s3fs build with GnuTLS])
AC_ARG_WITH(
   gnutls,
   [AS_HELP_STRING([--with-gnutls], [s3fs build with GnuTLS(default is no)])],
   [
      case "${withval}" in
         yes)
            AC_MSG_RESULT(yes)
            AS_IF(
               [test $auth_lib = na],
               [
                  AS_IF(
                     [test $nettle_lib = no],
                     [auth_lib=gnutls],
                     [auth_lib=nettle])
               ],
               [AC_MSG_ERROR([could not set gnutls because already set another ssl library])])
            ;;
         *)
            AC_MSG_RESULT(no)
            ;;
      esac
   ],
   [
      AC_MSG_RESULT(no)
   ])

dnl
dnl use nss library for ssl
dnl
AC_MSG_CHECKING([s3fs build with NSS])
AC_ARG_WITH(
   nss,
   [AS_HELP_STRING([--with-nss], [s3fs build with NSS(default is no)])],
   [
      case "${withval}" in
         yes)
            AC_MSG_RESULT(yes)
            AS_IF(
               [test $auth_lib = na],
               [
                  AS_IF(
                     [test $nettle_lib = no],
                     [auth_lib=nss],
                     [AC_MSG_ERROR([could not set openssl with nettle, nettle is only for gnutls library])])
               ],
               [AC_MSG_ERROR([could not set nss because already set another ssl library])])
            ;;
         *)
            AC_MSG_RESULT(no)
            ;;
      esac
   ],
   [
      AC_MSG_RESULT(no)
   ])

AS_IF(
   [test $auth_lib = na],
   AS_IF(
      [test $nettle_lib = no],
      [auth_lib=openssl],
      [AC_MSG_ERROR([could not set nettle without GnuTLS library])]
   )
)

dnl
dnl For PKG_CONFIG before checking nss/gnutls.
dnl this is redundant checking, but we need checking before following.
dnl
PKG_CHECK_MODULES([common_lib_checking], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 ])

AC_MSG_CHECKING([compile s3fs with])
case "${auth_lib}" in
openssl)
   AC_MSG_RESULT(OpenSSL)
   PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ])
   ;;
gnutls)
   AC_MSG_RESULT(GnuTLS-gcrypt)
   gnutls_nettle=""
   AC_CHECK_LIB(gnutls, gcry_control, [gnutls_nettle=0])
   AS_IF([test "$gnutls_nettle" = ""], [AC_CHECK_LIB(gcrypt, gcry_control, [gnutls_nettle=0])])
   AS_IF([test $gnutls_nettle = 0],
      [
         PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ])
         LIBS="-lgnutls -lgcrypt $LIBS"
         AC_MSG_CHECKING([gnutls is build with])
         AC_MSG_RESULT(gcrypt)
      ],
      [AC_MSG_ERROR([GnuTLS found, but gcrypt not found])])
   ;;
nettle)
   AC_MSG_RESULT(GnuTLS-nettle)
   gnutls_nettle=""
   AC_CHECK_LIB(gnutls, nettle_MD5Init, [gnutls_nettle=1])
   AS_IF([test "$gnutls_nettle" = ""], [AC_CHECK_LIB(nettle, nettle_MD5Init, [gnutls_nettle=1])])
   AS_IF([test $gnutls_nettle = 1],
      [
         PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ])
         LIBS="-lgnutls -lnettle $LIBS"
         AC_MSG_CHECKING([gnutls is build with])
         AC_MSG_RESULT(nettle)
      ],
      [AC_MSG_ERROR([GnuTLS found, but nettle not found])])
   ;;
nss)
   AC_MSG_RESULT(NSS)
   PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ])
   ;;
*)
   AC_MSG_ERROR([unknown ssl library type.])
   ;;
esac

AM_CONDITIONAL([USE_SSL_OPENSSL], [test "$auth_lib" = openssl])
AM_CONDITIONAL([USE_SSL_GNUTLS], [test "$auth_lib" = gnutls -o "$auth_lib" = nettle])
AM_CONDITIONAL([USE_GNUTLS_NETTLE], [test "$auth_lib" = nettle])
AM_CONDITIONAL([USE_SSL_NSS], [test "$auth_lib" = nss])

dnl ----------------------------------------------
dnl check functions
dnl ----------------------------------------------
dnl malloc_trim function
AC_CHECK_FUNCS([malloc_trim])

dnl clock_gettime function(macos)
AC_SEARCH_LIBS([clock_gettime],[rt posix4])
AC_CHECK_FUNCS([clock_gettime])

dnl ----------------------------------------------
dnl check symbols/macros/enums
dnl ----------------------------------------------
dnl PTHREAD_MUTEX_RECURSIVE
AC_MSG_CHECKING([pthread mutex recursive])
AC_COMPILE_IFELSE(
   [AC_LANG_PROGRAM([[#include <pthread.h>]],
                    [[int i = PTHREAD_MUTEX_RECURSIVE;]])
   ],
   [AC_DEFINE(S3FS_MUTEX_RECURSIVE, PTHREAD_MUTEX_RECURSIVE, [Define if you have PTHREAD_MUTEX_RECURSIVE])
    AC_MSG_RESULT(PTHREAD_MUTEX_RECURSIVE)
   ],
   [AC_COMPILE_IFELSE(
       [AC_LANG_PROGRAM([[#include <pthread.h>]],
                        [[int i = PTHREAD_MUTEX_RECURSIVE_NP;]])
       ],
       [AC_DEFINE(S3FS_MUTEX_RECURSIVE, PTHREAD_MUTEX_RECURSIVE_NP, [Define if you have PTHREAD_MUTEX_RECURSIVE_NP])
        AC_MSG_RESULT(PTHREAD_MUTEX_RECURSIVE_NP)
       ],
       [AC_MSG_ERROR([do not have PTHREAD_MUTEX_RECURSIVE symbol])])
   ]
)

dnl ----------------------------------------------
dnl check CURLoption
dnl ----------------------------------------------
dnl CURLOPT_TCP_KEEPALIVE (is supported by 7.25.0 and later)
AC_MSG_CHECKING([checking CURLOPT_TCP_KEEPALIVE])
AC_COMPILE_IFELSE(
   [AC_LANG_PROGRAM([[#include <curl/curl.h>]],
                    [[CURLoption opt = CURLOPT_TCP_KEEPALIVE;]])
   ],
   [AC_DEFINE(HAVE_CURLOPT_TCP_KEEPALIVE, 1, [Define to 1 if libcurl has CURLOPT_TCP_KEEPALIVE CURLoption])
    AC_MSG_RESULT(yes)
   ],
   [AC_DEFINE(HAVE_CURLOPT_TCP_KEEPALIVE, 0, [Define to 1 if libcurl has CURLOPT_TCP_KEEPALIVE CURLoption])
    AC_MSG_RESULT(no)
   ]
)

dnl CURLOPT_SSL_ENABLE_ALPN (is supported by 7.36.0 and later)
AC_MSG_CHECKING([checking CURLOPT_SSL_ENABLE_ALPN])
AC_COMPILE_IFELSE(
   [AC_LANG_PROGRAM([[#include <curl/curl.h>]],
                    [[CURLoption opt = CURLOPT_SSL_ENABLE_ALPN;]])
   ],
   [AC_DEFINE(HAVE_CURLOPT_SSL_ENABLE_ALPN, 1, [Define to 1 if libcurl has CURLOPT_SSL_ENABLE_ALPN CURLoption])
    AC_MSG_RESULT(yes)
   ],
   [AC_DEFINE(HAVE_CURLOPT_SSL_ENABLE_ALPN, 0, [Define to 1 if libcurl has CURLOPT_SSL_ENABLE_ALPN CURLoption])
    AC_MSG_RESULT(no)
   ]
)

dnl CURLOPT_KEEP_SENDING_ON_ERROR (is supported by 7.51.0 and later)
AC_MSG_CHECKING([checking CURLOPT_KEEP_SENDING_ON_ERROR])
AC_COMPILE_IFELSE(
   [AC_LANG_PROGRAM([[#include <curl/curl.h>]],
                    [[CURLoption opt = CURLOPT_KEEP_SENDING_ON_ERROR;]])
   ],
   [AC_DEFINE(HAVE_CURLOPT_KEEP_SENDING_ON_ERROR, 1, [Define to 1 if libcurl has CURLOPT_KEEP_SENDING_ON_ERROR CURLoption])
    AC_MSG_RESULT(yes)
   ],
   [AC_DEFINE(HAVE_CURLOPT_KEEP_SENDING_ON_ERROR, 0, [Define to 1 if libcurl has CURLOPT_KEEP_SENDING_ON_ERROR CURLoption])
    AC_MSG_RESULT(no)
   ]
)

dnl ----------------------------------------------
dnl output files
dnl ----------------------------------------------
AC_CONFIG_FILES(Makefile src/Makefile test/Makefile doc/Makefile)

dnl ----------------------------------------------
dnl short commit hash
dnl ----------------------------------------------
AC_CHECK_PROG([GITCMD], [git --version], [yes], [no])
AS_IF([test -d .git], [DOTGITDIR=yes], [DOTGITDIR=no])

AC_MSG_CHECKING([github short commit hash])
if test "x${GITCMD}" = "xyes" -a "x${DOTGITDIR}" = "xyes"; then
   GITCOMMITHASH=`git rev-parse --short HEAD`
elif test -f default_commit_hash; then
   GITCOMMITHASH=`cat default_commit_hash`
else
   GITCOMMITHASH="unknown"
fi
AC_MSG_RESULT([${GITCOMMITHASH}])

AC_DEFINE_UNQUOTED([COMMIT_HASH_VAL], ["${GITCOMMITHASH}"], [short commit hash value on github])

dnl ----------------------------------------------
dnl put
dnl ----------------------------------------------
AC_OUTPUT

dnl ----------------------------------------------
dnl end configuration
dnl ----------------------------------------------
s3fs-fuse-1.86/doc/000077500000000000000000000000001361654130000140325ustar00rootroot00000000000000s3fs-fuse-1.86/doc/Makefile.am000066400000000000000000000017321361654130000160710ustar00rootroot00000000000000######################################################################
# s3fs - FUSE-based file system backed by Amazon S3
#
# Copyright 2007-2008 Randy Rizun
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
######################################################################
dist_man1_MANS = man/s3fs.1
s3fs-fuse-1.86/doc/man/000077500000000000000000000000001361654130000146055ustar00rootroot00000000000000s3fs-fuse-1.86/doc/man/s3fs.1000066400000000000000000000512731361654130000155530ustar00rootroot00000000000000.TH S3FS "1" "February 2011" "S3FS" "User Commands"
.SH NAME
S3FS \- FUSE-based file system backed by Amazon S3
.SH SYNOPSIS
.SS mounting
.TP
\fBs3fs bucket[:/path] mountpoint \fP [options]
.TP
\fBs3fs mountpoint \fP [options (must specify bucket= option)]
.SS unmounting
.TP
\fBumount mountpoint
For root.
.TP
\fBfusermount -u mountpoint
For unprivileged user.
.SS utility mode (remove interrupted multipart uploading objects)
.TP
\fBs3fs --incomplete-mpu-list (-u) bucket
.TP
\fBs3fs --incomplete-mpu-abort[=all | =<date format>] bucket
.SH DESCRIPTION
s3fs is a FUSE filesystem that allows you to mount an Amazon S3 bucket as a local filesystem.
It stores files natively and transparently in S3 (i.e., you can use other programs to access the same files).
.SH AUTHENTICATION
s3fs supports the standard AWS credentials file (https://docs.aws.amazon.com/cli/latest/userguide/cli-config-files.html) stored in `${HOME}/.aws/credentials`.
Alternatively, s3fs supports a custom passwd file.
Only the AWS credentials file format can be used when an AWS session token is required.
The s3fs password file has this format (use this format if you have only one set of credentials):
.RS 4
\fBaccessKeyId\fP:\fBsecretAccessKey\fP
.RE

If you have more than one set of credentials, this syntax is also recognized:
.RS 4
\fBbucketName\fP:\fBaccessKeyId\fP:\fBsecretAccessKey\fP
.RE
.PP
Password files can be stored in two locations:
.RS 4
\fB/etc/passwd-s3fs\fP     [0640]
\fB$HOME/.passwd-s3fs\fP   [0600]
.RE
.PP
s3fs also recognizes the \fBAWSACCESSKEYID\fP and \fBAWSSECRETACCESSKEY\fP environment variables.
.SH OPTIONS
.SS "general options"
.TP
\fB\-h\fR   \fB\-\-help\fR
print help
.TP
\fB\ \fR    \fB\-\-version\fR
print version
.TP
\fB\-f\fR
FUSE foreground option - do not run as daemon.
.TP
\fB\-s\fR
FUSE singlethreaded option (disables multi-threaded operation)
.SS "mount options"
.TP
All s3fs options must be given in the form where "opt" is:
<option_name>=<option_value>
.TP
\fB\-o\fR bucket
if the bucket name (and path) is not specified on the command line, you must specify this option after \-o to give the bucket name.
.TP
\fB\-o\fR default_acl (default="private")
the default canned acl to apply to all written s3 objects, e.g., "private", "public-read".
see https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
for the full list of canned acls.
.TP
\fB\-o\fR retries (default="5")
number of times to retry a failed S3 transaction.
.TP
\fB\-o\fR use_cache (default="" which means disabled)
local folder to use for local file cache.
.TP
\fB\-o\fR check_cache_dir_exist (default is disable)
If use_cache is set, check if the cache directory exists.
If this option is not specified, it will be created at runtime when the cache directory does not exist.
.TP
\fB\-o\fR del_cache - delete local file cache
delete local file cache when s3fs starts and exits.
.TP
\fB\-o\fR storage_class (default="standard")
store object with specified storage class.
this option replaces the old option use_rrs.
Possible values: standard, standard_ia, onezone_ia, reduced_redundancy, and intelligent_tiering.
.TP
\fB\-o\fR use_rrs (default is disable)
use Amazon's Reduced Redundancy Storage.
this option can not be specified with use_sse.
(can specify use_rrs=1 for old version)
this option has been replaced by the new storage_class option.
.TP
\fB\-o\fR use_sse (default is disable)
Specify one of three types of Amazon's Server-Side Encryption: SSE-S3, SSE-C or SSE-KMS.
SSE-S3 uses Amazon S3-managed encryption keys, SSE-C uses customer-provided encryption keys, and SSE-KMS uses the master key which you manage in AWS KMS.
You can specify "use_sse" or "use_sse=1" to enable the SSE-S3 type (use_sse=1 is the old type parameter).
In the case of SSE-C, you can specify "use_sse=custom", "use_sse=custom:<SSE-C key file path>" or "use_sse=<SSE-C key file path>" (specifying only <SSE-C key file path> is the old type parameter).
You can use "c" as short for "custom".
The custom key file must have 600 permission. The file can have multiple lines; each line is one SSE-C key.
The first line in the file is used as the Customer-Provided Encryption Key for uploading and changing headers etc.
If there are keys after the first line, they are used for downloading objects that were encrypted with a key other than the first one.
In this way you can keep all SSE-C keys in the file as an SSE-C key history.
If you specify "custom" ("c") without a file path, you need to set the custom key by the load_sse_c option or the AWSSSECKEYS environment variable.
(The AWSSSECKEYS environment variable holds SSE-C keys with ":" as separator.)
This option is used to decide the SSE type, so if you do not want to encrypt objects at upload time but need to decrypt encrypted objects at download time, you can use the load_sse_c option instead of this option.
For setting SSE-KMS, specify "use_sse=kmsid" or "use_sse=kmsid:<kms id>".
You can use "k" as short for "kmsid".
If you want to specify the SSE-KMS type with your <kms id> in AWS KMS, you can set it after "kmsid:" (or "k:").
If you specify only "kmsid" ("k"), you need to set the AWSSSEKMSID environment variable whose value is the <kms id>.
Be careful: you cannot use a KMS id that is not in the same region as the EC2 instance.
.TP
\fB\-o\fR load_sse_c - specify SSE-C keys
Specify the customer-provided encryption keys file path for decrypting at download time.
If you use the customer-provided encryption key at upload time, you specify it with "use_sse=custom".
The file can have many lines; one line means one custom key.
In this way you can keep all SSE-C keys in the file as an SSE-C key history.
The AWSSSECKEYS environment variable has the same contents as this file.
.TP
\fB\-o\fR passwd_file (default="")
specify the path to the password file, which takes precedence over the password in $HOME/.passwd-s3fs and /etc/passwd-s3fs
.TP
\fB\-o\fR ahbe_conf (default="" which means disabled)
This option specifies the path to a configuration file defining additional HTTP headers by file (object) extension.
The configuration file format is below:
.RS 4
-----------
line         = [file suffix or regex] HTTP-header [HTTP-values]
file suffix  = file (object) suffix, if this field is empty, it means "reg:(.*)".(=all object).
regex        = regular expression to match the file (object) path. this type starts with "reg:" prefix.
HTTP-header  = additional HTTP header name
HTTP-values  = additional HTTP header value
-----------
Sample:
-----------
.gz                    Content-Encoding     gzip
.Z                     Content-Encoding     compress
reg:^/MYDIR/(.*)[.]t2$ Content-Encoding     text2
-----------
.RE
A sample configuration file is uploaded in the "test" directory.
If you use this option to set the "Content-Encoding" HTTP header, please take care with respect to RFC 2616.
.TP
\fB\-o\fR profile (default="default")
Choose a profile from ${HOME}/.aws/credentials to authenticate against S3.
Note that this format matches the AWS CLI format and differs from the s3fs passwd format.
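For example (the profile name "backup" below is only illustrative, not a reserved name), with this entry in ${HOME}/.aws/credentials:
.RS 4
[backup]
.br
aws_access_key_id=YOUR_ACCESS_KEY_ID
.br
aws_secret_access_key=YOUR_SECRET_ACCESS_KEY
.RE
the bucket can be mounted with:
.RS 4
s3fs mybucket /path/to/mountpoint \-o profile=backup
.RE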
.TP
\fB\-o\fR public_bucket (default="" which means disabled)
anonymously mount a public bucket when set to 1, ignores the $HOME/.passwd-s3fs and /etc/passwd-s3fs files.
S3 does not allow the copy object api for anonymous users, so s3fs sets the nocopyapi option automatically when public_bucket=1 is specified.
.TP
\fB\-o\fR connect_timeout (default="300" seconds)
time to wait for connection before giving up.
.TP
\fB\-o\fR readwrite_timeout (default="120" seconds)
time to wait between read/write activity before giving up.
.TP
\fB\-o\fR list_object_max_keys (default="1000")
specify the maximum number of keys returned by the S3 list object API. The default is 1000. You can set this value to 1000 or more.
.TP
\fB\-o\fR max_stat_cache_size (default="100,000" entries (about 40MB))
maximum number of entries in the stat cache and symbolic link cache.
.TP
\fB\-o\fR stat_cache_expire (default is no expire)
specify expire time (seconds) for entries in the stat cache and symbolic link cache.
This expire time indicates the time since the entry was cached.
.TP
\fB\-o\fR stat_cache_interval_expire (default is no expire)
specify expire time (seconds) for entries in the stat cache and symbolic link cache.
This expire time is based on the time from the last access of those cache entries.
This option is exclusive with stat_cache_expire, and is left for compatibility with older versions.
.TP
\fB\-o\fR enable_noobj_cache (default is disable)
enable cache entries for objects which do not exist.
s3fs always has to check whether a file (or sub directory) exists under an object (path) when executing a command, since s3fs recognizes a directory which does not exist as an object but which has files or sub directories under itself.
This increases ListBucket requests and degrades performance.
You can specify this option for performance: s3fs memorizes in the stat cache that the object (file or directory) does not exist.
.TP
\fB\-o\fR no_check_certificate (by default this option is disabled)
do not check the ssl certificate.
The server certificate won't be checked against the available certificate authorities.
.TP
\fB\-o\fR ssl_verify_hostname (default="2")
When 0, do not verify the SSL certificate against the hostname.
.TP
\fB\-o\fR nodnscache - disable dns cache.
s3fs always uses the DNS cache; this option disables it.
.TP
\fB\-o\fR nosscache - disable ssl session cache.
s3fs always uses the SSL session cache; this option disables it.
.TP
\fB\-o\fR multireq_max (default="20")
maximum number of parallel requests for listing objects.
.TP
\fB\-o\fR parallel_count (default="5")
number of parallel requests for uploading big objects.
s3fs uploads large objects (over 20MB) by multipart post requests, and sends parallel requests.
This option limits the parallel request count which s3fs requests at once.
Set this value depending on your CPU and network bandwidth.
.TP
\fB\-o\fR multipart_size (default="10")
part size, in MB, for each multipart request.
The minimum value is 5 MB and the maximum value is 5 GB.
.TP
\fB\-o\fR ensure_diskfree (default 0)
sets MB of disk free space to ensure.
This option gives the threshold of free space size on the disk which is used for cache files by s3fs.
s3fs creates files for downloading, uploading and caching.
If the free disk space is smaller than this value, s3fs uses as little disk space as possible in exchange for performance.
.TP
\fB\-o\fR singlepart_copy_limit (default="512")
maximum size, in MB, of a single-part copy before trying multipart copy.
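For example, to allow single-part copies up to 1 GB before s3fs switches to multipart copy (the value below is only illustrative):
.RS 4
s3fs mybucket /path/to/mountpoint \-o singlepart_copy_limit=1024
.RE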
.TP
\fB\-o\fR host (default="https://s3.amazonaws.com")
Set a non-Amazon host, e.g., https://example.com.
.TP
\fB\-o\fR servicepath (default="/")
Set a service path when the non-Amazon host requires a prefix.
.TP
\fB\-o\fR url (default="https://s3.amazonaws.com")
sets the url to use to access Amazon S3.
If you want to use HTTP, you can set "url=http://s3.amazonaws.com".
If you do not use https, please specify the URL with the url option.
.TP
\fB\-o\fR endpoint (default="us-east-1")
sets the endpoint to use on signature version 4.
If this option is not specified, s3fs uses the "us-east-1" region as the default.
If s3fs cannot connect to the region specified by this option, s3fs does not run.
But if you do not specify this option and cannot connect with the default region, s3fs retries automatically connecting to another region, since s3fs can learn the correct region name from the error returned by the S3 server.
.TP
\fB\-o\fR sigv2 (default is signature version 4)
sets signing of AWS requests by using Signature Version 2.
.TP
\fB\-o\fR mp_umask (default is "0000")
sets umask for the mount point directory.
If the allow_other option is not set, s3fs allows access to the mount point only to the owner.
In the opposite case s3fs allows access to all users as the default.
But if you set allow_other with this option, you can control the permissions of the mount point by this option like umask.
.TP
\fB\-o\fR umask (default is "0000")
sets umask for files under the mountpoint.
This can allow users other than the mounting user to read and write to files that they did not create.
.TP
\fB\-o\fR nomultipart - disable multipart uploads
.TP
\fB\-o\fR enable_content_md5 (default is disable)
Allow the S3 server to check data integrity of uploads via the Content-MD5 header.
This can add CPU overhead to transfers.
.TP
\fB\-o\fR ecs (default is disable)
This option instructs s3fs to query the ECS container credential metadata address instead of the instance metadata address.
.TP
\fB\-o\fR iam_role (default is no IAM role)
This option requires the IAM role name or "auto".
If you specify "auto", s3fs will automatically use the IAM role name that is set to the instance.
Specifying this option without any argument is the same as specifying "auto".
.TP
\fB\-o\fR ibm_iam_auth (default is not using IBM IAM authentication)
This option instructs s3fs to use IBM IAM authentication.
In this mode, the AWSAccessKey and AWSSecretKey will be used as IBM's Service-Instance-ID and APIKey, respectively.
.TP
\fB\-o\fR ibm_iam_endpoint (default is https://iam.bluemix.net)
Sets the URL to use for IBM IAM authentication.
.TP
\fB\-o\fR use_xattr (default is not handling the extended attribute)
Enable handling of extended attributes (xattrs).
If you set this option, you can use extended attributes.
For example, encfs and ecryptfs need extended attribute support.
Notice: if s3fs handles extended attributes, s3fs cannot work with the copy command using preserve=mode.
.TP
\fB\-o\fR noxmlns - disable registering xml name space.
disable registering the xml name space for responses of ListBucketResult and ListVersionsResult etc.
The default name space is looked up from "http://s3.amazonaws.com/doc/2006-03-01".
This option should not be specified now, because s3fs looks up the xmlns automatically after v1.66.
.TP
\fB\-o\fR nomixupload - disable copy in multipart uploads.
Disable using PUT (copy api) when multipart uploading large size objects.
By default, when doing a multipart upload, the range of unchanged data will use PUT (copy api) whenever possible.
When nocopyapi or norenameapi is specified, use of PUT (copy api) is invalidated even if this option is not specified.
.TP
\fB\-o\fR nocopyapi - for other incomplete compatibility object storage.
For distributed object storage which is compatible with the S3 API but lacks PUT (copy api).
If you set this option, s3fs does not use PUT with "x-amz-copy-source" (copy api).
Because traffic increases 2-3 times with this option, we do not recommend it.
.TP
\fB\-o\fR norenameapi - for other incomplete compatibility object storage.
For distributed object storage which is compatible with the S3 API but lacks PUT (copy api).
This option is a subset of the nocopyapi option.
The nocopyapi option avoids the copy api for all commands (e.g. chmod, chown, touch, mv, etc.), while this option avoids it only for the rename command (e.g. mv).
If this option is specified with nocopyapi, then s3fs ignores it.
.TP
\fB\-o\fR use_path_request_style (use legacy API calling style)
Enable compatibility with S3-like APIs which do not support the virtual-host request style, by using the older path request style.
.TP
\fB\-o\fR noua (suppress User-Agent header)
Usually s3fs outputs the User-Agent in "s3fs/<version> (commit hash <hash>; <using ssl library name>)" format.
If this option is specified, s3fs suppresses the output of the User-Agent.
.TP
\fB\-o\fR cipher_suites
Customize the list of TLS cipher suites. Expects a colon separated list of cipher suite names.
A list of available cipher suites, depending on your TLS engine, can be found in the CURL library documentation:
https://curl.haxx.se/docs/ssl-ciphers.html
.TP
\fB\-o\fR instance_name
The instance name of the current s3fs mountpoint.
This name will be added to logging messages and user agent headers sent by s3fs.
.TP
\fB\-o\fR complement_stat (complement lack of file/directory mode)
s3fs complements the lack of information about file/directory mode if a file or a directory object does not have an x-amz-meta-mode header.
By default, s3fs does not complement stat information for an object, so the object will not be allowed to be listed/modified.
.TP
\fB\-o\fR notsup_compat_dir (not support compatibility directory types)
By default, s3fs supports objects of the directory type as much as possible and recognizes them as directories.
Objects that can be recognized as directory objects are "dir/", "dir", "dir_$folder$", and the case of a file object that does not have a directory object but contains that directory path.
s3fs needs redundant communication to support all these directory types.
The object created as a directory by s3fs is "dir/".
By restricting s3fs to recognize only "dir/" as a directory, communication traffic can be reduced.
This option is used to give this restriction to s3fs.
However, if there is a directory object other than "dir/" in the bucket, specifying this option is not recommended.
s3fs may not be able to recognize the object correctly if an object not created by s3fs exists in the bucket.
Please use this option only when the directories in the bucket are only "dir/" objects.
.TP
\fB\-o\fR use_wtf8 - support arbitrary file system encoding.
S3 requires all object names to be valid utf-8.
But some clients, notably Windows NFS clients, use their own encoding.
This option re-encodes invalid utf-8 object names into valid utf-8 by mapping offending codes into a 'private' codepage of the Unicode set.
Useful on clients not using utf-8 as their file system encoding.
.TP
\fB\-o\fR use_session_token - indicate that a session token should be provided.
If credentials are provided by environment variables, this switch forces a presence check of the AWSSESSIONTOKEN variable.
Otherwise an error is returned.
.TP
\fB\-o\fR requester_pays (default is disable)
This option instructs s3fs to enable requests involving Requester Pays buckets (it includes the 'x-amz-request-payer=requester' entry in the request header).
.TP
\fB\-o\fR dbglevel (default="crit")
Set the debug message level.
Set the value to crit (critical), err (error), warn (warning), or info (information).
The default debug level is critical.
If s3fs runs with the "-d" option, the debug level is set to information.
When s3fs catches the signal SIGUSR2, the debug level is bumped up.
.TP
\fB\-o\fR curldbg - put curl debug messages
Put the debug messages from libcurl when this option is specified.
.SS "utility mode options"
.TP
\fB\-u\fR or \fB\-\-incomplete\-mpu\-list\fR
Lists incomplete multipart objects uploaded to the specified bucket.
.TP
\fB\-\-incomplete\-mpu\-abort\fR all or date format (default="24H")
Delete the incomplete multipart objects uploaded to the specified bucket.
If "all" is specified for this option, all incomplete multipart objects will be deleted.
If you specify no argument as an option, objects older than 24 hours (24H) will be deleted (this is the default value).
You can specify an optional date format.
It can be specified as year, month, day, hour, minute, second, and it is expressed as "Y", "M", "D", "h", "m", "s" respectively.
For example, "1Y6M10D12h30m30s".
.SH FUSE/MOUNT OPTIONS
.TP
Most of the generic mount options described in 'man mount' are supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime, noatime, sync, async, dirsync).
Filesystems are mounted with '\-onodev,nosuid' by default, which can only be overridden by a privileged user.
.TP
There are many FUSE specific mount options that can be specified, e.g. allow_other. See the FUSE README for the full set.
.SH NOTES
.TP
The maximum size of objects that s3fs can handle depends on Amazon S3. For example, up to 5 GB when using the single PUT API, and up to 5 TB when the Multipart Upload API is used.
.TP
If enabled via the "use_cache" option, s3fs automatically maintains a local cache of files in the folder specified by use_cache.
Whenever s3fs needs to read or write a file on S3, it first downloads the entire file locally to the folder specified by use_cache and operates on it.
When fuse_release() is called, s3fs will re-upload the file to S3 if it has been changed.
s3fs uses md5 checksums to minimize downloads from S3.
.TP
The folder specified by use_cache is just a local cache. It can be deleted at any time. s3fs rebuilds it on demand.
.TP
Local file caching works by calculating and comparing md5 checksums (ETag HTTP header).
.TP
s3fs leverages /etc/mime.types to "guess" the "correct" content-type based on file name extension.
This means that you can copy a website to S3 and serve it up directly from S3 with correct content-types!
.SH SEE ALSO
fuse(8), mount(8), fusermount(1), fstab(5)
.SH BUGS
Due to S3's "eventual consistency" limitations, file creation can and will occasionally fail.
Even after a successful create, subsequent reads can fail for an indeterminate time, even after one or more successful reads.
Create and read enough files and you will eventually encounter this failure.
This is not a flaw in s3fs and it is not something a FUSE wrapper like s3fs can work around.
The retries option does not address this issue.
Your application must either tolerate or compensate for these failures, for example by retrying creates or reads.
.SH AUTHOR
s3fs has been written by Randy Rizun.
s3fs-fuse-1.86/doc/s3fs.png000066400000000000000000000124021361654130000154150ustar00rootroot00000000000000[binary PNG image data omitted: doc/s3fs.png, the s3fs logo]
s3fs-fuse-1.86/src/000077500000000000000000000000001361654130000140545ustar00rootroot00000000000000s3fs-fuse-1.86/src/Makefile.am000066400000000000000000000031121361654130000161060ustar00rootroot00000000000000######################################################################
# s3fs - FUSE-based file system backed by Amazon S3
#
# Copyright 2007-2008 Randy Rizun
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
######################################################################
bin_PROGRAMS=s3fs

AM_CPPFLAGS = $(DEPS_CFLAGS)
if USE_GNUTLS_NETTLE
  AM_CPPFLAGS += -DUSE_GNUTLS_NETTLE
endif

s3fs_SOURCES = \
	s3fs.cpp \
	curl.cpp \
	cache.cpp \
	string_util.cpp \
	s3fs_util.cpp \
	fdcache.cpp \
	common_auth.cpp \
	addhead.cpp
if USE_SSL_OPENSSL
  s3fs_SOURCES += openssl_auth.cpp
endif
if USE_SSL_GNUTLS
  s3fs_SOURCES += gnutls_auth.cpp
endif
if USE_SSL_NSS
  s3fs_SOURCES += nss_auth.cpp
endif

s3fs_LDADD = $(DEPS_LIBS)

noinst_PROGRAMS = test_string_util
test_string_util_SOURCES = string_util.cpp test_string_util.cpp

TESTS = test_string_util

clang-tidy:
	clang-tidy $(s3fs_SOURCES) -- $(DEPS_CFLAGS) $(CPPFLAGS)
s3fs-fuse-1.86/src/addhead.cpp000066400000000000000000000162011361654130000161320ustar00rootroot00000000000000/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright(C) 2007 Randy Rizun
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/* [NOTE] The original header names were lost in extraction; the list below
 * restores the headers this translation unit actually needs. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <regex.h>
#include <curl/curl.h>
#include <string>
#include <sstream>
#include <fstream>
#include <map>
#include <vector>

#include "common.h"
#include "addhead.h"
#include "curl.h"
#include "s3fs.h"

using namespace std;

//-------------------------------------------------------------------
// Symbols
//-------------------------------------------------------------------
#define ADD_HEAD_REGEX              "reg:"

//-------------------------------------------------------------------
// Class AdditionalHeader
//-------------------------------------------------------------------
AdditionalHeader AdditionalHeader::singleton;

//-------------------------------------------------------------------
// Class AdditionalHeader method
//-------------------------------------------------------------------
AdditionalHeader::AdditionalHeader()
{
    if(this == AdditionalHeader::get()){
        is_enable = false;
    }else{
        abort();
    }
}

AdditionalHeader::~AdditionalHeader()
{
    if(this == AdditionalHeader::get()){
        Unload();
    }else{
        abort();
    }
}

bool AdditionalHeader::Load(const char* file)
{
    if(!file){
        S3FS_PRN_WARN("file is NULL.");
        return false;
    }
    Unload();

    ifstream AH(file);
    if(!AH.good()){
        S3FS_PRN_WARN("Could not open file(%s).", file);
        return false;
    }

    // read file
    string   line;
    ADDHEAD* paddhead;
    while(getline(AH, line)){
        if('#' == line[0]){
            continue;
        }
        if(line.empty()){
            continue;
        }
        // load a line
        istringstream ss(line);
        string        key;       // suffix(key)
        string        head;      // additional HTTP header
        string        value;     // header value
        if(0 == isblank(line[0])){
            ss >> key;
        }
        if(ss){
            ss >> head;
            if(ss && static_cast<size_t>(ss.tellg()) < line.size()){
                value = line.substr(static_cast<size_t>(ss.tellg()) + 1);
            }
        }

        // check it
        if(head.empty()){
            if(key.empty()){
                continue;
            }
            S3FS_PRN_ERR("file format error: %s key(suffix) is no HTTP header value.", key.c_str());
            Unload();
            return false;
        }

        paddhead = new ADDHEAD;
        if(0 == strncasecmp(key.c_str(), ADD_HEAD_REGEX, strlen(ADD_HEAD_REGEX))){
            // regex
            if(key.size() <=
strlen(ADD_HEAD_REGEX)){ S3FS_PRN_ERR("file format error: %s key(suffix) does not have key string.", key.c_str()); delete paddhead; continue; } key = key.substr(strlen(ADD_HEAD_REGEX)); // compile regex_t* preg = new regex_t; int result; if(0 != (result = regcomp(preg, key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info char errbuf[256]; regerror(result, preg, errbuf, sizeof(errbuf)); S3FS_PRN_ERR("failed to compile regex from %s key by %s.", key.c_str(), errbuf); delete preg; delete paddhead; continue; } // set paddhead->pregex = preg; paddhead->basestring = key; paddhead->headkey = head; paddhead->headvalue = value; }else{ // not regex, directly comparing paddhead->pregex = NULL; paddhead->basestring = key; paddhead->headkey = head; paddhead->headvalue = value; } // add list addheadlist.push_back(paddhead); // set flag if(!is_enable){ is_enable = true; } } return true; } void AdditionalHeader::Unload() { is_enable = false; for(addheadlist_t::iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){ ADDHEAD *paddhead = *iter; if(paddhead){ if(paddhead->pregex){ regfree(paddhead->pregex); delete paddhead->pregex; } delete paddhead; } } addheadlist.clear(); } bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const { if(!is_enable){ return true; } if(!path){ S3FS_PRN_WARN("path is NULL."); return false; } size_t pathlength = strlen(path); // loop // // [NOTE] // Because to allow duplicate key, and then scanning the entire table. // for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){ const ADDHEAD *paddhead = *iter; if(!paddhead){ continue; } if(paddhead->pregex){ // regex regmatch_t match; // not use if(0 == regexec(paddhead->pregex, path, 1, &match, 0)){ // match -> adding header meta[paddhead->headkey] = paddhead->headvalue; } }else{ // directly comparing if(paddhead->basestring.length() < pathlength){ if(0 == paddhead->basestring.length() || 0 == strcmp(&path[pathlength - paddhead->basestring.length()], paddhead->basestring.c_str())){ // match -> adding header meta[paddhead->headkey] = paddhead->headvalue; } } } } return true; } struct curl_slist* AdditionalHeader::AddHeader(struct curl_slist* list, const char* path) const { headers_t meta; if(!AddHeader(meta, path)){ return list; } for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ // Adding header list = curl_slist_sort_insert(list, iter->first.c_str(), iter->second.c_str()); } meta.clear(); S3FS_MALLOCTRIM(0); return list; } bool AdditionalHeader::Dump() const { if(!IS_S3FS_LOG_DBG()){ return true; } ostringstream ssdbg; int cnt = 1; ssdbg << "Additional Header list[" << addheadlist.size() << "] = {" << endl; for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter, ++cnt){ const ADDHEAD *paddhead = *iter; ssdbg << " [" << cnt << "] = {" << endl; if(paddhead){ if(paddhead->pregex){ ssdbg << " type\t\t--->\tregex" << endl; }else{ ssdbg << " type\t\t--->\tsuffix matching" << endl; } ssdbg << " base string\t--->\t" << paddhead->basestring << endl; ssdbg << " add header\t--->\t" << paddhead->headkey << ": " << paddhead->headvalue << endl; } ssdbg << " }" << endl; } ssdbg << "}" << endl; // print all S3FS_PRN_DBG("%s", ssdbg.str().c_str()); return true; } /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ 
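// [Illustration]
// A hypothetical additional-header file as parsed by AdditionalHeader::Load()
// above (the suffixes, header names, and values here are made-up examples):
//
//     # comment lines start with '#'; blank lines are skipped
//     .gz                 Content-Encoding    gzip
//     reg:^/log/.*\.txt$  x-amz-meta-example  some-value
//
// The first token is a path suffix (or a POSIX extended regex when prefixed
// with "reg:"), the second is the HTTP header name to add, and the remainder
// of the line is the header value.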
s3fs-fuse-1.86/src/addhead.h000066400000000000000000000037241361654130000156050ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Randy Rizun * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef S3FS_ADDHEAD_H_ #define S3FS_ADDHEAD_H_ #include //---------------------------------------------- // class AdditionalHeader //---------------------------------------------- typedef struct add_header{ regex_t* pregex; // not NULL means using regex, NULL means comparing suffix directly. std::string basestring; std::string headkey; std::string headvalue; }ADDHEAD; typedef std::vector addheadlist_t; class AdditionalHeader { private: static AdditionalHeader singleton; bool is_enable; addheadlist_t addheadlist; protected: AdditionalHeader(); ~AdditionalHeader(); public: // Reference singleton static AdditionalHeader* get(void) { return &singleton; } bool Load(const char* file); void Unload(void); bool AddHeader(headers_t& meta, const char* path) const; struct curl_slist* AddHeader(struct curl_slist* list, const char* path) const; bool Dump(void) const; }; #endif // S3FS_ADDHEAD_H_ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ s3fs-fuse-1.86/src/cache.cpp000066400000000000000000000503421361654130000156270ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Randy Rizun * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ #include #include #include #ifndef HAVE_CLOCK_GETTIME #include #endif #include #include #include #include #include #include #include #include #include #include #include "cache.h" #include "s3fs.h" #include "s3fs_util.h" #include "string_util.h" using namespace std; //------------------------------------------------------------------- // Utility //------------------------------------------------------------------- #ifndef CLOCK_REALTIME #define CLOCK_REALTIME 0 #endif #ifndef CLOCK_MONOTONIC #define CLOCK_MONOTONIC CLOCK_REALTIME #endif #ifndef CLOCK_MONOTONIC_COARSE #define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC #endif #ifdef HAVE_CLOCK_GETTIME static int s3fs_clock_gettime(int clk_id, struct timespec* ts) { return clock_gettime(static_cast(clk_id), ts); } #else static int s3fs_clock_gettime(int clk_id, struct timespec* ts) { struct timeval now; if(0 != gettimeofday(&now, NULL)){ return -1; } ts->tv_sec = now.tv_sec; ts->tv_nsec = now.tv_usec * 1000; return 0; } #endif inline void SetStatCacheTime(struct timespec& ts) { if(-1 == s3fs_clock_gettime(CLOCK_MONOTONIC_COARSE, &ts)){ ts.tv_sec = time(NULL); ts.tv_nsec = 0; } } inline void InitStatCacheTime(struct timespec& ts) { ts.tv_sec = 0; ts.tv_nsec = 0; } inline int CompareStatCacheTime(const struct timespec& ts1, const struct timespec& ts2) { // return -1: ts1 < ts2 // 0: ts1 == ts2 // 1: ts1 > ts2 if(ts1.tv_sec < ts2.tv_sec){ return -1; }else if(ts1.tv_sec > ts2.tv_sec){ return 1; }else{ if(ts1.tv_nsec < ts2.tv_nsec){ return -1; }else if(ts1.tv_nsec > ts2.tv_nsec){ return 1; } } return 0; } inline bool IsExpireStatCacheTime(const struct timespec& ts, const time_t& expire) { struct timespec nowts; SetStatCacheTime(nowts); return ((ts.tv_sec + expire) < nowts.tv_sec); } // // For stats cache out // typedef std::vector statiterlist_t; struct sort_statiterlist{ // ascending order bool operator()(const stat_cache_t::iterator& src1, const stat_cache_t::iterator& src2) const { int result = CompareStatCacheTime(src1->second->cache_date, src2->second->cache_date); if(0 == result){ if(src1->second->hit_count < src2->second->hit_count){ result = -1; } } return (result < 0); } }; // // For symbolic link cache out // typedef std::vector symlinkiterlist_t; struct sort_symlinkiterlist{ // ascending order bool operator()(const symlink_cache_t::iterator& src1, const symlink_cache_t::iterator& src2) const { int result = CompareStatCacheTime(src1->second->cache_date, src2->second->cache_date); // use the same as Stats if(0 == result){ if(src1->second->hit_count < src2->second->hit_count){ result = -1; } } return (result < 0); } }; //------------------------------------------------------------------- // Static //------------------------------------------------------------------- StatCache StatCache::singleton; pthread_mutex_t StatCache::stat_cache_lock; //------------------------------------------------------------------- // Constructor/Destructor //------------------------------------------------------------------- StatCache::StatCache() : IsExpireTime(false), IsExpireIntervalType(false), ExpireTime(0), CacheSize(100000), IsCacheNoObject(false) { if(this == StatCache::getStatCacheData()){ stat_cache.clear(); pthread_mutexattr_t attr; pthread_mutexattr_init(&attr); #if S3FS_PTHREAD_ERRORCHECK pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); #endif pthread_mutex_init(&StatCache::stat_cache_lock, &attr); }else{ abort(); } } StatCache::~StatCache() { if(this == StatCache::getStatCacheData()){ Clear(); 
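// Clear() above has already freed every cached stat entry, so the mutex
// guarding the cache can now be destroyed safely.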
pthread_mutex_destroy(&StatCache::stat_cache_lock); }else{ abort(); } } //------------------------------------------------------------------- // Methods //------------------------------------------------------------------- unsigned long StatCache::GetCacheSize() const { return CacheSize; } unsigned long StatCache::SetCacheSize(unsigned long size) { unsigned long old = CacheSize; CacheSize = size; return old; } time_t StatCache::GetExpireTime() const { return (IsExpireTime ? ExpireTime : (-1)); } time_t StatCache::SetExpireTime(time_t expire, bool is_interval) { time_t old = ExpireTime; ExpireTime = expire; IsExpireTime = true; IsExpireIntervalType = is_interval; return old; } time_t StatCache::UnsetExpireTime() { time_t old = IsExpireTime ? ExpireTime : (-1); ExpireTime = 0; IsExpireTime = false; IsExpireIntervalType = false; return old; } bool StatCache::SetCacheNoObject(bool flag) { bool old = IsCacheNoObject; IsCacheNoObject = flag; return old; } void StatCache::Clear() { AutoLock lock(&StatCache::stat_cache_lock); for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ++iter){ delete (*iter).second; } stat_cache.clear(); S3FS_MALLOCTRIM(0); } bool StatCache::GetStat(const string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce) { bool is_delete_cache = false; string strpath = key; AutoLock lock(&StatCache::stat_cache_lock); stat_cache_t::iterator iter = stat_cache.end(); if(overcheck && '/' != strpath[strpath.length() - 1]){ strpath += "/"; iter = stat_cache.find(strpath); } if(iter == stat_cache.end()){ strpath = key; iter = stat_cache.find(strpath); } if(iter != stat_cache.end() && (*iter).second){ stat_cache_entry* ent = (*iter).second; if(!IsExpireTime || !IsExpireStatCacheTime(ent->cache_date, ExpireTime)){ if(ent->noobjcache){ if(!IsCacheNoObject){ // need to delete this cache. DelStat(strpath, /*lock_already_held=*/ true); }else{ // noobjcache = true means no object. } return false; } // hit without checking etag string stretag; if(petag){ // find & check ETag for(headers_t::iterator hiter = ent->meta.begin(); hiter != ent->meta.end(); ++hiter){ string tag = lower(hiter->first); if(tag == "etag"){ stretag = hiter->second; if('\0' != petag[0] && 0 != strcmp(petag, stretag.c_str())){ is_delete_cache = true; } break; } } } if(is_delete_cache){ // not hit by different ETag S3FS_PRN_DBG("stat cache not hit by ETag[path=%s][time=%lld.%09ld][hit count=%lu][ETag(%s)!=(%s)]", strpath.c_str(), static_cast(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count, petag ? 
petag : "null", stretag.c_str()); }else{ // hit S3FS_PRN_DBG("stat cache hit [path=%s][time=%lld.%09ld][hit count=%lu]", strpath.c_str(), static_cast(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count); if(pst!= NULL){ *pst= ent->stbuf; } if(meta != NULL){ *meta = ent->meta; } if(pisforce != NULL){ (*pisforce) = ent->isforce; } ent->hit_count++; if(IsExpireIntervalType){ SetStatCacheTime(ent->cache_date); } return true; } }else{ // timeout is_delete_cache = true; } } if(is_delete_cache){ DelStat(strpath, /*lock_already_held=*/ true); } return false; } bool StatCache::IsNoObjectCache(const string& key, bool overcheck) { bool is_delete_cache = false; string strpath = key; if(!IsCacheNoObject){ return false; } AutoLock lock(&StatCache::stat_cache_lock); stat_cache_t::iterator iter = stat_cache.end(); if(overcheck && '/' != strpath[strpath.length() - 1]){ strpath += "/"; iter = stat_cache.find(strpath); } if(iter == stat_cache.end()){ strpath = key; iter = stat_cache.find(strpath); } if(iter != stat_cache.end() && (*iter).second) { if(!IsExpireTime || !IsExpireStatCacheTime((*iter).second->cache_date, ExpireTime)){ if((*iter).second->noobjcache){ // noobjcache = true means no object. SetStatCacheTime((*iter).second->cache_date); return true; } }else{ // timeout is_delete_cache = true; } } if(is_delete_cache){ DelStat(strpath, /*lock_already_held=*/ true); } return false; } bool StatCache::AddStat(const std::string& key, headers_t& meta, bool forcedir, bool no_truncate) { if(!no_truncate && CacheSize< 1){ return true; } S3FS_PRN_INFO3("add stat cache entry[path=%s]", key.c_str()); bool found; bool do_truncate; { AutoLock lock(&StatCache::stat_cache_lock); found = stat_cache.end() != stat_cache.find(key); do_truncate = stat_cache.size() > CacheSize; } if(found){ DelStat(key.c_str()); }else{ if(do_truncate){ if(!TruncateCache()){ return false; } } } // make new stat_cache_entry* ent = new stat_cache_entry(); if(!convert_header_to_stat(key.c_str(), meta, &(ent->stbuf), forcedir)){ delete ent; return false; } ent->hit_count = 0; ent->isforce = forcedir; ent->noobjcache = false; ent->notruncate = (no_truncate ? 1L : 0L); ent->meta.clear(); SetStatCacheTime(ent->cache_date); // Set time. //copy only some keys for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ string tag = lower(iter->first); string value = iter->second; if(tag == "content-type"){ ent->meta[iter->first] = value; }else if(tag == "content-length"){ ent->meta[iter->first] = value; }else if(tag == "etag"){ ent->meta[iter->first] = value; }else if(tag == "last-modified"){ ent->meta[iter->first] = value; }else if(tag.substr(0, 5) == "x-amz"){ ent->meta[tag] = value; // key is lower case for "x-amz" } } // add AutoLock lock(&StatCache::stat_cache_lock); stat_cache_t::iterator iter = stat_cache.find(key); // recheck for same key exists if(stat_cache.end() != iter){ delete iter->second; stat_cache.erase(iter); } stat_cache[key] = ent; // check symbolic link cache if(!S_ISLNK(ent->stbuf.st_mode)){ if(symlink_cache.end() != symlink_cache.find(key)){ // if symbolic link cache has key, thus remove it. 
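// (the entry just added is not a symbolic link, so any existing symlink cache entry for this key is stale)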
DelSymlink(key.c_str(), true); } } return true; } bool StatCache::AddNoObjectCache(const string& key) { if(!IsCacheNoObject){ return true; // pretend successful } if(CacheSize < 1){ return true; } S3FS_PRN_INFO3("add no object cache entry[path=%s]", key.c_str()); bool found; bool do_truncate; { AutoLock lock(&StatCache::stat_cache_lock); found = stat_cache.end() != stat_cache.find(key); do_truncate = stat_cache.size() > CacheSize; } if(found){ DelStat(key.c_str()); }else{ if(do_truncate){ if(!TruncateCache()){ return false; } } } // make new stat_cache_entry* ent = new stat_cache_entry(); memset(&(ent->stbuf), 0, sizeof(struct stat)); ent->hit_count = 0; ent->isforce = false; ent->noobjcache = true; ent->notruncate = 0L; ent->meta.clear(); SetStatCacheTime(ent->cache_date); // Set time. // add AutoLock lock(&StatCache::stat_cache_lock); stat_cache_t::iterator iter = stat_cache.find(key); // recheck for same key exists if(stat_cache.end() != iter){ delete iter->second; stat_cache.erase(iter); } stat_cache[key] = ent; // check symbolic link cache if(symlink_cache.end() != symlink_cache.find(key)){ // if symbolic link cache has key, thus remove it. DelSymlink(key.c_str(), true); } return true; } void StatCache::ChangeNoTruncateFlag(const std::string& key, bool no_truncate) { AutoLock lock(&StatCache::stat_cache_lock); stat_cache_t::iterator iter = stat_cache.find(key); if(stat_cache.end() != iter){ stat_cache_entry* ent = iter->second; if(ent){ if(no_truncate){ ++(ent->notruncate); }else{ if(0L < ent->notruncate){ --(ent->notruncate); } } } } } bool StatCache::TruncateCache() { AutoLock lock(&StatCache::stat_cache_lock); if(stat_cache.empty()){ return true; } // 1) erase over expire time if(IsExpireTime){ for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ){ stat_cache_entry* entry = iter->second; if(!entry || (0L == entry->notruncate && IsExpireStatCacheTime(entry->cache_date, ExpireTime))){ delete entry; stat_cache.erase(iter++); }else{ ++iter; } } } // 2) check stat cache count if(stat_cache.size() < CacheSize){ return true; } // 3) erase from the old cache in order size_t erase_count= stat_cache.size() - CacheSize + 1; statiterlist_t erase_iters; for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ++iter){ // check no truncate stat_cache_entry* ent = iter->second; if(ent && 0L < ent->notruncate){ // skip for no truncate entry if(0 < erase_count){ --erase_count; // decrement } } // iter is not have notruncate flag erase_iters.push_back(iter); sort(erase_iters.begin(), erase_iters.end(), sort_statiterlist()); if(erase_count < erase_iters.size()){ erase_iters.pop_back(); } } for(statiterlist_t::iterator iiter = erase_iters.begin(); iiter != erase_iters.end(); ++iiter){ stat_cache_t::iterator siter = *iiter; S3FS_PRN_DBG("truncate stat cache[path=%s]", siter->first.c_str()); delete siter->second; stat_cache.erase(siter); } S3FS_MALLOCTRIM(0); return true; } bool StatCache::DelStat(const char* key, bool lock_already_held) { if(!key){ return false; } S3FS_PRN_INFO3("delete stat cache entry[path=%s]", key); AutoLock lock(&StatCache::stat_cache_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE); stat_cache_t::iterator iter; if(stat_cache.end() != (iter = stat_cache.find(string(key)))){ delete (*iter).second; stat_cache.erase(iter); } if(0 < strlen(key) && 0 != strcmp(key, "/")){ string strpath = key; if('/' == strpath[strpath.length() - 1]){ // If there is "path" cache, delete it. 
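// (both the "path" and "path/" forms may be cached, so erase the counterpart key as well)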
strpath = strpath.substr(0, strpath.length() - 1); }else{ // If there is "path/" cache, delete it. strpath += "/"; } if(stat_cache.end() != (iter = stat_cache.find(strpath))){ delete (*iter).second; stat_cache.erase(iter); } } S3FS_MALLOCTRIM(0); return true; } bool StatCache::GetSymlink(const string& key, string& value) { bool is_delete_cache = false; string strpath = key; AutoLock lock(&StatCache::stat_cache_lock); symlink_cache_t::iterator iter = symlink_cache.find(strpath); if(iter != symlink_cache.end() && iter->second){ symlink_cache_entry* ent = iter->second; if(!IsExpireTime || !IsExpireStatCacheTime(ent->cache_date, ExpireTime)){ // use the same as Stats // found S3FS_PRN_DBG("symbolic link cache hit [path=%s][time=%lld.%09ld][hit count=%lu]", strpath.c_str(), static_cast(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count); value = ent->link; ent->hit_count++; if(IsExpireIntervalType){ SetStatCacheTime(ent->cache_date); } return true; }else{ // timeout is_delete_cache = true; } } if(is_delete_cache){ DelSymlink(strpath.c_str(), /*lock_already_held=*/ true); } return false; } bool StatCache::AddSymlink(const string& key, const string& value) { if(CacheSize< 1){ return true; } S3FS_PRN_INFO3("add symbolic link cache entry[path=%s, value=%s]", key.c_str(), value.c_str()); bool found; bool do_truncate; { AutoLock lock(&StatCache::stat_cache_lock); found = symlink_cache.end() != symlink_cache.find(key); do_truncate = symlink_cache.size() > CacheSize; } if(found){ DelSymlink(key.c_str()); }else{ if(do_truncate){ if(!TruncateSymlink()){ return false; } } } // make new symlink_cache_entry* ent = new symlink_cache_entry(); ent->link = value; ent->hit_count = 0; SetStatCacheTime(ent->cache_date); // Set time(use the same as Stats). // add AutoLock lock(&StatCache::stat_cache_lock); symlink_cache_t::iterator iter = symlink_cache.find(key); // recheck for same key exists if(symlink_cache.end() != iter){ delete iter->second; symlink_cache.erase(iter); } symlink_cache[key] = ent; return true; } bool StatCache::TruncateSymlink() { AutoLock lock(&StatCache::stat_cache_lock); if(symlink_cache.empty()){ return true; } // 1) erase over expire time if(IsExpireTime){ for(symlink_cache_t::iterator iter = symlink_cache.begin(); iter != symlink_cache.end(); ){ symlink_cache_entry* entry = iter->second; if(!entry || IsExpireStatCacheTime(entry->cache_date, ExpireTime)){ // use the same as Stats delete entry; symlink_cache.erase(iter++); }else{ ++iter; } } } // 2) check stat cache count if(symlink_cache.size() < CacheSize){ return true; } // 3) erase from the old cache in order size_t erase_count= symlink_cache.size() - CacheSize + 1; symlinkiterlist_t erase_iters; for(symlink_cache_t::iterator iter = symlink_cache.begin(); iter != symlink_cache.end(); ++iter){ erase_iters.push_back(iter); sort(erase_iters.begin(), erase_iters.end(), sort_symlinkiterlist()); if(erase_count < erase_iters.size()){ erase_iters.pop_back(); } } for(symlinkiterlist_t::iterator iiter = erase_iters.begin(); iiter != erase_iters.end(); ++iiter){ symlink_cache_t::iterator siter = *iiter; S3FS_PRN_DBG("truncate symbolic link cache[path=%s]", siter->first.c_str()); delete siter->second; symlink_cache.erase(siter); } S3FS_MALLOCTRIM(0); return true; } bool StatCache::DelSymlink(const char* key, bool lock_already_held) { if(!key){ return false; } S3FS_PRN_INFO3("delete symbolic link cache entry[path=%s]", key); AutoLock lock(&StatCache::stat_cache_lock, lock_already_held ? 
AutoLock::ALREADY_LOCKED : AutoLock::NONE); symlink_cache_t::iterator iter; if(symlink_cache.end() != (iter = symlink_cache.find(string(key)))){ delete iter->second; symlink_cache.erase(iter); } S3FS_MALLOCTRIM(0); return true; } //------------------------------------------------------------------- // Functions //------------------------------------------------------------------- bool convert_header_to_stat(const char* path, headers_t& meta, struct stat* pst, bool forcedir) { if(!path || !pst){ return false; } memset(pst, 0, sizeof(struct stat)); pst->st_nlink = 1; // see fuse FAQ // mode pst->st_mode = get_mode(meta, path, true, forcedir); // blocks if(S_ISREG(pst->st_mode)){ pst->st_blocks = get_blocks(pst->st_size); } pst->st_blksize = 4096; // mtime pst->st_mtime = get_mtime(meta); // ctime pst->st_ctime = get_ctime(meta); // size pst->st_size = get_size(meta); // uid/gid pst->st_uid = get_uid(meta); pst->st_gid = get_gid(meta); return true; } /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ s3fs-fuse-1.86/src/cache.h000066400000000000000000000133411361654130000152720ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Randy Rizun * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef S3FS_CACHE_H_ #define S3FS_CACHE_H_ #include "common.h" // // Struct for stats cache // struct stat_cache_entry { struct stat stbuf; unsigned long hit_count; struct timespec cache_date; headers_t meta; bool isforce; bool noobjcache; // Flag: cache is no object for no listing. unsigned long notruncate; // 0<: not remove automatically at checking truncate stat_cache_entry() : hit_count(0), isforce(false), noobjcache(false), notruncate(0L) { memset(&stbuf, 0, sizeof(struct stat)); cache_date.tv_sec = 0; cache_date.tv_nsec = 0; meta.clear(); } }; typedef std::map stat_cache_t; // key=path // // Struct for symbolic link cache // struct symlink_cache_entry { std::string link; unsigned long hit_count; struct timespec cache_date; // The function that operates timespec uses the same as Stats symlink_cache_entry() : link(""), hit_count(0) { cache_date.tv_sec = 0; cache_date.tv_nsec = 0; } }; typedef std::map symlink_cache_t; // // Class // // [NOTE] About Symbolic link cache // The Stats cache class now also has a symbolic link cache. // It is possible to take out the Symbolic link cache in another class, // but the cache out etc. should be synchronized with the Stats cache // and implemented in this class. // Symbolic link cache size and timeout use the same settings as Stats // cache. This simplifies user configuration, and from a user perspective, // the symbolic link cache appears to be included in the Stats cache. 
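//
// [Illustration] A typical lookup through the singleton (a hypothetical
// sketch; the object path is made up):
//
//     struct stat st;
//     if(StatCache::getStatCacheData()->GetStat("/dir/file.txt", &st)){
//         // cache hit: no HEAD request to S3 is needed for this stat
//     }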
// class StatCache { private: static StatCache singleton; static pthread_mutex_t stat_cache_lock; stat_cache_t stat_cache; bool IsExpireTime; bool IsExpireIntervalType; // if this flag is true, cache data is updated at last access time. time_t ExpireTime; unsigned long CacheSize; bool IsCacheNoObject; symlink_cache_t symlink_cache; private: StatCache(); ~StatCache(); void Clear(void); bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce); // Truncate stat cache bool TruncateCache(void); // Truncate symbolic link cache bool TruncateSymlink(void); public: // Reference singleton static StatCache* getStatCacheData(void) { return &singleton; } // Attribute unsigned long GetCacheSize(void) const; unsigned long SetCacheSize(unsigned long size); time_t GetExpireTime(void) const; time_t SetExpireTime(time_t expire, bool is_interval = false); time_t UnsetExpireTime(void); bool SetCacheNoObject(bool flag); bool EnableCacheNoObject(void) { return SetCacheNoObject(true); } bool DisableCacheNoObject(void) { return SetCacheNoObject(false); } bool GetCacheNoObject(void) const { return IsCacheNoObject; } // Get stat cache bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck = true, bool* pisforce = NULL) { return GetStat(key, pst, meta, overcheck, NULL, pisforce); } bool GetStat(const std::string& key, struct stat* pst, bool overcheck = true) { return GetStat(key, pst, NULL, overcheck, NULL, NULL); } bool GetStat(const std::string& key, headers_t* meta, bool overcheck = true) { return GetStat(key, NULL, meta, overcheck, NULL, NULL); } bool HasStat(const std::string& key, bool overcheck = true) { return GetStat(key, NULL, NULL, overcheck, NULL, NULL); } bool HasStat(const std::string& key, const char* etag, bool overcheck = true) { return GetStat(key, NULL, NULL, overcheck, etag, NULL); } // Cache For no object bool IsNoObjectCache(const std::string& key, bool overcheck = true); bool AddNoObjectCache(const std::string& key); // Add stat cache bool AddStat(const std::string& key, headers_t& meta, bool forcedir = false, bool no_truncate = false); // Change no truncate flag void ChangeNoTruncateFlag(const std::string& key, bool no_truncate); // Delete stat cache bool DelStat(const char* key, bool lock_already_held = false); bool DelStat(std::string& key, bool lock_already_held = false) { return DelStat(key.c_str(), lock_already_held); } // Cache for symbolic link bool GetSymlink(const std::string& key, std::string& value); bool AddSymlink(const std::string& key, const std::string& value); bool DelSymlink(const char* key, bool lock_already_held = false); }; // // Functions // bool convert_header_to_stat(const char* path, headers_t& meta, struct stat* pst, bool forcedir = false); #endif // S3FS_CACHE_H_ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ s3fs-fuse-1.86/src/common.h000066400000000000000000000157051361654130000155250ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Randy Rizun * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef S3FS_COMMON_H_ #define S3FS_COMMON_H_ #include #include "../config.h" // // Extended attribute // #ifdef HAVE_SYS_EXTATTR_H #include #elif HAVE_ATTR_XATTR_H #include #elif HAVE_SYS_XATTR_H #include #endif // // Macro // static inline const char *SAFESTRPTR(const char *strptr) { return strptr ? strptr : ""; } // // Debug level // enum s3fs_log_level{ S3FS_LOG_CRIT = 0, // LOG_CRIT S3FS_LOG_ERR = 1, // LOG_ERR S3FS_LOG_WARN = 3, // LOG_WARNING S3FS_LOG_INFO = 7, // LOG_INFO S3FS_LOG_DBG = 15 // LOG_DEBUG }; // // Debug macros // #define IS_S3FS_LOG_CRIT() (S3FS_LOG_CRIT == debug_level) #define IS_S3FS_LOG_ERR() (S3FS_LOG_ERR == (debug_level & S3FS_LOG_DBG)) #define IS_S3FS_LOG_WARN() (S3FS_LOG_WARN == (debug_level & S3FS_LOG_DBG)) #define IS_S3FS_LOG_INFO() (S3FS_LOG_INFO == (debug_level & S3FS_LOG_DBG)) #define IS_S3FS_LOG_DBG() (S3FS_LOG_DBG == (debug_level & S3FS_LOG_DBG)) #define S3FS_LOG_LEVEL_TO_SYSLOG(level) \ ( S3FS_LOG_DBG == (level & S3FS_LOG_DBG) ? LOG_DEBUG : \ S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? LOG_INFO : \ S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? LOG_WARNING : \ S3FS_LOG_ERR == (level & S3FS_LOG_DBG) ? LOG_ERR : LOG_CRIT ) #define S3FS_LOG_LEVEL_STRING(level) \ ( S3FS_LOG_DBG == (level & S3FS_LOG_DBG) ? "[DBG] " : \ S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? "[INF] " : \ S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? "[WAN] " : \ S3FS_LOG_ERR == (level & S3FS_LOG_DBG) ? "[ERR] " : "[CRT] " ) #define S3FS_LOG_NEST_MAX 4 #define S3FS_LOG_NEST(nest) (nest < S3FS_LOG_NEST_MAX ? s3fs_log_nest[nest] : s3fs_log_nest[S3FS_LOG_NEST_MAX - 1]) #define S3FS_LOW_LOGPRN(level, fmt, ...) \ do{ \ if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \ if(foreground){ \ fprintf(stdout, "%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), __FILE__, __func__, __LINE__, __VA_ARGS__); \ }else{ \ syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s%s:%s(%d): " fmt "%s", instance_name.c_str(), __FILE__, __func__, __LINE__, __VA_ARGS__); \ } \ } \ }while(0) #define S3FS_LOW_LOGPRN2(level, nest, fmt, ...) \ do{ \ if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \ if(foreground){ \ fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), S3FS_LOG_NEST(nest), __FILE__, __func__, __LINE__, __VA_ARGS__); \ }else{ \ syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s%s" fmt "%s", instance_name.c_str(), S3FS_LOG_NEST(nest), __VA_ARGS__); \ } \ } \ }while(0) #define S3FS_LOW_LOGPRN_EXIT(fmt, ...) \ do{ \ if(foreground){ \ fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \ }else{ \ fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \ syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "%ss3fs: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \ } \ }while(0) // Special macro for init message #define S3FS_PRN_INIT_INFO(fmt, ...) 
\ do{ \ if(foreground){ \ fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(S3FS_LOG_INFO), S3FS_LOG_NEST(0), __FILE__, __func__, __LINE__, __VA_ARGS__, ""); \ }else{ \ syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_INFO), "%s%s" fmt "%s", instance_name.c_str(), S3FS_LOG_NEST(0), __VA_ARGS__, ""); \ } \ }while(0) // [NOTE] // small trick for VA_ARGS // #define S3FS_PRN_EXIT(fmt, ...) S3FS_LOW_LOGPRN_EXIT(fmt, ##__VA_ARGS__, "") #define S3FS_PRN_CRIT(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_CRIT, fmt, ##__VA_ARGS__, "") #define S3FS_PRN_ERR(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_ERR, fmt, ##__VA_ARGS__, "") #define S3FS_PRN_WARN(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_WARN, fmt, ##__VA_ARGS__, "") #define S3FS_PRN_DBG(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_DBG, fmt, ##__VA_ARGS__, "") #define S3FS_PRN_INFO(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 0, fmt, ##__VA_ARGS__, "") #define S3FS_PRN_INFO0(fmt, ...) S3FS_LOG_INFO(fmt, __VA_ARGS__) #define S3FS_PRN_INFO1(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 1, fmt, ##__VA_ARGS__, "") #define S3FS_PRN_INFO2(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 2, fmt, ##__VA_ARGS__, "") #define S3FS_PRN_INFO3(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 3, fmt, ##__VA_ARGS__, "") #define S3FS_PRN_CURL(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_CRIT, 0, fmt, ##__VA_ARGS__, "") // // Typedef // struct header_nocase_cmp : public std::binary_function{ bool operator()(const std::string &strleft, const std::string &strright) const { return (strcasecmp(strleft.c_str(), strright.c_str()) < 0); } }; typedef std::map headers_t; // // Header "x-amz-meta-xattr" is for extended attributes. // This header is url encoded string which is json formatted. // x-amz-meta-xattr:urlencode({"xattr-1":"base64(value-1)","xattr-2":"base64(value-2)","xattr-3":"base64(value-3)"}) // typedef struct xattr_value{ unsigned char* pvalue; size_t length; explicit xattr_value(unsigned char* pval = NULL, size_t len = 0) : pvalue(pval), length(len) {} ~xattr_value() { delete[] pvalue; } }XATTRVAL, *PXATTRVAL; typedef std::map xattrs_t; // // Global variables // // TODO: namespace these extern bool foreground; extern bool nomultipart; extern bool pathrequeststyle; extern bool complement_stat; extern std::string program_name; extern std::string service_path; extern std::string host; extern std::string bucket; extern std::string mount_prefix; extern std::string endpoint; extern std::string cipher_suites; extern std::string instance_name; extern s3fs_log_level debug_level; extern const char* s3fs_log_nest[S3FS_LOG_NEST_MAX]; #endif // S3FS_COMMON_H_ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ s3fs-fuse-1.86/src/common_auth.cpp000066400000000000000000000045751361654130000171040ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Randy Rizun * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include #include #include #include #include #include "s3fs_auth.h" #include "string_util.h" using namespace std; //------------------------------------------------------------------- // Utility Function //------------------------------------------------------------------- string s3fs_get_content_md5(int fd) { unsigned char* md5hex; char* base64; string Signature; if(NULL == (md5hex = s3fs_md5hexsum(fd, 0, -1))){ return string(""); } if(NULL == (base64 = s3fs_base64(md5hex, get_md5_digest_length()))){ return string(""); // ENOMEM } delete[] md5hex; Signature = base64; delete[] base64; return Signature; } string s3fs_md5sum(int fd, off_t start, ssize_t size) { size_t digestlen = get_md5_digest_length(); unsigned char* md5hex; if(NULL == (md5hex = s3fs_md5hexsum(fd, start, size))){ return string(""); } std::string md5 = s3fs_hex(md5hex, digestlen); delete[] md5hex; return md5; } string s3fs_sha256sum(int fd, off_t start, ssize_t size) { size_t digestlen = get_sha256_digest_length(); char sha256[2 * digestlen + 1]; unsigned char* sha256hex; if(NULL == (sha256hex = s3fs_sha256hexsum(fd, start, size))){ return string(""); } memset(sha256, 0, 2 * digestlen + 1); for(size_t pos = 0; pos < digestlen; pos++){ snprintf(sha256 + 2 * pos, 3, "%02x", sha256hex[pos]); } delete[] sha256hex; return string(sha256); } /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ s3fs-fuse-1.86/src/curl.cpp000066400000000000000000004375171361654130000155420ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Randy Rizun * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #include "curl.h" #include "string_util.h" #include "s3fs.h" #include "s3fs_util.h" #include "s3fs_auth.h" #include "addhead.h" #include "fdcache.h" #include "psemaphore.h" using namespace std; static const std::string empty_payload_hash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; //------------------------------------------------------------------- // Utilities //------------------------------------------------------------------- // [TODO] // This function uses a temporary file, but it should not. // To avoid the temporary file, this function should be implemented in each auth file (openssl, nss, gnutls).
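//
// For reference, an in-memory version (a hypothetical sketch, not the code
// used here) could digest the buffer directly, for example with OpenSSL:
//
//     unsigned char md[MD5_DIGEST_LENGTH];
//     MD5(reinterpret_cast<const unsigned char*>(pstr), length, md);
//     char* b64 = s3fs_base64(md, MD5_DIGEST_LENGTH);  // same base64 helper used elsewhere in this codebase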
// static bool make_md5_from_binary(const char* pstr, size_t length, string& md5) { if(!pstr || '\0' == pstr[0]){ S3FS_PRN_ERR("Parameter is wrong."); return false; } FILE* fp; if(NULL == (fp = tmpfile())){ S3FS_PRN_ERR("Could not make tmpfile."); return false; } if(length != fwrite(pstr, sizeof(char), length, fp)){ S3FS_PRN_ERR("Failed to write tmpfile."); fclose(fp); return false; } int fd; if(0 != fflush(fp) || 0 != fseek(fp, 0L, SEEK_SET) || -1 == (fd = fileno(fp))){ S3FS_PRN_ERR("Failed to make MD5."); fclose(fp); return false; } // base64 md5 md5 = s3fs_get_content_md5(fd); if(0 == md5.length()){ S3FS_PRN_ERR("Failed to make MD5."); fclose(fp); return false; } fclose(fp); return true; } static string url_to_host(const std::string &url) { S3FS_PRN_INFO3("url is %s", url.c_str()); static const string http = "http://"; static const string https = "https://"; std::string hostname; if (url.compare(0, http.size(), http) == 0) { hostname = url.substr(http.size()); } else if (url.compare(0, https.size(), https) == 0) { hostname = url.substr(https.size()); } else { S3FS_PRN_EXIT("url does not begin with http:// or https://"); abort(); } size_t idx; if ((idx = hostname.find('/')) != string::npos) { return hostname.substr(0, idx); } else { return hostname; } } static string get_bucket_host() { if(!pathrequeststyle){ return bucket + "." + url_to_host(host); } return url_to_host(host); } // compare ETag ignoring quotes static bool etag_equals(std::string s1, std::string s2) { if(s1.length() > 1 && s1[0] == '\"' && s1[s1.length() - 1] == '\"'){ s1 = s1.substr(1, s1.size() - 2); } if(s2.length() > 1 && s2[0] == '\"' && s2[s2.length() - 1] == '\"'){ s2 = s2.substr(1, s2.size() - 2); } return s1 == s2; } #if 0 // noused static string tolower_header_name(const char* head) { string::size_type pos; string name = head; string value(""); if(string::npos != (pos = name.find(':'))){ value= name.substr(pos); name = name.substr(0, pos); } name = lower(name); name += value; return name; } #endif //------------------------------------------------------------------- // Class BodyData //------------------------------------------------------------------- static const int BODYDATA_RESIZE_APPEND_MIN = 1024; static const int BODYDATA_RESIZE_APPEND_MID = 1024 * 1024; static const int BODYDATA_RESIZE_APPEND_MAX = 10 * 1024 * 1024; static size_t adjust_block(size_t bytes, size_t block) { return ((bytes / block) + ((bytes % block) ? 1 : 0)) * block; } bool BodyData::Resize(size_t addbytes) { if(IsSafeSize(addbytes)){ return true; } // New size size_t need_size = adjust_block((lastpos + addbytes + 1) - bufsize, sizeof(off_t)); if(BODYDATA_RESIZE_APPEND_MAX < bufsize){ need_size = (BODYDATA_RESIZE_APPEND_MAX < need_size ? need_size : BODYDATA_RESIZE_APPEND_MAX); }else if(BODYDATA_RESIZE_APPEND_MID < bufsize){ need_size = (BODYDATA_RESIZE_APPEND_MID < need_size ? need_size : BODYDATA_RESIZE_APPEND_MID); }else if(BODYDATA_RESIZE_APPEND_MIN < bufsize){ need_size = ((bufsize * 2) < need_size ? need_size : (bufsize * 2)); }else{ need_size = (BODYDATA_RESIZE_APPEND_MIN < need_size ? 
need_size : BODYDATA_RESIZE_APPEND_MIN); } // realloc char* newtext; if(NULL == (newtext = (char*)realloc(text, (bufsize + need_size)))){ S3FS_PRN_CRIT("not enough memory (realloc returned NULL)"); free(text); text = NULL; return false; } text = newtext; bufsize += need_size; return true; } void BodyData::Clear() { if(text){ free(text); text = NULL; } lastpos = 0; bufsize = 0; } bool BodyData::Append(void* ptr, size_t bytes) { if(!ptr){ return false; } if(0 == bytes){ return true; } if(!Resize(bytes)){ return false; } memcpy(&text[lastpos], ptr, bytes); lastpos += bytes; text[lastpos] = '\0'; return true; } const char* BodyData::str() const { if(!text){ static const char* strnull = ""; return strnull; } return text; } //------------------------------------------------------------------- // Class CurlHandlerPool //------------------------------------------------------------------- bool CurlHandlerPool::Init() { pthread_mutexattr_t attr; pthread_mutexattr_init(&attr); #if S3FS_PTHREAD_ERRORCHECK pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); #endif if (0 != pthread_mutex_init(&mLock, &attr)) { S3FS_PRN_ERR("Init curl handlers lock failed"); return false; } for(int cnt = 0; cnt < mMaxHandlers; ++cnt){ CURL* hCurl = curl_easy_init(); if(!hCurl){ S3FS_PRN_ERR("Init curl handlers pool failed"); Destroy(); return false; } mPool.push_back(hCurl); } return true; } bool CurlHandlerPool::Destroy() { while(!mPool.empty()){ CURL* hCurl = mPool.back(); mPool.pop_back(); if(hCurl){ curl_easy_cleanup(hCurl); } } if (0 != pthread_mutex_destroy(&mLock)) { S3FS_PRN_ERR("Destroy curl handlers lock failed"); return false; } return true; } CURL* CurlHandlerPool::GetHandler(bool only_pool) { CURL* hCurl = NULL; { AutoLock lock(&mLock); if(!mPool.empty()){ hCurl = mPool.back(); mPool.pop_back(); S3FS_PRN_DBG("Get handler from pool: rest = %d", static_cast(mPool.size())); } } if(only_pool){ return hCurl; } if(!hCurl){ S3FS_PRN_INFO("Pool empty: force to create new handler"); hCurl = curl_easy_init(); } return hCurl; } void CurlHandlerPool::ReturnHandler(CURL* hCurl, bool restore_pool) { if(!hCurl){ return; } if(restore_pool){ AutoLock lock(&mLock); S3FS_PRN_DBG("Return handler to pool"); mPool.push_back(hCurl); while(mMaxHandlers <= static_cast(mPool.size())){ CURL* hOldCurl = mPool.front(); mPool.pop_front(); if(hOldCurl){ S3FS_PRN_INFO("Pool full: destroy the oldest handler"); curl_easy_cleanup(hOldCurl); } } }else{ S3FS_PRN_INFO("Pool full: destroy the handler"); curl_easy_cleanup(hCurl); } } //------------------------------------------------------------------- // Class S3fsCurl //------------------------------------------------------------------- static const int MULTIPART_SIZE = 10 * 1024 * 1024; // constant must be at least 512 MB to copy the maximum 5 TB object size // TODO: scale part size with object size static const int MAX_MULTI_COPY_SOURCE_SIZE = 512 * 1024 * 1024; static const int IAM_EXPIRE_MERGIN = 20 * 60; // update timing static const std::string ECS_IAM_ENV_VAR = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"; static const std::string IAMCRED_ACCESSKEYID = "AccessKeyId"; static const std::string IAMCRED_SECRETACCESSKEY = "SecretAccessKey"; static const std::string IAMCRED_ROLEARN = "RoleArn"; static const long S3FSCURL_RESPONSECODE_NOTSET = -1; static const long S3FSCURL_RESPONSECODE_FATAL_ERROR = -2; static const int S3FSCURL_PERFORM_RESULT_NOTSET = 1; // [NOTICE] // This symbol is for libcurl under 7.23.0 #ifndef CURLSHE_NOT_BUILT_IN #define CURLSHE_NOT_BUILT_IN 5 #endif pthread_mutex_t 
S3fsCurl::curl_handles_lock; pthread_mutex_t S3fsCurl::curl_share_lock[SHARE_MUTEX_MAX]; bool S3fsCurl::is_initglobal_done = false; CurlHandlerPool* S3fsCurl::sCurlPool = NULL; int S3fsCurl::sCurlPoolSize = 32; CURLSH* S3fsCurl::hCurlShare = NULL; bool S3fsCurl::is_cert_check = true; // default bool S3fsCurl::is_dns_cache = true; // default bool S3fsCurl::is_ssl_session_cache= true; // default long S3fsCurl::connect_timeout = 300; // default time_t S3fsCurl::readwrite_timeout = 120; // default int S3fsCurl::retries = 5; // default bool S3fsCurl::is_public_bucket = false; acl_t S3fsCurl::default_acl = PRIVATE; storage_class_t S3fsCurl::storage_class = STANDARD; sseckeylist_t S3fsCurl::sseckeys; std::string S3fsCurl::ssekmsid; sse_type_t S3fsCurl::ssetype = SSE_DISABLE; bool S3fsCurl::is_content_md5 = false; bool S3fsCurl::is_verbose = false; string S3fsCurl::AWSAccessKeyId; string S3fsCurl::AWSSecretAccessKey; string S3fsCurl::AWSAccessToken; time_t S3fsCurl::AWSAccessTokenExpire= 0; bool S3fsCurl::is_ecs = false; bool S3fsCurl::is_ibm_iam_auth = false; string S3fsCurl::IAM_cred_url = "http://169.254.169.254/latest/meta-data/iam/security-credentials/"; size_t S3fsCurl::IAM_field_count = 4; string S3fsCurl::IAM_token_field = "Token"; string S3fsCurl::IAM_expiry_field = "Expiration"; string S3fsCurl::IAM_role; long S3fsCurl::ssl_verify_hostname = 1; // default(original code...) curltime_t S3fsCurl::curl_times; curlprogress_t S3fsCurl::curl_progress; string S3fsCurl::curl_ca_bundle; mimes_t S3fsCurl::mimeTypes; string S3fsCurl::userAgent; int S3fsCurl::max_parallel_cnt = 5; // default int S3fsCurl::max_multireq = 20; // default off_t S3fsCurl::multipart_size = MULTIPART_SIZE; // default bool S3fsCurl::is_sigv4 = true; // default bool S3fsCurl::is_ua = true; // default bool S3fsCurl::is_use_session_token = false; // default bool S3fsCurl::requester_pays = false; // default //------------------------------------------------------------------- // Class methods for S3fsCurl //------------------------------------------------------------------- bool S3fsCurl::InitS3fsCurl(const char* MimeFile) { pthread_mutexattr_t attr; pthread_mutexattr_init(&attr); #if S3FS_PTHREAD_ERRORCHECK pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); #endif if(0 != pthread_mutex_init(&S3fsCurl::curl_handles_lock, &attr)){ return false; } if(0 != pthread_mutex_init(&S3fsCurl::curl_share_lock[SHARE_MUTEX_DNS], &attr)){ return false; } if(0 != pthread_mutex_init(&S3fsCurl::curl_share_lock[SHARE_MUTEX_SSL_SESSION], &attr)){ return false; } if(!S3fsCurl::InitMimeType(MimeFile)){ return false; } if(!S3fsCurl::InitGlobalCurl()){ return false; } if(!S3fsCurl::InitShareCurl()){ return false; } if(!S3fsCurl::InitCryptMutex()){ return false; } // [NOTE] // sCurlPoolSize must be over parallel(or multireq) count. 
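// Otherwise a burst of parallel requests could drain the pool and force
// GetHandler() to create, and later destroy, extra curl handles outside it.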
// if(sCurlPoolSize < std::max(GetMaxParallelCount(), GetMaxMultiRequest())){ sCurlPoolSize = std::max(GetMaxParallelCount(), GetMaxMultiRequest()); } sCurlPool = new CurlHandlerPool(sCurlPoolSize); if (!sCurlPool->Init()) { return false; } return true; } bool S3fsCurl::DestroyS3fsCurl() { bool result = true; if(!S3fsCurl::DestroyCryptMutex()){ result = false; } if(!sCurlPool->Destroy()){ result = false; } delete sCurlPool; sCurlPool = NULL; if(!S3fsCurl::DestroyShareCurl()){ result = false; } if(!S3fsCurl::DestroyGlobalCurl()){ result = false; } if(0 != pthread_mutex_destroy(&S3fsCurl::curl_share_lock[SHARE_MUTEX_DNS])){ result = false; } if(0 != pthread_mutex_destroy(&S3fsCurl::curl_share_lock[SHARE_MUTEX_SSL_SESSION])){ result = false; } if(0 != pthread_mutex_destroy(&S3fsCurl::curl_handles_lock)){ result = false; } return result; } bool S3fsCurl::InitGlobalCurl() { if(S3fsCurl::is_initglobal_done){ return false; } if(CURLE_OK != curl_global_init(CURL_GLOBAL_ALL)){ S3FS_PRN_ERR("init_curl_global_all returns error."); return false; } S3fsCurl::is_initglobal_done = true; return true; } bool S3fsCurl::DestroyGlobalCurl() { if(!S3fsCurl::is_initglobal_done){ return false; } curl_global_cleanup(); S3fsCurl::is_initglobal_done = false; return true; } bool S3fsCurl::InitShareCurl() { CURLSHcode nSHCode; if(!S3fsCurl::is_dns_cache && !S3fsCurl::is_ssl_session_cache){ S3FS_PRN_INFO("Curl does not share DNS data."); return true; } if(S3fsCurl::hCurlShare){ S3FS_PRN_WARN("already initiated."); return false; } if(NULL == (S3fsCurl::hCurlShare = curl_share_init())){ S3FS_PRN_ERR("curl_share_init failed"); return false; } if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_LOCKFUNC, S3fsCurl::LockCurlShare))){ S3FS_PRN_ERR("curl_share_setopt(LOCKFUNC) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); return false; } if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_UNLOCKFUNC, S3fsCurl::UnlockCurlShare))){ S3FS_PRN_ERR("curl_share_setopt(UNLOCKFUNC) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); return false; } if(S3fsCurl::is_dns_cache){ nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS); if(CURLSHE_OK != nSHCode && CURLSHE_BAD_OPTION != nSHCode && CURLSHE_NOT_BUILT_IN != nSHCode){ S3FS_PRN_ERR("curl_share_setopt(DNS) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); return false; }else if(CURLSHE_BAD_OPTION == nSHCode || CURLSHE_NOT_BUILT_IN == nSHCode){ S3FS_PRN_WARN("curl_share_setopt(DNS) returns %d(%s), but continue without shared dns data.", nSHCode, curl_share_strerror(nSHCode)); } } if(S3fsCurl::is_ssl_session_cache){ nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_SHARE, CURL_LOCK_DATA_SSL_SESSION); if(CURLSHE_OK != nSHCode && CURLSHE_BAD_OPTION != nSHCode && CURLSHE_NOT_BUILT_IN != nSHCode){ S3FS_PRN_ERR("curl_share_setopt(SSL SESSION) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); return false; }else if(CURLSHE_BAD_OPTION == nSHCode || CURLSHE_NOT_BUILT_IN == nSHCode){ S3FS_PRN_WARN("curl_share_setopt(SSL SESSION) returns %d(%s), but continue without shared ssl session data.", nSHCode, curl_share_strerror(nSHCode)); } } if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_USERDATA, (void*)&S3fsCurl::curl_share_lock[0]))){ S3FS_PRN_ERR("curl_share_setopt(USERDATA) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); return false; } return true; } bool S3fsCurl::DestroyShareCurl() { if(!S3fsCurl::hCurlShare){ 
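// No share handle exists here: that is normal when neither DNS nor SSL
// session sharing was enabled, and worth a warning otherwise.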
if(!S3fsCurl::is_dns_cache && !S3fsCurl::is_ssl_session_cache){ return true; } S3FS_PRN_WARN("already destroy share curl."); return false; } if(CURLSHE_OK != curl_share_cleanup(S3fsCurl::hCurlShare)){ return false; } S3fsCurl::hCurlShare = NULL; return true; } void S3fsCurl::LockCurlShare(CURL* handle, curl_lock_data nLockData, curl_lock_access laccess, void* useptr) { if(!hCurlShare){ return; } pthread_mutex_t* lockmutex = static_cast(useptr); if(CURL_LOCK_DATA_DNS == nLockData){ pthread_mutex_lock(&lockmutex[SHARE_MUTEX_DNS]); }else if(CURL_LOCK_DATA_SSL_SESSION == nLockData){ pthread_mutex_lock(&lockmutex[SHARE_MUTEX_SSL_SESSION]); } } void S3fsCurl::UnlockCurlShare(CURL* handle, curl_lock_data nLockData, void* useptr) { if(!hCurlShare){ return; } pthread_mutex_t* lockmutex = static_cast(useptr); if(CURL_LOCK_DATA_DNS == nLockData){ pthread_mutex_unlock(&lockmutex[SHARE_MUTEX_DNS]); }else if(CURL_LOCK_DATA_SSL_SESSION == nLockData){ pthread_mutex_unlock(&lockmutex[SHARE_MUTEX_SSL_SESSION]); } } bool S3fsCurl::InitCryptMutex() { return s3fs_init_crypt_mutex(); } bool S3fsCurl::DestroyCryptMutex() { return s3fs_destroy_crypt_mutex(); } // homegrown timeout mechanism int S3fsCurl::CurlProgress(void *clientp, double dltotal, double dlnow, double ultotal, double ulnow) { CURL* curl = static_cast(clientp); time_t now = time(0); progress_t p(dlnow, ulnow); AutoLock lock(&S3fsCurl::curl_handles_lock); // any progress? if(p != S3fsCurl::curl_progress[curl]){ // yes! S3fsCurl::curl_times[curl] = now; S3fsCurl::curl_progress[curl] = p; }else{ // timeout? if(now - S3fsCurl::curl_times[curl] > readwrite_timeout){ S3FS_PRN_ERR("timeout now: %lld, curl_times[curl]: %lld, readwrite_timeout: %lld", static_cast(now), static_cast((S3fsCurl::curl_times[curl])), static_cast(readwrite_timeout)); return CURLE_ABORTED_BY_CALLBACK; } } return 0; } bool S3fsCurl::InitMimeType(const char* MimeFile) { if(!MimeFile){ MimeFile = "/etc/mime.types"; // default } string line; ifstream MT(MimeFile); if(MT.good()){ while(getline(MT, line)){ if(line[0]=='#'){ continue; } if(line.empty()){ continue; } istringstream tmp(line); string mimeType; tmp >> mimeType; while(tmp){ string ext; tmp >> ext; if(ext.empty()){ continue; } S3fsCurl::mimeTypes[ext] = mimeType; } } } return true; } void S3fsCurl::InitUserAgent() { if(S3fsCurl::userAgent.empty()){ S3fsCurl::userAgent = "s3fs/"; S3fsCurl::userAgent += VERSION; S3fsCurl::userAgent += " (commit hash "; S3fsCurl::userAgent += COMMIT_HASH_VAL; S3fsCurl::userAgent += "; "; S3fsCurl::userAgent += s3fs_crypt_lib_name(); S3fsCurl::userAgent += ")"; S3fsCurl::userAgent += instance_name; } } // // @param s e.g., "index.html" // @return e.g., "text/html" // string S3fsCurl::LookupMimeType(const string& name) { if(!name.empty() && name[name.size() - 1] == '/'){ return "application/x-directory"; } string result("application/octet-stream"); string::size_type last_pos = name.find_last_of('.'); string::size_type first_pos = name.find_first_of('.'); string prefix, ext, ext2; // No dots in name, just return if(last_pos == string::npos){ return result; } // extract the last extension ext = name.substr(1+last_pos, string::npos); if (last_pos != string::npos) { // one dot was found, now look for another if (first_pos != string::npos && first_pos < last_pos) { prefix = name.substr(0, last_pos); // Now get the second to last file extension string::size_type next_pos = prefix.find_last_of('.'); if (next_pos != string::npos) { ext2 = prefix.substr(1+next_pos, string::npos); } } } // if we get here, 
then we have an extension (ext) mimes_t::const_iterator iter = S3fsCurl::mimeTypes.find(ext); // if the last extension matches a mimeType, then return // that mime type if (iter != S3fsCurl::mimeTypes.end()) { result = (*iter).second; return result; } // return with the default result if there isn't a second extension if(first_pos == last_pos){ return result; } // Didn't find a mime-type for the first extension // Look for second extension in mimeTypes, return if found iter = S3fsCurl::mimeTypes.find(ext2); if (iter != S3fsCurl::mimeTypes.end()) { result = (*iter).second; return result; } // neither the last extension nor the second-to-last extension // matched a mimeType, return the default mime type return result; } bool S3fsCurl::LocateBundle() { // See if environment variable CURL_CA_BUNDLE is set // if so, check it, if it is a good path, then set the // curl_ca_bundle variable to it if(S3fsCurl::curl_ca_bundle.empty()){ char* CURL_CA_BUNDLE = getenv("CURL_CA_BUNDLE"); if(CURL_CA_BUNDLE != NULL) { // check for existence and readability of the file ifstream BF(CURL_CA_BUNDLE); if(!BF.good()){ S3FS_PRN_ERR("%s: file specified by CURL_CA_BUNDLE environment variable is not readable", program_name.c_str()); return false; } BF.close(); S3fsCurl::curl_ca_bundle.assign(CURL_CA_BUNDLE); return true; } }else{ // Already set ca bundle variable return true; } // not set via environment variable, look in likely locations /////////////////////////////////////////// // following comment from curl's (7.21.2) acinclude.m4 file /////////////////////////////////////////// // dnl CURL_CHECK_CA_BUNDLE // dnl ------------------------------------------------- // dnl Check if a default ca-bundle should be used // dnl // dnl regarding the paths this will scan: // dnl /etc/ssl/certs/ca-certificates.crt Debian systems // dnl /etc/pki/tls/certs/ca-bundle.crt Redhat and Mandriva // dnl /usr/share/ssl/certs/ca-bundle.crt old(er) Redhat // dnl /usr/local/share/certs/ca-root.crt FreeBSD // dnl /etc/ssl/cert.pem OpenBSD // dnl /etc/ssl/certs/ (ca path) SUSE /////////////////////////////////////////// // Within CURL the above path should have been checked // according to the OS. Thus, although we do not need // to check files here, we will only examine some files. 
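// A user can also point s3fs at a specific bundle by exporting CURL_CA_BUNDLE
// (checked above) before mounting, for example (bucket and mountpoint names
// are hypothetical):
//
//     CURL_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt s3fs mybucket /mnt/s3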
//
    ifstream BF("/etc/pki/tls/certs/ca-bundle.crt");
    if(BF.good()){
        BF.close();
        S3fsCurl::curl_ca_bundle.assign("/etc/pki/tls/certs/ca-bundle.crt");
    }else{
        BF.open("/etc/ssl/certs/ca-certificates.crt");
        if(BF.good()){
            BF.close();
            S3fsCurl::curl_ca_bundle.assign("/etc/ssl/certs/ca-certificates.crt");
        }else{
            BF.open("/usr/share/ssl/certs/ca-bundle.crt");
            if(BF.good()){
                BF.close();
                S3fsCurl::curl_ca_bundle.assign("/usr/share/ssl/certs/ca-bundle.crt");
            }else{
                BF.open("/usr/local/share/certs/ca-root.crt");
                if(BF.good()){
                    BF.close();
                    // assign the same path that was just probed successfully
                    S3fsCurl::curl_ca_bundle.assign("/usr/local/share/certs/ca-root.crt");
                }else{
                    S3FS_PRN_ERR("%s: /.../ca-bundle.crt is not readable", program_name.c_str());
                    return false;
                }
            }
        }
    }
    return true;
}

size_t S3fsCurl::WriteMemoryCallback(void* ptr, size_t blockSize, size_t numBlocks, void* data)
{
    BodyData* body = static_cast<BodyData*>(data);

    if(!body->Append(ptr, blockSize, numBlocks)){
        S3FS_PRN_CRIT("BodyData.Append() returned false.");
        S3FS_FUSE_EXIT();
        return -1;
    }
    return (blockSize * numBlocks);
}

size_t S3fsCurl::ReadCallback(void* ptr, size_t size, size_t nmemb, void* userp)
{
    S3fsCurl* pCurl = reinterpret_cast<S3fsCurl*>(userp);

    if(1 > (size * nmemb)){
        return 0;
    }
    if(0 >= pCurl->postdata_remaining){
        return 0;
    }
    int copysize = std::min((int)(size * nmemb), pCurl->postdata_remaining);
    memcpy(ptr, pCurl->postdata, copysize);

    pCurl->postdata_remaining = (pCurl->postdata_remaining > copysize ? (pCurl->postdata_remaining - copysize) : 0);
    pCurl->postdata += static_cast<size_t>(copysize);

    return copysize;
}

size_t S3fsCurl::HeaderCallback(void* data, size_t blockSize, size_t numBlocks, void* userPtr)
{
    headers_t* headers = reinterpret_cast<headers_t*>(userPtr);
    string header(reinterpret_cast<char*>(data), blockSize * numBlocks);
    string key;
    istringstream ss(header);

    if(getline(ss, key, ':')){
        // Force to lower, only "x-amz"
        string lkey = key;
        transform(lkey.begin(), lkey.end(), lkey.begin(), static_cast<int (*)(int)>(std::tolower));
        if(lkey.compare(0, 5, "x-amz") == 0){
            key = lkey;
        }
        string value;
        getline(ss, value);
        (*headers)[key] = trim(value);
    }
    return blockSize * numBlocks;
}

size_t S3fsCurl::UploadReadCallback(void* ptr, size_t size, size_t nmemb, void* userp)
{
    S3fsCurl* pCurl = reinterpret_cast<S3fsCurl*>(userp);

    if(1 > (size * nmemb)){
        return 0;
    }
    if(-1 == pCurl->partdata.fd || 0 >= pCurl->partdata.size){
        return 0;
    }
    // read size
    ssize_t copysize = (size * nmemb) < (size_t)pCurl->partdata.size ? (size * nmemb) : (size_t)pCurl->partdata.size;
    ssize_t readbytes;
    ssize_t totalread;
    // read and set
    for(totalread = 0, readbytes = 0; totalread < copysize; totalread += readbytes){
        readbytes = pread(pCurl->partdata.fd, &((char*)ptr)[totalread], (copysize - totalread), pCurl->partdata.startpos + totalread);
        if(0 == readbytes){
            // eof
            break;
        }else if(-1 == readbytes){
            // error
            S3FS_PRN_ERR("read file error(%d).", errno);
            return 0;
        }
    }
    pCurl->partdata.startpos += totalread;
    pCurl->partdata.size     -= totalread;

    return totalread;
}

size_t S3fsCurl::DownloadWriteCallback(void* ptr, size_t size, size_t nmemb, void* userp)
{
    S3fsCurl* pCurl = reinterpret_cast<S3fsCurl*>(userp);

    if(1 > (size * nmemb)){
        return 0;
    }
    if(-1 == pCurl->partdata.fd || 0 >= pCurl->partdata.size){
        return 0;
    }

    // write size
    ssize_t copysize = (size * nmemb) < (size_t)pCurl->partdata.size ?
(size * nmemb) : (size_t)pCurl->partdata.size; ssize_t writebytes; ssize_t totalwrite; // write for(totalwrite = 0, writebytes = 0; totalwrite < copysize; totalwrite += writebytes){ writebytes = pwrite(pCurl->partdata.fd, &((char*)ptr)[totalwrite], (copysize - totalwrite), pCurl->partdata.startpos + totalwrite); if(0 == writebytes){ // eof? break; }else if(-1 == writebytes){ // error S3FS_PRN_ERR("write file error(%d).", errno); return 0; } } pCurl->partdata.startpos += totalwrite; pCurl->partdata.size -= totalwrite; return totalwrite; } bool S3fsCurl::SetCheckCertificate(bool isCertCheck) { bool old = S3fsCurl::is_cert_check; S3fsCurl::is_cert_check = isCertCheck; return old; } bool S3fsCurl::SetDnsCache(bool isCache) { bool old = S3fsCurl::is_dns_cache; S3fsCurl::is_dns_cache = isCache; return old; } bool S3fsCurl::SetSslSessionCache(bool isCache) { bool old = S3fsCurl::is_ssl_session_cache; S3fsCurl::is_ssl_session_cache = isCache; return old; } long S3fsCurl::SetConnectTimeout(long timeout) { long old = S3fsCurl::connect_timeout; S3fsCurl::connect_timeout = timeout; return old; } time_t S3fsCurl::SetReadwriteTimeout(time_t timeout) { time_t old = S3fsCurl::readwrite_timeout; S3fsCurl::readwrite_timeout = timeout; return old; } int S3fsCurl::SetRetries(int count) { int old = S3fsCurl::retries; S3fsCurl::retries = count; return old; } bool S3fsCurl::SetPublicBucket(bool flag) { bool old = S3fsCurl::is_public_bucket; S3fsCurl::is_public_bucket = flag; return old; } acl_t S3fsCurl::SetDefaultAcl(acl_t acl) { acl_t old = S3fsCurl::default_acl; S3fsCurl::default_acl = acl; return old; } acl_t S3fsCurl::GetDefaultAcl() { return S3fsCurl::default_acl; } storage_class_t S3fsCurl::SetStorageClass(storage_class_t storage_class) { storage_class_t old = S3fsCurl::storage_class; S3fsCurl::storage_class = storage_class; return old; } bool S3fsCurl::PushbackSseKeys(string& onekey) { onekey = trim(onekey); if(onekey.empty()){ return false; } if('#' == onekey[0]){ return false; } // make base64 if the key is short enough, otherwise assume it is already so string base64_key; string raw_key; if(onekey.length() > 256 / 8){ char* p_key; size_t keylength; if(NULL != (p_key = (char *)s3fs_decode64(onekey.c_str(), &keylength))) { raw_key = string(p_key, keylength); base64_key = onekey; delete[] p_key; } else { S3FS_PRN_ERR("Failed to convert base64 to SSE-C key %s", onekey.c_str()); return false; } } else { char* pbase64_key; if(NULL != (pbase64_key = s3fs_base64((unsigned char*)onekey.c_str(), onekey.length()))) { raw_key = onekey; base64_key = pbase64_key; delete[] pbase64_key; } else { S3FS_PRN_ERR("Failed to convert base64 from SSE-C key %s", onekey.c_str()); return false; } } // make MD5 string strMd5; if(!make_md5_from_binary(raw_key.c_str(), raw_key.length(), strMd5)){ S3FS_PRN_ERR("Could not make MD5 from SSE-C keys(%s).", raw_key.c_str()); return false; } // mapped MD5 = SSE Key sseckeymap_t md5map; md5map.clear(); md5map[strMd5] = base64_key; S3fsCurl::sseckeys.push_back(md5map); return true; } sse_type_t S3fsCurl::SetSseType(sse_type_t type) { sse_type_t old = S3fsCurl::ssetype; S3fsCurl::ssetype = type; return old; } bool S3fsCurl::SetSseCKeys(const char* filepath) { if(!filepath){ S3FS_PRN_ERR("SSE-C keys filepath is empty."); return false; } struct stat st; if(0 != stat(filepath, &st)){ S3FS_PRN_ERR("could not open use_sse keys file(%s).", filepath); return false; } if(st.st_mode & (S_IXUSR | S_IRWXG | S_IRWXO)){ S3FS_PRN_ERR("use_sse keys file %s should be 0600 permissions.", filepath); return 
false; } S3fsCurl::sseckeys.clear(); ifstream ssefs(filepath); if(!ssefs.good()){ S3FS_PRN_ERR("Could not open SSE-C keys file(%s).", filepath); return false; } string line; while(getline(ssefs, line)){ S3fsCurl::PushbackSseKeys(line); } if(S3fsCurl::sseckeys.empty()){ S3FS_PRN_ERR("There is no SSE Key in file(%s).", filepath); return false; } return true; } bool S3fsCurl::SetSseKmsid(const char* kmsid) { if(!kmsid || '\0' == kmsid[0]){ S3FS_PRN_ERR("SSE-KMS kms id is empty."); return false; } S3fsCurl::ssekmsid = kmsid; return true; } // [NOTE] // Because SSE is set by some options and environment, // this function check the integrity of the SSE data finally. bool S3fsCurl::FinalCheckSse() { if(SSE_DISABLE == S3fsCurl::ssetype){ S3fsCurl::ssekmsid.erase(); }else if(SSE_S3 == S3fsCurl::ssetype){ S3fsCurl::ssekmsid.erase(); }else if(SSE_C == S3fsCurl::ssetype){ if(S3fsCurl::sseckeys.empty()){ S3FS_PRN_ERR("sse type is SSE-C, but there is no custom key."); return false; } S3fsCurl::ssekmsid.erase(); }else if(SSE_KMS == S3fsCurl::ssetype){ if(S3fsCurl::ssekmsid.empty()){ S3FS_PRN_ERR("sse type is SSE-KMS, but there is no specified kms id."); return false; } if(!S3fsCurl::IsSignatureV4()){ S3FS_PRN_ERR("sse type is SSE-KMS, but signature type is not v4. SSE-KMS require signature v4."); return false; } }else{ S3FS_PRN_ERR("sse type is unknown(%d).", S3fsCurl::ssetype); return false; } return true; } bool S3fsCurl::LoadEnvSseCKeys() { char* envkeys = getenv("AWSSSECKEYS"); if(NULL == envkeys){ // nothing to do return true; } S3fsCurl::sseckeys.clear(); istringstream fullkeys(envkeys); string onekey; while(getline(fullkeys, onekey, ':')){ S3fsCurl::PushbackSseKeys(onekey); } if(S3fsCurl::sseckeys.empty()){ S3FS_PRN_ERR("There is no SSE Key in environment(AWSSSECKEYS=%s).", envkeys); return false; } return true; } bool S3fsCurl::LoadEnvSseKmsid() { char* envkmsid = getenv("AWSSSEKMSID"); if(NULL == envkmsid){ // nothing to do return true; } return S3fsCurl::SetSseKmsid(envkmsid); } // // If md5 is empty, returns first(current) sse key. 
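// [NOTE] rough sketch of the container that the accessors below walk:
// S3fsCurl::sseckeys is a list of single-entry maps built by
// PushbackSseKeys(), each of the form
//     { MD5(raw key) => base64(raw key) }
// kept in the order the keys were loaded, so the first entry is the
// current key.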
//
bool S3fsCurl::GetSseKey(string& md5, string& ssekey)
{
    for(sseckeylist_t::const_iterator iter = S3fsCurl::sseckeys.begin(); iter != S3fsCurl::sseckeys.end(); ++iter){
        if(0 == md5.length() || md5 == (*iter).begin()->first){
            md5    = iter->begin()->first;
            ssekey = iter->begin()->second;
            return true;
        }
    }
    return false;
}

bool S3fsCurl::GetSseKeyMd5(int pos, string& md5)
{
    if(pos < 0){
        return false;
    }
    if(S3fsCurl::sseckeys.size() <= static_cast<size_t>(pos)){
        return false;
    }
    int cnt = 0;
    for(sseckeylist_t::const_iterator iter = S3fsCurl::sseckeys.begin(); iter != S3fsCurl::sseckeys.end(); ++iter, ++cnt){
        if(pos == cnt){
            md5 = iter->begin()->first;
            return true;
        }
    }
    return false;
}

int S3fsCurl::GetSseKeyCount()
{
    return S3fsCurl::sseckeys.size();
}

bool S3fsCurl::SetContentMd5(bool flag)
{
    bool old = S3fsCurl::is_content_md5;
    S3fsCurl::is_content_md5 = flag;
    return old;
}

bool S3fsCurl::SetVerbose(bool flag)
{
    bool old = S3fsCurl::is_verbose;
    S3fsCurl::is_verbose = flag;
    return old;
}

bool S3fsCurl::SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey)
{
    if((!S3fsCurl::is_ibm_iam_auth && (!AccessKeyId || '\0' == AccessKeyId[0])) || !SecretAccessKey || '\0' == SecretAccessKey[0]){
        return false;
    }
    AWSAccessKeyId     = AccessKeyId;
    AWSSecretAccessKey = SecretAccessKey;
    return true;
}

bool S3fsCurl::SetAccessKeyWithSessionToken(const char* AccessKeyId, const char* SecretAccessKey, const char* SessionToken)
{
    bool access_key_is_empty        = !AccessKeyId || '\0' == AccessKeyId[0];
    bool secret_access_key_is_empty = !SecretAccessKey || '\0' == SecretAccessKey[0];
    bool session_token_is_empty     = !SessionToken || '\0' == SessionToken[0];
    if((!S3fsCurl::is_ibm_iam_auth && access_key_is_empty) || secret_access_key_is_empty || session_token_is_empty){
        return false;
    }
    AWSAccessKeyId     = AccessKeyId;
    AWSSecretAccessKey = SecretAccessKey;
    AWSAccessToken     = SessionToken;
    S3fsCurl::is_use_session_token = true;
    return true;
}

long S3fsCurl::SetSslVerifyHostname(long value)
{
    if(0 != value && 1 != value){
        return -1;
    }
    long old = S3fsCurl::ssl_verify_hostname;
    S3fsCurl::ssl_verify_hostname = value;
    return old;
}

bool S3fsCurl::SetIsIBMIAMAuth(bool flag)
{
    bool old = S3fsCurl::is_ibm_iam_auth;
    S3fsCurl::is_ibm_iam_auth = flag;
    return old;
}

bool S3fsCurl::SetIsECS(bool flag)
{
    bool old = S3fsCurl::is_ecs;
    S3fsCurl::is_ecs = flag;
    return old;
}

string S3fsCurl::SetIAMRole(const char* role)
{
    string old = S3fsCurl::IAM_role;
    S3fsCurl::IAM_role = role ? role : "";
    return old;
}

size_t S3fsCurl::SetIAMFieldCount(size_t field_count)
{
    size_t old = S3fsCurl::IAM_field_count;
    S3fsCurl::IAM_field_count = field_count;
    return old;
}

string S3fsCurl::SetIAMCredentialsURL(const char* url)
{
    string old = S3fsCurl::IAM_cred_url;
    S3fsCurl::IAM_cred_url = url ? url : "";
    return old;
}

string S3fsCurl::SetIAMTokenField(const char* token_field)
{
    string old = S3fsCurl::IAM_token_field;
    S3fsCurl::IAM_token_field = token_field ? token_field : "";
    return old;
}

string S3fsCurl::SetIAMExpiryField(const char* expiry_field)
{
    string old = S3fsCurl::IAM_expiry_field;
    S3fsCurl::IAM_expiry_field = expiry_field ?
expiry_field : ""; return old; } bool S3fsCurl::SetMultipartSize(off_t size) { size = size * 1024 * 1024; if(size < MIN_MULTIPART_SIZE){ return false; } S3fsCurl::multipart_size = size; return true; } int S3fsCurl::SetMaxParallelCount(int value) { int old = S3fsCurl::max_parallel_cnt; S3fsCurl::max_parallel_cnt = value; return old; } int S3fsCurl::SetMaxMultiRequest(int max) { int old = S3fsCurl::max_multireq; S3fsCurl::max_multireq = max; return old; } bool S3fsCurl::UploadMultipartPostCallback(S3fsCurl* s3fscurl) { if(!s3fscurl){ return false; } return s3fscurl->UploadMultipartPostComplete(); } bool S3fsCurl::MixMultipartPostCallback(S3fsCurl* s3fscurl) { if(!s3fscurl){ return false; } return s3fscurl->MixMultipartPostComplete(); } S3fsCurl* S3fsCurl::UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl) { if(!s3fscurl){ return NULL; } // parse and get part_num, upload_id. string upload_id; string part_num_str; int part_num; if(!get_keyword_value(s3fscurl->url, "uploadId", upload_id)){ return NULL; } if(!get_keyword_value(s3fscurl->url, "partNumber", part_num_str)){ return NULL; } part_num = s3fs_strtoofft(part_num_str.c_str(), /*base=*/ 10); if(s3fscurl->retry_count >= S3fsCurl::retries){ S3FS_PRN_ERR("Over retry count(%d) limit(%s:%d).", s3fscurl->retry_count, s3fscurl->path.c_str(), part_num); return NULL; } // duplicate request S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe()); newcurl->partdata.etaglist = s3fscurl->partdata.etaglist; newcurl->partdata.etagpos = s3fscurl->partdata.etagpos; newcurl->partdata.fd = s3fscurl->partdata.fd; newcurl->partdata.startpos = s3fscurl->b_partdata_startpos; newcurl->partdata.size = s3fscurl->b_partdata_size; newcurl->b_partdata_startpos = s3fscurl->b_partdata_startpos; newcurl->b_partdata_size = s3fscurl->b_partdata_size; newcurl->retry_count = s3fscurl->retry_count + 1; newcurl->op = s3fscurl->op; newcurl->type = s3fscurl->type; // setup new curl object if(0 != newcurl->UploadMultipartPostSetup(s3fscurl->path.c_str(), part_num, upload_id)){ S3FS_PRN_ERR("Could not duplicate curl object(%s:%d).", s3fscurl->path.c_str(), part_num); delete newcurl; return NULL; } return newcurl; } S3fsCurl* S3fsCurl::CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl) { if(!s3fscurl){ return NULL; } // parse and get part_num, upload_id. 
string upload_id; string part_num_str; int part_num; if(!get_keyword_value(s3fscurl->url, "uploadId", upload_id)){ return NULL; } if(!get_keyword_value(s3fscurl->url, "partNumber", part_num_str)){ return NULL; } part_num = s3fs_strtoofft(part_num_str.c_str(), /*base=*/ 10); if(s3fscurl->retry_count >= S3fsCurl::retries){ S3FS_PRN_ERR("Over retry count(%d) limit(%s:%d).", s3fscurl->retry_count, s3fscurl->path.c_str(), part_num); return NULL; } // duplicate request S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe()); newcurl->partdata.etaglist = s3fscurl->partdata.etaglist; newcurl->partdata.etagpos = s3fscurl->partdata.etagpos; newcurl->b_from = s3fscurl->b_from; newcurl->b_meta = s3fscurl->b_meta; newcurl->retry_count = s3fscurl->retry_count + 1; newcurl->op = s3fscurl->op; newcurl->type = s3fscurl->type; // setup new curl object if(0 != newcurl->CopyMultipartPostSetup(s3fscurl->b_from.c_str(), s3fscurl->path.c_str(), part_num, upload_id, s3fscurl->b_meta)){ S3FS_PRN_ERR("Could not duplicate curl object(%s:%d).", s3fscurl->path.c_str(), part_num); delete newcurl; return NULL; } return newcurl; } S3fsCurl* S3fsCurl::MixMultipartPostRetryCallback(S3fsCurl* s3fscurl) { if(!s3fscurl){ return NULL; } S3fsCurl* pcurl; if(-1 == s3fscurl->partdata.fd){ pcurl = S3fsCurl::CopyMultipartPostRetryCallback(s3fscurl); }else{ pcurl = S3fsCurl::UploadMultipartPostRetryCallback(s3fscurl); } return pcurl; } int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, int fd) { int result; string upload_id; struct stat st; int fd2; etaglist_t list; off_t remaining_bytes; S3fsCurl s3fscurl(true); S3FS_PRN_INFO3("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd); // duplicate fd if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){ S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno); if(-1 != fd2){ close(fd2); } return -errno; } if(-1 == fstat(fd2, &st)){ S3FS_PRN_ERR("Invalid file descriptor(errno=%d)", errno); close(fd2); return -errno; } if(0 != (result = s3fscurl.PreMultipartPostRequest(tpath, meta, upload_id, false))){ close(fd2); return result; } s3fscurl.DestroyCurlHandle(); // Initialize S3fsMultiCurl S3fsMultiCurl curlmulti(GetMaxParallelCount()); curlmulti.SetSuccessCallback(S3fsCurl::UploadMultipartPostCallback); curlmulti.SetRetryCallback(S3fsCurl::UploadMultipartPostRetryCallback); // cycle through open fd, pulling off 10MB chunks at a time for(remaining_bytes = st.st_size; 0 < remaining_bytes; ){ off_t chunk = remaining_bytes > S3fsCurl::multipart_size ? 
S3fsCurl::multipart_size : remaining_bytes; // s3fscurl sub object S3fsCurl* s3fscurl_para = new S3fsCurl(true); s3fscurl_para->partdata.fd = fd2; s3fscurl_para->partdata.startpos = st.st_size - remaining_bytes; s3fscurl_para->partdata.size = chunk; s3fscurl_para->b_partdata_startpos = s3fscurl_para->partdata.startpos; s3fscurl_para->b_partdata_size = s3fscurl_para->partdata.size; s3fscurl_para->partdata.add_etag_list(&list); // initiate upload part for parallel if(0 != (result = s3fscurl_para->UploadMultipartPostSetup(tpath, list.size(), upload_id))){ S3FS_PRN_ERR("failed uploading part setup(%d)", result); close(fd2); delete s3fscurl_para; return result; } // set into parallel object if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){ S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath); close(fd2); delete s3fscurl_para; return -1; } remaining_bytes -= chunk; } // Multi request if(0 != (result = curlmulti.Request())){ S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result); S3fsCurl s3fscurl_abort(true); int result2 = s3fscurl_abort.AbortMultipartUpload(tpath, upload_id); s3fscurl_abort.DestroyCurlHandle(); if(result2 != 0){ S3FS_PRN_ERR("error aborting multipart upload(errno=%d).", result2); } return result; } close(fd2); if(0 != (result = s3fscurl.CompleteMultipartPostRequest(tpath, upload_id, list))){ return result; } return 0; } int S3fsCurl::ParallelMixMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, const PageList& pagelist) { int result; string upload_id; struct stat st; int fd2; etaglist_t list; S3fsCurl s3fscurl(true); S3FS_PRN_INFO3("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd); // get upload mixed page list fdpage_list_t fdplist; if(!pagelist.GetMultipartSizeList(fdplist, S3fsCurl::multipart_size)){ return -1; } // duplicate fd if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){ S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno); PageList::FreeList(fdplist); if(-1 != fd2){ close(fd2); } return -errno; } if(-1 == fstat(fd2, &st)){ S3FS_PRN_ERR("Invalid file descriptor(errno=%d)", errno); PageList::FreeList(fdplist); close(fd2); return -errno; } if(0 != (result = s3fscurl.PreMultipartPostRequest(tpath, meta, upload_id, true))){ PageList::FreeList(fdplist); close(fd2); return result; } s3fscurl.DestroyCurlHandle(); // for copy multipart string srcresource; string srcurl; MakeUrlResource(get_realpath(tpath).c_str(), srcresource, srcurl); meta["Content-Type"] = S3fsCurl::LookupMimeType(string(tpath)); meta["x-amz-copy-source"] = srcresource; // Initialize S3fsMultiCurl S3fsMultiCurl curlmulti(GetMaxParallelCount()); curlmulti.SetSuccessCallback(S3fsCurl::MixMultipartPostCallback); curlmulti.SetRetryCallback(S3fsCurl::MixMultipartPostRetryCallback); for(fdpage_list_t::const_iterator iter = fdplist.begin(); iter != fdplist.end(); ++iter){ // s3fscurl sub object S3fsCurl* s3fscurl_para = new S3fsCurl(true); if(iter->modified){ // Multipart upload s3fscurl_para->partdata.fd = fd2; s3fscurl_para->partdata.startpos = iter->offset; s3fscurl_para->partdata.size = iter->bytes; s3fscurl_para->b_partdata_startpos = s3fscurl_para->partdata.startpos; s3fscurl_para->b_partdata_size = s3fscurl_para->partdata.size; s3fscurl_para->partdata.add_etag_list(&list); S3FS_PRN_INFO3("Upload Part [tpath=%s][start=%jd][size=%jd][part=%jd]", SAFESTRPTR(tpath), (intmax_t)(iter->offset), (intmax_t)(iter->bytes), (intmax_t)(list.size())); // initiate upload part for parallel if(0 != (result = s3fscurl_para->UploadMultipartPostSetup(tpath, 
list.size(), upload_id))){
                S3FS_PRN_ERR("failed uploading part setup(%d)", result);
                PageList::FreeList(fdplist);
                close(fd2);
                delete s3fscurl_para;
                return result;
            }
        }else{
            // Multipart copy
            ostringstream strrange;
            strrange << "bytes=" << iter->offset << "-" << (iter->offset + iter->bytes - 1);
            meta["x-amz-copy-source-range"] = strrange.str();
            strrange.str("");
            strrange.clear(stringstream::goodbit);

            s3fscurl_para->b_from = SAFESTRPTR(tpath);
            s3fscurl_para->b_meta = meta;
            s3fscurl_para->partdata.add_etag_list(&list);

            S3FS_PRN_INFO3("Copy Part [tpath=%s][start=%jd][size=%jd][part=%jd]", SAFESTRPTR(tpath), (intmax_t)(iter->offset), (intmax_t)(iter->bytes), (intmax_t)(list.size()));

            // initiate upload part for parallel
            if(0 != (result = s3fscurl_para->CopyMultipartPostSetup(tpath, tpath, list.size(), upload_id, meta))){
                S3FS_PRN_ERR("failed uploading part setup(%d)", result);
                close(fd2);
                delete s3fscurl_para;
                return result;
            }
        }

        // set into parallel object
        if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
            S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath);
            PageList::FreeList(fdplist);
            close(fd2);
            delete s3fscurl_para;
            return -1;
        }
    }
    PageList::FreeList(fdplist);

    // Multi request
    if(0 != (result = curlmulti.Request())){
        S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result);

        S3fsCurl s3fscurl_abort(true);
        int result2 = s3fscurl_abort.AbortMultipartUpload(tpath, upload_id);
        s3fscurl_abort.DestroyCurlHandle();
        if(result2 != 0){
            S3FS_PRN_ERR("error aborting multipart upload(errno=%d).", result2);
        }
        close(fd2);
        return result;
    }
    close(fd2);

    if(0 != (result = s3fscurl.CompleteMultipartPostRequest(tpath, upload_id, list))){
        return result;
    }
    return 0;
}

S3fsCurl* S3fsCurl::ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl)
{
    int result;

    if(!s3fscurl){
        return NULL;
    }
    if(s3fscurl->retry_count >= S3fsCurl::retries){
        S3FS_PRN_ERR("Over retry count(%d) limit(%s).", s3fscurl->retry_count, s3fscurl->path.c_str());
        return NULL;
    }

    // duplicate request(setup new curl object)
    S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe());
    if(0 != (result = newcurl->PreGetObjectRequest(s3fscurl->path.c_str(), s3fscurl->partdata.fd, s3fscurl->partdata.startpos, s3fscurl->partdata.size, s3fscurl->b_ssetype, s3fscurl->b_ssevalue))) {
        S3FS_PRN_ERR("failed downloading part setup(%d)", result);
        delete newcurl;
        return NULL;
    }
    newcurl->retry_count = s3fscurl->retry_count + 1;

    return newcurl;
}

int S3fsCurl::ParallelGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size)
{
    S3FS_PRN_INFO3("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd);

    sse_type_t ssetype;
    string     ssevalue;
    if(!get_object_sse_type(tpath, ssetype, ssevalue)){
        S3FS_PRN_WARN("Failed to get SSE type for file(%s).", SAFESTRPTR(tpath));
    }
    int     result = 0;
    ssize_t remaining_bytes;

    // cycle through open fd, pulling off 10MB chunks at a time
    for(remaining_bytes = size; 0 < remaining_bytes; ){
        S3fsMultiCurl curlmulti(GetMaxParallelCount());
        int           para_cnt;
        off_t         chunk;

        // Initialize S3fsMultiCurl
        //curlmulti.SetSuccessCallback(NULL);   // not need to set success callback
        curlmulti.SetRetryCallback(S3fsCurl::ParallelGetObjectRetryCallback);

        // Loop for setup parallel upload(multipart) request.
        for(para_cnt = 0; para_cnt < S3fsCurl::max_parallel_cnt && 0 < remaining_bytes; para_cnt++, remaining_bytes -= chunk){
            // chunk size
            chunk = remaining_bytes > S3fsCurl::multipart_size ?
S3fsCurl::multipart_size : remaining_bytes;

            // s3fscurl sub object
            S3fsCurl* s3fscurl_para = new S3fsCurl();
            if(0 != (result = s3fscurl_para->PreGetObjectRequest(tpath, fd, (start + size - remaining_bytes), chunk, ssetype, ssevalue))){
                S3FS_PRN_ERR("failed downloading part setup(%d)", result);
                delete s3fscurl_para;
                return result;
            }

            // set into parallel object
            if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
                S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath);
                delete s3fscurl_para;
                return -1;
            }
        }

        // Multi request
        if(0 != (result = curlmulti.Request())){
            S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result);
            break;
        }

        // reinit for loop.
        curlmulti.Clear();
    }
    return result;
}

bool S3fsCurl::UploadMultipartPostSetCurlOpts(S3fsCurl* s3fscurl)
{
    if(!s3fscurl){
        return false;
    }
    if(!s3fscurl->CreateCurlHandle()){
        return false;
    }
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_URL, s3fscurl->url.c_str());
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_UPLOAD, true);              // HTTP PUT
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_WRITEDATA, (void*)(&s3fscurl->bodydata));
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_HEADERDATA, (void*)&(s3fscurl->responseHeaders));
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_HEADERFUNCTION, HeaderCallback);
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_INFILESIZE_LARGE, static_cast<curl_off_t>(s3fscurl->partdata.size)); // Content-Length
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_READFUNCTION, UploadReadCallback);
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_READDATA, (void*)s3fscurl);
    S3fsCurl::AddUserAgent(s3fscurl->hCurl);                              // put User-Agent

    return true;
}

bool S3fsCurl::CopyMultipartPostSetCurlOpts(S3fsCurl* s3fscurl)
{
    if(!s3fscurl){
        return false;
    }
    if(!s3fscurl->CreateCurlHandle()){
        return false;
    }

    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_URL, s3fscurl->url.c_str());
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_UPLOAD, true);              // HTTP PUT
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_WRITEDATA, (void*)(&s3fscurl->bodydata));
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_HEADERDATA, (void*)(&s3fscurl->headdata));
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_HEADERFUNCTION, WriteMemoryCallback);
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_INFILESIZE, 0);             // Content-Length
    S3fsCurl::AddUserAgent(s3fscurl->hCurl);                              // put User-Agent

    return true;
}

bool S3fsCurl::PreGetObjectRequestSetCurlOpts(S3fsCurl* s3fscurl)
{
    if(!s3fscurl){
        return false;
    }
    if(!s3fscurl->CreateCurlHandle()){
        return false;
    }

    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_URL, s3fscurl->url.c_str());
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_WRITEFUNCTION, DownloadWriteCallback);
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_WRITEDATA, (void*)s3fscurl);
    S3fsCurl::AddUserAgent(s3fscurl->hCurl);        // put User-Agent

    return true;
}

bool S3fsCurl::PreHeadRequestSetCurlOpts(S3fsCurl* s3fscurl)
{
    if(!s3fscurl){
        return false;
    }
    if(!s3fscurl->CreateCurlHandle()){
        return false;
    }

    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_URL, s3fscurl->url.c_str());
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_NOBODY, true);    // HEAD
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_FILETIME, true);  // Last-Modified

    // responseHeaders
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_HEADERDATA, (void*)&(s3fscurl->responseHeaders));
    curl_easy_setopt(s3fscurl->hCurl, CURLOPT_HEADERFUNCTION, HeaderCallback);
    S3fsCurl::AddUserAgent(s3fscurl->hCurl);        // put User-Agent

    return true;
}

bool S3fsCurl::ParseIAMCredentialResponse(const char*
response, iamcredmap_t& keyval) { if(!response){ return false; } istringstream sscred(response); string oneline; keyval.clear(); while(getline(sscred, oneline, ',')){ string::size_type pos; string key; string val; if(string::npos != (pos = oneline.find(IAMCRED_ACCESSKEYID))){ key = IAMCRED_ACCESSKEYID; }else if(string::npos != (pos = oneline.find(IAMCRED_SECRETACCESSKEY))){ key = IAMCRED_SECRETACCESSKEY; }else if(string::npos != (pos = oneline.find(S3fsCurl::IAM_token_field))){ key = S3fsCurl::IAM_token_field; }else if(string::npos != (pos = oneline.find(S3fsCurl::IAM_expiry_field))){ key = S3fsCurl::IAM_expiry_field; }else if(string::npos != (pos = oneline.find(IAMCRED_ROLEARN))){ key = IAMCRED_ROLEARN; }else{ continue; } if(string::npos == (pos = oneline.find(':', pos + key.length()))){ continue; } if(S3fsCurl::is_ibm_iam_auth && key == S3fsCurl::IAM_expiry_field){ // parse integer value if(string::npos == (pos = oneline.find_first_of("0123456789", pos))){ continue; } oneline = oneline.substr(pos); if(string::npos == (pos = oneline.find_last_of("0123456789"))){ continue; } val = oneline.substr(0, pos+1); }else{ // parse string value (starts and ends with quotes) if(string::npos == (pos = oneline.find('\"', pos))){ continue; } oneline = oneline.substr(pos + sizeof(char)); if(string::npos == (pos = oneline.find('\"'))){ continue; } val = oneline.substr(0, pos); } keyval[key] = val; } return true; } bool S3fsCurl::SetIAMCredentials(const char* response) { S3FS_PRN_INFO3("IAM credential response = \"%s\"", response); iamcredmap_t keyval; if(!ParseIAMCredentialResponse(response, keyval)){ return false; } if(S3fsCurl::IAM_field_count != keyval.size()){ return false; } S3fsCurl::AWSAccessToken = keyval[string(S3fsCurl::IAM_token_field)]; if(S3fsCurl::is_ibm_iam_auth){ S3fsCurl::AWSAccessTokenExpire = s3fs_strtoofft(keyval[string(S3fsCurl::IAM_expiry_field)].c_str(), /*base=*/ 10); }else{ S3fsCurl::AWSAccessKeyId = keyval[string(IAMCRED_ACCESSKEYID)]; S3fsCurl::AWSSecretAccessKey = keyval[string(IAMCRED_SECRETACCESSKEY)]; S3fsCurl::AWSAccessTokenExpire = cvtIAMExpireStringToTime(keyval[S3fsCurl::IAM_expiry_field].c_str()); } return true; } bool S3fsCurl::CheckIAMCredentialUpdate() { if(S3fsCurl::IAM_role.empty() && !S3fsCurl::is_ecs && !S3fsCurl::is_ibm_iam_auth){ return true; } if(time(NULL) + IAM_EXPIRE_MERGIN <= S3fsCurl::AWSAccessTokenExpire){ return true; } // update S3fsCurl s3fscurl; if(0 != s3fscurl.GetIAMCredentials()){ return false; } return true; } bool S3fsCurl::ParseIAMRoleFromMetaDataResponse(const char* response, string& rolename) { if(!response){ return false; } // [NOTE] // expected following strings. // // myrolename // istringstream ssrole(response); string oneline; if (getline(ssrole, oneline, '\n')){ rolename = oneline; return !rolename.empty(); } return false; } bool S3fsCurl::SetIAMRoleFromMetaData(const char* response) { S3FS_PRN_INFO3("IAM role name response = \"%s\"", response); string rolename; if(!S3fsCurl::ParseIAMRoleFromMetaDataResponse(response, rolename)){ return false; } SetIAMRole(rolename.c_str()); return true; } bool S3fsCurl::AddUserAgent(CURL* hCurl) { if(!hCurl){ return false; } if(S3fsCurl::IsUserAgentFlag()){ curl_easy_setopt(hCurl, CURLOPT_USERAGENT, S3fsCurl::userAgent.c_str()); } return true; } int S3fsCurl::CurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr) { if(!hcurl){ // something wrong... 
return 0; } switch(type){ case CURLINFO_TEXT: // Swap tab indentation with spaces so it stays pretty in syslog int indent; indent = 0; while (*data == '\t' && size > 0) { indent += 4; size--; data++; } S3FS_PRN_CURL("* %*s%.*s", indent, "", (int)size, data); break; case CURLINFO_HEADER_IN: case CURLINFO_HEADER_OUT: size_t remaining; char* p; // Print each line individually for tidy output remaining = size; p = data; do { char* eol = (char*)memchr(p, '\n', remaining); int newline = 0; if (eol == NULL) { eol = (char*)memchr(p, '\r', remaining); } else { if (eol > p && *(eol - 1) == '\r') { newline++; } newline++; eol++; } size_t length = eol - p; S3FS_PRN_CURL("%c %.*s", CURLINFO_HEADER_IN == type ? '<' : '>', (int)length - newline, p); remaining -= length; p = eol; } while (p != NULL && remaining > 0); break; case CURLINFO_DATA_IN: case CURLINFO_DATA_OUT: case CURLINFO_SSL_DATA_IN: case CURLINFO_SSL_DATA_OUT: // not put break; default: // why break; } return 0; } //------------------------------------------------------------------- // Methods for S3fsCurl //------------------------------------------------------------------- S3fsCurl::S3fsCurl(bool ahbe) : hCurl(NULL), type(REQTYPE_UNSET), path(""), base_path(""), saved_path(""), url(""), requestHeaders(NULL), LastResponseCode(S3FSCURL_RESPONSECODE_NOTSET), postdata(NULL), postdata_remaining(0), is_use_ahbe(ahbe), retry_count(0), b_infile(NULL), b_postdata(NULL), b_postdata_remaining(0), b_partdata_startpos(0), b_partdata_size(0), b_ssekey_pos(-1), b_ssevalue(""), b_ssetype(SSE_DISABLE), op(""), query_string(""), sem(NULL), completed_tids_lock(NULL), completed_tids(NULL), fpLazySetup(NULL) { } S3fsCurl::~S3fsCurl() { DestroyCurlHandle(); } bool S3fsCurl::ResetHandle() { static volatile bool run_once = false; // emit older curl warnings only once curl_easy_reset(hCurl); curl_easy_setopt(hCurl, CURLOPT_NOSIGNAL, 1); curl_easy_setopt(hCurl, CURLOPT_FOLLOWLOCATION, true); curl_easy_setopt(hCurl, CURLOPT_CONNECTTIMEOUT, S3fsCurl::connect_timeout); curl_easy_setopt(hCurl, CURLOPT_NOPROGRESS, 0); curl_easy_setopt(hCurl, CURLOPT_PROGRESSFUNCTION, S3fsCurl::CurlProgress); curl_easy_setopt(hCurl, CURLOPT_PROGRESSDATA, hCurl); // curl_easy_setopt(hCurl, CURLOPT_FORBID_REUSE, 1); if(CURLE_OK != curl_easy_setopt(hCurl, S3FS_CURLOPT_TCP_KEEPALIVE, 1) && !run_once){ S3FS_PRN_WARN("The CURLOPT_TCP_KEEPALIVE option could not be set. For maximize performance you need to enable this option and you should use libcurl 7.25.0 or later."); } if(CURLE_OK != curl_easy_setopt(hCurl, S3FS_CURLOPT_SSL_ENABLE_ALPN, 0) && !run_once){ S3FS_PRN_WARN("The CURLOPT_SSL_ENABLE_ALPN option could not be unset. S3 server does not support ALPN, then this option should be disabled to maximize performance. you need to use libcurl 7.36.0 or later."); } if(CURLE_OK != curl_easy_setopt(hCurl, S3FS_CURLOPT_KEEP_SENDING_ON_ERROR, 1) && !run_once){ S3FS_PRN_WARN("The S3FS_CURLOPT_KEEP_SENDING_ON_ERROR option could not be set. 
For maximize performance you need to enable this option and you should use libcurl 7.51.0 or later."); } run_once = true; if(type != REQTYPE_IAMCRED && type != REQTYPE_IAMROLE){ // REQTYPE_IAMCRED and REQTYPE_IAMROLE are always HTTP if(0 == S3fsCurl::ssl_verify_hostname){ curl_easy_setopt(hCurl, CURLOPT_SSL_VERIFYHOST, 0); } if(!S3fsCurl::curl_ca_bundle.empty()){ curl_easy_setopt(hCurl, CURLOPT_CAINFO, S3fsCurl::curl_ca_bundle.c_str()); } } if((S3fsCurl::is_dns_cache || S3fsCurl::is_ssl_session_cache) && S3fsCurl::hCurlShare){ curl_easy_setopt(hCurl, CURLOPT_SHARE, S3fsCurl::hCurlShare); } if(!S3fsCurl::is_cert_check) { S3FS_PRN_DBG("'no_check_certificate' option in effect."); S3FS_PRN_DBG("The server certificate won't be checked against the available certificate authorities."); curl_easy_setopt(hCurl, CURLOPT_SSL_VERIFYPEER, false); } if(S3fsCurl::is_verbose){ curl_easy_setopt(hCurl, CURLOPT_VERBOSE, true); if(!foreground){ curl_easy_setopt(hCurl, CURLOPT_DEBUGFUNCTION, S3fsCurl::CurlDebugFunc); } } if(!cipher_suites.empty()) { curl_easy_setopt(hCurl, CURLOPT_SSL_CIPHER_LIST, cipher_suites.c_str()); } S3fsCurl::curl_times[hCurl] = time(0); S3fsCurl::curl_progress[hCurl] = progress_t(-1, -1); return true; } bool S3fsCurl::CreateCurlHandle(bool only_pool, bool remake) { AutoLock lock(&S3fsCurl::curl_handles_lock); if(hCurl && remake){ if(!DestroyCurlHandle(false)){ S3FS_PRN_ERR("could not destroy handle."); return false; } S3FS_PRN_INFO3("already has handle, so destroyed it or restored it to pool."); } if(!hCurl){ if(NULL == (hCurl = sCurlPool->GetHandler(only_pool))){ if(!only_pool){ S3FS_PRN_ERR("Failed to create handle."); return false; }else{ // [NOTE] // Further initialization processing is left to lazy processing to be executed later. // (Currently we do not use only_pool=true, but this code is remained for the future) return true; } } } ResetHandle(); return true; } bool S3fsCurl::DestroyCurlHandle(bool restore_pool, bool clear_internal_data) { // [NOTE] // If type is REQTYPE_IAMCRED or REQTYPE_IAMROLE, do not clear type. // Because that type only uses HTTP protocol, then the special // logic in ResetHandle function. 
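// (Both of those request types talk plain HTTP to a credential endpoint,
// and ResetHandle() checks the surviving type so that it can skip the
// SSL-related options when the handle is reused for a retry.)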
//
    if(type != REQTYPE_IAMCRED && type != REQTYPE_IAMROLE){
        type = REQTYPE_UNSET;
    }
    if(clear_internal_data){
        ClearInternalData();
    }
    if(hCurl){
        AutoLock lock(&S3fsCurl::curl_handles_lock);

        S3fsCurl::curl_times.erase(hCurl);
        S3fsCurl::curl_progress.erase(hCurl);
        sCurlPool->ReturnHandler(hCurl, restore_pool);
        hCurl = NULL;
    }else{
        return false;
    }
    return true;
}

bool S3fsCurl::ClearInternalData()
{
    // Always clear internal data
    //
    type        = REQTYPE_UNSET;
    path        = "";
    base_path   = "";
    saved_path  = "";
    url         = "";
    op          = "";
    query_string= "";
    if(requestHeaders){
        curl_slist_free_all(requestHeaders);
        requestHeaders = NULL;
    }
    responseHeaders.clear();
    bodydata.Clear();
    headdata.Clear();
    LastResponseCode     = S3FSCURL_RESPONSECODE_NOTSET;
    postdata             = NULL;
    postdata_remaining   = 0;
    retry_count          = 0;
    b_infile             = NULL;
    b_postdata           = NULL;
    b_postdata_remaining = 0;
    b_partdata_startpos  = 0;
    b_partdata_size      = 0;
    partdata.clear();

    fpLazySetup          = NULL;

    S3FS_MALLOCTRIM(0);

    return true;
}

bool S3fsCurl::SetUseAhbe(bool ahbe)
{
    bool old    = is_use_ahbe;
    is_use_ahbe = ahbe;
    return old;
}

bool S3fsCurl::GetResponseCode(long& responseCode, bool from_curl_handle)
{
    responseCode = -1;

    if(!from_curl_handle){
        responseCode = LastResponseCode;
    }else{
        if(!hCurl){
            return false;
        }
        if(CURLE_OK != curl_easy_getinfo(hCurl, CURLINFO_RESPONSE_CODE, &LastResponseCode)){
            return false;
        }
        responseCode = LastResponseCode;
    }
    return true;
}

//
// Reset all options for retrying
//
bool S3fsCurl::RemakeHandle()
{
    S3FS_PRN_INFO3("Retry request. [type=%d][url=%s][path=%s]", type, url.c_str(), path.c_str());

    if(REQTYPE_UNSET == type){
        return false;
    }

    // rewind file
    struct stat st;
    if(b_infile){
        rewind(b_infile);
        if(-1 == fstat(fileno(b_infile), &st)){
            S3FS_PRN_WARN("Could not get file stat(fd=%d)", fileno(b_infile));
            return false;
        }
    }

    // reinitialize internal data
    responseHeaders.clear();
    bodydata.Clear();
    headdata.Clear();
    LastResponseCode = S3FSCURL_RESPONSECODE_NOTSET;

    // count up(only use for multipart)
    retry_count++;

    // set from backup
    postdata           = b_postdata;
    postdata_remaining = b_postdata_remaining;
    partdata.startpos  = b_partdata_startpos;
    partdata.size      = b_partdata_size;

    // reset handle
    ResetHandle();

    // set options
    switch(type){
        case REQTYPE_DELETE:
            curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
            curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE");
            break;

        case REQTYPE_HEAD:
            curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
            curl_easy_setopt(hCurl, CURLOPT_NOBODY, true);
            curl_easy_setopt(hCurl, CURLOPT_FILETIME, true);
            // responseHeaders
            curl_easy_setopt(hCurl, CURLOPT_HEADERDATA, (void*)&responseHeaders);
            curl_easy_setopt(hCurl, CURLOPT_HEADERFUNCTION, HeaderCallback);
            break;

        case REQTYPE_PUTHEAD:
            curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
            curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true);
            curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata);
            curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
            curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0);
            break;

        case REQTYPE_PUT:
            curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
            curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true);
            curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata);
            curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
            if(b_infile){
                curl_easy_setopt(hCurl, CURLOPT_INFILESIZE_LARGE, static_cast<curl_off_t>(st.st_size));
                curl_easy_setopt(hCurl, CURLOPT_INFILE, b_infile);
            }else{
                curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0);
            }
            break;

        case REQTYPE_GET:
            curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
            curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION,
S3fsCurl::DownloadWriteCallback);
            curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)this);
            break;

        case REQTYPE_CHKBUCKET:
            curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
            curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata);
            curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
            break;

        case REQTYPE_LISTBUCKET:
            curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
            curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata);
            curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
            break;

        case REQTYPE_PREMULTIPOST:
            curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
            curl_easy_setopt(hCurl, CURLOPT_POST, true);
            curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata);
            curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
            curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, 0);
            break;

        case REQTYPE_COMPLETEMULTIPOST:
            curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
            curl_easy_setopt(hCurl, CURLOPT_POST, true);
            curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata);
            curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
            curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, static_cast<curl_off_t>(postdata_remaining));
            curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this);
            curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback);
            break;

        case REQTYPE_UPLOADMULTIPOST:
            curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
            curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true);
            curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata);
            curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
            curl_easy_setopt(hCurl, CURLOPT_HEADERDATA, (void*)&responseHeaders);
            curl_easy_setopt(hCurl, CURLOPT_HEADERFUNCTION, HeaderCallback);
            curl_easy_setopt(hCurl, CURLOPT_INFILESIZE_LARGE, static_cast<curl_off_t>(partdata.size));
            curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::UploadReadCallback);
            curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this);
            break;

        case REQTYPE_COPYMULTIPOST:
            curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
            curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true);
            curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata);
            curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
            curl_easy_setopt(hCurl, CURLOPT_HEADERDATA, (void*)&headdata);
            curl_easy_setopt(hCurl, CURLOPT_HEADERFUNCTION, WriteMemoryCallback);
            curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0);
            break;

        case REQTYPE_MULTILIST:
            curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
            curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata);
            curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
            break;

        case REQTYPE_IAMCRED:
            curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
            curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata);
            curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
            if(S3fsCurl::is_ibm_iam_auth){
                curl_easy_setopt(hCurl, CURLOPT_POST, true);
                curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, static_cast<curl_off_t>(postdata_remaining));
                curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this);
                curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback);
            }
            break;

        case REQTYPE_ABORTMULTIUPLOAD:
            curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
            curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE");
            break;

        case REQTYPE_IAMROLE:
            curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
            curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata);
            curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
            break;

        default:
            S3FS_PRN_ERR("request type is unknown(%d)", type);
            return false;
    }
    S3fsCurl::AddUserAgent(hCurl);        // put
User-Agent return true; } // // returns curl return code // int S3fsCurl::RequestPerform(bool dontAddAuthHeaders /*=false*/) { if(IS_S3FS_LOG_DBG()){ char* ptr_url = NULL; curl_easy_getinfo(hCurl, CURLINFO_EFFECTIVE_URL , &ptr_url); S3FS_PRN_DBG("connecting to URL %s", SAFESTRPTR(ptr_url)); } LastResponseCode = S3FSCURL_RESPONSECODE_NOTSET; long responseCode; int result = S3FSCURL_PERFORM_RESULT_NOTSET; if(!dontAddAuthHeaders) { insertAuthHeaders(); } curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders); // 1 attempt + retries... for(int retrycnt = 0; S3FSCURL_PERFORM_RESULT_NOTSET == result && retrycnt < S3fsCurl::retries; ++retrycnt){ // Reset response code responseCode = S3FSCURL_RESPONSECODE_NOTSET; // Requests CURLcode curlCode = curl_easy_perform(hCurl); // Check result switch(curlCode){ case CURLE_OK: // Need to look at the HTTP response code if(0 != curl_easy_getinfo(hCurl, CURLINFO_RESPONSE_CODE, &responseCode)){ S3FS_PRN_ERR("curl_easy_getinfo failed while trying to retrieve HTTP response code"); responseCode = S3FSCURL_RESPONSECODE_FATAL_ERROR; result = -EIO; break; } if(responseCode >= 200 && responseCode < 300){ S3FS_PRN_INFO3("HTTP response code %ld", responseCode); result = 0; break; } // Service response codes which are >= 300 && < 500 switch(responseCode){ case 301: case 307: S3FS_PRN_ERR("HTTP response code 301(Moved Permanently: also happens when bucket's region is incorrect), returning EIO. Body Text: %s", bodydata.str()); S3FS_PRN_ERR("The options of url and endpoint may be useful for solving, please try to use both options."); result = -EIO; break; case 400: S3FS_PRN_ERR("HTTP response code %ld, returning EIO. Body Text: %s", responseCode, bodydata.str()); result = -EIO; break; case 403: S3FS_PRN_ERR("HTTP response code %ld, returning EPERM. Body Text: %s", responseCode, bodydata.str()); result = -EPERM; break; case 404: S3FS_PRN_INFO3("HTTP response code 404 was returned, returning ENOENT"); S3FS_PRN_DBG("Body Text: %s", bodydata.str()); result = -ENOENT; break; case 501: S3FS_PRN_INFO3("HTTP response code 501 was returned, returning ENOTSUP"); S3FS_PRN_DBG("Body Text: %s", bodydata.str()); result = -ENOTSUP; break; case 503: S3FS_PRN_INFO3("HTTP response code 503 was returned, slowing down"); S3FS_PRN_DBG("Body Text: %s", bodydata.str()); sleep(4 << retry_count); break; default: S3FS_PRN_ERR("HTTP response code %ld, returning EIO. 
Body Text: %s", responseCode, bodydata.str()); result = -EIO; break; } break; case CURLE_WRITE_ERROR: S3FS_PRN_ERR("### CURLE_WRITE_ERROR"); sleep(2); break; case CURLE_OPERATION_TIMEDOUT: S3FS_PRN_ERR("### CURLE_OPERATION_TIMEDOUT"); sleep(2); break; case CURLE_COULDNT_RESOLVE_HOST: S3FS_PRN_ERR("### CURLE_COULDNT_RESOLVE_HOST"); sleep(2); break; case CURLE_COULDNT_CONNECT: S3FS_PRN_ERR("### CURLE_COULDNT_CONNECT"); sleep(4); break; case CURLE_GOT_NOTHING: S3FS_PRN_ERR("### CURLE_GOT_NOTHING"); sleep(4); break; case CURLE_ABORTED_BY_CALLBACK: S3FS_PRN_ERR("### CURLE_ABORTED_BY_CALLBACK"); sleep(4); S3fsCurl::curl_times[hCurl] = time(0); break; case CURLE_PARTIAL_FILE: S3FS_PRN_ERR("### CURLE_PARTIAL_FILE"); sleep(4); break; case CURLE_SEND_ERROR: S3FS_PRN_ERR("### CURLE_SEND_ERROR"); sleep(2); break; case CURLE_RECV_ERROR: S3FS_PRN_ERR("### CURLE_RECV_ERROR"); sleep(2); break; case CURLE_SSL_CONNECT_ERROR: S3FS_PRN_ERR("### CURLE_SSL_CONNECT_ERROR"); sleep(2); break; case CURLE_SSL_CACERT: S3FS_PRN_ERR("### CURLE_SSL_CACERT"); // try to locate cert, if successful, then set the // option and continue if(S3fsCurl::curl_ca_bundle.empty()){ if(!S3fsCurl::LocateBundle()){ S3FS_PRN_ERR("could not get CURL_CA_BUNDLE."); result = -EIO; } // retry with CAINFO }else{ S3FS_PRN_ERR("curlCode: %d msg: %s", curlCode, curl_easy_strerror(curlCode)); result = -EIO; } break; #ifdef CURLE_PEER_FAILED_VERIFICATION case CURLE_PEER_FAILED_VERIFICATION: S3FS_PRN_ERR("### CURLE_PEER_FAILED_VERIFICATION"); first_pos = bucket.find_first_of("."); if(first_pos != string::npos){ S3FS_PRN_INFO("curl returned a CURL_PEER_FAILED_VERIFICATION error"); S3FS_PRN_INFO("security issue found: buckets with periods in their name are incompatible with http"); S3FS_PRN_INFO("This check can be over-ridden by using the -o ssl_verify_hostname=0"); S3FS_PRN_INFO("The certificate will still be checked but the hostname will not be verified."); S3FS_PRN_INFO("A more secure method would be to use a bucket name without periods."); }else{ S3FS_PRN_INFO("my_curl_easy_perform: curlCode: %d -- %s", curlCode, curl_easy_strerror(curlCode)); } result = -EIO; break; #endif // This should be invalid since curl option HTTP FAILONERROR is now off case CURLE_HTTP_RETURNED_ERROR: S3FS_PRN_ERR("### CURLE_HTTP_RETURNED_ERROR"); if(0 != curl_easy_getinfo(hCurl, CURLINFO_RESPONSE_CODE, &responseCode)){ result = -EIO; }else{ S3FS_PRN_INFO3("HTTP response code =%ld", responseCode); // Let's try to retrieve the if(404 == responseCode){ result = -ENOENT; }else if(500 > responseCode){ result = -EIO; } } break; // Unknown CURL return code default: S3FS_PRN_ERR("###curlCode: %d msg: %s", curlCode, curl_easy_strerror(curlCode)); result = -EIO; break; } if(S3FSCURL_PERFORM_RESULT_NOTSET == result){ S3FS_PRN_INFO("### retrying..."); if(!RemakeHandle()){ S3FS_PRN_INFO("Failed to reset handle and internal data for retrying."); result = -EIO; break; } } } // set last response code if(S3FSCURL_RESPONSECODE_NOTSET == responseCode){ LastResponseCode = S3FSCURL_RESPONSECODE_FATAL_ERROR; }else{ LastResponseCode = responseCode; } if(S3FSCURL_PERFORM_RESULT_NOTSET == result){ S3FS_PRN_ERR("### giving up"); result = -EIO; } return result; } // // Returns the Amazon AWS signature for the given parameters. 
//
// @param method e.g., "GET"
// @param content_type e.g., "application/x-directory"
// @param date e.g., get_date_rfc850()
// @param resource e.g., "/pub"
//
string S3fsCurl::CalcSignatureV2(const string& method, const string& strMD5, const string& content_type, const string& date, const string& resource)
{
    string Signature;
    string StringToSign;

    if(!S3fsCurl::IAM_role.empty() || S3fsCurl::is_ecs || S3fsCurl::is_use_session_token){
        requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-security-token", S3fsCurl::AWSAccessToken.c_str());
    }

    StringToSign += method + "\n";
    StringToSign += strMD5 + "\n";        // md5
    StringToSign += content_type + "\n";
    StringToSign += date + "\n";
    StringToSign += get_canonical_headers(requestHeaders, true);
    StringToSign += resource;

    const void* key            = S3fsCurl::AWSSecretAccessKey.data();
    int key_len                = S3fsCurl::AWSSecretAccessKey.size();
    const unsigned char* sdata = reinterpret_cast<const unsigned char*>(StringToSign.data());
    int sdata_len              = StringToSign.size();
    unsigned char* md          = NULL;
    unsigned int md_len        = 0;

    s3fs_HMAC(key, key_len, sdata, sdata_len, &md, &md_len);

    char* base64;
    if(NULL == (base64 = s3fs_base64(md, md_len))){
        delete[] md;
        return string("");  // ENOMEM
    }
    delete[] md;

    Signature = base64;
    delete[] base64;

    return Signature;
}

string S3fsCurl::CalcSignature(const string& method, const string& canonical_uri, const string& query_string, const string& strdate, const string& payload_hash, const string& date8601)
{
    string Signature, StringCQ, StringToSign;
    string uriencode;

    if(!S3fsCurl::IAM_role.empty() || S3fsCurl::is_ecs || S3fsCurl::is_use_session_token){
        requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-security-token", S3fsCurl::AWSAccessToken.c_str());
    }

    uriencode = urlEncode(canonical_uri);
    StringCQ  = method + "\n";
    if(0 == strcmp(method.c_str(),"HEAD") || 0 == strcmp(method.c_str(),"PUT") || 0 == strcmp(method.c_str(),"DELETE")){
        StringCQ += uriencode + "\n";
    }else if (0 == strcmp(method.c_str(), "GET") && 0 == strcmp(uriencode.c_str(), "")) {
        StringCQ +="/\n";
    }else if (0 == strcmp(method.c_str(), "GET") && 0 == strncmp(uriencode.c_str(), "/", 1)) {
        StringCQ += uriencode +"\n";
    }else if (0 == strcmp(method.c_str(), "GET") && 0 != strncmp(uriencode.c_str(), "/", 1)) {
        StringCQ += "/\n" + urlEncode2(canonical_uri) +"\n";
    }else if (0 == strcmp(method.c_str(), "POST")) {
        StringCQ += uriencode + "\n";
    }
    StringCQ += urlEncode2(query_string) + "\n";
    StringCQ += get_canonical_headers(requestHeaders) + "\n";
    StringCQ += get_sorted_header_keys(requestHeaders) + "\n";
    StringCQ += payload_hash;

    char kSecret[128];
    unsigned char *kDate, *kRegion, *kService, *kSigning, *sRequest = NULL;
    unsigned int kDate_len,kRegion_len, kService_len, kSigning_len, sRequest_len = 0;
    char hexsRequest[64 + 1];
    int kSecret_len = snprintf(kSecret, sizeof(kSecret), "AWS4%s", S3fsCurl::AWSSecretAccessKey.c_str());
    unsigned int cnt;

    s3fs_HMAC256(kSecret, kSecret_len, reinterpret_cast<const unsigned char*>(strdate.data()), strdate.size(), &kDate, &kDate_len);
    s3fs_HMAC256(kDate, kDate_len, reinterpret_cast<const unsigned char*>(endpoint.c_str()), endpoint.size(), &kRegion, &kRegion_len);
    s3fs_HMAC256(kRegion, kRegion_len, reinterpret_cast<const unsigned char*>("s3"), sizeof("s3") - 1, &kService, &kService_len);
    s3fs_HMAC256(kService, kService_len, reinterpret_cast<const unsigned char*>("aws4_request"), sizeof("aws4_request") - 1, &kSigning, &kSigning_len);
    delete[] kDate;
    delete[] kRegion;
    delete[] kService;

    const unsigned char* cRequest = reinterpret_cast<const unsigned char*>(StringCQ.c_str());
    unsigned int cRequest_len     = StringCQ.size();
    s3fs_sha256(cRequest, cRequest_len, &sRequest,
&sRequest_len); for(cnt = 0; cnt < sRequest_len; cnt++){ sprintf(&hexsRequest[cnt * 2], "%02x", sRequest[cnt]); } delete[] sRequest; StringToSign = "AWS4-HMAC-SHA256\n"; StringToSign += date8601 + "\n"; StringToSign += strdate + "/" + endpoint + "/s3/aws4_request\n"; StringToSign += hexsRequest; const unsigned char* cscope = reinterpret_cast(StringToSign.c_str()); unsigned int cscope_len = StringToSign.size(); unsigned char* md = NULL; unsigned int md_len = 0; s3fs_HMAC256(kSigning, kSigning_len, cscope, cscope_len, &md, &md_len); char *hexSig = new char[2 * md_len + 1]; for(cnt = 0; cnt < md_len; cnt++){ sprintf(&hexSig[cnt * 2], "%02x", md[cnt]); } delete[] kSigning; delete[] md; Signature = hexSig; delete[] hexSig; return Signature; } void S3fsCurl::insertV4Headers() { string server_path = type == REQTYPE_LISTBUCKET ? "/" : path; string payload_hash; switch (type) { case REQTYPE_PUT: payload_hash = s3fs_sha256sum(b_infile == NULL ? -1 : fileno(b_infile), 0, -1); break; case REQTYPE_COMPLETEMULTIPOST: { unsigned int cRequest_len = strlen(reinterpret_cast(b_postdata)); unsigned char* sRequest = NULL; unsigned int sRequest_len = 0; char hexsRequest[64 + 1]; unsigned int cnt; s3fs_sha256(b_postdata, cRequest_len, &sRequest, &sRequest_len); for(cnt = 0; cnt < sRequest_len; cnt++){ sprintf(&hexsRequest[cnt * 2], "%02x", sRequest[cnt]); } delete[] sRequest; payload_hash.assign(hexsRequest, &hexsRequest[sRequest_len * 2]); break; } case REQTYPE_UPLOADMULTIPOST: payload_hash = s3fs_sha256sum(partdata.fd, partdata.startpos, partdata.size); break; default: break; } S3FS_PRN_INFO3("computing signature [%s] [%s] [%s] [%s]", op.c_str(), server_path.c_str(), query_string.c_str(), payload_hash.c_str()); string strdate; string date8601; get_date_sigv3(strdate, date8601); string contentSHA256 = payload_hash.empty() ? empty_payload_hash : payload_hash; const std::string realpath = pathrequeststyle ? "/" + bucket + server_path : server_path; //string canonical_headers, signed_headers; requestHeaders = curl_slist_sort_insert(requestHeaders, "host", get_bucket_host().c_str()); requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-content-sha256", contentSHA256.c_str()); requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-date", date8601.c_str()); if (S3fsCurl::IsRequesterPays()) { requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-request-payer", "requester"); } if(!S3fsCurl::IsPublicBucket()){ string Signature = CalcSignature(op, realpath, query_string + (type == REQTYPE_PREMULTIPOST || type == REQTYPE_MULTILIST ? "=" : ""), strdate, contentSHA256, date8601); string auth = "AWS4-HMAC-SHA256 Credential=" + AWSAccessKeyId + "/" + strdate + "/" + endpoint + "/s3/aws4_request, SignedHeaders=" + get_sorted_header_keys(requestHeaders) + ", Signature=" + Signature; requestHeaders = curl_slist_sort_insert(requestHeaders, "Authorization", auth.c_str()); } } void S3fsCurl::insertV2Headers() { string resource; string turl; string server_path = type == REQTYPE_LISTBUCKET ? "/" : path; MakeUrlResource(server_path.c_str(), resource, turl); if(!query_string.empty() && type != REQTYPE_LISTBUCKET){ resource += "?" 
+ query_string; } string date = get_date_rfc850(); requestHeaders = curl_slist_sort_insert(requestHeaders, "Date", date.c_str()); if(op != "PUT" && op != "POST"){ requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", NULL); } if(!S3fsCurl::IsPublicBucket()){ string Signature = CalcSignatureV2(op, get_header_value(requestHeaders, "Content-MD5"), get_header_value(requestHeaders, "Content-Type"), date, resource); requestHeaders = curl_slist_sort_insert(requestHeaders, "Authorization", string("AWS " + AWSAccessKeyId + ":" + Signature).c_str()); } } void S3fsCurl::insertIBMIAMHeaders() { requestHeaders = curl_slist_sort_insert(requestHeaders, "Authorization", ("Bearer " + S3fsCurl::AWSAccessToken).c_str()); if(op == "PUT" && path == mount_prefix + "/"){ // ibm-service-instance-id header is required for bucket creation requests requestHeaders = curl_slist_sort_insert(requestHeaders, "ibm-service-instance-id", S3fsCurl::AWSAccessKeyId.c_str()); } } void S3fsCurl::insertAuthHeaders() { if(!S3fsCurl::CheckIAMCredentialUpdate()){ S3FS_PRN_ERR("An error occurred in checking IAM credential."); return; // do not insert auth headers on error } if(S3fsCurl::is_ibm_iam_auth){ insertIBMIAMHeaders(); }else if(!S3fsCurl::is_sigv4){ insertV2Headers(); }else{ insertV4Headers(); } } int S3fsCurl::DeleteRequest(const char* tpath) { S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); if(!tpath){ return -1; } if(!CreateCurlHandle()){ return -1; } string resource; string turl; MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); url = prepare_url(turl.c_str()); path = get_realpath(tpath); requestHeaders = NULL; responseHeaders.clear(); op = "DELETE"; type = REQTYPE_DELETE; curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE"); S3fsCurl::AddUserAgent(hCurl); // put User-Agent return RequestPerform(); } // // Get AccessKeyId/SecretAccessKey/AccessToken/Expiration by IAM role, // and Set these value to class variable. 
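// [NOTE] for illustration only - with the default AWS setup the metadata
// service answers with JSON along the lines of
//     {"AccessKeyId":"ASIA...","SecretAccessKey":"...","Token":"...",
//      "Expiration":"2020-01-01T00:00:00Z"}
// and ParseIAMCredentialResponse() scans each comma-separated piece for
// the field names configured above (IAMCRED_ACCESSKEYID etc.).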
// int S3fsCurl::GetIAMCredentials() { if (!S3fsCurl::is_ecs && !S3fsCurl::is_ibm_iam_auth) { S3FS_PRN_INFO3("[IAM role=%s]", S3fsCurl::IAM_role.c_str()); if(S3fsCurl::IAM_role.empty()) { S3FS_PRN_ERR("IAM role name is empty."); return -EIO; } } // at first set type for handle type = REQTYPE_IAMCRED; if(!CreateCurlHandle()){ return -EIO; } // url if (is_ecs) { url = string(S3fsCurl::IAM_cred_url) + std::getenv(ECS_IAM_ENV_VAR.c_str()); } else { url = string(S3fsCurl::IAM_cred_url) + S3fsCurl::IAM_role; } requestHeaders = NULL; responseHeaders.clear(); bodydata.Clear(); string postContent; if(S3fsCurl::is_ibm_iam_auth){ url = string(S3fsCurl::IAM_cred_url); // make contents postContent += "grant_type=urn:ibm:params:oauth:grant-type:apikey"; postContent += "&response_type=cloud_iam"; postContent += "&apikey=" + S3fsCurl::AWSSecretAccessKey; // set postdata postdata = reinterpret_cast(postContent.c_str()); b_postdata = postdata; postdata_remaining = postContent.size(); // without null b_postdata_remaining = postdata_remaining; requestHeaders = curl_slist_sort_insert(requestHeaders, "Authorization", "Basic Yng6Yng="); curl_easy_setopt(hCurl, CURLOPT_POST, true); // POST curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, static_cast(postdata_remaining)); curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this); curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback); } curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); S3fsCurl::AddUserAgent(hCurl); // put User-Agent int result = RequestPerform(true); // analyzing response if(0 == result && !S3fsCurl::SetIAMCredentials(bodydata.str())){ S3FS_PRN_ERR("Something error occurred, could not get IAM credential."); result = -EIO; } bodydata.Clear(); return result; } // // Get IAM role name automatically. 
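//
// When the configured credential URL is requested with no role name appended
// (by default the EC2 endpoint
// http://169.254.169.254/latest/meta-data/iam/security-credentials/),
// the response body is simply the name of the IAM role attached to the
// instance; that name is then used by GetIAMCredentials() above to fetch
// the actual credentials.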
// bool S3fsCurl::LoadIAMRoleFromMetaData() { S3FS_PRN_INFO3("Get IAM Role name"); // at first set type for handle type = REQTYPE_IAMROLE; if(!CreateCurlHandle()){ return false; } // url url = string(S3fsCurl::IAM_cred_url); requestHeaders = NULL; responseHeaders.clear(); bodydata.Clear(); curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); S3fsCurl::AddUserAgent(hCurl); // put User-Agent int result = RequestPerform(true); // analyzing response if(0 == result && !S3fsCurl::SetIAMRoleFromMetaData(bodydata.str())){ S3FS_PRN_ERR("Something error occurred, could not get IAM role name."); result = -EIO; } bodydata.Clear(); return (0 == result); } bool S3fsCurl::AddSseRequestHead(sse_type_t ssetype, string& ssevalue, bool is_only_c, bool is_copy) { if(SSE_S3 == ssetype){ if(!is_only_c){ requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption", "AES256"); } }else if(SSE_C == ssetype){ string sseckey; if(S3fsCurl::GetSseKey(ssevalue, sseckey)){ if(is_copy){ requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-copy-source-server-side-encryption-customer-algorithm", "AES256"); requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-copy-source-server-side-encryption-customer-key", sseckey.c_str()); requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-copy-source-server-side-encryption-customer-key-md5", ssevalue.c_str()); }else{ requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-customer-algorithm", "AES256"); requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-customer-key", sseckey.c_str()); requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-customer-key-md5", ssevalue.c_str()); } }else{ S3FS_PRN_WARN("Failed to insert SSE-C header."); } }else if(SSE_KMS == ssetype){ if(!is_only_c){ if(ssevalue.empty()){ ssevalue = S3fsCurl::GetSseKmsId(); } requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption", "aws:kms"); requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-aws-kms-key-id", ssevalue.c_str()); } } return true; } // // tpath : target path for head request // bpath : saved into base_path // savedpath : saved into saved_path // ssekey_pos : -1 means "not" SSE-C type // 0 - X means SSE-C type and position for SSE-C key(0 is latest key) // bool S3fsCurl::PreHeadRequest(const char* tpath, const char* bpath, const char* savedpath, int ssekey_pos) { S3FS_PRN_INFO3("[tpath=%s][bpath=%s][save=%s][sseckeypos=%d]", SAFESTRPTR(tpath), SAFESTRPTR(bpath), SAFESTRPTR(savedpath), ssekey_pos); if(!tpath){ return false; } string resource; string turl; MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); // libcurl 7.17 does deep copy of url, deep copy "stable" url url = prepare_url(turl.c_str()); path = get_realpath(tpath); base_path = SAFESTRPTR(bpath); saved_path = SAFESTRPTR(savedpath); requestHeaders = NULL; responseHeaders.clear(); // requestHeaders if(0 <= ssekey_pos){ string md5; if(!S3fsCurl::GetSseKeyMd5(ssekey_pos, md5) || !AddSseRequestHead(SSE_C, md5, true, false)){ S3FS_PRN_ERR("Failed to set SSE-C headers for sse-c key pos(%d)(=md5(%s)).", ssekey_pos, md5.c_str()); return false; } } b_ssekey_pos = ssekey_pos; op = "HEAD"; type = REQTYPE_HEAD; // set lazy function fpLazySetup = PreHeadRequestSetCurlOpts; return true; } int 
S3fsCurl::HeadRequest(const char* tpath, headers_t& meta) { int result = -1; S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); // At first, try to get without SSE-C headers if(!PreHeadRequest(tpath) || !fpLazySetup || !fpLazySetup(this) || 0 != (result = RequestPerform())){ // If has SSE-C keys, try to get with all SSE-C keys. for(int pos = 0; static_cast(pos) < S3fsCurl::sseckeys.size(); pos++){ if(!DestroyCurlHandle()){ break; } if(!PreHeadRequest(tpath, NULL, NULL, pos)){ break; } if(!fpLazySetup || !fpLazySetup(this)){ S3FS_PRN_ERR("Failed to lazy setup in single head request."); break; } if(0 == (result = RequestPerform())){ break; } } if(0 != result){ DestroyCurlHandle(); // not check result. return result; } } // file exists in s3 // fixme: clean this up. meta.clear(); for(headers_t::iterator iter = responseHeaders.begin(); iter != responseHeaders.end(); ++iter){ string key = lower(iter->first); string value = iter->second; if(key == "content-type"){ meta[iter->first] = value; }else if(key == "content-length"){ meta[iter->first] = value; }else if(key == "etag"){ meta[iter->first] = value; }else if(key == "last-modified"){ meta[iter->first] = value; }else if(key.substr(0, 5) == "x-amz"){ meta[key] = value; // key is lower case for "x-amz" } } return 0; } int S3fsCurl::PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy) { S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); if(!tpath){ return -1; } if(!CreateCurlHandle()){ return -1; } string resource; string turl; MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); url = prepare_url(turl.c_str()); path = get_realpath(tpath); requestHeaders = NULL; responseHeaders.clear(); bodydata.Clear(); string contype = S3fsCurl::LookupMimeType(string(tpath)); requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str()); // Make request headers for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ string key = lower(iter->first); string value = iter->second; if(key.substr(0, 9) == "x-amz-acl"){ // not set value, but after set it. }else if(key.substr(0, 10) == "x-amz-meta"){ requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); }else if(key == "x-amz-copy-source"){ requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); }else if(key == "x-amz-server-side-encryption" && value != "aws:kms"){ // Only copy mode. if(is_copy && !AddSseRequestHead(SSE_S3, value, false, true)){ S3FS_PRN_WARN("Failed to insert SSE-S3 header."); } }else if(key == "x-amz-server-side-encryption-aws-kms-key-id"){ // Only copy mode. if(is_copy && !value.empty() && !AddSseRequestHead(SSE_KMS, value, false, true)){ S3FS_PRN_WARN("Failed to insert SSE-KMS header."); } }else if(key == "x-amz-server-side-encryption-customer-key-md5"){ // Only copy mode. 
      if(is_copy){
        if(!AddSseRequestHead(SSE_C, value, true, true) || !AddSseRequestHead(SSE_C, value, true, false)){
          S3FS_PRN_WARN("Failed to insert SSE-C header.");
        }
      }
    }
  }

  // "x-amz-acl", storage class, sse
  if(S3fsCurl::default_acl != PRIVATE){
    requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-acl", acl_to_string(S3fsCurl::default_acl));
  }
  if(REDUCED_REDUNDANCY == GetStorageClass()){
    requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "REDUCED_REDUNDANCY");
  } else if(STANDARD_IA == GetStorageClass()){
    requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "STANDARD_IA");
  } else if(ONEZONE_IA == GetStorageClass()){
    requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "ONEZONE_IA");
  } else if(INTELLIGENT_TIERING == GetStorageClass()) {
    requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "INTELLIGENT_TIERING");
  }
  // SSE
  if(!is_copy){
    string ssevalue;
    if(!AddSseRequestHead(S3fsCurl::GetSseType(), ssevalue, false, false)){
      S3FS_PRN_WARN("Failed to set SSE header, but continue...");
    }
  }
  if(is_use_ahbe){
    // set additional header by ahbe conf
    requestHeaders = AdditionalHeader::get()->AddHeader(requestHeaders, tpath);
  }

  op = "PUT";
  type = REQTYPE_PUTHEAD;

  // setopt
  curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
  curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true);                // HTTP PUT
  curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata);
  curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
  curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0);               // Content-Length
  S3fsCurl::AddUserAgent(hCurl);                                // put User-Agent

  S3FS_PRN_INFO3("copying... [path=%s]", tpath);

  int result = RequestPerform();
  if(0 == result){
    // PUT returns 200 status code with something error, thus
    // we need to check body.
    //
    // example error body:
    //     <?xml version="1.0" encoding="UTF-8"?>
    //     <Error>
    //       <Code>AccessDenied</Code>
    //       <Message>Access Denied</Message>
    //       <RequestId>E4CA6F6767D6685C</RequestId>
    //       <HostId>BHzLOATeDuvN8Es1wI8IcERq4kl4dc2A9tOB8Yqr39Ys6fl7N4EJ8sjGiVvu6wLP</HostId>
    //     </Error>
    //
    const char* pstrbody = bodydata.str();
    if(!pstrbody || NULL != strcasestr(pstrbody, "<Error>")){
      S3FS_PRN_ERR("PutHeadRequest get 200 status response, but it included error body(or NULL). The request failed during copying the object in S3.");
      S3FS_PRN_DBG("PutHeadRequest Response Body : %s", (pstrbody ?
pstrbody : "(null)")); result = -EIO; } } bodydata.Clear(); return result; } int S3fsCurl::PutRequest(const char* tpath, headers_t& meta, int fd) { struct stat st; FILE* file = NULL; S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); if(!tpath){ return -1; } if(-1 != fd){ // duplicate fd int fd2; if(-1 == (fd2 = dup(fd)) || -1 == fstat(fd2, &st) || 0 != lseek(fd2, 0, SEEK_SET) || NULL == (file = fdopen(fd2, "rb"))){ S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno); if(-1 != fd2){ close(fd2); } return -errno; } b_infile = file; }else{ // This case is creating zero byte object.(calling by create_file_object()) S3FS_PRN_INFO3("create zero byte file object."); } if(!CreateCurlHandle()){ if(file){ fclose(file); } return -1; } string resource; string turl; MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); url = prepare_url(turl.c_str()); path = get_realpath(tpath); requestHeaders = NULL; responseHeaders.clear(); bodydata.Clear(); // Make request headers string strMD5; if(-1 != fd && S3fsCurl::is_content_md5){ strMD5 = s3fs_get_content_md5(fd); requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-MD5", strMD5.c_str()); } string contype = S3fsCurl::LookupMimeType(string(tpath)); requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str()); for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ string key = lower(iter->first); string value = iter->second; if(key.substr(0, 9) == "x-amz-acl"){ // not set value, but after set it. }else if(key.substr(0, 10) == "x-amz-meta"){ requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); }else if(key == "x-amz-server-side-encryption" && value != "aws:kms"){ // skip this header, because this header is specified after logic. }else if(key == "x-amz-server-side-encryption-aws-kms-key-id"){ // skip this header, because this header is specified after logic. }else if(key == "x-amz-server-side-encryption-customer-key-md5"){ // skip this header, because this header is specified after logic. 
} } // "x-amz-acl", storage class, sse if(S3fsCurl::default_acl != PRIVATE){ requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-acl", acl_to_string(S3fsCurl::default_acl)); } if(REDUCED_REDUNDANCY == GetStorageClass()){ requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "REDUCED_REDUNDANCY"); } else if(STANDARD_IA == GetStorageClass()){ requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "STANDARD_IA"); } else if(ONEZONE_IA == GetStorageClass()){ requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "ONEZONE_IA"); } else if(INTELLIGENT_TIERING == GetStorageClass()) { requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "INTELLIGENT_TIERING"); } // SSE string ssevalue; if(!AddSseRequestHead(S3fsCurl::GetSseType(), ssevalue, false, false)){ S3FS_PRN_WARN("Failed to set SSE header, but continue..."); } if(is_use_ahbe){ // set additional header by ahbe conf requestHeaders = AdditionalHeader::get()->AddHeader(requestHeaders, tpath); } op = "PUT"; type = REQTYPE_PUT; // setopt curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true); // HTTP PUT curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); if(file){ curl_easy_setopt(hCurl, CURLOPT_INFILESIZE_LARGE, static_cast(st.st_size)); // Content-Length curl_easy_setopt(hCurl, CURLOPT_INFILE, file); }else{ curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0); // Content-Length: 0 } S3fsCurl::AddUserAgent(hCurl); // put User-Agent S3FS_PRN_INFO3("uploading... [path=%s][fd=%d][size=%lld]", tpath, fd, static_cast(-1 != fd ? st.st_size : 0)); int result = RequestPerform(); bodydata.Clear(); if(file){ fclose(file); } return result; } int S3fsCurl::PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size, sse_type_t ssetype, string& ssevalue) { S3FS_PRN_INFO3("[tpath=%s][start=%lld][size=%lld]", SAFESTRPTR(tpath), static_cast(start), static_cast(size)); if(!tpath || -1 == fd || 0 > start || 0 > size){ return -1; } string resource; string turl; MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); url = prepare_url(turl.c_str()); path = get_realpath(tpath); requestHeaders = NULL; responseHeaders.clear(); if(-1 != start && 0 < size){ string range = "bytes="; range += str(start); range += "-"; range += str(start + size - 1); requestHeaders = curl_slist_sort_insert(requestHeaders, "Range", range.c_str()); } // SSE if(!AddSseRequestHead(ssetype, ssevalue, true, false)){ S3FS_PRN_WARN("Failed to set SSE header, but continue..."); } op = "GET"; type = REQTYPE_GET; // set lazy function fpLazySetup = PreGetObjectRequestSetCurlOpts; // set info for callback func. // (use only fd, startpos and size, other member is not used.) partdata.clear(); partdata.fd = fd; partdata.startpos = start; partdata.size = size; b_partdata_startpos = start; b_partdata_size = size; b_ssetype = ssetype; b_ssevalue = ssevalue; b_ssekey_pos = -1; // not use this value for get object. 
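  // Worked example of the Range header built above (illustration only):
  // for start=4096 and size=1024 the request carries
  //     Range: bytes=4096-5119
  // i.e. the range is inclusive and ends at start + size - 1.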
return 0; } int S3fsCurl::GetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size) { int result; S3FS_PRN_INFO3("[tpath=%s][start=%lld][size=%lld]", SAFESTRPTR(tpath), static_cast(start), static_cast(size)); if(!tpath){ return -1; } sse_type_t ssetype; string ssevalue; if(!get_object_sse_type(tpath, ssetype, ssevalue)){ S3FS_PRN_WARN("Failed to get SSE type for file(%s).", SAFESTRPTR(tpath)); } if(0 != (result = PreGetObjectRequest(tpath, fd, start, size, ssetype, ssevalue))){ return result; } if(!fpLazySetup || !fpLazySetup(this)){ S3FS_PRN_ERR("Failed to lazy setup in single get object request."); return -1; } S3FS_PRN_INFO3("downloading... [path=%s][fd=%d]", tpath, fd); result = RequestPerform(); partdata.clear(); return result; } int S3fsCurl::CheckBucket() { S3FS_PRN_INFO3("check a bucket."); if(!CreateCurlHandle()){ return -1; } string resource; string turl; MakeUrlResource(get_realpath("/").c_str(), resource, turl); url = prepare_url(turl.c_str()); path = get_realpath("/"); requestHeaders = NULL; responseHeaders.clear(); bodydata.Clear(); op = "GET"; type = REQTYPE_CHKBUCKET; // setopt curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); S3fsCurl::AddUserAgent(hCurl); // put User-Agent int result = RequestPerform(); if (result != 0) { S3FS_PRN_ERR("Check bucket failed, S3 response: %s", bodydata.str()); } return result; } int S3fsCurl::ListBucketRequest(const char* tpath, const char* query) { S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); if(!tpath){ return -1; } if(!CreateCurlHandle()){ return -1; } string resource; string turl; MakeUrlResource("", resource, turl); // NOTICE: path is "". if(query){ turl += "?"; turl += query; query_string = query; } url = prepare_url(turl.c_str()); path = get_realpath(tpath); requestHeaders = NULL; responseHeaders.clear(); bodydata.Clear(); op = "GET"; type = REQTYPE_LISTBUCKET; // setopt curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); S3fsCurl::AddUserAgent(hCurl); // put User-Agent return RequestPerform(); } // // Initialize multipart upload // // Example : // POST /example-object?uploads HTTP/1.1 // Host: example-bucket.s3.amazonaws.com // Date: Mon, 1 Nov 2010 20:34:56 GMT // Authorization: AWS VGhpcyBtZXNzYWdlIHNpZ25lZCBieSBlbHZpbmc= // int S3fsCurl::PreMultipartPostRequest(const char* tpath, headers_t& meta, string& upload_id, bool is_copy) { S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); if(!tpath){ return -1; } if(!CreateCurlHandle()){ return -1; } string resource; string turl; MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); query_string = "uploads"; turl += "?" + query_string; url = prepare_url(turl.c_str()); path = get_realpath(tpath); requestHeaders = NULL; bodydata.Clear(); responseHeaders.clear(); string contype = S3fsCurl::LookupMimeType(string(tpath)); for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ string key = lower(iter->first); string value = iter->second; if(key.substr(0, 9) == "x-amz-acl"){ // not set value, but after set it. }else if(key.substr(0, 10) == "x-amz-meta"){ requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); }else if(key == "x-amz-server-side-encryption" && value != "aws:kms"){ // Only copy mode. 
      if(is_copy && !AddSseRequestHead(SSE_S3, value, false, true)){
        S3FS_PRN_WARN("Failed to insert SSE-S3 header.");
      }
    }else if(key == "x-amz-server-side-encryption-aws-kms-key-id"){
      // Only copy mode.
      if(is_copy && !value.empty() && !AddSseRequestHead(SSE_KMS, value, false, true)){
        S3FS_PRN_WARN("Failed to insert SSE-KMS header.");
      }
    }else if(key == "x-amz-server-side-encryption-customer-key-md5"){
      // Only copy mode.
      if(is_copy){
        if(!AddSseRequestHead(SSE_C, value, true, true) || !AddSseRequestHead(SSE_C, value, true, false)){
          S3FS_PRN_WARN("Failed to insert SSE-C header.");
        }
      }
    }
  }

  // "x-amz-acl", storage class, sse
  if(S3fsCurl::default_acl != PRIVATE){
    requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-acl", acl_to_string(S3fsCurl::default_acl));
  }
  if(REDUCED_REDUNDANCY == GetStorageClass()){
    requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "REDUCED_REDUNDANCY");
  } else if(STANDARD_IA == GetStorageClass()){
    requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "STANDARD_IA");
  } else if(ONEZONE_IA == GetStorageClass()){
    requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "ONEZONE_IA");
  } else if(INTELLIGENT_TIERING == GetStorageClass()) {
    requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "INTELLIGENT_TIERING");
  }
  // SSE
  if(!is_copy){
    string ssevalue;
    if(!AddSseRequestHead(S3fsCurl::GetSseType(), ssevalue, false, false)){
      S3FS_PRN_WARN("Failed to set SSE header, but continue...");
    }
  }
  if(is_use_ahbe){
    // set additional header by ahbe conf
    requestHeaders = AdditionalHeader::get()->AddHeader(requestHeaders, tpath);
  }

  requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept", NULL);
  requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Length", NULL);
  requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str());

  op = "POST";
  type = REQTYPE_PREMULTIPOST;

  // setopt
  curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
  curl_easy_setopt(hCurl, CURLOPT_POST, true);              // POST
  curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata);
  curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
  curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, 0);
  S3fsCurl::AddUserAgent(hCurl);                            // put User-Agent

  // request
  int result;
  if(0 != (result = RequestPerform())){
    bodydata.Clear();
    return result;
  }

  if(!simple_parse_xml(bodydata.str(), bodydata.size(), "UploadId", upload_id)){
    bodydata.Clear();
    return -1;
  }
  bodydata.Clear();
  return 0;
}

int S3fsCurl::CompleteMultipartPostRequest(const char* tpath, const string& upload_id, etaglist_t& parts)
{
  S3FS_PRN_INFO3("[tpath=%s][parts=%zu]", SAFESTRPTR(tpath), parts.size());

  if(!tpath){
    return -1;
  }

  // make contents
  string postContent;
  postContent += "<CompleteMultipartUpload>\n";
  for(int cnt = 0; cnt < (int)parts.size(); cnt++){
    if(0 == parts[cnt].length()){
      S3FS_PRN_ERR("%d file part is not finished uploading.", cnt + 1);
      return -1;
    }
    postContent += "<Part>\n";
    postContent += "  <PartNumber>" + str(cnt + 1) + "</PartNumber>\n";
    postContent += "  <ETag>" + parts[cnt] + "</ETag>\n";
    postContent += "</Part>\n";
  }
  postContent += "</CompleteMultipartUpload>\n";

  // set postdata
  postdata             = reinterpret_cast<const unsigned char*>(postContent.c_str());
  b_postdata           = postdata;
  postdata_remaining   = postContent.size(); // without null
  b_postdata_remaining = postdata_remaining;

  if(!CreateCurlHandle()){
    return -1;
  }
  string resource;
  string turl;
  MakeUrlResource(get_realpath(tpath).c_str(), resource, turl);

  query_string = "uploadId=" + upload_id;
  turl += "?"
+ query_string; url = prepare_url(turl.c_str()); path = get_realpath(tpath); requestHeaders = NULL; bodydata.Clear(); responseHeaders.clear(); string contype = S3fsCurl::LookupMimeType(string(tpath)); requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept", NULL); requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str()); op = "POST"; type = REQTYPE_COMPLETEMULTIPOST; // setopt curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); curl_easy_setopt(hCurl, CURLOPT_POST, true); // POST curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, static_cast(postdata_remaining)); curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this); curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback); S3fsCurl::AddUserAgent(hCurl); // put User-Agent // request int result = RequestPerform(); bodydata.Clear(); postdata = NULL; return result; } int S3fsCurl::MultipartListRequest(string& body) { S3FS_PRN_INFO3("list request(multipart)"); if(!CreateCurlHandle()){ return -1; } string resource; string turl; path = get_realpath("/"); MakeUrlResource(path.c_str(), resource, turl); query_string = "uploads"; turl += "?" + query_string; url = prepare_url(turl.c_str()); requestHeaders = NULL; responseHeaders.clear(); bodydata.Clear(); requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept", NULL); op = "GET"; type = REQTYPE_MULTILIST; // setopt curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); S3fsCurl::AddUserAgent(hCurl); // put User-Agent int result; if(0 == (result = RequestPerform()) && 0 < bodydata.size()){ body = bodydata.str(); }else{ body = ""; } bodydata.Clear(); return result; } int S3fsCurl::AbortMultipartUpload(const char* tpath, const string& upload_id) { S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); if(!tpath){ return -1; } if(!CreateCurlHandle()){ return -1; } string resource; string turl; MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); query_string = "uploadId=" + upload_id; turl += "?" 
+ query_string; url = prepare_url(turl.c_str()); path = get_realpath(tpath); requestHeaders = NULL; responseHeaders.clear(); op = "DELETE"; type = REQTYPE_ABORTMULTIUPLOAD; curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE"); S3fsCurl::AddUserAgent(hCurl); // put User-Agent return RequestPerform(); } // // PUT /ObjectName?partNumber=PartNumber&uploadId=UploadId HTTP/1.1 // Host: BucketName.s3.amazonaws.com // Date: date // Content-Length: Size // Authorization: Signature // // PUT /my-movie.m2ts?partNumber=1&uploadId=VCVsb2FkIElEIGZvciBlbZZpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZR HTTP/1.1 // Host: example-bucket.s3.amazonaws.com // Date: Mon, 1 Nov 2010 20:34:56 GMT // Content-Length: 10485760 // Content-MD5: pUNXr/BjKK5G2UKvaRRrOA== // Authorization: AWS VGhpcyBtZXNzYWdlIHNpZ25lZGGieSRlbHZpbmc= // int S3fsCurl::UploadMultipartPostSetup(const char* tpath, int part_num, const string& upload_id) { S3FS_PRN_INFO3("[tpath=%s][start=%lld][size=%lld][part=%d]", SAFESTRPTR(tpath), static_cast(partdata.startpos), static_cast(partdata.size), part_num); if(-1 == partdata.fd || -1 == partdata.startpos || -1 == partdata.size){ return -1; } requestHeaders = NULL; // make md5 and file pointer if(S3fsCurl::is_content_md5){ unsigned char *md5raw = s3fs_md5hexsum(partdata.fd, partdata.startpos, partdata.size); if(md5raw == NULL){ S3FS_PRN_ERR("Could not make md5 for file(part %d)", part_num); return -1; } partdata.etag = s3fs_hex(md5raw, get_md5_digest_length()); char* md5base64p = s3fs_base64(md5raw, get_md5_digest_length()); requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-MD5", md5base64p); delete[] md5base64p; delete[] md5raw; } // make request query_string = "partNumber=" + str(part_num) + "&uploadId=" + upload_id; string urlargs = "?" + query_string; string resource; string turl; MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); turl += urlargs; url = prepare_url(turl.c_str()); path = get_realpath(tpath); bodydata.Clear(); headdata.Clear(); responseHeaders.clear(); // SSE if(SSE_C == S3fsCurl::GetSseType()){ string ssevalue; if(!AddSseRequestHead(S3fsCurl::GetSseType(), ssevalue, false, false)){ S3FS_PRN_WARN("Failed to set SSE header, but continue..."); } } requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept", NULL); op = "PUT"; type = REQTYPE_UPLOADMULTIPOST; // set lazy function fpLazySetup = UploadMultipartPostSetCurlOpts; return 0; } int S3fsCurl::UploadMultipartPostRequest(const char* tpath, int part_num, const string& upload_id) { int result; S3FS_PRN_INFO3("[tpath=%s][start=%lld][size=%lld][part=%d]", SAFESTRPTR(tpath), static_cast(partdata.startpos), static_cast(partdata.size), part_num); // setup if(0 != (result = S3fsCurl::UploadMultipartPostSetup(tpath, part_num, upload_id))){ return result; } if(!fpLazySetup || !fpLazySetup(this)){ S3FS_PRN_ERR("Failed to lazy setup in multipart upload post request."); return -1; } // request if(0 == (result = RequestPerform())){ // UploadMultipartPostComplete returns true on success -> convert to 0 result = !UploadMultipartPostComplete(); } // closing bodydata.Clear(); headdata.Clear(); return result; } int S3fsCurl::CopyMultipartPostSetup(const char* from, const char* to, int part_num, const string& upload_id, headers_t& meta) { S3FS_PRN_INFO3("[from=%s][to=%s][part=%d]", SAFESTRPTR(from), SAFESTRPTR(to), part_num); if(!from || !to){ return -1; } query_string = "partNumber=" + str(part_num) + "&uploadId=" + upload_id; string urlargs = "?" 
+ query_string; string resource; string turl; MakeUrlResource(get_realpath(to).c_str(), resource, turl); turl += urlargs; url = prepare_url(turl.c_str()); path = get_realpath(to); requestHeaders = NULL; responseHeaders.clear(); bodydata.Clear(); headdata.Clear(); string contype = S3fsCurl::LookupMimeType(string(to)); requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str()); // Make request headers for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ string key = lower(iter->first); string value = iter->second; if(key == "x-amz-copy-source"){ requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); }else if(key == "x-amz-copy-source-range"){ requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); } // NOTICE: x-amz-acl, x-amz-server-side-encryption is not set! } op = "PUT"; type = REQTYPE_COPYMULTIPOST; // set lazy function fpLazySetup = CopyMultipartPostSetCurlOpts; // request S3FS_PRN_INFO3("copying... [from=%s][to=%s][part=%d]", from, to, part_num); return 0; } bool S3fsCurl::UploadMultipartPostComplete() { headers_t::iterator it = responseHeaders.find("ETag"); if (it == responseHeaders.end()) { return false; } // check etag(md5); // // The ETAG when using SSE_C and SSE_KMS does not reflect the MD5 we sent // SSE_C: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html // SSE_KMS is ignored in the above, but in the following it states the same in the highlights: // https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html // if(S3fsCurl::is_content_md5 && SSE_C != S3fsCurl::GetSseType() && SSE_KMS != S3fsCurl::GetSseType()){ if(!etag_equals(it->second, partdata.etag)){ return false; } } partdata.etaglist->at(partdata.etagpos).assign(it->second); partdata.uploaded = true; return true; } bool S3fsCurl::CopyMultipartPostCallback(S3fsCurl* s3fscurl) { if(!s3fscurl){ return false; } return s3fscurl->CopyMultipartPostComplete(); } bool S3fsCurl::CopyMultipartPostComplete() { std::string etag; partdata.uploaded = simple_parse_xml(bodydata.str(), bodydata.size(), "ETag", etag); if(etag.size() >= 2 && *etag.begin() == '"' && *etag.rbegin() == '"'){ etag.assign(etag.substr(1, etag.size() - 2)); } partdata.etaglist->at(partdata.etagpos).assign(etag); bodydata.Clear(); headdata.Clear(); return true; } bool S3fsCurl::MixMultipartPostComplete() { bool result; if(-1 == partdata.fd){ result = CopyMultipartPostComplete(); }else{ result = UploadMultipartPostComplete(); } return result; } int S3fsCurl::MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool is_copy) { int result; string upload_id; off_t chunk; off_t bytes_remaining; etaglist_t list; ostringstream strrange; S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); if(0 != (result = PreMultipartPostRequest(tpath, meta, upload_id, is_copy))){ return result; } DestroyCurlHandle(); // Initialize S3fsMultiCurl S3fsMultiCurl curlmulti(GetMaxParallelCount()); curlmulti.SetSuccessCallback(S3fsCurl::CopyMultipartPostCallback); curlmulti.SetRetryCallback(S3fsCurl::CopyMultipartPostRetryCallback); for(bytes_remaining = size, chunk = 0; 0 < bytes_remaining; bytes_remaining -= chunk){ chunk = bytes_remaining > MAX_MULTI_COPY_SOURCE_SIZE ? 
MAX_MULTI_COPY_SOURCE_SIZE : bytes_remaining; strrange << "bytes=" << (size - bytes_remaining) << "-" << (size - bytes_remaining + chunk - 1); meta["x-amz-copy-source-range"] = strrange.str(); strrange.str(""); strrange.clear(stringstream::goodbit); // s3fscurl sub object S3fsCurl* s3fscurl_para = new S3fsCurl(true); s3fscurl_para->b_from = SAFESTRPTR(tpath); s3fscurl_para->b_meta = meta; s3fscurl_para->partdata.add_etag_list(&list); // initiate upload part for parallel if(0 != (result = s3fscurl_para->CopyMultipartPostSetup(tpath, tpath, list.size(), upload_id, meta))){ S3FS_PRN_ERR("failed uploading part setup(%d)", result); delete s3fscurl_para; return result; } // set into parallel object if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){ S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath); delete s3fscurl_para; return -1; } } // Multi request if(0 != (result = curlmulti.Request())){ S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result); S3fsCurl s3fscurl_abort(true); int result2 = s3fscurl_abort.AbortMultipartUpload(tpath, upload_id); s3fscurl_abort.DestroyCurlHandle(); if(result2 != 0){ S3FS_PRN_ERR("error aborting multipart upload(errno=%d).", result2); } return result; } if(0 != (result = CompleteMultipartPostRequest(tpath, upload_id, list))){ return result; } return 0; } int S3fsCurl::MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool is_copy) { int result; string upload_id; struct stat st; int fd2; etaglist_t list; off_t remaining_bytes; off_t chunk; S3FS_PRN_INFO3("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd); // duplicate fd if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){ S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno); if(-1 != fd2){ close(fd2); } return -errno; } if(-1 == fstat(fd2, &st)){ S3FS_PRN_ERR("Invalid file descriptor(errno=%d)", errno); close(fd2); return -errno; } if(0 != (result = PreMultipartPostRequest(tpath, meta, upload_id, is_copy))){ close(fd2); return result; } DestroyCurlHandle(); // cycle through open fd, pulling off 10MB chunks at a time for(remaining_bytes = st.st_size; 0 < remaining_bytes; remaining_bytes -= chunk){ // chunk size chunk = remaining_bytes > S3fsCurl::multipart_size ? 
S3fsCurl::multipart_size : remaining_bytes; // set partdata.fd = fd2; partdata.startpos = st.st_size - remaining_bytes; partdata.size = chunk; b_partdata_startpos = partdata.startpos; b_partdata_size = partdata.size; partdata.add_etag_list(&list); // upload part if(0 != (result = UploadMultipartPostRequest(tpath, list.size(), upload_id))){ S3FS_PRN_ERR("failed uploading part(%d)", result); close(fd2); return result; } DestroyCurlHandle(); } close(fd2); if(0 != (result = CompleteMultipartPostRequest(tpath, upload_id, list))){ return result; } return 0; } int S3fsCurl::MultipartUploadRequest(const string& upload_id, const char* tpath, int fd, off_t offset, off_t size, etaglist_t& list) { S3FS_PRN_INFO3("[upload_id=%s][tpath=%s][fd=%d][offset=%lld][size=%lld]", upload_id.c_str(), SAFESTRPTR(tpath), fd, static_cast(offset), static_cast(size)); // duplicate fd int fd2; if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){ S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno); if(-1 != fd2){ close(fd2); } return -errno; } // set partdata.fd = fd2; partdata.startpos = offset; partdata.size = size; b_partdata_startpos = partdata.startpos; b_partdata_size = partdata.size; partdata.add_etag_list(&list); // upload part int result; if(0 != (result = UploadMultipartPostRequest(tpath, list.size(), upload_id))){ S3FS_PRN_ERR("failed uploading part(%d)", result); close(fd2); return result; } DestroyCurlHandle(); close(fd2); return 0; } int S3fsCurl::MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size) { int result; string upload_id; off_t chunk; off_t bytes_remaining; etaglist_t list; ostringstream strrange; S3FS_PRN_INFO3("[from=%s][to=%s]", SAFESTRPTR(from), SAFESTRPTR(to)); string srcresource; string srcurl; MakeUrlResource(get_realpath(from).c_str(), srcresource, srcurl); meta["Content-Type"] = S3fsCurl::LookupMimeType(string(to)); meta["x-amz-copy-source"] = srcresource; if(0 != (result = PreMultipartPostRequest(to, meta, upload_id, true))){ return result; } DestroyCurlHandle(); // Initialize S3fsMultiCurl S3fsMultiCurl curlmulti(GetMaxParallelCount()); curlmulti.SetSuccessCallback(S3fsCurl::CopyMultipartPostCallback); curlmulti.SetRetryCallback(S3fsCurl::CopyMultipartPostRetryCallback); for(bytes_remaining = size, chunk = 0; 0 < bytes_remaining; bytes_remaining -= chunk){ chunk = bytes_remaining > MAX_MULTI_COPY_SOURCE_SIZE ? 
MAX_MULTI_COPY_SOURCE_SIZE : bytes_remaining; strrange << "bytes=" << (size - bytes_remaining) << "-" << (size - bytes_remaining + chunk - 1); meta["x-amz-copy-source-range"] = strrange.str(); strrange.str(""); strrange.clear(stringstream::goodbit); // s3fscurl sub object S3fsCurl* s3fscurl_para = new S3fsCurl(true); s3fscurl_para->b_from = SAFESTRPTR(from); s3fscurl_para->b_meta = meta; s3fscurl_para->partdata.add_etag_list(&list); // initiate upload part for parallel if(0 != (result = s3fscurl_para->CopyMultipartPostSetup(from, to, list.size(), upload_id, meta))){ S3FS_PRN_ERR("failed uploading part setup(%d)", result); delete s3fscurl_para; return result; } // set into parallel object if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){ S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", to); delete s3fscurl_para; return -1; } } // Multi request if(0 != (result = curlmulti.Request())){ S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result); S3fsCurl s3fscurl_abort(true); int result2 = s3fscurl_abort.AbortMultipartUpload(to, upload_id); s3fscurl_abort.DestroyCurlHandle(); if(result2 != 0){ S3FS_PRN_ERR("error aborting multipart upload(errno=%d).", result2); } return result; } if(0 != (result = CompleteMultipartPostRequest(to, upload_id, list))){ return result; } return 0; } //------------------------------------------------------------------- // method for S3fsMultiCurl //------------------------------------------------------------------- S3fsMultiCurl::S3fsMultiCurl(int maxParallelism) : maxParallelism(maxParallelism) , SuccessCallback(NULL) , RetryCallback(NULL) { int res; pthread_mutexattr_t attr; pthread_mutexattr_init(&attr); #if S3FS_PTHREAD_ERRORCHECK pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); #endif if (0 != (res = pthread_mutex_init(&completed_tids_lock, &attr))) { S3FS_PRN_ERR("could not initialize completed_tids_lock: %i", res); } } S3fsMultiCurl::~S3fsMultiCurl() { Clear(); int res; if(0 != (res = pthread_mutex_destroy(&completed_tids_lock))){ S3FS_PRN_ERR("could not destroy completed_tids_lock: %i", res); } } bool S3fsMultiCurl::ClearEx(bool is_all) { s3fscurllist_t::iterator iter; for(iter = clist_req.begin(); iter != clist_req.end(); ++iter){ S3fsCurl* s3fscurl = *iter; if(s3fscurl){ s3fscurl->DestroyCurlHandle(); delete s3fscurl; // with destroy curl handle. 
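      // (note: clist_req holds only the batch of requests currently being
      //  performed, while queued and retried requests live in clist_all;
      //  ClearEx(false) therefore leaves the latter untouched)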
} } clist_req.clear(); if(is_all){ for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){ S3fsCurl* s3fscurl = *iter; s3fscurl->DestroyCurlHandle(); delete s3fscurl; } clist_all.clear(); } S3FS_MALLOCTRIM(0); return true; } S3fsMultiSuccessCallback S3fsMultiCurl::SetSuccessCallback(S3fsMultiSuccessCallback function) { S3fsMultiSuccessCallback old = SuccessCallback; SuccessCallback = function; return old; } S3fsMultiRetryCallback S3fsMultiCurl::SetRetryCallback(S3fsMultiRetryCallback function) { S3fsMultiRetryCallback old = RetryCallback; RetryCallback = function; return old; } bool S3fsMultiCurl::SetS3fsCurlObject(S3fsCurl* s3fscurl) { if(!s3fscurl){ return false; } clist_all.push_back(s3fscurl); return true; } int S3fsMultiCurl::MultiPerform() { std::vector threads; bool success = true; bool isMultiHead = false; Semaphore sem(GetMaxParallelism()); int rc; for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ++iter) { pthread_t thread; S3fsCurl* s3fscurl = *iter; s3fscurl->sem = &sem; s3fscurl->completed_tids_lock = &completed_tids_lock; s3fscurl->completed_tids = &completed_tids; sem.wait(); { AutoLock lock(&completed_tids_lock); for(std::vector::iterator it = completed_tids.begin(); it != completed_tids.end(); ++it){ void* retval; rc = pthread_join(*it, &retval); if (rc) { success = false; S3FS_PRN_ERR("failed pthread_join - rc(%d) %s", rc, strerror(rc)); } else { int int_retval = (int)(intptr_t)(retval); if (int_retval && !(int_retval == -ENOENT && isMultiHead)) { S3FS_PRN_WARN("thread failed - rc(%d)", int_retval); } } } completed_tids.clear(); } isMultiHead |= s3fscurl->GetOp() == "HEAD"; rc = pthread_create(&thread, NULL, S3fsMultiCurl::RequestPerformWrapper, static_cast(s3fscurl)); if (rc != 0) { success = false; S3FS_PRN_ERR("failed pthread_create - rc(%d)", rc); break; } threads.push_back(thread); } for(int i = 0; i < sem.get_value(); ++i){ sem.wait(); } AutoLock lock(&completed_tids_lock); for (std::vector::iterator titer = completed_tids.begin(); titer != completed_tids.end(); ++titer) { void* retval; rc = pthread_join(*titer, &retval); if (rc) { success = false; S3FS_PRN_ERR("failed pthread_join - rc(%d)", rc); } else { int int_retval = (int)(intptr_t)(retval); if (int_retval && !(int_retval == -ENOENT && isMultiHead)) { S3FS_PRN_WARN("thread failed - rc(%d)", int_retval); } } } completed_tids.clear(); return success ? 0 : -EIO; } int S3fsMultiCurl::MultiRead() { int result = 0; for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ){ S3fsCurl* s3fscurl = *iter; bool isRetry = false; bool isPostpone = false; long responseCode = S3FSCURL_RESPONSECODE_NOTSET; if(s3fscurl->GetResponseCode(responseCode, false)){ if(S3FSCURL_RESPONSECODE_NOTSET == responseCode){ // This is a case where the processing result has not yet been updated (should be very rare). 
isPostpone = true; }else if(400 > responseCode){ // add into stat cache if(SuccessCallback && !SuccessCallback(s3fscurl)){ S3FS_PRN_WARN("error from callback function(%s).", s3fscurl->url.c_str()); } }else if(400 == responseCode){ // as possibly in multipart S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str()); isRetry = true; }else if(404 == responseCode){ // not found // HEAD requests on readdir_multi_head can return 404 if(s3fscurl->GetOp() != "HEAD"){ S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str()); } }else if(500 == responseCode){ // case of all other result, do retry.(11/13/2013) // because it was found that s3fs got 500 error from S3, but could success // to retry it. S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str()); isRetry = true; }else{ // Retry in other case. S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str()); isRetry = true; } }else{ S3FS_PRN_ERR("failed a request(Unknown response code: %s)", s3fscurl->url.c_str()); } if(isPostpone){ clist_req.erase(iter); clist_req.push_back(s3fscurl); // Re-evaluate at the end iter = clist_req.begin(); }else{ if(!isRetry || 0 != result){ // If an EIO error has already occurred, it will be terminated // immediately even if retry processing is required. s3fscurl->DestroyCurlHandle(); delete s3fscurl; }else{ S3fsCurl* retrycurl = NULL; // For retry if(RetryCallback){ retrycurl = RetryCallback(s3fscurl); if(NULL != retrycurl){ clist_all.push_back(retrycurl); }else{ // set EIO and wait for other parts. result = -EIO; } } if(s3fscurl != retrycurl){ s3fscurl->DestroyCurlHandle(); delete s3fscurl; } } iter = clist_req.erase(iter); } } clist_req.clear(); if(0 != result){ // If an EIO error has already occurred, clear all retry objects. for(s3fscurllist_t::iterator iter = clist_all.begin(); iter != clist_all.end(); ++iter){ S3fsCurl* s3fscurl = *iter; s3fscurl->DestroyCurlHandle(); delete s3fscurl; } clist_all.clear(); } return result; } int S3fsMultiCurl::Request() { S3FS_PRN_INFO3("[count=%zu]", clist_all.size()); // Make request list. // // Send multi request loop( with retry ) // (When many request is sends, sometimes gets "Couldn't connect to server") // while(!clist_all.empty()){ // set curl handle to multi handle int result; s3fscurllist_t::iterator iter; for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){ S3fsCurl* s3fscurl = *iter; clist_req.push_back(s3fscurl); } clist_all.clear(); // Send multi request. 
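    // Each pass of this loop moves everything queued in clist_all into
    // clist_req, performs those requests in parallel, and lets MultiRead()
    // push any retry objects back onto clist_all for the next pass.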
if(0 != (result = MultiPerform())){ Clear(); return result; } // Read the result if(0 != (result = MultiRead())){ Clear(); return result; } // Cleanup curl handle in multi handle ClearEx(false); } return 0; } // thread function for performing an S3fsCurl request // void* S3fsMultiCurl::RequestPerformWrapper(void* arg) { S3fsCurl* s3fscurl= static_cast(arg); void* result = NULL; if(s3fscurl && s3fscurl->fpLazySetup){ if(!s3fscurl->fpLazySetup(s3fscurl)){ S3FS_PRN_ERR("Failed to lazy setup, then respond EIO."); result = (void*)(intptr_t)(-EIO); } } if(!result){ result = (void*)(intptr_t)(s3fscurl->RequestPerform()); s3fscurl->DestroyCurlHandle(true, false); } AutoLock lock(s3fscurl->completed_tids_lock); s3fscurl->completed_tids->push_back(pthread_self()); s3fscurl->sem->post(); return result; } //------------------------------------------------------------------- // Utility functions //------------------------------------------------------------------- // // curl_slist_sort_insert // This function is like curl_slist_append function, but this adds data by a-sorting. // Because AWS signature needs sorted header. // struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data) { if(!data){ return list; } string strkey = data; string strval; string::size_type pos = strkey.find(':', 0); if(string::npos != pos){ strval = strkey.substr(pos + 1); strkey = strkey.substr(0, pos); } return curl_slist_sort_insert(list, strkey.c_str(), strval.c_str()); } struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value) { struct curl_slist* curpos; struct curl_slist* lastpos; struct curl_slist* new_item; if(!key){ return list; } if(NULL == (new_item = reinterpret_cast(malloc(sizeof(struct curl_slist))))){ return list; } // key & value are trimmed and lower (only key) string strkey = trim(string(key)); string strval = trim(string(value ? value : "")); string strnew = key + string(": ") + strval; if(NULL == (new_item->data = strdup(strnew.c_str()))){ free(new_item); return list; } new_item->next = NULL; for(lastpos = NULL, curpos = list; curpos; lastpos = curpos, curpos = curpos->next){ string strcur = curpos->data; size_t pos; if(string::npos != (pos = strcur.find(':', 0))){ strcur = strcur.substr(0, pos); } int result = strcasecmp(strkey.c_str(), strcur.c_str()); if(0 == result){ // same data, so replace it. if(lastpos){ lastpos->next = new_item; }else{ list = new_item; } new_item->next = curpos->next; free(curpos->data); free(curpos); break; }else if(0 > result){ // add data before curpos. 
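      // (inserting in sorted order keeps the whole list a-sorted by header
      //  key, case-insensitively, which the sorted/canonical header lists
      //  used for the AWS signature depend on)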
if(lastpos){ lastpos->next = new_item; }else{ list = new_item; } new_item->next = curpos; break; } } if(!curpos){ // append to last pos if(lastpos){ lastpos->next = new_item; }else{ // a case of list is null list = new_item; } } return list; } string get_sorted_header_keys(const struct curl_slist* list) { string sorted_headers; if(!list){ return sorted_headers; } for( ; list; list = list->next){ string strkey = list->data; size_t pos; if(string::npos != (pos = strkey.find(':', 0))){ if (trim(strkey.substr(pos + 1)).empty()) { // skip empty-value headers (as they are discarded by libcurl) continue; } strkey = strkey.substr(0, pos); } if(0 < sorted_headers.length()){ sorted_headers += ";"; } sorted_headers += lower(strkey); } return sorted_headers; } string get_header_value(const struct curl_slist* list, const string &key) { if(!list){ return ""; } for( ; list; list = list->next){ string strkey = list->data; size_t pos; if(string::npos != (pos = strkey.find(':', 0))){ if(0 == strcasecmp(trim(strkey.substr(0, pos)).c_str(), key.c_str())){ return trim(strkey.substr(pos+1)); } } } return ""; } string get_canonical_headers(const struct curl_slist* list) { string canonical_headers; if(!list){ canonical_headers = "\n"; return canonical_headers; } for( ; list; list = list->next){ string strhead = list->data; size_t pos; if(string::npos != (pos = strhead.find(':', 0))){ string strkey = trim(lower(strhead.substr(0, pos))); string strval = trim(strhead.substr(pos + 1)); if (strval.empty()) { // skip empty-value headers (as they are discarded by libcurl) continue; } strhead = strkey.append(":").append(strval); }else{ strhead = trim(lower(strhead)); } canonical_headers += strhead; canonical_headers += "\n"; } return canonical_headers; } string get_canonical_headers(const struct curl_slist* list, bool only_amz) { string canonical_headers; if(!list){ canonical_headers = "\n"; return canonical_headers; } for( ; list; list = list->next){ string strhead = list->data; size_t pos; if(string::npos != (pos = strhead.find(':', 0))){ string strkey = trim(lower(strhead.substr(0, pos))); string strval = trim(strhead.substr(pos + 1)); if (strval.empty()) { // skip empty-value headers (as they are discarded by libcurl) continue; } strhead = strkey.append(":").append(strval); }else{ strhead = trim(lower(strhead)); } if(only_amz && strhead.substr(0, 5) != "x-amz"){ continue; } canonical_headers += strhead; canonical_headers += "\n"; } return canonical_headers; } // function for using global values bool MakeUrlResource(const char* realpath, string& resourcepath, string& url) { if(!realpath){ return false; } resourcepath = urlEncode(service_path + bucket + realpath); url = host + resourcepath; return true; } string prepare_url(const char* url) { S3FS_PRN_INFO3("URL is %s", url); string uri; string hostname; string path; string url_str = string(url); string token = string("/") + bucket; int bucket_pos; int bucket_length = token.size(); int uri_length = 0; if(!strncasecmp(url_str.c_str(), "https://", 8)){ uri_length = 8; } else if(!strncasecmp(url_str.c_str(), "http://", 7)) { uri_length = 7; } uri = url_str.substr(0, uri_length); bucket_pos = url_str.find(token, uri_length); if(!pathrequeststyle){ hostname = bucket + "." 
+ url_str.substr(uri_length, bucket_pos - uri_length); path = url_str.substr((bucket_pos + bucket_length)); }else{ hostname = url_str.substr(uri_length, bucket_pos - uri_length); string part = url_str.substr((bucket_pos + bucket_length)); if('/' != part[0]){ part = "/" + part; } path = "/" + bucket + part; } url_str = uri + hostname + path; S3FS_PRN_INFO3("URL changed is %s", url_str.c_str()); return url_str; } const char *acl_to_string(acl_t acl) { switch(acl){ case PRIVATE: return "private"; case PUBLIC_READ: return "public-read"; case PUBLIC_READ_WRITE: return "public-read-write"; case AWS_EXEC_READ: return "aws-exec-read"; case AUTHENTICATED_READ: return "authenticated-read"; case BUCKET_OWNER_READ: return "bucket-owner-read"; case BUCKET_OWNER_FULL_CONTROL: return "bucket-owner-full-control"; case LOG_DELIVERY_WRITE: return "log-delivery-write"; case INVALID_ACL: return NULL; } abort(); } acl_t string_to_acl(const char *acl) { if(0 == strcmp(acl, "private")){ return PRIVATE; }else if(0 == strcmp(acl, "public-read")){ return PUBLIC_READ; }else if(0 == strcmp(acl, "public-read-write")){ return PUBLIC_READ_WRITE; }else if(0 == strcmp(acl, "aws-exec-read")){ return AWS_EXEC_READ; }else if(0 == strcmp(acl, "authenticated-read")){ return AUTHENTICATED_READ; }else if(0 == strcmp(acl, "bucket-owner-read")){ return BUCKET_OWNER_READ; }else if(0 == strcmp(acl, "bucket-owner-full-control")){ return BUCKET_OWNER_FULL_CONTROL; }else if(0 == strcmp(acl, "log-delivery-write")){ return LOG_DELIVERY_WRITE; }else{ return INVALID_ACL; } } /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ s3fs-fuse-1.86/src/curl.h000066400000000000000000000615521361654130000152030ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Randy Rizun * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef S3FS_CURL_H_ #define S3FS_CURL_H_ #include #include "psemaphore.h" //---------------------------------------------- // Avoid dependency on libcurl version //---------------------------------------------- // [NOTE] // The following symbols (enum) depend on the version of libcurl. // CURLOPT_TCP_KEEPALIVE 7.25.0 and later // CURLOPT_SSL_ENABLE_ALPN 7.36.0 and later // CURLOPT_KEEP_SENDING_ON_ERROR 7.51.0 and later // // s3fs uses these, if you build s3fs with the old libcurl, // substitute the following symbols to avoid errors. // If the version of libcurl linked at runtime is old, // curl_easy_setopt results in an error(CURLE_UNKNOWN_OPTION) and // a message is output. 
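//
// The numeric fallbacks below are the raw libcurl option ids (for example,
// CURLOPT_TCP_KEEPALIVE is option 213 in curl.h), so s3fs still compiles
// against an old libcurl; at runtime an old library simply rejects the
// unknown option with CURLE_UNKNOWN_OPTION as described above.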
// #if defined(HAVE_CURLOPT_TCP_KEEPALIVE) && (HAVE_CURLOPT_TCP_KEEPALIVE == 1) #define S3FS_CURLOPT_TCP_KEEPALIVE CURLOPT_TCP_KEEPALIVE #else #define S3FS_CURLOPT_TCP_KEEPALIVE static_cast(213) #endif #if defined(HAVE_CURLOPT_SSL_ENABLE_ALPN) && (HAVE_CURLOPT_SSL_ENABLE_ALPN == 1) #define S3FS_CURLOPT_SSL_ENABLE_ALPN CURLOPT_SSL_ENABLE_ALPN #else #define S3FS_CURLOPT_SSL_ENABLE_ALPN static_cast(226) #endif #if defined(HAVE_CURLOPT_KEEP_SENDING_ON_ERROR) && (HAVE_CURLOPT_KEEP_SENDING_ON_ERROR == 1) #define S3FS_CURLOPT_KEEP_SENDING_ON_ERROR CURLOPT_KEEP_SENDING_ON_ERROR #else #define S3FS_CURLOPT_KEEP_SENDING_ON_ERROR static_cast(245) #endif //---------------------------------------------- // Symbols //---------------------------------------------- static const int MIN_MULTIPART_SIZE = 5 * 1024 * 1024; //---------------------------------------------- // class BodyData //---------------------------------------------- // memory class for curl write memory callback // class BodyData { private: char* text; size_t lastpos; size_t bufsize; private: bool IsSafeSize(size_t addbytes) const { return ((lastpos + addbytes + 1) > bufsize ? false : true); } bool Resize(size_t addbytes); public: BodyData() : text(NULL), lastpos(0), bufsize(0) {} ~BodyData() { Clear(); } void Clear(void); bool Append(void* ptr, size_t bytes); bool Append(void* ptr, size_t blockSize, size_t numBlocks) { return Append(ptr, (blockSize * numBlocks)); } const char* str() const; size_t size() const { return lastpos; } }; //---------------------------------------------- // Utility structs & typedefs //---------------------------------------------- typedef std::vector etaglist_t; // Each part information for Multipart upload struct filepart { bool uploaded; // does finish uploading std::string etag; // expected etag value int fd; // base file(temporary full file) descriptor off_t startpos; // seek fd point for uploading off_t size; // uploading size etaglist_t* etaglist; // use only parallel upload int etagpos; // use only parallel upload filepart() : uploaded(false), fd(-1), startpos(0), size(-1), etaglist(NULL), etagpos(-1) {} ~filepart() { clear(); } void clear(void) { uploaded = false; etag = ""; fd = -1; startpos = 0; size = -1; etaglist = NULL; etagpos = - 1; } void add_etag_list(etaglist_t* list) { if(list){ list->push_back(std::string("")); etaglist = list; etagpos = list->size() - 1; }else{ etaglist = NULL; etagpos = - 1; } } }; // for progress struct case_insensitive_compare_func { bool operator()(const std::string& a, const std::string& b) const { return strcasecmp(a.c_str(), b.c_str()) < 0; } }; typedef std::map mimes_t; typedef std::pair progress_t; typedef std::map curltime_t; typedef std::map curlprogress_t; class S3fsMultiCurl; //---------------------------------------------- // class CurlHandlerPool //---------------------------------------------- typedef std::list hcurllist_t; class CurlHandlerPool { public: explicit CurlHandlerPool(int maxHandlers) : mMaxHandlers(maxHandlers) { assert(maxHandlers > 0); } bool Init(); bool Destroy(); CURL* GetHandler(bool only_pool); void ReturnHandler(CURL* hCurl, bool restore_pool); private: int mMaxHandlers; pthread_mutex_t mLock; hcurllist_t mPool; }; //---------------------------------------------- // class S3fsCurl //---------------------------------------------- class PageList; class S3fsCurl; // Prototype function for lazy setup options for curl handle typedef bool (*s3fscurl_lazy_setup)(S3fsCurl* s3fscurl); typedef std::map iamcredmap_t; typedef std::map sseckeymap_t; 
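// [note] each sseckeymap_t entry effectively pairs an SSE-C key with the
// MD5 digest that identifies it in request headers; the list below keeps
// the keys in load order (position 0 is the latest key).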
typedef std::list sseckeylist_t; // storage class(rrs) enum storage_class_t { STANDARD, STANDARD_IA, ONEZONE_IA, REDUCED_REDUNDANCY, INTELLIGENT_TIERING }; enum acl_t { PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE, AWS_EXEC_READ, AUTHENTICATED_READ, BUCKET_OWNER_READ, BUCKET_OWNER_FULL_CONTROL, LOG_DELIVERY_WRITE, INVALID_ACL }; // sse type enum sse_type_t { SSE_DISABLE = 0, // not use server side encrypting SSE_S3, // server side encrypting by S3 key SSE_C, // server side encrypting by custom key SSE_KMS // server side encrypting by kms id }; // share enum { SHARE_MUTEX_DNS = 0, SHARE_MUTEX_SSL_SESSION = 1, SHARE_MUTEX_MAX = 2, }; // Class for lapping curl // class S3fsCurl { friend class S3fsMultiCurl; private: enum REQTYPE { REQTYPE_UNSET = -1, REQTYPE_DELETE = 0, REQTYPE_HEAD, REQTYPE_PUTHEAD, REQTYPE_PUT, REQTYPE_GET, REQTYPE_CHKBUCKET, REQTYPE_LISTBUCKET, REQTYPE_PREMULTIPOST, REQTYPE_COMPLETEMULTIPOST, REQTYPE_UPLOADMULTIPOST, REQTYPE_COPYMULTIPOST, REQTYPE_MULTILIST, REQTYPE_IAMCRED, REQTYPE_ABORTMULTIUPLOAD, REQTYPE_IAMROLE }; // class variables static pthread_mutex_t curl_handles_lock; static pthread_mutex_t curl_share_lock[SHARE_MUTEX_MAX]; static bool is_initglobal_done; static CurlHandlerPool* sCurlPool; static int sCurlPoolSize; static CURLSH* hCurlShare; static bool is_cert_check; static bool is_dns_cache; static bool is_ssl_session_cache; static long connect_timeout; static time_t readwrite_timeout; static int retries; static bool is_public_bucket; static acl_t default_acl; static storage_class_t storage_class; static sseckeylist_t sseckeys; static std::string ssekmsid; static sse_type_t ssetype; static bool is_content_md5; static bool is_verbose; static std::string AWSAccessKeyId; static std::string AWSSecretAccessKey; static std::string AWSAccessToken; static time_t AWSAccessTokenExpire; static bool is_ecs; static bool is_use_session_token; static bool is_ibm_iam_auth; static std::string IAM_cred_url; static size_t IAM_field_count; static std::string IAM_token_field; static std::string IAM_expiry_field; static std::string IAM_role; static long ssl_verify_hostname; static curltime_t curl_times; static curlprogress_t curl_progress; static std::string curl_ca_bundle; static mimes_t mimeTypes; static std::string userAgent; static int max_parallel_cnt; static int max_multireq; static off_t multipart_size; static bool is_sigv4; static bool is_ua; // User-Agent static bool requester_pays; // variables CURL* hCurl; REQTYPE type; // type of request std::string path; // target object path std::string base_path; // base path (for multi curl head request) std::string saved_path; // saved path = cache key (for multi curl head request) std::string url; // target object path(url) struct curl_slist* requestHeaders; headers_t responseHeaders; // header data by HeaderCallback BodyData bodydata; // body data by WriteMemoryCallback BodyData headdata; // header data by WriteMemoryCallback volatile long LastResponseCode; const unsigned char* postdata; // use by post method and read callback function. int postdata_remaining; // use by post method and read callback function. 
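  // partdata describes the byte range of the local file that one multipart
  // part covers. A typical setup (taken from MultipartUploadRequest() in
  // curl.cpp) is:
  //
  //   partdata.fd       = fd2;                          // duplicated source fd
  //   partdata.startpos = st.st_size - remaining_bytes; // part offset
  //   partdata.size     = chunk;                        // part length
  //   partdata.add_etag_list(&list);                    // reserve the ETag slot
  //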
filepart partdata; // use by multipart upload/get object callback bool is_use_ahbe; // additional header by extension int retry_count; // retry count for multipart FILE* b_infile; // backup for retrying const unsigned char* b_postdata; // backup for retrying int b_postdata_remaining; // backup for retrying off_t b_partdata_startpos; // backup for retrying ssize_t b_partdata_size; // backup for retrying int b_ssekey_pos; // backup for retrying std::string b_ssevalue; // backup for retrying sse_type_t b_ssetype; // backup for retrying std::string b_from; // backup for retrying(for copy request) headers_t b_meta; // backup for retrying(for copy request) std::string op; // the HTTP verb of the request ("PUT", "GET", etc.) std::string query_string; // request query string Semaphore *sem; pthread_mutex_t *completed_tids_lock; std::vector<pthread_t> *completed_tids; s3fscurl_lazy_setup fpLazySetup; // curl options for lazy setting function public: // constructor/destructor explicit S3fsCurl(bool ahbe = false); ~S3fsCurl(); private: // class methods static bool InitGlobalCurl(void); static bool DestroyGlobalCurl(void); static bool InitShareCurl(void); static bool DestroyShareCurl(void); static void LockCurlShare(CURL* handle, curl_lock_data nLockData, curl_lock_access laccess, void* useptr); static void UnlockCurlShare(CURL* handle, curl_lock_data nLockData, void* useptr); static bool InitCryptMutex(void); static bool DestroyCryptMutex(void); static int CurlProgress(void *clientp, double dltotal, double dlnow, double ultotal, double ulnow); static bool InitMimeType(const char* MimeFile = NULL); static bool LocateBundle(void); static size_t HeaderCallback(void *data, size_t blockSize, size_t numBlocks, void *userPtr); static size_t WriteMemoryCallback(void *ptr, size_t blockSize, size_t numBlocks, void *data); static size_t ReadCallback(void *ptr, size_t size, size_t nmemb, void *userp); static size_t UploadReadCallback(void *ptr, size_t size, size_t nmemb, void *userp); static size_t DownloadWriteCallback(void* ptr, size_t size, size_t nmemb, void* userp); static bool UploadMultipartPostCallback(S3fsCurl* s3fscurl); static bool CopyMultipartPostCallback(S3fsCurl* s3fscurl); static bool MixMultipartPostCallback(S3fsCurl* s3fscurl); static S3fsCurl* UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl); static S3fsCurl* CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl); static S3fsCurl* MixMultipartPostRetryCallback(S3fsCurl* s3fscurl); static S3fsCurl* ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl); // lazy functions for set curl options static bool UploadMultipartPostSetCurlOpts(S3fsCurl* s3fscurl); static bool CopyMultipartPostSetCurlOpts(S3fsCurl* s3fscurl); static bool PreGetObjectRequestSetCurlOpts(S3fsCurl* s3fscurl); static bool PreHeadRequestSetCurlOpts(S3fsCurl* s3fscurl); static bool ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval); static bool SetIAMCredentials(const char* response); static bool ParseIAMRoleFromMetaDataResponse(const char* response, std::string& rolename); static bool SetIAMRoleFromMetaData(const char* response); static bool LoadEnvSseCKeys(void); static bool LoadEnvSseKmsid(void); static bool PushbackSseKeys(std::string& onekey); static bool AddUserAgent(CURL* hCurl); static int CurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr); // methods bool ResetHandle(void); bool RemakeHandle(void); bool ClearInternalData(void); void insertV4Headers(); void insertV2Headers(); void insertIBMIAMHeaders(); void insertAuthHeaders();
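// [NOTE]
// For reference, insertV2Headers/CalcSignatureV2 below follow the AWS
// signature V2 recipe; sketched here from the public spec, not copied from
// the implementation:
//
//   StringToSign = method + "\n" + strMD5 + "\n" + content_type + "\n" + date + "\n"
//                + (canonicalized "x-amz-..." headers) + resource
//   Authorization: AWS AWSAccessKeyId:base64(HMAC-SHA1(AWSSecretAccessKey, StringToSign))
//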
std::string CalcSignatureV2(const std::string& method, const std::string& strMD5, const std::string& content_type, const std::string& date, const std::string& resource); std::string CalcSignature(const std::string& method, const std::string& canonical_uri, const std::string& query_string, const std::string& strdate, const std::string& payload_hash, const std::string& date8601); int GetIAMCredentials(void); int UploadMultipartPostSetup(const char* tpath, int part_num, const std::string& upload_id); int CopyMultipartPostSetup(const char* from, const char* to, int part_num, const std::string& upload_id, headers_t& meta); bool UploadMultipartPostComplete(); bool CopyMultipartPostComplete(); bool MixMultipartPostComplete(); public: // class methods static bool InitS3fsCurl(const char* MimeFile = NULL); static bool DestroyS3fsCurl(void); static int ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, int fd); static int ParallelMixMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, const PageList& pagelist); static int ParallelGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size); static bool CheckIAMCredentialUpdate(void); // class methods(variables) static std::string LookupMimeType(const std::string& name); static bool SetCheckCertificate(bool isCertCheck); static bool SetDnsCache(bool isCache); static bool SetSslSessionCache(bool isCache); static long SetConnectTimeout(long timeout); static time_t SetReadwriteTimeout(time_t timeout); static time_t GetReadwriteTimeout(void) { return S3fsCurl::readwrite_timeout; } static int SetRetries(int count); static bool SetPublicBucket(bool flag); static bool IsPublicBucket(void) { return S3fsCurl::is_public_bucket; } static acl_t SetDefaultAcl(acl_t acl); static acl_t GetDefaultAcl(); static storage_class_t SetStorageClass(storage_class_t storage_class); static storage_class_t GetStorageClass() { return S3fsCurl::storage_class; } static bool LoadEnvSse(void) { return (S3fsCurl::LoadEnvSseCKeys() && S3fsCurl::LoadEnvSseKmsid()); } static sse_type_t SetSseType(sse_type_t type); static sse_type_t GetSseType(void) { return S3fsCurl::ssetype; } static bool IsSseDisable(void) { return (SSE_DISABLE == S3fsCurl::ssetype); } static bool IsSseS3Type(void) { return (SSE_S3 == S3fsCurl::ssetype); } static bool IsSseCType(void) { return (SSE_C == S3fsCurl::ssetype); } static bool IsSseKmsType(void) { return (SSE_KMS == S3fsCurl::ssetype); } static bool FinalCheckSse(void); static bool SetSseCKeys(const char* filepath); static bool SetSseKmsid(const char* kmsid); static bool IsSetSseKmsId(void) { return !S3fsCurl::ssekmsid.empty(); } static const char* GetSseKmsId(void) { return S3fsCurl::ssekmsid.c_str(); } static bool GetSseKey(std::string& md5, std::string& ssekey); static bool GetSseKeyMd5(int pos, std::string& md5); static int GetSseKeyCount(void); static bool SetContentMd5(bool flag); static bool SetVerbose(bool flag); static bool GetVerbose(void) { return S3fsCurl::is_verbose; } static bool SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey); static bool SetAccessKeyWithSessionToken(const char* AccessKeyId, const char* SecretAccessKey, const char * SessionToken); static bool IsSetAccessKeyID(void){ return (0 < S3fsCurl::AWSAccessKeyId.size()); } static bool IsSetAccessKeys(void){ return (0 < S3fsCurl::IAM_role.size() || ((0 < S3fsCurl::AWSAccessKeyId.size() || S3fsCurl::is_ibm_iam_auth) && 0 < S3fsCurl::AWSSecretAccessKey.size())); } static long SetSslVerifyHostname(long value); static long 
GetSslVerifyHostname(void) { return S3fsCurl::ssl_verify_hostname; } // maximum parallel GET and PUT requests static int SetMaxParallelCount(int value); static int GetMaxParallelCount(void) { return S3fsCurl::max_parallel_cnt; } // maximum parallel HEAD requests static int SetMaxMultiRequest(int max); static int GetMaxMultiRequest(void) { return S3fsCurl::max_multireq; } static bool SetIsECS(bool flag); static bool SetIsIBMIAMAuth(bool flag); static size_t SetIAMFieldCount(size_t field_count); static std::string SetIAMCredentialsURL(const char* url); static std::string SetIAMTokenField(const char* token_field); static std::string SetIAMExpiryField(const char* expiry_field); static std::string SetIAMRole(const char* role); static const char* GetIAMRole(void) { return S3fsCurl::IAM_role.c_str(); } static bool SetMultipartSize(off_t size); static off_t GetMultipartSize(void) { return S3fsCurl::multipart_size; } static bool SetSignatureV4(bool isset) { bool bresult = S3fsCurl::is_sigv4; S3fsCurl::is_sigv4 = isset; return bresult; } static bool IsSignatureV4(void) { return S3fsCurl::is_sigv4; } static bool SetUserAgentFlag(bool isset) { bool bresult = S3fsCurl::is_ua; S3fsCurl::is_ua = isset; return bresult; } static bool IsUserAgentFlag(void) { return S3fsCurl::is_ua; } static void InitUserAgent(void); static bool SetRequesterPays(bool flag) { bool old_flag = S3fsCurl::requester_pays; S3fsCurl::requester_pays = flag; return old_flag; } static bool IsRequesterPays(void) { return S3fsCurl::requester_pays; } // methods bool CreateCurlHandle(bool only_pool = false, bool remake = false); bool DestroyCurlHandle(bool restore_pool = true, bool clear_internal_data = true); bool LoadIAMRoleFromMetaData(void); bool AddSseRequestHead(sse_type_t ssetype, std::string& ssevalue, bool is_only_c, bool is_copy); bool GetResponseCode(long& responseCode, bool from_curl_handle = true); int RequestPerform(bool dontAddAuthHeaders=false); int DeleteRequest(const char* tpath); bool PreHeadRequest(const char* tpath, const char* bpath = NULL, const char* savedpath = NULL, int ssekey_pos = -1); bool PreHeadRequest(std::string& tpath, std::string& bpath, std::string& savedpath, int ssekey_pos = -1) { return PreHeadRequest(tpath.c_str(), bpath.c_str(), savedpath.c_str(), ssekey_pos); } int HeadRequest(const char* tpath, headers_t& meta); int PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy); int PutRequest(const char* tpath, headers_t& meta, int fd); int PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size, sse_type_t ssetype, std::string& ssevalue); int GetObjectRequest(const char* tpath, int fd, off_t start = -1, ssize_t size = -1); int CheckBucket(void); int ListBucketRequest(const char* tpath, const char* query); int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy); int CompleteMultipartPostRequest(const char* tpath, const std::string& upload_id, etaglist_t& parts); int UploadMultipartPostRequest(const char* tpath, int part_num, const std::string& upload_id); int MultipartListRequest(std::string& body); int AbortMultipartUpload(const char* tpath, const std::string& upload_id); int MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool is_copy); int MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool is_copy); int MultipartUploadRequest(const std::string& upload_id, const char* tpath, int fd, off_t offset, off_t size, etaglist_t& list); int MultipartRenameRequest(const char* from, const char* to, 
headers_t& meta, off_t size); // methods(variables) CURL* GetCurlHandle(void) const { return hCurl; } std::string GetPath(void) const { return path; } std::string GetBasePath(void) const { return base_path; } std::string GetSpacialSavedPath(void) const { return saved_path; } std::string GetUrl(void) const { return url; } std::string GetOp(void) const { return op; } headers_t* GetResponseHeaders(void) { return &responseHeaders; } BodyData* GetBodyData(void) { return &bodydata; } BodyData* GetHeadData(void) { return &headdata; } long GetLastResponseCode(void) const { return LastResponseCode; } bool SetUseAhbe(bool ahbe); bool EnableUseAhbe(void) { return SetUseAhbe(true); } bool DisableUseAhbe(void) { return SetUseAhbe(false); } bool IsUseAhbe(void) const { return is_use_ahbe; } int GetMultipartRetryCount(void) const { return retry_count; } void SetMultipartRetryCount(int retrycnt) { retry_count = retrycnt; } bool IsOverMultipartRetryCount(void) const { return (retry_count >= S3fsCurl::retries); } int GetLastPreHeadSeecKeyPos(void) const { return b_ssekey_pos; } }; //---------------------------------------------- // class S3fsMultiCurl //---------------------------------------------- // Class for wrapping multi curl // typedef std::vector<S3fsCurl*> s3fscurllist_t; typedef bool (*S3fsMultiSuccessCallback)(S3fsCurl* s3fscurl); // callback for succeed multi request typedef S3fsCurl* (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failure and retrying class S3fsMultiCurl { private: const int maxParallelism; s3fscurllist_t clist_all; // all of curl requests s3fscurllist_t clist_req; // curl requests are sent S3fsMultiSuccessCallback SuccessCallback; S3fsMultiRetryCallback RetryCallback; pthread_mutex_t completed_tids_lock; std::vector<pthread_t> completed_tids; private: bool ClearEx(bool is_all); int MultiPerform(void); int MultiRead(void); static void* RequestPerformWrapper(void* arg); public: explicit S3fsMultiCurl(int maxParallelism); ~S3fsMultiCurl(); int GetMaxParallelism() { return maxParallelism; } S3fsMultiSuccessCallback SetSuccessCallback(S3fsMultiSuccessCallback function); S3fsMultiRetryCallback SetRetryCallback(S3fsMultiRetryCallback function); bool Clear(void) { return ClearEx(true); } bool SetS3fsCurlObject(S3fsCurl* s3fscurl); int Request(void); }; //---------------------------------------------- // Utility Functions //---------------------------------------------- std::string GetContentMD5(int fd); unsigned char* md5hexsum(int fd, off_t start, ssize_t size); std::string md5sum(int fd, off_t start, ssize_t size); struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data); struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value); std::string get_sorted_header_keys(const struct curl_slist* list); std::string get_canonical_headers(const struct curl_slist* list, bool only_amz = false); std::string get_header_value(const struct curl_slist* list, const std::string &key); bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url); std::string prepare_url(const char* url); bool get_object_sse_type(const char* path, sse_type_t& ssetype, std::string& ssevalue); // implement in s3fs.cpp const char *acl_to_string(acl_t acl); acl_t string_to_acl(const char *acl); #endif // S3FS_CURL_H_ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */
s3fs-fuse-1.86/src/fdcache.cpp000066400000000000000000002361561361654130000161500ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Takeshi Nakatani * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <cstdio> #include <cstdlib> #include <cstring> #include <cerrno> #include <climits> #include <ctime> #include <unistd.h> #include <fcntl.h> #include <pthread.h> #include <syslog.h> #include <utime.h> #include <sys/stat.h> #include <sys/types.h> #include <sys/file.h> #include <sys/time.h> #include <string> #include <sstream> #include <map> #include <list> #include <vector> #include <algorithm> #include "common.h" #include "fdcache.h" #include "s3fs.h" #include "s3fs_util.h" #include "string_util.h" #include "curl.h" using namespace std; //------------------------------------------------ // Symbols //------------------------------------------------ static const int MAX_MULTIPART_CNT = 10 * 1000; // S3 multipart max count // // For cache directory top path // #if defined(P_tmpdir) #define TMPFILE_DIR_0PATH P_tmpdir #else #define TMPFILE_DIR_0PATH "/tmp" #endif //------------------------------------------------ // CacheFileStat class methods //------------------------------------------------ bool CacheFileStat::MakeCacheFileStatPath(const char* path, string& sfile_path, bool is_create_dir) { // make stat dir top path( "/<cache_dir>/.<bucket_name>.stat" ) string top_path = FdManager::GetCacheDir(); top_path += "/."; top_path += bucket; top_path += ".stat"; if(is_create_dir){ int result; if(0 != (result = mkdirp(top_path + mydirname(path), 0777))){ S3FS_PRN_ERR("failed to create dir(%s) by errno(%d).", path, result); return false; } } if(!path || '\0' == path[0]){ sfile_path = top_path; }else{ sfile_path = top_path + SAFESTRPTR(path); } return true; } bool CacheFileStat::CheckCacheFileStatTopDir() { if(!FdManager::IsCacheDir()){ return true; } // make stat dir top path( "/<cache_dir>/.<bucket_name>.stat" ) string top_path = FdManager::GetCacheDir(); top_path += "/."; top_path += bucket; top_path += ".stat"; return check_exist_dir_permission(top_path.c_str()); } bool CacheFileStat::DeleteCacheFileStat(const char* path) { if(!path || '\0' == path[0]){ return false; } // stat path string sfile_path; if(!CacheFileStat::MakeCacheFileStatPath(path, sfile_path, false)){ S3FS_PRN_ERR("failed to create cache stat file path(%s)", path); return false; } if(0 != unlink(sfile_path.c_str())){ if(ENOENT == errno){ S3FS_PRN_DBG("failed to delete file(%s): errno=%d", path, errno); }else{ S3FS_PRN_ERR("failed to delete file(%s): errno=%d", path, errno); } return false; } return true; } // [NOTE] // If removing the stat file directory, it should be done before removing // the file cache directory.
// bool CacheFileStat::DeleteCacheFileStatDirectory() { string top_path = FdManager::GetCacheDir(); if(top_path.empty() || bucket.empty()){ return true; } top_path += "/."; top_path += bucket; top_path += ".stat"; return delete_files_in_dir(top_path.c_str(), true); } bool CacheFileStat::RenameCacheFileStat(const char* oldpath, const char* newpath) { if(!oldpath || '\0' == oldpath[0] || !newpath || '\0' == newpath[0]){ return false; } // stat path string old_filestat; string new_filestat; if(!CacheFileStat::MakeCacheFileStatPath(oldpath, old_filestat, false) || !CacheFileStat::MakeCacheFileStatPath(newpath, new_filestat, false)){ return false; } // check new stat path struct stat st; if(0 == stat(new_filestat.c_str(), &st)){ // new stat path is existed, then unlink it. if(-1 == unlink(new_filestat.c_str())){ S3FS_PRN_ERR("failed to unlink new cache file stat path(%s) by errno(%d).", new_filestat.c_str(), errno); return false; } } // check old stat path if(0 != stat(old_filestat.c_str(), &st)){ // old stat path is not existed, then nothing to do any more. return true; } // link and unlink if(-1 == link(old_filestat.c_str(), new_filestat.c_str())){ S3FS_PRN_ERR("failed to link old cache file stat path(%s) to new cache file stat path(%s) by errno(%d).", old_filestat.c_str(), new_filestat.c_str(), errno); return false; } if(-1 == unlink(old_filestat.c_str())){ S3FS_PRN_ERR("failed to unlink old cache file stat path(%s) by errno(%d).", old_filestat.c_str(), errno); return false; } return true; } //------------------------------------------------ // CacheFileStat methods //------------------------------------------------ CacheFileStat::CacheFileStat(const char* tpath) : path(""), fd(-1) { if(tpath && '\0' != tpath[0]){ SetPath(tpath, true); } } CacheFileStat::~CacheFileStat() { Release(); } bool CacheFileStat::SetPath(const char* tpath, bool is_open) { if(!tpath || '\0' == tpath[0]){ return false; } if(!Release()){ // could not close old stat file. 
return false; } if(tpath){ path = tpath; } if(!is_open){ return true; } return Open(); } bool CacheFileStat::Open() { if(path.empty()){ return false; } if(-1 != fd){ // already opened return true; } // stat path string sfile_path; if(!CacheFileStat::MakeCacheFileStatPath(path.c_str(), sfile_path, true)){ S3FS_PRN_ERR("failed to create cache stat file path(%s)", path.c_str()); return false; } // open if(-1 == (fd = open(sfile_path.c_str(), O_CREAT|O_RDWR, 0600))){ S3FS_PRN_ERR("failed to open cache stat file path(%s) - errno(%d)", path.c_str(), errno); return false; } // lock if(-1 == flock(fd, LOCK_EX)){ S3FS_PRN_ERR("failed to lock cache stat file(%s) - errno(%d)", path.c_str(), errno); close(fd); fd = -1; return false; } // seek top if(0 != lseek(fd, 0, SEEK_SET)){ S3FS_PRN_ERR("failed to lseek cache stat file(%s) - errno(%d)", path.c_str(), errno); flock(fd, LOCK_UN); close(fd); fd = -1; return false; } S3FS_PRN_DBG("file locked(%s - %s)", path.c_str(), sfile_path.c_str()); return true; } bool CacheFileStat::Release() { if(-1 == fd){ // already release return true; } // unlock if(-1 == flock(fd, LOCK_UN)){ S3FS_PRN_ERR("failed to unlock cache stat file(%s) - errno(%d)", path.c_str(), errno); return false; } S3FS_PRN_DBG("file unlocked(%s)", path.c_str()); if(-1 == close(fd)){ S3FS_PRN_ERR("failed to close cache stat file(%s) - errno(%d)", path.c_str(), errno); return false; } fd = -1; return true; } //------------------------------------------------ // PageList methods //------------------------------------------------ void PageList::FreeList(fdpage_list_t& list) { list.clear(); } PageList::PageList(off_t size, bool is_loaded, bool is_modified) { Init(size, is_loaded, is_modified); } PageList::PageList(const PageList& other) { for(fdpage_list_t::const_iterator iter = other.pages.begin(); iter != other.pages.end(); ++iter){ pages.push_back(*iter); } } PageList::~PageList() { Clear(); } void PageList::Clear() { PageList::FreeList(pages); } bool PageList::Init(off_t size, bool is_loaded, bool is_modified) { Clear(); fdpage page(0, size, is_loaded, is_modified); pages.push_back(page); return true; } off_t PageList::Size() const { if(pages.empty()){ return 0; } fdpage_list_t::const_reverse_iterator riter = pages.rbegin(); return riter->next(); } bool PageList::Compress(bool force_modified) { bool is_first = true; bool is_last_loaded = false; bool is_last_modified = false; for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ){ if(is_first){ is_first = false; is_last_loaded = force_modified ? true : iter->loaded; is_last_modified = iter->modified; ++iter; }else{ if(is_last_modified == iter->modified){ if(force_modified || is_last_loaded == iter->loaded){ fdpage_list_t::iterator biter = iter; --biter; biter->bytes += iter->bytes; iter = pages.erase(iter); }else{ is_last_loaded = iter->loaded; is_last_modified = iter->modified; ++iter; } }else{ is_last_loaded = force_modified ? 
true : iter->loaded; is_last_modified = iter->modified; ++iter; } } } return true; } bool PageList::Parse(off_t new_pos) { for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){ if(new_pos == iter->offset){ // nothing to do return true; }else if(iter->offset < new_pos && new_pos < iter->next()){ fdpage page(iter->offset, new_pos - iter->offset, iter->loaded, false); iter->bytes -= (new_pos - iter->offset); iter->offset = new_pos; pages.insert(iter, page); return true; } } return false; } bool PageList::Resize(off_t size, bool is_loaded, bool is_modified) { off_t total = Size(); if(0 == total){ Init(size, is_loaded, is_modified); }else if(total < size){ // add new area fdpage page(total, (size - total), is_loaded, is_modified); pages.push_back(page); }else if(size < total){ // cut area for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ){ if(iter->next() <= size){ ++iter; }else{ if(size <= iter->offset){ iter = pages.erase(iter); }else{ iter->bytes = size - iter->offset; } } } }else{ // total == size // nothing to do } // compress area return Compress(); } bool PageList::IsPageLoaded(off_t start, off_t size) const { for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ if(iter->end() < start){ continue; } if(!iter->loaded){ return false; } if(0 != size && start + size <= iter->next()){ break; } } return true; } bool PageList::SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus, bool is_compress) { off_t now_size = Size(); bool is_loaded = (PAGE_LOAD_MODIFIED == pstatus || PAGE_LOADED == pstatus); bool is_modified = (PAGE_LOAD_MODIFIED == pstatus || PAGE_MODIFIED == pstatus); if(now_size <= start){ if(now_size < start){ // add Resize(start, false, is_modified); // set modified flag from now end pos to specified start pos. } Resize(start + size, is_loaded, is_modified); }else if(now_size <= start + size){ // cut Resize(start, false, false); // not changed loaded/modified flags in existing area. // add Resize(start + size, is_loaded, is_modified); }else{ // start-size are inner pages area // parse "start", and "start + size" position Parse(start); Parse(start + size); // set loaded flag for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){ if(iter->end() < start){ continue; }else if(start + size <= iter->offset){ break; }else{ iter->loaded = is_loaded; iter->modified = is_modified; } } } // compress area return (is_compress ? 
Compress() : true); } bool PageList::FindUnloadedPage(off_t start, off_t& resstart, off_t& ressize) const { for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ if(start <= iter->end()){ if(!iter->loaded && !iter->modified){ // Do not load unloaded and modified areas resstart = iter->offset; ressize = iter->bytes; return true; } } } return false; } off_t PageList::GetTotalUnloadedPageSize(off_t start, off_t size) const { off_t restsize = 0; off_t next = start + size; for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ if(iter->next() <= start){ continue; } if(next <= iter->offset){ break; } if(iter->loaded || iter->modified){ continue; } off_t tmpsize; if(iter->offset <= start){ if(iter->next() <= next){ tmpsize = (iter->next() - start); }else{ tmpsize = next - start; // = size } }else{ if(iter->next() <= next){ tmpsize = iter->next() - iter->offset; // = iter->bytes }else{ tmpsize = next - iter->offset; } } restsize += tmpsize; } return restsize; } int PageList::GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start, off_t size) const { // If size is 0, it means loading to end. if(0 == size){ if(start < Size()){ size = Size() - start; } } off_t next = start + size; for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ if(iter->next() <= start){ continue; } if(next <= iter->offset){ break; } if(iter->loaded || iter->modified){ continue; // already loaded or modified } // page area off_t page_start = max(iter->offset, start); off_t page_next = min(iter->next(), next); off_t page_size = page_next - page_start; // add list fdpage_list_t::reverse_iterator riter = unloaded_list.rbegin(); if(riter != unloaded_list.rend() && riter->next() == page_start){ // merge to before page riter->bytes += page_size; }else{ fdpage page(page_start, page_size, false, false); unloaded_list.push_back(page); } } return unloaded_list.size(); } // [NOTE] // This method is called in advance when mixing POST and COPY in multi-part upload. // The minimum size of each part must be 5 MB, and the data area below this must be // downloaded from S3. // This method checks the current PageList status and returns the area that needs // to be downloaded so that each part is at least 5 MB. // bool PageList::GetLoadPageListForMultipartUpload(fdpage_list_t& dlpages) { // compress before this processing if(!Compress()){ return false; } bool is_prev_modified_page = false; off_t accumulated_bytes = 0; off_t last_modified_bytes = 0; for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ if(iter->modified){ // this is modified page if(is_prev_modified_page){ // in case of continuous modified page accumulated_bytes += iter->bytes; }else{ // previous page is unmodified page // check unmodified page bytes is over minimum size(5MB) if(static_cast<off_t>(MIN_MULTIPART_SIZE) <= accumulated_bytes){ // over minimum size accumulated_bytes = iter->bytes; // reset accumulated size }else{ // less than minimum size(5MB) // the previous unmodified page needs to load, if it is not loaded. // and that page will be included in consecutive modified page.
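// For instance (illustrative numbers only): with pages
// [0-2MB modified][2-3MB unmodified][3-9MB modified], the 1MB unmodified run
// cannot stand alone as a multipart copy part (it is below MIN_MULTIPART_SIZE),
// so it is queued for download just below and folded into the modified run.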
PageList::RawGetUnloadPageList(dlpages, (iter->offset - accumulated_bytes), accumulated_bytes); accumulated_bytes += last_modified_bytes + iter->bytes; // this page size and last modified page size are accumulated last_modified_bytes = 0; } is_prev_modified_page = true; } }else{ // this is unmodified page if(!is_prev_modified_page){ // in case of continuous unmodified page accumulated_bytes += iter->bytes; }else{ // previous page is modified page // check modified page bytes is over minimum size(5MB) if(static_cast<off_t>(MIN_MULTIPART_SIZE) <= accumulated_bytes){ // over minimum size last_modified_bytes = accumulated_bytes; // backup last modified page size accumulated_bytes = iter->bytes; // set new accumulated size(this page size) is_prev_modified_page = false; }else{ // less than minimum size(5MB) // this unmodified page needs to load, if it is not loaded. // and this page will be included in consecutive modified page. if((static_cast<off_t>(MIN_MULTIPART_SIZE) - accumulated_bytes) <= iter->bytes){ // Split the missing size from this page size for just before modified page. if(!iter->loaded){ // because this page is not loaded fdpage dlpage(iter->offset, (iter->bytes - (static_cast<off_t>(MIN_MULTIPART_SIZE) - accumulated_bytes))); // don't care for loaded/modified flag dlpages.push_back(dlpage); } last_modified_bytes = static_cast<off_t>(MIN_MULTIPART_SIZE); // backup last modified page size accumulated_bytes = iter->bytes - (static_cast<off_t>(MIN_MULTIPART_SIZE) - accumulated_bytes); // set rest bytes to accumulated size is_prev_modified_page = false; }else{ // assign all this page sizes to just before modified page. // but still it is not enough for the minimum size. if(!iter->loaded){ // because this page is not loaded fdpage dlpage(iter->offset, iter->bytes); // don't care for loaded/modified flag dlpages.push_back(dlpage); } accumulated_bytes += iter->bytes; // add all bytes to accumulated size } } } } } // compress dlpages bool is_first = true; for(fdpage_list_t::iterator dliter = dlpages.begin(); dliter != dlpages.end(); ){ if(is_first){ is_first = false; ++dliter; continue; } fdpage_list_t::iterator biter = dliter; --biter; if((biter->offset + biter->bytes) == dliter->offset){ biter->bytes += dliter->bytes; dliter = dlpages.erase(dliter); }else{ ++dliter; } } return true; } // [NOTE] // This static method assumes that it is called only from GetLoadPageListForMultipartUpload. // If you want exclusive control, do it with GetLoadPageListForMultipartUpload, // not with this method. // bool PageList::RawGetUnloadPageList(fdpage_list_t& dlpages, off_t offset, off_t size) { for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ if((iter->offset + iter->bytes) <= offset){ continue; }else if((offset + size) <= iter->offset){ break; }else{ if(!iter->loaded && !iter->modified){ fdpage dlpage(iter->offset, iter->bytes); // don't care for loaded/modified flag dlpages.push_back(dlpage); } } } return true; } bool PageList::GetMultipartSizeList(fdpage_list_t& mplist, off_t partsize) const { if(!mplist.empty()){ return false; } // temporary page list PageList tmpPageObj(*this); if(!tmpPageObj.Compress(true)){ // compress by modified flag return false; } // [NOTE] // Set the modified flag in page list to the minimum size. // This process needs to match the GetLoadPageListForMultipartUpload method exactly. // // [FIXME] // Make the common processing of GetLoadPageListForMultipartUpload and this method // to one method.
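// [NOTE]
// Sketch of the expected outcome (hypothetical sizes): with partsize = 10MB,
// a 25MB modified region is emitted as upload parts of 10MB + 10MB + 5MB,
// while an untouched region is emitted as copy parts capped at FIVE_GB each.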
// bool is_first = true; bool is_prev_modified_page = false; off_t accumulated_bytes = 0; off_t last_modified_bytes = 0; fdpage_list_t::iterator iter; for(iter = tmpPageObj.pages.begin(); iter != tmpPageObj.pages.end(); ++iter){ if(is_first){ is_prev_modified_page = iter->modified; is_first = false; } if(iter->modified){ // this is modified page if(is_prev_modified_page){ // in case of continuous modified page accumulated_bytes += iter->bytes; }else{ // previous page is unmodified page // check unmodified page bytes is over minimum size(5MB) if(static_cast<off_t>(MIN_MULTIPART_SIZE) <= accumulated_bytes){ // over minimum size accumulated_bytes = iter->bytes; // reset accumulated size }else{ // less than minimum size(5MB) // the previous unmodified page is set modified flag. fdpage_list_t::iterator biter = iter; --biter; biter->loaded = true; biter->modified = true; accumulated_bytes += last_modified_bytes + iter->bytes; // this page size and last modified page size are accumulated last_modified_bytes = 0; } is_prev_modified_page = true; } }else{ // this is unmodified page if(!is_prev_modified_page){ // in case of continuous unmodified page accumulated_bytes += iter->bytes; }else{ // previous page is modified page // check modified page bytes is over minimum size(5MB) if(static_cast<off_t>(MIN_MULTIPART_SIZE) <= accumulated_bytes){ // over minimum size last_modified_bytes = accumulated_bytes; // backup last modified page size accumulated_bytes = iter->bytes; // set new accumulated size(this page size) is_prev_modified_page = false; }else{ // less than minimum size(5MB) // this unmodified page is set modified flag. if((static_cast<off_t>(MIN_MULTIPART_SIZE) - accumulated_bytes) <= iter->bytes){ // Split the missing size from this page size for just before modified page. fdpage newpage(iter->offset, (static_cast<off_t>(MIN_MULTIPART_SIZE) - accumulated_bytes), true, true); iter->bytes -= (static_cast<off_t>(MIN_MULTIPART_SIZE) - accumulated_bytes); iter->offset += (static_cast<off_t>(MIN_MULTIPART_SIZE) - accumulated_bytes); tmpPageObj.pages.insert(iter, newpage); last_modified_bytes = static_cast<off_t>(MIN_MULTIPART_SIZE); // backup last modified page size accumulated_bytes = iter->bytes; // set rest bytes to accumulated size is_prev_modified_page = false; }else{ // assign all this page sizes to just before modified page. // but still it is not enough for the minimum size.
accumulated_bytes += iter->bytes; // add all bytes to accumulated size } } } } } // recompress if(!tmpPageObj.Compress(true)){ // compress by modified flag return false; } // normalization for uploading parts for(iter = tmpPageObj.pages.begin(); iter != tmpPageObj.pages.end(); ++iter){ off_t start = iter->offset; off_t remains = iter->bytes; while(0 < remains){ off_t onesize; if(iter->modified){ // Uploading parts, this page must be 5MB - partsize onesize = std::min(remains, partsize); }else{ // Not uploading parts, this page must be 5MB - 5GB onesize = std::min(remains, static_cast<off_t>(FIVE_GB)); } fdpage page(start, onesize, iter->loaded, iter->modified); mplist.push_back(page); start += onesize; remains -= onesize; } } return true; } bool PageList::IsModified() const { for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ if(iter->modified){ return true; } } return false; } bool PageList::ClearAllModified() { for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){ if(iter->modified){ iter->modified = false; } } return Compress(); } bool PageList::Serialize(CacheFileStat& file, bool is_output) { if(!file.Open()){ return false; } if(is_output){ // // put to file // ostringstream ssall; ssall << Size(); for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){ ssall << "\n" << iter->offset << ":" << iter->bytes << ":" << (iter->loaded ? "1" : "0") << ":" << (iter->modified ? "1" : "0"); } string strall = ssall.str(); if(0 >= pwrite(file.GetFd(), strall.c_str(), strall.length(), 0)){ S3FS_PRN_ERR("failed to write stats(%d)", errno); return false; } }else{ // // loading from file // struct stat st; memset(&st, 0, sizeof(struct stat)); if(-1 == fstat(file.GetFd(), &st)){ S3FS_PRN_ERR("fstat is failed. errno(%d)", errno); return false; } if(0 >= st.st_size){ // nothing Init(0, false, false); return true; } char* ptmp = new char[st.st_size + 1]; ptmp[st.st_size] = '\0'; // read from file if(0 >= pread(file.GetFd(), ptmp, st.st_size, 0)){ S3FS_PRN_ERR("failed to read stats(%d)", errno); delete[] ptmp; return false; } string oneline; istringstream ssall(ptmp); // loaded Clear(); // load(size) if(!getline(ssall, oneline, '\n')){ S3FS_PRN_ERR("failed to parse stats."); delete[] ptmp; return false; } off_t total = s3fs_strtoofft(oneline.c_str()); // load each part bool is_err = false; while(getline(ssall, oneline, '\n')){ string part; istringstream ssparts(oneline); // offset if(!getline(ssparts, part, ':')){ is_err = true; break; } off_t offset = s3fs_strtoofft(part.c_str()); // size if(!getline(ssparts, part, ':')){ is_err = true; break; } off_t size = s3fs_strtoofft(part.c_str()); // loaded if(!getline(ssparts, part, ':')){ is_err = true; break; } bool is_loaded = (1 == s3fs_strtoofft(part.c_str()) ? true : false); bool is_modified; if(!getline(ssparts, part, ':')){ is_modified = false; // old version does not have this part. }else{ is_modified = (1 == s3fs_strtoofft(part.c_str()) ? true : false); } // add new area PageList::page_status pstatus = ( is_loaded && is_modified ? PageList::PAGE_LOAD_MODIFIED : !is_loaded && is_modified ? PageList::PAGE_MODIFIED : is_loaded && !is_modified ?
PageList::PAGE_LOADED : PageList::PAGE_NOT_LOAD_MODIFIED ); SetPageLoadedStatus(offset, size, pstatus); } delete[] ptmp; if(is_err){ S3FS_PRN_ERR("failed to parse stats."); Clear(); return false; } // check size if(total != Size()){ S3FS_PRN_ERR("different size(%lld - %lld).", static_cast<long long int>(total), static_cast<long long int>(Size())); Clear(); return false; } } return true; } void PageList::Dump() { int cnt = 0; S3FS_PRN_DBG("pages = {"); for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter, ++cnt){ S3FS_PRN_DBG(" [%08d] -> {%014lld - %014lld : %s / %s}", cnt, static_cast<long long int>(iter->offset), static_cast<long long int>(iter->bytes), iter->loaded ? "loaded" : "unloaded", iter->modified ? "modified" : "not modified"); } S3FS_PRN_DBG("}"); } //------------------------------------------------ // FdEntity class methods //------------------------------------------------ bool FdEntity::mixmultipart = true; bool FdEntity::SetNoMixMultipart() { bool old = mixmultipart; mixmultipart = false; return old; } int FdEntity::FillFile(int fd, unsigned char byte, off_t size, off_t start) { unsigned char bytes[1024 * 32]; // 32kb memset(bytes, byte, min(static_cast<off_t>(sizeof(bytes)), size)); for(off_t total = 0, onewrote = 0; total < size; total += onewrote){ if(-1 == (onewrote = pwrite(fd, bytes, min(static_cast<off_t>(sizeof(bytes)), size - total), start + total))){ S3FS_PRN_ERR("pwrite failed. errno(%d)", errno); return -errno; } } return 0; } //------------------------------------------------ // FdEntity methods //------------------------------------------------ FdEntity::FdEntity(const char* tpath, const char* cpath) : is_lock_init(false), refcnt(0), path(SAFESTRPTR(tpath)), fd(-1), pfile(NULL), size_orgmeta(0), upload_id(""), mp_start(0), mp_size(0), cachepath(SAFESTRPTR(cpath)), mirrorpath("") { try{ pthread_mutexattr_t attr; pthread_mutexattr_init(&attr); #if S3FS_PTHREAD_ERRORCHECK pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); #endif pthread_mutex_init(&fdent_lock, &attr); pthread_mutex_init(&fdent_data_lock, &attr); is_lock_init = true; }catch(exception& e){ S3FS_PRN_CRIT("failed to init mutex"); } } FdEntity::~FdEntity() { Clear(); if(is_lock_init){ try{ pthread_mutex_destroy(&fdent_data_lock); pthread_mutex_destroy(&fdent_lock); }catch(exception& e){ S3FS_PRN_CRIT("failed to destroy mutex"); } is_lock_init = false; } } void FdEntity::Clear() { AutoLock auto_lock(&fdent_lock); AutoLock auto_data_lock(&fdent_data_lock); if(-1 != fd){ if(!cachepath.empty()){ CacheFileStat cfstat(path.c_str()); if(!pagelist.Serialize(cfstat, true)){ S3FS_PRN_WARN("failed to save cache stat file(%s).", path.c_str()); } } if(pfile){ fclose(pfile); pfile = NULL; } fd = -1; if(!mirrorpath.empty()){ if(-1 == unlink(mirrorpath.c_str())){ S3FS_PRN_WARN("failed to remove mirror cache file(%s) by errno(%d).", mirrorpath.c_str(), errno); } mirrorpath.erase(); } } pagelist.Init(0, false, false); refcnt = 0; path = ""; cachepath = ""; } void FdEntity::Close() { AutoLock auto_lock(&fdent_lock); S3FS_PRN_DBG("[path=%s][fd=%d][refcnt=%d]", path.c_str(), fd, (-1 != fd ?
refcnt - 1 : refcnt)); if(-1 != fd){ if(0 < refcnt){ refcnt--; }else{ S3FS_PRN_EXIT("reference count underflow"); abort(); } if(0 == refcnt){ AutoLock auto_data_lock(&fdent_data_lock); if(!cachepath.empty()){ CacheFileStat cfstat(path.c_str()); if(!pagelist.Serialize(cfstat, true)){ S3FS_PRN_WARN("failed to save cache stat file(%s).", path.c_str()); } } if(pfile){ fclose(pfile); pfile = NULL; } fd = -1; if(!mirrorpath.empty()){ if(-1 == unlink(mirrorpath.c_str())){ S3FS_PRN_WARN("failed to remove mirror cache file(%s) by errno(%d).", mirrorpath.c_str(), errno); } mirrorpath.erase(); } } } } int FdEntity::Dup(bool lock_already_held) { AutoLock auto_lock(&fdent_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE); S3FS_PRN_DBG("[path=%s][fd=%d][refcnt=%d]", path.c_str(), fd, (-1 != fd ? refcnt + 1 : refcnt)); if(-1 != fd){ refcnt++; } return fd; } // // Open a mirror file which is linked to the cache file. // int FdEntity::OpenMirrorFile() { if(cachepath.empty()){ S3FS_PRN_ERR("cache path is empty, why come here"); return -EIO; } // make temporary directory string bupdir; if(!FdManager::MakeCachePath(NULL, bupdir, true, true)){ S3FS_PRN_ERR("could not make bup cache directory path or create it."); return -EIO; } // create seed generating mirror file name unsigned int seed = static_cast<unsigned int>(time(NULL)); int urandom_fd; if(-1 != (urandom_fd = open("/dev/urandom", O_RDONLY))){ unsigned int rand_data; if(sizeof(rand_data) == read(urandom_fd, &rand_data, sizeof(rand_data))){ seed ^= rand_data; } close(urandom_fd); } // try to link mirror file while(true){ // make random(temp) file path // (do not care for threading, because allowed any value returned.) // char szfile[NAME_MAX + 1]; sprintf(szfile, "%x.tmp", rand_r(&seed)); mirrorpath = bupdir + "/" + szfile; // link mirror file to cache file if(0 == link(cachepath.c_str(), mirrorpath.c_str())){ break; } if(EEXIST != errno){ S3FS_PRN_ERR("could not link mirror file(%s) to cache file(%s) by errno(%d).", mirrorpath.c_str(), cachepath.c_str(), errno); return -errno; } ++seed; } // open mirror file int mirrorfd; if(-1 == (mirrorfd = open(mirrorpath.c_str(), O_RDWR))){ S3FS_PRN_ERR("could not open mirror file(%s) by errno(%d).", mirrorpath.c_str(), errno); return -errno; } return mirrorfd; } int FdEntity::Open(headers_t* pmeta, off_t size, time_t time, bool no_fd_lock_wait) { AutoLock auto_lock(&fdent_lock, no_fd_lock_wait ? AutoLock::NO_WAIT : AutoLock::NONE); S3FS_PRN_DBG("[path=%s][fd=%d][size=%lld][time=%lld]", path.c_str(), fd, static_cast<long long int>(size), static_cast<long long int>(time)); if (!auto_lock.isLockAcquired()) { // had to wait for fd lock, return return -EIO; } S3FS_PRN_DBG("[path=%s][fd=%d][size=%lld][time=%lld]", path.c_str(), fd, static_cast<long long int>(size), static_cast<long long int>(time)); AutoLock auto_data_lock(&fdent_data_lock); if(-1 != fd){ // already opened, needs to increment refcnt. Dup(/*lock_already_held=*/ true); // check only file size(do not need to save cfs and time). if(0 <= size && pagelist.Size() != size){ // truncate temporary file size if(-1 == ftruncate(fd, size)){ S3FS_PRN_ERR("failed to truncate temporary file(%d) by errno(%d).", fd, errno); if(0 < refcnt){ refcnt--; } return -EIO; } // resize page list if(!pagelist.Resize(size, false, false)){ S3FS_PRN_ERR("failed to truncate temporary file information(%d).", fd); if(0 < refcnt){ refcnt--; } return -EIO; } } // set original headers and set size. off_t new_size = (0 <= size ?
size : size_orgmeta); if(pmeta){ orgmeta = *pmeta; new_size = get_size(orgmeta); } if(new_size < size_orgmeta){ size_orgmeta = new_size; } return 0; } bool need_save_csf = false; // need to save(reset) cache stat file bool is_truncate = false; // need to truncate if(!cachepath.empty()){ // using cache struct stat st; if(stat(cachepath.c_str(), &st) == 0){ if(st.st_mtime < time){ S3FS_PRN_DBG("cache file stale, removing: %s", cachepath.c_str()); if(unlink(cachepath.c_str()) != 0){ return (0 == errno ? -EIO : -errno); } } } // open cache and cache stat file, load page info. CacheFileStat cfstat(path.c_str()); // try to open cache file if(-1 != (fd = open(cachepath.c_str(), O_RDWR)) && pagelist.Serialize(cfstat, false)){ // succeed to open cache file and to load stats data memset(&st, 0, sizeof(struct stat)); if(-1 == fstat(fd, &st)){ S3FS_PRN_ERR("fstat is failed. errno(%d)", errno); fd = -1; return (0 == errno ? -EIO : -errno); } // check size, st_size, loading stat file if(-1 == size){ if(st.st_size != pagelist.Size()){ pagelist.Resize(st.st_size, false, false); need_save_csf = true; // need to update page info } size = st.st_size; }else{ if(size != pagelist.Size()){ pagelist.Resize(size, false, false); need_save_csf = true; // need to update page info } if(size != st.st_size){ is_truncate = true; } } }else{ // could not open cache file or could not load stats data, so initialize it. if(-1 == (fd = open(cachepath.c_str(), O_CREAT|O_RDWR|O_TRUNC, 0600))){ S3FS_PRN_ERR("failed to open file(%s). errno(%d)", cachepath.c_str(), errno); return (0 == errno ? -EIO : -errno); } need_save_csf = true; // need to update page info if(-1 == size){ size = 0; pagelist.Init(0, false, false); }else{ pagelist.Resize(size, false, false); is_truncate = true; } } // open mirror file int mirrorfd; if(0 >= (mirrorfd = OpenMirrorFile())){ S3FS_PRN_ERR("failed to open mirror file linked cache file(%s).", cachepath.c_str()); return (0 == mirrorfd ? -EIO : mirrorfd); } // switch fd close(fd); fd = mirrorfd; // make file pointer(for being same tmpfile) if(NULL == (pfile = fdopen(fd, "wb"))){ S3FS_PRN_ERR("failed to get fileno(%s). errno(%d)", cachepath.c_str(), errno); close(fd); fd = -1; return (0 == errno ? -EIO : -errno); } }else{ // not using cache // open temporary file if(NULL == (pfile = tmpfile()) || -1 ==(fd = fileno(pfile))){ S3FS_PRN_ERR("failed to open tmp file. err(%d)", errno); if(pfile){ fclose(pfile); pfile = NULL; } return (0 == errno ? -EIO : -errno); } if(-1 == size){ size = 0; pagelist.Init(0, false, false); }else{ pagelist.Resize(size, false, false); is_truncate = true; } } // truncate cache(tmp) file if(is_truncate){ if(0 != ftruncate(fd, size) || 0 != fsync(fd)){ S3FS_PRN_ERR("ftruncate(%s) or fsync returned err(%d)", cachepath.c_str(), errno); fclose(pfile); pfile = NULL; fd = -1; return (0 == errno ? -EIO : -errno); } } // reset cache stat file if(need_save_csf){ CacheFileStat cfstat(path.c_str()); if(!pagelist.Serialize(cfstat, true)){ S3FS_PRN_WARN("failed to save cache stat file(%s), but continue...", path.c_str()); } } // init internal data refcnt = 1; // set original headers and size in it. if(pmeta){ orgmeta = *pmeta; size_orgmeta = get_size(orgmeta); }else{ orgmeta.clear(); size_orgmeta = 0; } // set mtime(set "x-amz-meta-mtime" in orgmeta) if(-1 != time){ if(0 != SetMtime(time, /*lock_already_held=*/ true)){ S3FS_PRN_ERR("failed to set mtime. errno(%d)", errno); fclose(pfile); pfile = NULL; fd = -1; return (0 == errno ? 
-EIO : -errno); } } return 0; } // [NOTE] // This method is called only from nocopyapi functions. // So we do not check disk space for this option mode; if there is not enough // disk space, this method will fail. // bool FdEntity::OpenAndLoadAll(headers_t* pmeta, off_t* size, bool force_load) { AutoLock auto_lock(&fdent_lock); int result; S3FS_PRN_INFO3("[path=%s][fd=%d]", path.c_str(), fd); if(-1 == fd){ if(0 != Open(pmeta)){ return false; } } AutoLock auto_data_lock(&fdent_data_lock); if(force_load){ SetAllStatusUnloaded(); } // // TODO: possibly do background for delay loading // if(0 != (result = Load(/*start=*/ 0, /*size=*/ 0, /*lock_already_held=*/ true))){ S3FS_PRN_ERR("could not download, result(%d)", result); return false; } if(size){ *size = pagelist.Size(); } return true; } // // Rename file path. // // This method sets the FdManager::fent map registration key to fentmapkey. // // [NOTE] // This method changes the file path of FdEntity. // The old file is deleted after linking to the new file path, and this works // without problem because the file descriptor is not affected even if the // cache file is open. // The mirror file descriptor is also the same. The mirror file path does // not need to be changed and will remain as it is. // bool FdEntity::RenamePath(const string& newpath, string& fentmapkey) { if(!cachepath.empty()){ // has cache path // make new cache path string newcachepath; if(!FdManager::MakeCachePath(newpath.c_str(), newcachepath, true)){ S3FS_PRN_ERR("failed to make cache path for object(%s).", newpath.c_str()); return false; } // link and unlink cache file if(-1 == link(cachepath.c_str(), newcachepath.c_str())){ S3FS_PRN_ERR("failed to link old cache path(%s) to new cache path(%s) by errno(%d).", cachepath.c_str(), newcachepath.c_str(), errno); return false; } if(-1 == unlink(cachepath.c_str())){ S3FS_PRN_ERR("failed to unlink old cache path(%s) by errno(%d).", cachepath.c_str(), errno); return false; } // link and unlink cache file stat if(!CacheFileStat::RenameCacheFileStat(path.c_str(), newpath.c_str())){ S3FS_PRN_ERR("failed to rename cache file stat(%s to %s).", path.c_str(), newpath.c_str()); return false; } fentmapkey = newpath; cachepath = newcachepath; }else{ // does not have cache path fentmapkey.erase(); FdManager::MakeRandomTempPath(newpath.c_str(), fentmapkey); } // set new path path = newpath; return true; } bool FdEntity::GetStats(struct stat& st, bool lock_already_held) { AutoLock auto_lock(&fdent_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE); if(-1 == fd){ return false; } memset(&st, 0, sizeof(struct stat)); if(-1 == fstat(fd, &st)){ S3FS_PRN_ERR("fstat failed. errno(%d)", errno); return false; } return true; } int FdEntity::SetCtime(time_t time) { if(-1 == time){ return 0; } AutoLock auto_lock(&fdent_lock); orgmeta["x-amz-meta-ctime"] = str(time); return 0; } int FdEntity::SetMtime(time_t time, bool lock_already_held) { AutoLock auto_lock(&fdent_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE); S3FS_PRN_INFO3("[path=%s][fd=%d][time=%lld]", path.c_str(), fd, static_cast<long long int>(time)); if(-1 == time){ return 0; } if(-1 != fd){ struct timeval tv[2]; tv[0].tv_sec = time; tv[0].tv_usec= 0L; tv[1].tv_sec = tv[0].tv_sec; tv[1].tv_usec= 0L; if(-1 == futimes(fd, tv)){ S3FS_PRN_ERR("futimes failed. errno(%d)", errno); return -errno; } }else if(!cachepath.empty()){ // not opened file yet.
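// (no open file descriptor on this path, so the cache file is stamped by
// name with utime() instead of futimes(); the x-amz-meta-* entries are
// updated either way)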
struct utimbuf n_mtime; n_mtime.modtime = time; n_mtime.actime = time; if(-1 == utime(cachepath.c_str(), &n_mtime)){ S3FS_PRN_ERR("utime failed. errno(%d)", errno); return -errno; } } orgmeta["x-amz-meta-ctime"] = str(time); orgmeta["x-amz-meta-mtime"] = str(time); return 0; } bool FdEntity::UpdateCtime() { AutoLock auto_lock(&fdent_lock); struct stat st; if(!GetStats(st, /*lock_already_held=*/ true)){ return false; } orgmeta["x-amz-meta-ctime"] = str(st.st_ctime); return true; } bool FdEntity::UpdateMtime() { AutoLock auto_lock(&fdent_lock); struct stat st; if(!GetStats(st, /*lock_already_held=*/ true)){ return false; } orgmeta["x-amz-meta-ctime"] = str(st.st_ctime); orgmeta["x-amz-meta-mtime"] = str(st.st_mtime); return true; } bool FdEntity::GetSize(off_t& size) { AutoLock auto_lock(&fdent_lock); if(-1 == fd){ return false; } size = pagelist.Size(); return true; } bool FdEntity::SetMode(mode_t mode) { AutoLock auto_lock(&fdent_lock); orgmeta["x-amz-meta-mode"] = str(mode); return true; } bool FdEntity::SetUId(uid_t uid) { AutoLock auto_lock(&fdent_lock); orgmeta["x-amz-meta-uid"] = str(uid); return true; } bool FdEntity::SetGId(gid_t gid) { AutoLock auto_lock(&fdent_lock); orgmeta["x-amz-meta-gid"] = str(gid); return true; } bool FdEntity::SetContentType(const char* path) { if(!path){ return false; } AutoLock auto_lock(&fdent_lock); orgmeta["Content-Type"] = S3fsCurl::LookupMimeType(string(path)); return true; } bool FdEntity::SetAllStatus(bool is_loaded) { S3FS_PRN_INFO3("[path=%s][fd=%d][%s]", path.c_str(), fd, is_loaded ? "loaded" : "unloaded"); if(-1 == fd){ return false; } // [NOTE] // this method is for internal use only and is called after locking, // so do not lock now. // //AutoLock auto_lock(&fdent_lock); // get file size struct stat st; memset(&st, 0, sizeof(struct stat)); if(-1 == fstat(fd, &st)){ S3FS_PRN_ERR("fstat is failed. errno(%d)", errno); return false; } // Reinit pagelist.Init(st.st_size, is_loaded, false); return true; } int FdEntity::Load(off_t start, off_t size, bool lock_already_held) { AutoLock auto_lock(&fdent_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE); S3FS_PRN_DBG("[path=%s][fd=%d][offset=%lld][size=%lld]", path.c_str(), fd, static_cast<long long int>(start), static_cast<long long int>(size)); if(-1 == fd){ return -EBADF; } AutoLock auto_data_lock(&fdent_data_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE); int result = 0; // check loaded area & load fdpage_list_t unloaded_list; if(0 < pagelist.GetUnloadedPages(unloaded_list, start, size)){ for(fdpage_list_t::iterator iter = unloaded_list.begin(); iter != unloaded_list.end(); ++iter){ if(0 != size && start + size <= iter->offset){ // reached end break; } // check loading size off_t need_load_size = 0; if(iter->offset < size_orgmeta){ // original file size(on S3) is smaller than request. need_load_size = (iter->next() <= size_orgmeta ? iter->bytes : (size_orgmeta - iter->offset)); } // download if(S3fsCurl::GetMultipartSize() <= need_load_size && !nomultipart){ // parallel request result = S3fsCurl::ParallelGetObjectRequest(path.c_str(), fd, iter->offset, need_load_size); }else{ // single request if(0 < need_load_size){ S3fsCurl s3fscurl; result = s3fscurl.GetObjectRequest(path.c_str(), fd, iter->offset, need_load_size); }else{ result = 0; } } if(0 != result){ break; } // Set loaded flag pagelist.SetPageLoadedStatus(iter->offset, iter->bytes, PageList::PAGE_LOADED); } PageList::FreeList(unloaded_list); } return result; } // [NOTE] // When there is no disk space for caching the object.
// This method is downloading by dividing an object of the specified range // and uploading by multipart after finishing downloading it. // // [NOTICE] // Need to lock before calling this method. // int FdEntity::NoCacheLoadAndPost(off_t start, off_t size) { int result = 0; S3FS_PRN_INFO3("[path=%s][fd=%d][offset=%lld][size=%lld]", path.c_str(), fd, static_cast(start), static_cast(size)); if(-1 == fd){ return -EBADF; } // [NOTE] // This method calling means that the cache file is never used no more. // if(!cachepath.empty()){ // remove cache files(and cache stat file) FdManager::DeleteCacheFile(path.c_str()); // cache file path does not use no more. cachepath.erase(); mirrorpath.erase(); } // Change entity key in manager mapping FdManager::get()->ChangeEntityToTempPath(this, path.c_str()); // open temporary file FILE* ptmpfp; int tmpfd; if(NULL == (ptmpfp = tmpfile()) || -1 ==(tmpfd = fileno(ptmpfp))){ S3FS_PRN_ERR("failed to open tmp file. err(%d)", errno); if(ptmpfp){ fclose(ptmpfp); } return (0 == errno ? -EIO : -errno); } // loop uploading by multipart for(fdpage_list_t::iterator iter = pagelist.pages.begin(); iter != pagelist.pages.end(); ++iter){ if(iter->end() < start){ continue; } if(0 != size && start + size <= iter->offset){ break; } // download each multipart size(default 10MB) in unit for(off_t oneread = 0, totalread = (iter->offset < start ? start : 0); totalread < static_cast(iter->bytes); totalread += oneread){ int upload_fd = fd; off_t offset = iter->offset + totalread; oneread = min(static_cast(iter->bytes) - totalread, S3fsCurl::GetMultipartSize()); // check rest size is over minimum part size // // [NOTE] // If the final part size is smaller than 5MB, it is not allowed by S3 API. // For this case, if the previous part of the final part is not over 5GB, // we incorporate the final part to the previous part. If the previous part // is over 5GB, we want to even out the last part and the previous part. // if((iter->bytes - totalread - oneread) < MIN_MULTIPART_SIZE){ if(FIVE_GB < iter->bytes - totalread){ oneread = (iter->bytes - totalread) / 2; }else{ oneread = iter->bytes - totalread; } } if(!iter->loaded){ // // loading or initializing // upload_fd = tmpfd; // load offset & size size_t need_load_size = 0; if(size_orgmeta <= offset){ // all area is over of original size need_load_size = 0; }else{ if(size_orgmeta < (offset + oneread)){ // original file size(on S3) is smaller than request. need_load_size = size_orgmeta - offset; }else{ need_load_size = oneread; } } size_t over_size = oneread - need_load_size; // [NOTE] // truncate file to zero and set length to part offset + size // after this, file length is (offset + size), but file does not use any disk space. // if(-1 == ftruncate(tmpfd, 0) || -1 == ftruncate(tmpfd, (offset + oneread))){ S3FS_PRN_ERR("failed to truncate temporary file(%d).", tmpfd); result = -EIO; break; } // single area get request if(0 < need_load_size){ S3fsCurl s3fscurl; if(0 != (result = s3fscurl.GetObjectRequest(path.c_str(), tmpfd, offset, oneread))){ S3FS_PRN_ERR("failed to get object(start=%lld, size=%lld) for file(%d).", static_cast(offset), static_cast(oneread), tmpfd); break; } } // initialize fd without loading if(0 < over_size){ if(0 != (result = FdEntity::FillFile(tmpfd, 0, over_size, offset + need_load_size))){ S3FS_PRN_ERR("failed to fill rest bytes for fd(%d). 
errno(%d)", tmpfd, result); break; } } }else{ // already loaded area } // single area upload by multipart post if(0 != (result = NoCacheMultipartPost(upload_fd, offset, oneread))){ S3FS_PRN_ERR("failed to multipart post(start=%lld, size=%lld) for file(%d).", static_cast(offset), static_cast(oneread), upload_fd); break; } } if(0 != result){ break; } // set loaded flag if(!iter->loaded){ if(iter->offset < start){ fdpage page(iter->offset, start - iter->offset, iter->loaded, false); iter->bytes -= (start - iter->offset); iter->offset = start; pagelist.pages.insert(iter, page); } if(0 != size && start + size < iter->next()){ fdpage page(iter->offset, start + size - iter->offset, true, false); iter->bytes -= (start + size - iter->offset); iter->offset = start + size; pagelist.pages.insert(iter, page); }else{ iter->loaded = true; iter->modified = false; } } } if(0 == result){ // compress pagelist pagelist.Compress(); // fd data do empty if(-1 == ftruncate(fd, 0)){ S3FS_PRN_ERR("failed to truncate file(%d), but continue...", fd); } } // close temporary fclose(ptmpfp); return result; } // [NOTE] // At no disk space for caching object. // This method is starting multipart uploading. // int FdEntity::NoCachePreMultipartPost() { // initialize multipart upload values upload_id.erase(); etaglist.clear(); S3fsCurl s3fscurl(true); int result; if(0 != (result = s3fscurl.PreMultipartPostRequest(path.c_str(), orgmeta, upload_id, false))){ return result; } s3fscurl.DestroyCurlHandle(); return 0; } // [NOTE] // At no disk space for caching object. // This method is uploading one part of multipart. // int FdEntity::NoCacheMultipartPost(int tgfd, off_t start, off_t size) { if(-1 == tgfd || upload_id.empty()){ S3FS_PRN_ERR("Need to initialize for multipart post."); return -EIO; } S3fsCurl s3fscurl(true); return s3fscurl.MultipartUploadRequest(upload_id, path.c_str(), tgfd, start, size, etaglist); } // [NOTE] // At no disk space for caching object. // This method is finishing multipart uploading. // int FdEntity::NoCacheCompleteMultipartPost() { if(upload_id.empty() || etaglist.empty()){ S3FS_PRN_ERR("There is no upload id or etag list."); return -EIO; } S3fsCurl s3fscurl(true); int result; if(0 != (result = s3fscurl.CompleteMultipartPostRequest(path.c_str(), upload_id, etaglist))){ return result; } s3fscurl.DestroyCurlHandle(); // reset values upload_id.erase(); etaglist.clear(); mp_start = 0; mp_size = 0; return 0; } int FdEntity::RowFlush(const char* tpath, bool force_sync) { int result = 0; std::string tmppath; headers_t tmporgmeta; { AutoLock auto_lock(&fdent_lock); tmppath = path; tmporgmeta = orgmeta; } S3FS_PRN_INFO3("[tpath=%s][path=%s][fd=%d]", SAFESTRPTR(tpath), tmppath.c_str(), fd); if(-1 == fd){ return -EBADF; } AutoLock auto_lock(&fdent_data_lock); if(!force_sync && !pagelist.IsModified()){ // nothing to update. return 0; } // If there is no loading all of the area, loading all area. 
off_t restsize = pagelist.GetTotalUnloadedPageSize(); if(0 < restsize){ if(0 == upload_id.length()){ // check disk space if(ReserveDiskSpace(restsize)){ // enough disk space // Load all uninitialized area(no mix multipart uploading) if(!FdEntity::mixmultipart){ result = Load(/*start=*/ 0, /*size=*/ 0, /*lock_already_held=*/ true); } FdManager::FreeReservedDiskSpace(restsize); if(0 != result){ S3FS_PRN_ERR("failed to upload all area(errno=%d)", result); return static_cast(result); } }else{ // no enough disk space // upload all by multipart uploading if(0 != (result = NoCacheLoadAndPost())){ S3FS_PRN_ERR("failed to upload all area by multipart uploading(errno=%d)", result); return static_cast(result); } } }else{ // already start multipart uploading } } if(0 == upload_id.length()){ // normal uploading /* * Make decision to do multi upload (or not) based upon file size * * According to the AWS spec: * - 1 to 10,000 parts are allowed * - minimum size of parts is 5MB (expect for the last part) * * For our application, we will define minimum part size to be 10MB (10 * 2^20 Bytes) * minimum file size will be 64 GB - 2 ** 36 * * Initially uploads will be done serially * * If file is > 20MB, then multipart will kick in */ if(pagelist.Size() > MAX_MULTIPART_CNT * S3fsCurl::GetMultipartSize()){ // close f ? S3FS_PRN_ERR("Part count exceeds %d. Increase multipart size and try again.", MAX_MULTIPART_CNT); return -ENOTSUP; } // seek to head of file. if(0 != lseek(fd, 0, SEEK_SET)){ S3FS_PRN_ERR("lseek error(%d)", errno); return -errno; } // backup upload file size struct stat st; memset(&st, 0, sizeof(struct stat)); if(-1 == fstat(fd, &st)){ S3FS_PRN_ERR("fstat is failed by errno(%d), but continue...", errno); } if(pagelist.Size() >= S3fsCurl::GetMultipartSize() && !nomultipart){ if(FdEntity::mixmultipart){ // multipart uploading can use copy api // This is to ensure that each part is 5MB or more. // If the part is less than 5MB, download it. fdpage_list_t dlpages; if(!pagelist.GetLoadPageListForMultipartUpload(dlpages)){ S3FS_PRN_ERR("something error occurred during getting download pagelist."); return -1; } for(fdpage_list_t::const_iterator iter = dlpages.begin(); iter != dlpages.end(); ++iter){ if(0 != (result = Load(iter->offset, iter->bytes, true))){ S3FS_PRN_ERR("failed to get parts(start=%lld, size=%lld) before uploading.", static_cast(iter->offset), static_cast(iter->bytes)); return result; } } // multipart uploading with copy api result = S3fsCurl::ParallelMixMultipartUploadRequest(tpath ? tpath : tmppath.c_str(), tmporgmeta, fd, pagelist); }else{ // multipart uploading not using copy api result = S3fsCurl::ParallelMultipartUploadRequest(tpath ? tpath : tmppath.c_str(), tmporgmeta, fd); } }else{ // If there are unloaded pages, they are loaded at here. if(0 != (result = Load(/*start=*/ 0, /*size=*/ 0, /*lock_already_held=*/ true))){ S3FS_PRN_ERR("failed to load parts before uploading object(%d)", result); return result; } S3fsCurl s3fscurl(true); result = s3fscurl.PutRequest(tpath ? tpath : tmppath.c_str(), tmporgmeta, fd); } // seek to head of file. 
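// [NOTE]
// The upload requests above consume this fd's file offset, so rewind it
// before the descriptor is reused. lseek() returns the resulting offset
// (or -1 on error), which is why a successful rewind must compare equal
// to 0 here.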
if(0 == result && 0 != lseek(fd, 0, SEEK_SET)){ S3FS_PRN_ERR("lseek error(%d)", errno); return -errno; } // reset uploaded file size size_orgmeta = st.st_size; }else{ // upload rest data if(0 < mp_size){ if(0 != (result = NoCacheMultipartPost(fd, mp_start, mp_size))){ S3FS_PRN_ERR("failed to multipart post(start=%lld, size=%lld) for file(%d).", static_cast(mp_start), static_cast(mp_size), fd); return result; } mp_start = 0; mp_size = 0; } // complete multipart uploading. if(0 != (result = NoCacheCompleteMultipartPost())){ S3FS_PRN_ERR("failed to complete(finish) multipart post for file(%d).", fd); return result; } // truncate file to zero if(-1 == ftruncate(fd, 0)){ // So the file has already been removed, skip error. S3FS_PRN_ERR("failed to truncate file(%d) to zero, but continue...", fd); } } if(0 == result){ pagelist.ClearAllModified(); } return result; } // [NOTICE] // Need to lock before calling this method. bool FdEntity::ReserveDiskSpace(off_t size) { if(FdManager::ReserveDiskSpace(size)){ return true; } if(!pagelist.IsModified()){ // try to clear all cache for this fd. pagelist.Init(pagelist.Size(), false, false); if(-1 == ftruncate(fd, 0) || -1 == ftruncate(fd, pagelist.Size())){ S3FS_PRN_ERR("failed to truncate temporary file(%d).", fd); return false; } if(FdManager::ReserveDiskSpace(size)){ return true; } } FdManager::get()->CleanupCacheDir(); return FdManager::ReserveDiskSpace(size); } ssize_t FdEntity::Read(char* bytes, off_t start, size_t size, bool force_load) { S3FS_PRN_DBG("[path=%s][fd=%d][offset=%lld][size=%zu]", path.c_str(), fd, static_cast(start), size); if(-1 == fd){ return -EBADF; } AutoLock auto_lock(&fdent_data_lock); if(force_load){ pagelist.SetPageLoadedStatus(start, size, PageList::PAGE_NOT_LOAD_MODIFIED); } ssize_t rsize; // check disk space if(0 < pagelist.GetTotalUnloadedPageSize(start, size)){ // load size(for prefetch) size_t load_size = size; if(start + static_cast(size) < pagelist.Size()){ ssize_t prefetch_max_size = max(static_cast(size), S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount()); if(start + prefetch_max_size < pagelist.Size()){ load_size = prefetch_max_size; }else{ load_size = pagelist.Size() - start; } } if(!ReserveDiskSpace(load_size)){ S3FS_PRN_WARN("could not reserve disk space for pre-fetch download"); load_size = size; if(!ReserveDiskSpace(load_size)){ S3FS_PRN_ERR("could not reserve disk space for pre-fetch download"); return -ENOSPC; } } // Loading int result = 0; if(0 < size){ result = Load(start, load_size, /*lock_already_held=*/ true); } FdManager::FreeReservedDiskSpace(load_size); if(0 != result){ S3FS_PRN_ERR("could not download. start(%lld), size(%zu), errno(%d)", static_cast(start), size, result); return -EIO; } } // Reading if(-1 == (rsize = pread(fd, bytes, size, start))){ S3FS_PRN_ERR("pread failed. 
errno(%d)", errno); return -errno; } return rsize; } ssize_t FdEntity::Write(const char* bytes, off_t start, size_t size) { S3FS_PRN_DBG("[path=%s][fd=%d][offset=%lld][size=%zu]", path.c_str(), fd, static_cast(start), size); if(-1 == fd){ return -EBADF; } // check if not enough disk space left BEFORE locking fd if(FdManager::IsCacheDir() && !FdManager::IsSafeDiskSpace(NULL, size)){ FdManager::get()->CleanupCacheDir(); } AutoLock auto_lock(&fdent_data_lock); // check file size if(pagelist.Size() < start){ // grow file size if(-1 == ftruncate(fd, start)){ S3FS_PRN_ERR("failed to truncate temporary file(%d).", fd); return -EIO; } // add new area pagelist.SetPageLoadedStatus(pagelist.Size(), start - pagelist.Size(), PageList::PAGE_MODIFIED); } int result = 0; ssize_t wsize; if(0 == upload_id.length()){ // check disk space off_t restsize = pagelist.GetTotalUnloadedPageSize(0, start) + size; if(ReserveDiskSpace(restsize)){ // enough disk space // Load uninitialized area which starts from 0 to (start + size) before writing. if(!FdEntity::mixmultipart){ if(0 < start){ result = Load(0, start, /*lock_already_held=*/ true); } } FdManager::FreeReservedDiskSpace(restsize); if(0 != result){ S3FS_PRN_ERR("failed to load uninitialized area before writing(errno=%d)", result); return static_cast(result); } }else{ // no enough disk space if(0 != (result = NoCachePreMultipartPost())){ S3FS_PRN_ERR("failed to switch multipart uploading with no cache(errno=%d)", result); return static_cast(result); } // start multipart uploading if(0 != (result = NoCacheLoadAndPost(0, start))){ S3FS_PRN_ERR("failed to load uninitialized area and multipart uploading it(errno=%d)", result); return static_cast(result); } mp_start = start; mp_size = 0; } }else{ // already start multipart uploading } // Writing if(-1 == (wsize = pwrite(fd, bytes, size, start))){ S3FS_PRN_ERR("pwrite failed. errno(%d)", errno); return -errno; } if(0 < wsize){ pagelist.SetPageLoadedStatus(start, wsize, PageList::PAGE_LOAD_MODIFIED); } // Load uninitialized area which starts from (start + size) to EOF after writing. if(!FdEntity::mixmultipart){ if(pagelist.Size() > start + static_cast(size)){ result = Load(start + size, pagelist.Size(), /*lock_already_held=*/ true); if(0 != result){ S3FS_PRN_ERR("failed to load uninitialized area after writing(errno=%d)", result); return static_cast(result); } } } // check multipart uploading if(0 < upload_id.length()){ mp_size += wsize; if(S3fsCurl::GetMultipartSize() <= mp_size){ // over one multipart size if(0 != (result = NoCacheMultipartPost(fd, mp_start, mp_size))){ S3FS_PRN_ERR("failed to multipart post(start=%lld, size=%lld) for file(%d).", static_cast(mp_start), static_cast(mp_size), fd); return result; } // [NOTE] // truncate file to zero and set length to part offset + size // after this, file length is (offset + size), but file does not use any disk space. // if(-1 == ftruncate(fd, 0) || -1 == ftruncate(fd, (mp_start + mp_size))){ S3FS_PRN_ERR("failed to truncate file(%d).", fd); return -EIO; } mp_start += mp_size; mp_size = 0; } } return wsize; } //------------------------------------------------ // FdManager symbol //------------------------------------------------ // [NOTE] // NOCACHE_PATH_PREFIX symbol needs for not using cache mode. // Now s3fs I/F functions in s3fs.cpp has left the processing // to FdManager and FdEntity class. FdManager class manages // the list of local file stat and file descriptor in conjunction // with the FdEntity class. 
// When s3fs does not use a local cache, FdManager must return a new
// temporary file descriptor each time a file is opened. FdManager then
// caches the fd under a dummy file path key instead of the real file
// path. This scheme is not perfect, but it is a simple way to realize
// the behavior.
//
#define NOCACHE_PATH_PREFIX_FORM    " __S3FS_UNEXISTED_PATH_%lx__ / "       // the embedded spaces are intentional; they help keep this dummy key from matching any real object path

//------------------------------------------------
// FdManager class variable
//------------------------------------------------
FdManager       FdManager::singleton;
pthread_mutex_t FdManager::fd_manager_lock;
pthread_mutex_t FdManager::cache_cleanup_lock;
pthread_mutex_t FdManager::reserved_diskspace_lock;
bool            FdManager::is_lock_init(false);
string          FdManager::cache_dir;
bool            FdManager::check_cache_dir_exist(false);
off_t           FdManager::free_disk_space = 0;

//------------------------------------------------
// FdManager class methods
//------------------------------------------------
bool FdManager::SetCacheDir(const char* dir)
{
    if(!dir || '\0' == dir[0]){
        cache_dir = "";
    }else{
        cache_dir = dir;
    }
    return true;
}

bool FdManager::DeleteCacheDirectory()
{
    if(FdManager::cache_dir.empty()){
        return true;
    }
    string cache_path;
    if(!FdManager::MakeCachePath(NULL, cache_path, false)){
        return false;
    }
    if(!delete_files_in_dir(cache_path.c_str(), true)){
        return false;
    }
    string mirror_path = FdManager::cache_dir + "/." + bucket + ".mirror";
    if(!delete_files_in_dir(mirror_path.c_str(), true)){
        return false;
    }
    return true;
}

int FdManager::DeleteCacheFile(const char* path)
{
    S3FS_PRN_INFO3("[path=%s]", SAFESTRPTR(path));

    if(!path){
        return -EIO;
    }
    if(FdManager::cache_dir.empty()){
        return 0;
    }
    string cache_path;
    if(!FdManager::MakeCachePath(path, cache_path, false)){
        return 0;
    }
    int result = 0;
    if(0 != unlink(cache_path.c_str())){
        if(ENOENT == errno){
            S3FS_PRN_DBG("failed to delete file(%s): errno=%d", path, errno);
        }else{
            S3FS_PRN_ERR("failed to delete file(%s): errno=%d", path, errno);
        }
        result = -errno;
    }
    if(!CacheFileStat::DeleteCacheFileStat(path)){
        if(ENOENT == errno){
            S3FS_PRN_DBG("failed to delete stat file(%s): errno=%d", path, errno);
        }else{
            S3FS_PRN_ERR("failed to delete stat file(%s): errno=%d", path, errno);
        }
        if(0 != errno){
            result = -errno;
        }else{
            result = -EIO;
        }
    }
    return result;
}

bool FdManager::MakeCachePath(const char* path, string& cache_path, bool is_create_dir, bool is_mirror_path)
{
    if(FdManager::cache_dir.empty()){
        cache_path = "";
        return true;
    }

    string resolved_path(FdManager::cache_dir);
    if(!is_mirror_path){
        resolved_path += "/";
        resolved_path += bucket;
    }else{
        resolved_path += "/.";
        resolved_path += bucket;
        resolved_path += ".mirror";
    }

    if(is_create_dir){
        int result;
        if(0 != (result = mkdirp(resolved_path + mydirname(path), 0777))){
            S3FS_PRN_ERR("failed to create dir(%s) by errno(%d).", path, result);
            return false;
        }
    }
    if(!path || '\0' == path[0]){
        cache_path = resolved_path;
    }else{
        cache_path = resolved_path + SAFESTRPTR(path);
    }
    return true;
}

bool FdManager::CheckCacheTopDir()
{
    if(FdManager::cache_dir.empty()){
        return true;
    }
    string toppath(FdManager::cache_dir + "/" + bucket);

    return check_exist_dir_permission(toppath.c_str());
}

bool FdManager::MakeRandomTempPath(const char* path, string& tmppath)
{
    char szBuff[64];

    sprintf(szBuff, NOCACHE_PATH_PREFIX_FORM, random());      // the cost of sprintf() here is a minor performance concern, but in practice negligible
    tmppath  = szBuff;
    tmppath += path ?
path : ""; return true; } bool FdManager::SetCheckCacheDirExist(bool is_check) { bool old = FdManager::check_cache_dir_exist; FdManager::check_cache_dir_exist = is_check; return old; } bool FdManager::CheckCacheDirExist() { if(!FdManager::check_cache_dir_exist){ return true; } if(FdManager::cache_dir.empty()){ return true; } // check the directory struct stat st; if(0 != stat(cache_dir.c_str(), &st)){ S3FS_PRN_ERR("could not access to cache directory(%s) by errno(%d).", cache_dir.c_str(), errno); return false; } if(!S_ISDIR(st.st_mode)){ S3FS_PRN_ERR("the cache directory(%s) is not directory.", cache_dir.c_str()); return false; } return true; } off_t FdManager::GetEnsureFreeDiskSpace() { AutoLock auto_lock(&FdManager::reserved_diskspace_lock); return FdManager::free_disk_space; } off_t FdManager::SetEnsureFreeDiskSpace(off_t size) { AutoLock auto_lock(&FdManager::reserved_diskspace_lock); off_t old = FdManager::free_disk_space; FdManager::free_disk_space = size; return old; } off_t FdManager::GetFreeDiskSpace(const char* path) { struct statvfs vfsbuf; string ctoppath; if(!FdManager::cache_dir.empty()){ ctoppath = FdManager::cache_dir + "/"; ctoppath = get_exist_directory_path(ctoppath); // existed directory if(ctoppath != "/"){ ctoppath += "/"; } }else{ ctoppath = TMPFILE_DIR_0PATH "/"; } if(path && '\0' != *path){ ctoppath += path; }else{ ctoppath += "."; } if(-1 == statvfs(ctoppath.c_str(), &vfsbuf)){ S3FS_PRN_ERR("could not get vfs stat by errno(%d)", errno); return 0; } return (vfsbuf.f_bavail * vfsbuf.f_frsize); } bool FdManager::IsSafeDiskSpace(const char* path, off_t size) { off_t fsize = FdManager::GetFreeDiskSpace(path); return size + FdManager::GetEnsureFreeDiskSpace() <= fsize; } //------------------------------------------------ // FdManager methods //------------------------------------------------ FdManager::FdManager() { if(this == FdManager::get()){ pthread_mutexattr_t attr; pthread_mutexattr_init(&attr); #if S3FS_PTHREAD_ERRORCHECK pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); #endif try{ pthread_mutex_init(&FdManager::fd_manager_lock, &attr); pthread_mutex_init(&FdManager::cache_cleanup_lock, &attr); pthread_mutex_init(&FdManager::reserved_diskspace_lock, &attr); FdManager::is_lock_init = true; }catch(exception& e){ FdManager::is_lock_init = false; S3FS_PRN_CRIT("failed to init mutex"); } }else{ abort(); } } FdManager::~FdManager() { if(this == FdManager::get()){ for(fdent_map_t::iterator iter = fent.begin(); fent.end() != iter; ++iter){ FdEntity* ent = (*iter).second; delete ent; } fent.clear(); if(FdManager::is_lock_init){ try{ pthread_mutex_destroy(&FdManager::fd_manager_lock); pthread_mutex_destroy(&FdManager::cache_cleanup_lock); pthread_mutex_destroy(&FdManager::reserved_diskspace_lock); }catch(exception& e){ S3FS_PRN_CRIT("failed to init mutex"); } FdManager::is_lock_init = false; } }else{ abort(); } } FdEntity* FdManager::GetFdEntity(const char* path, int existfd) { S3FS_PRN_INFO3("[path=%s][fd=%d]", SAFESTRPTR(path), existfd); if(!path || '\0' == path[0]){ return NULL; } AutoLock auto_lock(&FdManager::fd_manager_lock); fdent_map_t::iterator iter = fent.find(string(path)); if(fent.end() != iter && (-1 == existfd || (*iter).second->GetFd() == existfd)){ iter->second->Dup(); return (*iter).second; } if(-1 != existfd){ for(iter = fent.begin(); iter != fent.end(); ++iter){ if((*iter).second && (*iter).second->GetFd() == existfd){ // found opened fd in map if(0 == strcmp((*iter).second->GetPath(), path)){ iter->second->Dup(); return (*iter).second; } // 
found fd, but it is used another file(file descriptor is recycled) // so returns NULL. break; } } } return NULL; } FdEntity* FdManager::Open(const char* path, headers_t* pmeta, off_t size, time_t time, bool force_tmpfile, bool is_create, bool no_fd_lock_wait) { S3FS_PRN_DBG("[path=%s][size=%lld][time=%lld]", SAFESTRPTR(path), static_cast(size), static_cast(time)); if(!path || '\0' == path[0]){ return NULL; } bool close = false; FdEntity* ent; AutoLock auto_lock(&FdManager::fd_manager_lock); // search in mapping by key(path) fdent_map_t::iterator iter = fent.find(string(path)); if(fent.end() == iter && !force_tmpfile && !FdManager::IsCacheDir()){ // If the cache directory is not specified, s3fs opens a temporary file // when the file is opened. // Then if it could not find a entity in map for the file, s3fs should // search a entity in all which opened the temporary file. // for(iter = fent.begin(); iter != fent.end(); ++iter){ if((*iter).second && (*iter).second->IsOpen() && 0 == strcmp((*iter).second->GetPath(), path)){ break; // found opened fd in mapping } } } if(fent.end() != iter){ // found ent = (*iter).second; ent->Dup(); if(ent->IsModified()){ // If the file is being modified, it will not be resized. size = -1; } close = true; }else if(is_create){ // not found string cache_path; if(!force_tmpfile && !FdManager::MakeCachePath(path, cache_path, true)){ S3FS_PRN_ERR("failed to make cache path for object(%s).", path); return NULL; } // make new obj ent = new FdEntity(path, cache_path.c_str()); if(!cache_path.empty()){ // using cache fent[string(path)] = ent; }else{ // not using cache, so the key of fdentity is set not really existing path. // (but not strictly unexisting path.) // // [NOTE] // The reason why this process here, please look at the definition of the // comments of NOCACHE_PATH_PREFIX_FORM symbol. // string tmppath; FdManager::MakeRandomTempPath(path, tmppath); fent[tmppath] = ent; } }else{ return NULL; } // open if(0 != ent->Open(pmeta, size, time, no_fd_lock_wait)){ if(close){ ent->Close(); } return NULL; } if(close){ ent->Close(); } return ent; } FdEntity* FdManager::ExistOpen(const char* path, int existfd, bool ignore_existfd) { S3FS_PRN_DBG("[path=%s][fd=%d][ignore_existfd=%s]", SAFESTRPTR(path), existfd, ignore_existfd ? "true" : "false"); // search by real path FdEntity* ent = Open(path, NULL, -1, -1, false, false); if(!ent && (ignore_existfd || (-1 != existfd))){ // search from all fdentity because of not using cache. AutoLock auto_lock(&FdManager::fd_manager_lock); for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){ if((*iter).second && (*iter).second->IsOpen() && (ignore_existfd || ((*iter).second->GetFd() == existfd))){ // found opened fd in map if(0 == strcmp((*iter).second->GetPath(), path)){ ent = (*iter).second; ent->Dup(); }else{ // found fd, but it is used another file(file descriptor is recycled) // so returns NULL. } break; } } } return ent; } void FdManager::Rename(const std::string &from, const std::string &to) { AutoLock auto_lock(&FdManager::fd_manager_lock); fdent_map_t::iterator iter = fent.find(from); if(fent.end() == iter && !FdManager::IsCacheDir()){ // If the cache directory is not specified, s3fs opens a temporary file // when the file is opened. // Then if it could not find a entity in map for the file, s3fs should // search a entity in all which opened the temporary file. 
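// (With no cache directory, the map key is NOCACHE_PATH_PREFIX_FORM plus
// the object path, e.g. " __S3FS_UNEXISTED_PATH_1a2b__ / /dir/file" with
// a random hex part, so a direct find() by the real path cannot match;
// see MakeRandomTempPath() above.)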
// for(iter = fent.begin(); iter != fent.end(); ++iter){ if((*iter).second && (*iter).second->IsOpen() && 0 == strcmp((*iter).second->GetPath(), from.c_str())){ break; // found opened fd in mapping } } } if(fent.end() != iter){ // found S3FS_PRN_DBG("[from=%s][to=%s]", from.c_str(), to.c_str()); FdEntity* ent = (*iter).second; // retrieve old fd entity from map fent.erase(iter); // rename path and caches in fd entity string fentmapkey; if(!ent->RenamePath(to, fentmapkey)){ S3FS_PRN_ERR("Failed to rename FdEntity obejct for %s to %s", from.c_str(), to.c_str()); return; } // set new fd entity to map fent[fentmapkey] = ent; } } bool FdManager::Close(FdEntity* ent) { S3FS_PRN_DBG("[ent->file=%s][ent->fd=%d]", ent ? ent->GetPath() : "", ent ? ent->GetFd() : -1); if(!ent){ return true; // returns success } AutoLock auto_lock(&FdManager::fd_manager_lock); for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){ if((*iter).second == ent){ ent->Close(); if(!ent->IsOpen()){ // remove found entity from map. fent.erase(iter++); // check another key name for entity value to be on the safe side for(; iter != fent.end(); ){ if((*iter).second == ent){ fent.erase(iter++); }else{ ++iter; } } delete ent; } return true; } } return false; } bool FdManager::ChangeEntityToTempPath(FdEntity* ent, const char* path) { AutoLock auto_lock(&FdManager::fd_manager_lock); for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ){ if((*iter).second == ent){ fent.erase(iter++); string tmppath; FdManager::MakeRandomTempPath(path, tmppath); fent[tmppath] = ent; }else{ ++iter; } } return false; } void FdManager::CleanupCacheDir() { //S3FS_PRN_DBG("cache cleanup requested"); if(!FdManager::IsCacheDir()){ return; } AutoLock auto_lock_no_wait(&FdManager::cache_cleanup_lock, AutoLock::NO_WAIT); if(auto_lock_no_wait.isLockAcquired()){ //S3FS_PRN_DBG("cache cleanup started"); CleanupCacheDirInternal(""); //S3FS_PRN_DBG("cache cleanup ended"); }else{ // wait for other thread to finish cache cleanup AutoLock auto_lock(&FdManager::cache_cleanup_lock); } } void FdManager::CleanupCacheDirInternal(const std::string &path) { DIR* dp; struct dirent* dent; std::string abs_path = cache_dir + "/" + bucket + path; if(NULL == (dp = opendir(abs_path.c_str()))){ S3FS_PRN_ERR("could not open cache dir(%s) - errno(%d)", abs_path.c_str(), errno); return; } for(dent = readdir(dp); dent; dent = readdir(dp)){ if(0 == strcmp(dent->d_name, "..") || 0 == strcmp(dent->d_name, ".")){ continue; } string fullpath = abs_path; fullpath += "/"; fullpath += dent->d_name; struct stat st; if(0 != lstat(fullpath.c_str(), &st)){ S3FS_PRN_ERR("could not get stats of file(%s) - errno(%d)", fullpath.c_str(), errno); closedir(dp); return; } string next_path = path + "/" + dent->d_name; if(S_ISDIR(st.st_mode)){ CleanupCacheDirInternal(next_path); }else{ AutoLock auto_lock(&FdManager::fd_manager_lock, AutoLock::NO_WAIT); if (!auto_lock.isLockAcquired()) { S3FS_PRN_ERR("could not get fd_manager_lock when clean up file(%s)", next_path.c_str()); continue; } fdent_map_t::iterator iter = fent.find(next_path); if(fent.end() == iter) { S3FS_PRN_DBG("cleaned up: %s", next_path.c_str()); FdManager::DeleteCacheFile(next_path.c_str()); } } } closedir(dp); } bool FdManager::ReserveDiskSpace(off_t size) { if(IsSafeDiskSpace(NULL, size)){ AutoLock auto_lock(&FdManager::reserved_diskspace_lock); free_disk_space += size; return true; } return false; } void FdManager::FreeReservedDiskSpace(off_t size) { AutoLock auto_lock(&FdManager::reserved_diskspace_lock); 
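    // [NOTE]
    // ReserveDiskSpace()/FreeReservedDiskSpace() must be strictly paired
    // by the caller; the running total raises the free-space threshold
    // that IsSafeDiskSpace() checks. Hypothetical caller sketch (the same
    // pattern Read() and RowFlush() above follow):
    //
    //   off_t need = 16 * 1024 * 1024;
    //   if(FdManager::ReserveDiskSpace(need)){       // adds to free_disk_space
    //       /* download up to "need" bytes into the cache file */
    //       FdManager::FreeReservedDiskSpace(need);  // subtracts it again
    //   }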
free_disk_space -= size; } /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ s3fs-fuse-1.86/src/fdcache.h000066400000000000000000000240561361654130000156110ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Randy Rizun * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef FD_CACHE_H_ #define FD_CACHE_H_ #include #include "curl.h" //------------------------------------------------ // CacheFileStat //------------------------------------------------ class CacheFileStat { private: std::string path; int fd; private: static bool MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir = true); public: static bool DeleteCacheFileStat(const char* path); static bool CheckCacheFileStatTopDir(void); static bool DeleteCacheFileStatDirectory(void); static bool RenameCacheFileStat(const char* oldpath, const char* newpath); explicit CacheFileStat(const char* tpath = NULL); ~CacheFileStat(); bool Open(void); bool Release(void); bool SetPath(const char* tpath, bool is_open = true); int GetFd(void) const { return fd; } }; //------------------------------------------------ // fdpage & PageList //------------------------------------------------ // page block information struct fdpage { off_t offset; off_t bytes; bool loaded; bool modified; fdpage(off_t start = 0, off_t size = 0, bool is_loaded = false, bool is_modified = false) : offset(start), bytes(size), loaded(is_loaded), modified(is_modified) {} off_t next(void) const { return (offset + bytes); } off_t end(void) const { return (0 < bytes ? offset + bytes - 1 : 0); } }; typedef std::list fdpage_list_t; class FdEntity; // // Management of loading area/modifying // // cppcheck-suppress copyCtorAndEqOperator class PageList { friend class FdEntity; // only one method access directly pages. 
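// [NOTE]
// A PageList records which byte ranges of the local file are loaded
// from S3 and/or locally modified. For example, writing 5 bytes at
// offset 10 of a still-unloaded 20 byte object conceptually yields
// (illustrative values only):
//
//   {offset=0,  bytes=10, loaded=false, modified=false}
//   {offset=10, bytes=5,  loaded=true,  modified=true }
//   {offset=15, bytes=5,  loaded=false, modified=false}
//
// Compress() merges adjacent pages whose status is identical.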
private: fdpage_list_t pages; public: enum page_status{ PAGE_NOT_LOAD_MODIFIED = 0, PAGE_LOADED, PAGE_MODIFIED, PAGE_LOAD_MODIFIED }; private: void Clear(void); bool Compress(bool force_modified = false); bool Parse(off_t new_pos); bool RawGetUnloadPageList(fdpage_list_t& dlpages, off_t offset, off_t size); public: static void FreeList(fdpage_list_t& list); explicit PageList(off_t size = 0, bool is_loaded = false, bool is_modified = false); explicit PageList(const PageList& other); ~PageList(); bool Init(off_t size, bool is_loaded, bool is_modified); off_t Size(void) const; bool Resize(off_t size, bool is_loaded, bool is_modified); bool IsPageLoaded(off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list bool SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus = PAGE_LOADED, bool is_compress = true); bool FindUnloadedPage(off_t start, off_t& resstart, off_t& ressize) const; off_t GetTotalUnloadedPageSize(off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list int GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list bool GetLoadPageListForMultipartUpload(fdpage_list_t& dlpages); bool GetMultipartSizeList(fdpage_list_t& mplist, off_t partsize) const; bool IsModified(void) const; bool ClearAllModified(void); bool Serialize(CacheFileStat& file, bool is_output); void Dump(void); }; //------------------------------------------------ // class FdEntity //------------------------------------------------ class FdEntity { private: static bool mixmultipart; // whether multipart uploading can use copy api. pthread_mutex_t fdent_lock; bool is_lock_init; int refcnt; // reference count std::string path; // object path int fd; // file descriptor(tmp file or cache file) FILE* pfile; // file pointer(tmp file or cache file) headers_t orgmeta; // original headers at opening off_t size_orgmeta; // original file size in original headers pthread_mutex_t fdent_data_lock;// protects the following members PageList pagelist; std::string upload_id; // for no cached multipart uploading when no disk space etaglist_t etaglist; // for no cached multipart uploading when no disk space off_t mp_start; // start position for no cached multipart(write method only) off_t mp_size; // size for no cached multipart(write method only) std::string cachepath; // local cache file path // (if this is empty, does not load/save pagelist.) 
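// [NOTE]
// With a cache directory configured, cachepath has the form
//   <cache_dir>/<bucket><object path>
// while the mirror path lives under <cache_dir>/.<bucket>.mirror/
// (see FdManager::MakeCachePath() and OpenMirrorFile()).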
std::string mirrorpath; // mirror file path to local cache file path private: static int FillFile(int fd, unsigned char byte, off_t size, off_t start); void Clear(void); int OpenMirrorFile(void); bool SetAllStatus(bool is_loaded); // [NOTE] not locking //bool SetAllStatusLoaded(void) { return SetAllStatus(true); } bool SetAllStatusUnloaded(void) { return SetAllStatus(false); } public: static bool SetNoMixMultipart(void); explicit FdEntity(const char* tpath = NULL, const char* cpath = NULL); ~FdEntity(); void Close(void); bool IsOpen(void) const { return (-1 != fd); } int Open(headers_t* pmeta = NULL, off_t size = -1, time_t time = -1, bool no_fd_lock_wait = false); bool OpenAndLoadAll(headers_t* pmeta = NULL, off_t* size = NULL, bool force_load = false); int Dup(bool lock_already_held = false); const char* GetPath(void) const { return path.c_str(); } bool RenamePath(const std::string& newpath, std::string& fentmapkey); int GetFd(void) const { return fd; } bool IsModified(void) const { return pagelist.IsModified(); } bool GetStats(struct stat& st, bool lock_already_held = false); int SetCtime(time_t time); int SetMtime(time_t time, bool lock_already_held = false); bool UpdateCtime(void); bool UpdateMtime(void); bool GetSize(off_t& size); bool SetMode(mode_t mode); bool SetUId(uid_t uid); bool SetGId(gid_t gid); bool SetContentType(const char* path); int Load(off_t start = 0, off_t size = 0, bool lock_already_held = false); // size=0 means loading to end int NoCacheLoadAndPost(off_t start = 0, off_t size = 0); // size=0 means loading to end int NoCachePreMultipartPost(void); int NoCacheMultipartPost(int tgfd, off_t start, off_t size); int NoCacheCompleteMultipartPost(void); int RowFlush(const char* tpath, bool force_sync = false); int Flush(bool force_sync = false) { return RowFlush(NULL, force_sync); } ssize_t Read(char* bytes, off_t start, size_t size, bool force_load = false); ssize_t Write(const char* bytes, off_t start, size_t size); bool ReserveDiskSpace(off_t size); }; typedef std::map fdent_map_t; // key=path, value=FdEntity* //------------------------------------------------ // class FdManager //------------------------------------------------ class FdManager { private: static FdManager singleton; static pthread_mutex_t fd_manager_lock; static pthread_mutex_t cache_cleanup_lock; static pthread_mutex_t reserved_diskspace_lock; static bool is_lock_init; static std::string cache_dir; static bool check_cache_dir_exist; static off_t free_disk_space; // limit free disk space fdent_map_t fent; private: static off_t GetFreeDiskSpace(const char* path); void CleanupCacheDirInternal(const std::string &path = ""); public: FdManager(); ~FdManager(); // Reference singleton static FdManager* get(void) { return &singleton; } static bool DeleteCacheDirectory(void); static int DeleteCacheFile(const char* path); static bool SetCacheDir(const char* dir); static bool IsCacheDir(void) { return (0 < FdManager::cache_dir.size()); } static const char* GetCacheDir(void) { return FdManager::cache_dir.c_str(); } static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true, bool is_mirror_path = false); static bool CheckCacheTopDir(void); static bool MakeRandomTempPath(const char* path, std::string& tmppath); static bool SetCheckCacheDirExist(bool is_check); static bool CheckCacheDirExist(void); static off_t GetEnsureFreeDiskSpace(); static off_t SetEnsureFreeDiskSpace(off_t size); static bool IsSafeDiskSpace(const char* path, off_t size); static void 
FreeReservedDiskSpace(off_t size); static bool ReserveDiskSpace(off_t size); // Return FdEntity associated with path, returning NULL on error. This operation increments the reference count; callers must decrement via Close after use. FdEntity* GetFdEntity(const char* path, int existfd = -1); FdEntity* Open(const char* path, headers_t* pmeta = NULL, off_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true, bool no_fd_lock_wait = false); FdEntity* ExistOpen(const char* path, int existfd = -1, bool ignore_existfd = false); void Rename(const std::string &from, const std::string &to); bool Close(FdEntity* ent); bool ChangeEntityToTempPath(FdEntity* ent, const char* path); void CleanupCacheDir(); }; #endif // FD_CACHE_H_ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ s3fs-fuse-1.86/src/gnutls_auth.cpp000066400000000000000000000246771361654130000171350ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Randy Rizun * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #ifdef USE_GNUTLS_NETTLE #include #include #include #endif #include #include #include "common.h" #include "s3fs_auth.h" using namespace std; //------------------------------------------------------------------- // Utility Function for version //------------------------------------------------------------------- #ifdef USE_GNUTLS_NETTLE const char* s3fs_crypt_lib_name(void) { static const char version[] = "GnuTLS(nettle)"; return version; } #else // USE_GNUTLS_NETTLE const char* s3fs_crypt_lib_name() { static const char version[] = "GnuTLS(gcrypt)"; return version; } #endif // USE_GNUTLS_NETTLE //------------------------------------------------------------------- // Utility Function for global init //------------------------------------------------------------------- bool s3fs_init_global_ssl() { if(GNUTLS_E_SUCCESS != gnutls_global_init()){ return false; } #ifndef USE_GNUTLS_NETTLE if(NULL == gcry_check_version(NULL)){ return false; } #endif // USE_GNUTLS_NETTLE return true; } bool s3fs_destroy_global_ssl() { gnutls_global_deinit(); return true; } //------------------------------------------------------------------- // Utility Function for crypt lock //------------------------------------------------------------------- bool s3fs_init_crypt_mutex() { return true; } bool s3fs_destroy_crypt_mutex() { return true; } //------------------------------------------------------------------- // Utility Function for HMAC //------------------------------------------------------------------- #ifdef USE_GNUTLS_NETTLE bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen) { if(!key || !data || !digest || !digestlen){ return false; } *digest = new unsigned char[SHA1_DIGEST_SIZE]; struct hmac_sha1_ctx ctx_hmac; hmac_sha1_set_key(&ctx_hmac, keylen, reinterpret_cast(key)); hmac_sha1_update(&ctx_hmac, datalen, reinterpret_cast(data)); hmac_sha1_digest(&ctx_hmac, SHA1_DIGEST_SIZE, reinterpret_cast(*digest)); *digestlen = SHA1_DIGEST_SIZE; return true; } bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen) { if(!key || !data || !digest || !digestlen){ return false; } *digest = new unsigned char[SHA256_DIGEST_SIZE]; struct hmac_sha256_ctx ctx_hmac; hmac_sha256_set_key(&ctx_hmac, keylen, reinterpret_cast(key)); hmac_sha256_update(&ctx_hmac, datalen, reinterpret_cast(data)); hmac_sha256_digest(&ctx_hmac, SHA256_DIGEST_SIZE, reinterpret_cast(*digest)); *digestlen = SHA256_DIGEST_SIZE; return true; } #else // USE_GNUTLS_NETTLE bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen) { if(!key || !data || !digest || !digestlen){ return false; } if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA1))){ return false; } *digest = new unsigned char[*digestlen + 1]; if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA1, key, keylen, data, datalen, *digest)){ delete[] *digest; *digest = NULL; return false; } return true; } bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen) { if(!key || !data || !digest || !digestlen){ return false; } if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA256))){ return false; } *digest = new unsigned char[*digestlen + 1]; if(0 > 
gnutls_hmac_fast(GNUTLS_MAC_SHA256, key, keylen, data, datalen, *digest)){ delete[] *digest; *digest = NULL; return false; } return true; } #endif // USE_GNUTLS_NETTLE //------------------------------------------------------------------- // Utility Function for MD5 //------------------------------------------------------------------- size_t get_md5_digest_length() { return 16; } #ifdef USE_GNUTLS_NETTLE unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size) { struct md5_ctx ctx_md5; unsigned char buf[512]; ssize_t bytes; unsigned char* result; memset(buf, 0, 512); md5_init(&ctx_md5); for(ssize_t total = 0; total < size; total += bytes){ bytes = 512 < (size - total) ? 512 : (size - total); bytes = pread(fd, buf, bytes, start + total); if(0 == bytes){ // end of file break; }else if(-1 == bytes){ // error S3FS_PRN_ERR("file read error(%d)", errno); return NULL; } md5_update(&ctx_md5, bytes, buf); memset(buf, 0, 512); } result = new unsigned char[get_md5_digest_length()]; md5_digest(&ctx_md5, get_md5_digest_length(), result); return result; } #else // USE_GNUTLS_NETTLE unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size) { gcry_md_hd_t ctx_md5; gcry_error_t err; char buf[512]; ssize_t bytes; unsigned char* result; if(-1 == size){ struct stat st; if(-1 == fstat(fd, &st)){ return NULL; } size = static_cast(st.st_size); } memset(buf, 0, 512); if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_md5, GCRY_MD_MD5, 0))){ S3FS_PRN_ERR("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err)); return NULL; } for(ssize_t total = 0; total < size; total += bytes){ bytes = 512 < (size - total) ? 512 : (size - total); bytes = pread(fd, buf, bytes, start + total); if(0 == bytes){ // end of file break; }else if(-1 == bytes){ // error S3FS_PRN_ERR("file read error(%d)", errno); gcry_md_close(ctx_md5); return NULL; } gcry_md_write(ctx_md5, buf, bytes); memset(buf, 0, 512); } result = new unsigned char[get_md5_digest_length()]; memcpy(result, gcry_md_read(ctx_md5, 0), get_md5_digest_length()); gcry_md_close(ctx_md5); return result; } #endif // USE_GNUTLS_NETTLE //------------------------------------------------------------------- // Utility Function for SHA256 //------------------------------------------------------------------- size_t get_sha256_digest_length() { return 32; } #ifdef USE_GNUTLS_NETTLE bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen) { (*digestlen) = static_cast(get_sha256_digest_length()); *digest = new unsigned char[*digestlen]; struct sha256_ctx ctx_sha256; sha256_init(&ctx_sha256); sha256_update(&ctx_sha256, datalen, data); sha256_digest(&ctx_sha256, *digestlen, *digest); return true; } unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size) { struct sha256_ctx ctx_sha256; unsigned char buf[512]; ssize_t bytes; unsigned char* result; memset(buf, 0, 512); sha256_init(&ctx_sha256); for(ssize_t total = 0; total < size; total += bytes){ bytes = 512 < (size - total) ? 
512 : (size - total); bytes = pread(fd, buf, bytes, start + total); if(0 == bytes){ // end of file break; }else if(-1 == bytes){ // error S3FS_PRN_ERR("file read error(%d)", errno); return NULL; } sha256_update(&ctx_sha256, bytes, buf); memset(buf, 0, 512); } result = new unsigned char[get_sha256_digest_length()]; sha256_digest(&ctx_sha256, get_sha256_digest_length(), result); return result; } #else // USE_GNUTLS_NETTLE bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen) { size_t len = (*digestlen) = static_cast(get_sha256_digest_length()); *digest = new unsigned char[len]; gcry_md_hd_t ctx_sha256; gcry_error_t err; if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){ S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err)); delete[] *digest; return false; } gcry_md_write(ctx_sha256, data, datalen); memcpy(*digest, gcry_md_read(ctx_sha256, 0), *digestlen); gcry_md_close(ctx_sha256); return true; } unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size) { gcry_md_hd_t ctx_sha256; gcry_error_t err; char buf[512]; ssize_t bytes; unsigned char* result; if(-1 == size){ struct stat st; if(-1 == fstat(fd, &st)){ return NULL; } size = static_cast(st.st_size); } memset(buf, 0, 512); if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){ S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err)); return NULL; } for(ssize_t total = 0; total < size; total += bytes){ bytes = 512 < (size - total) ? 512 : (size - total); bytes = pread(fd, buf, bytes, start + total); if(0 == bytes){ // end of file break; }else if(-1 == bytes){ // error S3FS_PRN_ERR("file read error(%d)", errno); gcry_md_close(ctx_sha256); return NULL; } gcry_md_write(ctx_sha256, buf, bytes); memset(buf, 0, 512); } result = new unsigned char[get_sha256_digest_length()]; memcpy(result, gcry_md_read(ctx_sha256, 0), get_sha256_digest_length()); gcry_md_close(ctx_sha256); return result; } #endif // USE_GNUTLS_NETTLE /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ s3fs-fuse-1.86/src/nss_auth.cpp000066400000000000000000000167341361654130000164170ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Randy Rizun * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #include "s3fs_auth.h" using namespace std; //------------------------------------------------------------------- // Utility Function for version //------------------------------------------------------------------- const char* s3fs_crypt_lib_name() { static const char version[] = "NSS"; return version; } //------------------------------------------------------------------- // Utility Function for global init //------------------------------------------------------------------- bool s3fs_init_global_ssl() { PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0); if(SECSuccess != NSS_NoDB_Init(NULL)){ S3FS_PRN_ERR("Failed NSS_NoDB_Init call."); return false; } return true; } bool s3fs_destroy_global_ssl() { NSS_Shutdown(); PL_ArenaFinish(); PR_Cleanup(); return true; } //------------------------------------------------------------------- // Utility Function for crypt lock //------------------------------------------------------------------- bool s3fs_init_crypt_mutex() { return true; } bool s3fs_destroy_crypt_mutex() { return true; } //------------------------------------------------------------------- // Utility Function for HMAC //------------------------------------------------------------------- static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool is_sha256) { if(!key || !data || !digest || !digestlen){ return false; } PK11SlotInfo* Slot; PK11SymKey* pKey; PK11Context* Context; unsigned char tmpdigest[64]; SECItem KeySecItem = {siBuffer, reinterpret_cast(const_cast(key)), static_cast(keylen)}; SECItem NullSecItem = {siBuffer, NULL, 0}; if(NULL == (Slot = PK11_GetInternalKeySlot())){ return false; } if(NULL == (pKey = PK11_ImportSymKey(Slot, (is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), PK11_OriginUnwrap, CKA_SIGN, &KeySecItem, NULL))){ PK11_FreeSlot(Slot); return false; } if(NULL == (Context = PK11_CreateContextBySymKey((is_sha256 ? 
CKM_SHA256_HMAC : CKM_SHA_1_HMAC), CKA_SIGN, pKey, &NullSecItem))){ PK11_FreeSymKey(pKey); PK11_FreeSlot(Slot); return false; } *digestlen = 0; if(SECSuccess != PK11_DigestBegin(Context) || SECSuccess != PK11_DigestOp(Context, data, datalen) || SECSuccess != PK11_DigestFinal(Context, tmpdigest, digestlen, sizeof(tmpdigest)) ) { PK11_DestroyContext(Context, PR_TRUE); PK11_FreeSymKey(pKey); PK11_FreeSlot(Slot); return false; } PK11_DestroyContext(Context, PR_TRUE); PK11_FreeSymKey(pKey); PK11_FreeSlot(Slot); *digest = new unsigned char[*digestlen]; memcpy(*digest, tmpdigest, *digestlen); return true; } bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen) { return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false); } bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen) { return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true); } //------------------------------------------------------------------- // Utility Function for MD5 //------------------------------------------------------------------- size_t get_md5_digest_length() { return MD5_LENGTH; } unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size) { PK11Context* md5ctx; unsigned char buf[512]; ssize_t bytes; unsigned char* result; unsigned int md5outlen; if(-1 == size){ struct stat st; if(-1 == fstat(fd, &st)){ return NULL; } size = static_cast(st.st_size); } memset(buf, 0, 512); md5ctx = PK11_CreateDigestContext(SEC_OID_MD5); for(ssize_t total = 0; total < size; total += bytes){ bytes = 512 < (size - total) ? 512 : (size - total); bytes = pread(fd, buf, bytes, start + total); if(0 == bytes){ // end of file break; }else if(-1 == bytes){ // error S3FS_PRN_ERR("file read error(%d)", errno); PK11_DestroyContext(md5ctx, PR_TRUE); return NULL; } PK11_DigestOp(md5ctx, buf, bytes); memset(buf, 0, 512); } result = new unsigned char[get_md5_digest_length()]; PK11_DigestFinal(md5ctx, result, &md5outlen, get_md5_digest_length()); PK11_DestroyContext(md5ctx, PR_TRUE); return result; } //------------------------------------------------------------------- // Utility Function for SHA256 //------------------------------------------------------------------- size_t get_sha256_digest_length() { return SHA256_LENGTH; } bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen) { (*digestlen) = static_cast(get_sha256_digest_length()); *digest = new unsigned char[*digestlen]; PK11Context* sha256ctx; unsigned int sha256outlen; sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256); PK11_DigestOp(sha256ctx, data, datalen); PK11_DigestFinal(sha256ctx, *digest, &sha256outlen, *digestlen); PK11_DestroyContext(sha256ctx, PR_TRUE); *digestlen = sha256outlen; return true; } unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size) { PK11Context* sha256ctx; unsigned char buf[512]; ssize_t bytes; unsigned char* result; unsigned int sha256outlen; if(-1 == size){ struct stat st; if(-1 == fstat(fd, &st)){ return NULL; } size = static_cast(st.st_size); } memset(buf, 0, 512); sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256); for(ssize_t total = 0; total < size; total += bytes){ bytes = 512 < (size - total) ? 
512 : (size - total); bytes = pread(fd, buf, bytes, start + total); if(0 == bytes){ // end of file break; }else if(-1 == bytes){ // error S3FS_PRN_ERR("file read error(%d)", errno); PK11_DestroyContext(sha256ctx, PR_TRUE); return NULL; } PK11_DigestOp(sha256ctx, buf, bytes); memset(buf, 0, 512); } result = new unsigned char[get_sha256_digest_length()]; PK11_DigestFinal(sha256ctx, result, &sha256outlen, get_sha256_digest_length()); PK11_DestroyContext(sha256ctx, PR_TRUE); return result; } /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ s3fs-fuse-1.86/src/openssl_auth.cpp000066400000000000000000000231331361654130000172660ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Randy Rizun * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #include "s3fs_auth.h" using namespace std; //------------------------------------------------------------------- // Utility Function for version //------------------------------------------------------------------- const char* s3fs_crypt_lib_name() { static const char version[] = "OpenSSL"; return version; } //------------------------------------------------------------------- // Utility Function for global init //------------------------------------------------------------------- bool s3fs_init_global_ssl() { ERR_load_crypto_strings(); ERR_load_BIO_strings(); OpenSSL_add_all_algorithms(); return true; } bool s3fs_destroy_global_ssl() { EVP_cleanup(); ERR_free_strings(); return true; } //------------------------------------------------------------------- // Utility Function for crypt lock //------------------------------------------------------------------- // internal use struct for openssl struct CRYPTO_dynlock_value { pthread_mutex_t dyn_mutex; }; static pthread_mutex_t* s3fs_crypt_mutex = NULL; static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line) __attribute__ ((unused)); static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line) { if(s3fs_crypt_mutex){ if(mode & CRYPTO_LOCK){ pthread_mutex_lock(&s3fs_crypt_mutex[pos]); }else{ pthread_mutex_unlock(&s3fs_crypt_mutex[pos]); } } } static unsigned long s3fs_crypt_get_threadid() __attribute__ ((unused)); static unsigned long s3fs_crypt_get_threadid() { // For FreeBSD etc, some system's pthread_t is structure pointer. // Then we use cast like C style(not C++) instead of ifdef. 
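    // For example, both of these definitions exist in the wild, and only
    // the C-style cast converts either one to unsigned long:
    //
    //   typedef unsigned long pthread_t;      // glibc
    //   typedef struct pthread* pthread_t;    // FreeBSD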
return (unsigned long)(pthread_self()); } static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line) __attribute__ ((unused)); static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line) { struct CRYPTO_dynlock_value* dyndata = new CRYPTO_dynlock_value(); pthread_mutexattr_t attr; pthread_mutexattr_init(&attr); #if S3FS_PTHREAD_ERRORCHECK pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); #endif pthread_mutex_init(&(dyndata->dyn_mutex), &attr); return dyndata; } static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line) __attribute__ ((unused)); static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line) { if(dyndata){ if(mode & CRYPTO_LOCK){ pthread_mutex_lock(&(dyndata->dyn_mutex)); }else{ pthread_mutex_unlock(&(dyndata->dyn_mutex)); } } } static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, const char* file, int line) __attribute__ ((unused)); static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, const char* file, int line) { if(dyndata){ pthread_mutex_destroy(&(dyndata->dyn_mutex)); delete dyndata; } } bool s3fs_init_crypt_mutex() { if(s3fs_crypt_mutex){ S3FS_PRN_DBG("s3fs_crypt_mutex is not NULL, destroy it."); if(!s3fs_destroy_crypt_mutex()){ S3FS_PRN_ERR("Failed to s3fs_crypt_mutex"); return false; } } s3fs_crypt_mutex = new pthread_mutex_t[CRYPTO_num_locks()]; pthread_mutexattr_t attr; pthread_mutexattr_init(&attr); #if S3FS_PTHREAD_ERRORCHECK pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); #endif for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){ pthread_mutex_init(&s3fs_crypt_mutex[cnt], &attr); } // static lock CRYPTO_set_locking_callback(s3fs_crypt_mutex_lock); CRYPTO_set_id_callback(s3fs_crypt_get_threadid); // dynamic lock CRYPTO_set_dynlock_create_callback(s3fs_dyn_crypt_mutex); CRYPTO_set_dynlock_lock_callback(s3fs_dyn_crypt_mutex_lock); CRYPTO_set_dynlock_destroy_callback(s3fs_destroy_dyn_crypt_mutex); return true; } bool s3fs_destroy_crypt_mutex() { if(!s3fs_crypt_mutex){ return true; } CRYPTO_set_dynlock_destroy_callback(NULL); CRYPTO_set_dynlock_lock_callback(NULL); CRYPTO_set_dynlock_create_callback(NULL); CRYPTO_set_id_callback(NULL); CRYPTO_set_locking_callback(NULL); for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){ pthread_mutex_destroy(&s3fs_crypt_mutex[cnt]); } CRYPTO_cleanup_all_ex_data(); delete[] s3fs_crypt_mutex; s3fs_crypt_mutex = NULL; return true; } //------------------------------------------------------------------- // Utility Function for HMAC //------------------------------------------------------------------- static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool is_sha256) { if(!key || !data || !digest || !digestlen){ return false; } (*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char); *digest = new unsigned char[*digestlen]; if(is_sha256){ HMAC(EVP_sha256(), key, keylen, data, datalen, *digest, digestlen); }else{ HMAC(EVP_sha1(), key, keylen, data, datalen, *digest, digestlen); } return true; } bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen) { return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false); } bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** 
digest, unsigned int* digestlen) { return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true); } //------------------------------------------------------------------- // Utility Function for MD5 //------------------------------------------------------------------- size_t get_md5_digest_length() { return MD5_DIGEST_LENGTH; } unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size) { MD5_CTX md5ctx; char buf[512]; ssize_t bytes; unsigned char* result; if(-1 == size){ struct stat st; if(-1 == fstat(fd, &st)){ return NULL; } size = static_cast(st.st_size); } memset(buf, 0, 512); MD5_Init(&md5ctx); for(ssize_t total = 0; total < size; total += bytes){ bytes = 512 < (size - total) ? 512 : (size - total); bytes = pread(fd, buf, bytes, start + total); if(0 == bytes){ // end of file break; }else if(-1 == bytes){ // error S3FS_PRN_ERR("file read error(%d)", errno); return NULL; } MD5_Update(&md5ctx, buf, bytes); memset(buf, 0, 512); } result = new unsigned char[get_md5_digest_length()]; MD5_Final(result, &md5ctx); return result; } //------------------------------------------------------------------- // Utility Function for SHA256 //------------------------------------------------------------------- size_t get_sha256_digest_length() { return SHA256_DIGEST_LENGTH; } bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen) { (*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char); *digest = new unsigned char[*digestlen]; const EVP_MD* md = EVP_get_digestbyname("sha256"); EVP_MD_CTX* mdctx = EVP_MD_CTX_create(); EVP_DigestInit_ex(mdctx, md, NULL); EVP_DigestUpdate(mdctx, data, datalen); EVP_DigestFinal_ex(mdctx, *digest, digestlen); EVP_MD_CTX_destroy(mdctx); return true; } unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size) { const EVP_MD* md = EVP_get_digestbyname("sha256"); EVP_MD_CTX* sha256ctx; char buf[512]; ssize_t bytes; unsigned char* result; if(-1 == size){ struct stat st; if(-1 == fstat(fd, &st)){ return NULL; } size = static_cast(st.st_size); } sha256ctx = EVP_MD_CTX_create(); EVP_DigestInit_ex(sha256ctx, md, NULL); memset(buf, 0, 512); for(ssize_t total = 0; total < size; total += bytes){ bytes = 512 < (size - total) ? 512 : (size - total); bytes = pread(fd, buf, bytes, start + total); if(0 == bytes){ // end of file break; }else if(-1 == bytes){ // error S3FS_PRN_ERR("file read error(%d)", errno); EVP_MD_CTX_destroy(sha256ctx); return NULL; } EVP_DigestUpdate(sha256ctx, buf, bytes); memset(buf, 0, 512); } result = new unsigned char[get_sha256_digest_length()]; EVP_DigestFinal_ex(sha256ctx, result, NULL); EVP_MD_CTX_destroy(sha256ctx); return result; } /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ s3fs-fuse-1.86/src/psemaphore.h000066400000000000000000000040151361654130000163700ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Randy Rizun * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef S3FS_SEMAPHORE_H_ #define S3FS_SEMAPHORE_H_ // portability wrapper for sem_t since macOS does not implement it #ifdef __APPLE__ #include class Semaphore { public: explicit Semaphore(int value) : value(value), sem(dispatch_semaphore_create(value)) {} ~Semaphore() { // macOS cannot destroy a semaphore with posts less than the initializer for(int i = 0; i < get_value(); ++i){ post(); } dispatch_release(sem); } void wait() { dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER); } void post() { dispatch_semaphore_signal(sem); } int get_value() const { return value; } private: const int value; dispatch_semaphore_t sem; }; #else #include #include class Semaphore { public: explicit Semaphore(int value) : value(value) { sem_init(&mutex, 0, value); } ~Semaphore() { sem_destroy(&mutex); } void wait() { int r; do { r = sem_wait(&mutex); } while (r == -1 && errno == EINTR); } void post() { sem_post(&mutex); } int get_value() const { return value; } private: const int value; sem_t mutex; }; #endif #endif // S3FS_SEMAPHORE_H_ s3fs-fuse-1.86/src/s3fs.cpp000066400000000000000000005060501361654130000154440ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Randy Rizun * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #include "s3fs.h" #include "curl.h" #include "cache.h" #include "string_util.h" #include "s3fs_util.h" #include "fdcache.h" #include "s3fs_auth.h" #include "addhead.h" using namespace std; //------------------------------------------------------------------- // Define //------------------------------------------------------------------- enum dirtype { DIRTYPE_UNKNOWN = -1, DIRTYPE_NEW = 0, DIRTYPE_OLD = 1, DIRTYPE_FOLDER = 2, DIRTYPE_NOOBJ = 3, }; static bool IS_REPLACEDIR(dirtype type) { return DIRTYPE_OLD == type || DIRTYPE_FOLDER == type || DIRTYPE_NOOBJ == type; } static bool IS_RMTYPEDIR(dirtype type) { return DIRTYPE_OLD == type || DIRTYPE_FOLDER == type; } #if !defined(ENOATTR) #define ENOATTR ENODATA #endif // // Type of utility process mode // enum utility_incomp_type{ NO_UTILITY_MODE = 0, // not utility mode INCOMP_TYPE_LIST, // list of incomplete mpu INCOMP_TYPE_ABORT // delete incomplete mpu }; //------------------------------------------------------------------- // Structs //------------------------------------------------------------------- typedef struct incomplete_multipart_upload_info{ string key; string id; string date; }INCOMP_MPU_INFO; typedef std::list incomp_mpu_list_t; typedef std::list readline_t; typedef std::map kvmap_t; typedef std::map bucketkvmap_t; //------------------------------------------------------------------- // Global variables //------------------------------------------------------------------- bool foreground = false; bool nomultipart = false; bool pathrequeststyle = false; bool complement_stat = false; std::string program_name; std::string service_path = "/"; std::string host = "https://s3.amazonaws.com"; std::string bucket; std::string endpoint = "us-east-1"; std::string cipher_suites; std::string instance_name; s3fs_log_level debug_level = S3FS_LOG_CRIT; const char* s3fs_log_nest[S3FS_LOG_NEST_MAX] = {"", " ", " ", " "}; std::string aws_profile = "default"; //------------------------------------------------------------------- // Static variables //------------------------------------------------------------------- static uid_t mp_uid = 0; // owner of mount point(only not specified uid opt) static gid_t mp_gid = 0; // group of mount point(only not specified gid opt) static mode_t mp_mode = 0; // mode of mount point static mode_t mp_umask = 0; // umask for mount point static bool is_mp_umask = false;// default does not set. static std::string mountpoint; static std::string passwd_file; static utility_incomp_type utility_mode = NO_UTILITY_MODE; static bool noxmlns = false; static bool nocopyapi = false; static bool norenameapi = false; static bool nonempty = false; static bool allow_other = false; static bool load_iamrole = false; static uid_t s3fs_uid = 0; static gid_t s3fs_gid = 0; static mode_t s3fs_umask = 0; static bool is_s3fs_uid = false;// default does not set. static bool is_s3fs_gid = false;// default does not set. static bool is_s3fs_umask = false;// default does not set. 
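// [NOTE]
// The s3fs_uid/s3fs_gid/s3fs_umask values above mirror the "uid=", "gid="
// and "umask=" mount options, and each is_* flag records whether the
// option was actually given. check_object_access() applies them when
// deciding permissions: when umask is set, every object is treated as
// mode (0777 & ~umask) regardless of its stored x-amz-meta-mode. A
// minimal sketch of that rule (the helper name is illustrative and does
// not exist in this source):
//
//     static mode_t effective_mode(mode_t stored_mode)
//     {
//         return is_s3fs_umask ? ((S_IRWXU | S_IRWXG | S_IRWXO) & ~s3fs_umask) : stored_mode;
//     }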
static bool is_remove_cache = false; static bool is_ecs = false; static bool is_ibm_iam_auth = false; static bool is_use_xattr = false; static bool is_use_session_token = false; static bool create_bucket = false; static int64_t singlepart_copy_limit = 512 * 1024 * 1024; static bool is_specified_endpoint = false; static int s3fs_init_deferred_exit_status = 0; static bool support_compat_dir = true;// default supports compatibility directory type static int max_keys_list_object = 1000;// default is 1000 static bool use_wtf8 = false; static const std::string allbucket_fields_type; // special key for mapping(This name is absolutely not used as a bucket name) static const std::string keyval_fields_type = "\t"; // special key for mapping(This name is absolutely not used as a bucket name) static const std::string aws_accesskeyid = "AWSAccessKeyId"; static const std::string aws_secretkey = "AWSSecretKey"; //------------------------------------------------------------------- // Static functions : prototype //------------------------------------------------------------------- static void s3fs_usr2_handler(int sig); static bool set_s3fs_usr2_handler(); static s3fs_log_level set_s3fs_log_level(s3fs_log_level level); static s3fs_log_level bumpup_s3fs_log_level(); static bool is_special_name_folder_object(const char* path); static int chk_dir_object_type(const char* path, string& newpath, string& nowpath, string& nowcache, headers_t* pmeta = NULL, dirtype* pDirType = NULL); static int remove_old_type_dir(const string& path, dirtype type); static int get_object_attribute(const char* path, struct stat* pstbuf, headers_t* pmeta = NULL, bool overcheck = true, bool* pisforce = NULL, bool add_no_truncate_cache = false); static int check_object_access(const char* path, int mask, struct stat* pstbuf); static int check_object_owner(const char* path, struct stat* pstbuf); static int check_parent_object_access(const char* path, int mask); static FdEntity* get_local_fent(const char* path, bool is_load = false); static bool multi_head_callback(S3fsCurl* s3fscurl); static S3fsCurl* multi_head_retry_callback(S3fsCurl* s3fscurl); static int readdir_multi_head(const char* path, const S3ObjList& head, void* buf, fuse_fill_dir_t filler); static int list_bucket(const char* path, S3ObjList& head, const char* delimiter, bool check_content_only = false); static int directory_empty(const char* path); static bool is_truncated(xmlDocPtr doc); static int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head); static int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head); static bool GetXmlNsUrl(xmlDocPtr doc, string& nsurl); static xmlChar* get_base_exp(xmlDocPtr doc, const char* exp); static xmlChar* get_prefix(xmlDocPtr doc); static xmlChar* get_next_marker(xmlDocPtr doc); static char* get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path); static int put_headers(const char* path, headers_t& meta, bool is_copy); static int rename_large_object(const char* from, const char* to); static int create_file_object(const char* path, mode_t mode, uid_t uid, gid_t gid); static int create_directory_object(const char* path, mode_t mode, time_t time, uid_t uid, gid_t gid); static int rename_object(const char* from, const char* to); static int rename_object_nocopy(const char* from, const char* to); static int clone_directory_object(const char* from, const char* to); static int 
rename_directory(const char* from, const char* to); static int remote_mountpath_exists(const char* path); static xmlChar* get_exp_value_xml(xmlDocPtr doc, xmlXPathContextPtr ctx, const char* exp_key); static void print_incomp_mpu_list(incomp_mpu_list_t& list); static bool abort_incomp_mpu_list(incomp_mpu_list_t& list, time_t abort_time); static bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list); static void free_xattrs(xattrs_t& xattrs); static bool parse_xattr_keyval(const std::string& xattrpair, string& key, PXATTRVAL& pval); static size_t parse_xattrs(const std::string& strxattrs, xattrs_t& xattrs); static std::string build_xattrs(const xattrs_t& xattrs); static int s3fs_utility_processing(time_t abort_time); static int s3fs_check_service(); static int parse_passwd_file(bucketkvmap_t& resmap); static int check_for_aws_format(const kvmap_t& kvmap); static int check_passwd_file_perms(); static int read_aws_credentials_file(const std::string &filename); static int read_passwd_file(); static int get_access_keys(); static bool set_mountpoint_attribute(struct stat& mpst); static int set_bucket(const char* arg); static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_args* outargs); // fuse interface functions static int s3fs_getattr(const char* path, struct stat* stbuf); static int s3fs_readlink(const char* path, char* buf, size_t size); static int s3fs_mknod(const char* path, mode_t mode, dev_t rdev); static int s3fs_mkdir(const char* path, mode_t mode); static int s3fs_unlink(const char* path); static int s3fs_rmdir(const char* path); static int s3fs_symlink(const char* from, const char* to); static int s3fs_rename(const char* from, const char* to); static int s3fs_link(const char* from, const char* to); static int s3fs_chmod(const char* path, mode_t mode); static int s3fs_chmod_nocopy(const char* path, mode_t mode); static int s3fs_chown(const char* path, uid_t uid, gid_t gid); static int s3fs_chown_nocopy(const char* path, uid_t uid, gid_t gid); static int s3fs_utimens(const char* path, const struct timespec ts[2]); static int s3fs_utimens_nocopy(const char* path, const struct timespec ts[2]); static int s3fs_truncate(const char* path, off_t size); static int s3fs_create(const char* path, mode_t mode, struct fuse_file_info* fi); static int s3fs_open(const char* path, struct fuse_file_info* fi); static int s3fs_read(const char* path, char* buf, size_t size, off_t offset, struct fuse_file_info* fi); static int s3fs_write(const char* path, const char* buf, size_t size, off_t offset, struct fuse_file_info* fi); static int s3fs_statfs(const char* path, struct statvfs* stbuf); static int s3fs_flush(const char* path, struct fuse_file_info* fi); static int s3fs_fsync(const char* path, int datasync, struct fuse_file_info* fi); static int s3fs_release(const char* path, struct fuse_file_info* fi); static int s3fs_opendir(const char* path, struct fuse_file_info* fi); static int s3fs_readdir(const char* path, void* buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info* fi); static int s3fs_access(const char* path, int mask); static void* s3fs_init(struct fuse_conn_info* conn); static void s3fs_destroy(void*); #if defined(__APPLE__) static int s3fs_setxattr(const char* path, const char* name, const char* value, size_t size, int flags, uint32_t position); static int s3fs_getxattr(const char* path, const char* name, char* value, size_t size, uint32_t position); #else static int s3fs_setxattr(const char* path, const char* name, const char* value, size_t size, 
int flags); static int s3fs_getxattr(const char* path, const char* name, char* value, size_t size); #endif static int s3fs_listxattr(const char* path, char* list, size_t size); static int s3fs_removexattr(const char* path, const char* name); //------------------------------------------------------------------- // WTF8 macros //------------------------------------------------------------------- #define WTF8_ENCODE(ARG) \ std::string ARG##_buf; \ const char * ARG = _##ARG; \ if (use_wtf8 && s3fs_wtf8_encode( _##ARG, 0 )) { \ s3fs_wtf8_encode( _##ARG, &ARG##_buf); \ ARG = ARG##_buf.c_str(); \ } //------------------------------------------------------------------- // Functions //------------------------------------------------------------------- static void s3fs_usr2_handler(int sig) { if(SIGUSR2 == sig){ bumpup_s3fs_log_level(); } } static bool set_s3fs_usr2_handler() { struct sigaction sa; memset(&sa, 0, sizeof(struct sigaction)); sa.sa_handler = s3fs_usr2_handler; sa.sa_flags = SA_RESTART; if(0 != sigaction(SIGUSR2, &sa, NULL)){ return false; } return true; } static s3fs_log_level set_s3fs_log_level(s3fs_log_level level) { if(level == debug_level){ return debug_level; } s3fs_log_level old = debug_level; debug_level = level; setlogmask(LOG_UPTO(S3FS_LOG_LEVEL_TO_SYSLOG(debug_level))); S3FS_PRN_CRIT("change debug level from %sto %s", S3FS_LOG_LEVEL_STRING(old), S3FS_LOG_LEVEL_STRING(debug_level)); return old; } static s3fs_log_level bumpup_s3fs_log_level() { s3fs_log_level old = debug_level; debug_level = ( S3FS_LOG_CRIT == debug_level ? S3FS_LOG_ERR : S3FS_LOG_ERR == debug_level ? S3FS_LOG_WARN : S3FS_LOG_WARN == debug_level ? S3FS_LOG_INFO : S3FS_LOG_INFO == debug_level ? S3FS_LOG_DBG : S3FS_LOG_CRIT ); setlogmask(LOG_UPTO(S3FS_LOG_LEVEL_TO_SYSLOG(debug_level))); S3FS_PRN_CRIT("change debug level from %sto %s", S3FS_LOG_LEVEL_STRING(old), S3FS_LOG_LEVEL_STRING(debug_level)); return old; } static bool is_special_name_folder_object(const char* path) { if(!support_compat_dir){ // s3fs does not support compatibility directory type("_$folder$" etc) now, // thus always returns false. return false; } if(!path || '\0' == path[0]){ return false; } string strpath = path; headers_t header; if(string::npos == strpath.find("_$folder$", 0)){ if('/' == strpath[strpath.length() - 1]){ strpath = strpath.substr(0, strpath.length() - 1); } strpath += "_$folder$"; } S3fsCurl s3fscurl; if(0 != s3fscurl.HeadRequest(strpath.c_str(), header)){ return false; } header.clear(); S3FS_MALLOCTRIM(0); return true; } // [Detail] // This function is complicated for checking directory object type. // Arguments is used for deleting cache/path, and remake directory object. // Please see the codes which calls this function. // // path: target path // newpath: should be object path for making/putting/getting after checking // nowpath: now object name for deleting after checking // nowcache: now cache path for deleting after checking // pmeta: headers map // pDirType: directory object type // static int chk_dir_object_type(const char* path, string& newpath, string& nowpath, string& nowcache, headers_t* pmeta, dirtype* pDirType) { dirtype TypeTmp; int result = -1; bool isforce = false; dirtype* pType = pDirType ? pDirType : &TypeTmp; // Normalize new path. newpath = path; if('/' != newpath[newpath.length() - 1]){ string::size_type Pos; if(string::npos != (Pos = newpath.find("_$folder$", 0))){ newpath = newpath.substr(0, Pos); } newpath += "/"; } // Always check "dir/" at first. 
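    // [NOTE]
    // A directory can exist on S3 in several forms, matching the dirtype
    // values: a "dir/" object (DIRTYPE_NEW), a "dir" object made by old
    // s3fs versions (DIRTYPE_OLD), a "dir_$folder$" object made by other
    // clients (DIRTYPE_FOLDER), or no object at all with only child keys
    // under the prefix (DIRTYPE_NOOBJ). For example, a directory "a/b"
    // may be stored as:
    //
    //     "dir/"       -> key "a/b/"
    //     "dir"        -> key "a/b"
    //     "_$folder$"  -> key "a/b_$folder$"
    //     "no object"  -> only keys such as "a/b/c" exist
    //
    // "dir/" is probed first because it is the form current s3fs writes,
    // so it is the cheapest common case; the others are tried on a miss.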
if(0 == (result = get_object_attribute(newpath.c_str(), NULL, pmeta, false, &isforce))){ // Found "dir/" cache --> Check for "_$folder$", "no dir object" nowcache = newpath; if(is_special_name_folder_object(newpath.c_str())){ // check support_compat_dir in this function // "_$folder$" type. (*pType) = DIRTYPE_FOLDER; nowpath = newpath.substr(0, newpath.length() - 1) + "_$folder$"; // cut and add }else if(isforce){ // "no dir object" type. (*pType) = DIRTYPE_NOOBJ; nowpath = ""; }else{ nowpath = newpath; if(0 < nowpath.length() && '/' == nowpath[nowpath.length() - 1]){ // "dir/" type (*pType) = DIRTYPE_NEW; }else{ // "dir" type (*pType) = DIRTYPE_OLD; } } }else if(support_compat_dir){ // Check "dir" when support_compat_dir is enabled nowpath = newpath.substr(0, newpath.length() - 1); if(0 == (result = get_object_attribute(nowpath.c_str(), NULL, pmeta, false, &isforce))){ // Found "dir" cache --> this case is only "dir" type. // Because, if object is "_$folder$" or "no dir object", the cache is "dir/" type. // (But "no dir object" is checked here.) nowcache = nowpath; if(isforce){ (*pType) = DIRTYPE_NOOBJ; nowpath = ""; }else{ (*pType) = DIRTYPE_OLD; } }else{ // Not found cache --> check for "_$folder$" and "no dir object". // (come here is that support_compat_dir is enabled) nowcache = ""; // This case is no cache. nowpath += "_$folder$"; if(is_special_name_folder_object(nowpath.c_str())){ // "_$folder$" type. (*pType) = DIRTYPE_FOLDER; result = 0; // result is OK. }else if(-ENOTEMPTY == directory_empty(newpath.c_str())){ // "no dir object" type. (*pType) = DIRTYPE_NOOBJ; nowpath = ""; // now path. result = 0; // result is OK. }else{ // Error: Unknown type. (*pType) = DIRTYPE_UNKNOWN; newpath = ""; nowpath = ""; } } } return result; } static int remove_old_type_dir(const string& path, dirtype type) { if(IS_RMTYPEDIR(type)){ S3fsCurl s3fscurl; int result = s3fscurl.DeleteRequest(path.c_str()); if(0 != result && -ENOENT != result){ return result; } // succeed removing or not found the directory }else{ // nothing to do } return 0; } // // Get object attributes with stat cache. // This function is base for s3fs_getattr(). // // [NOTICE] // Checking order is changed following list because of reducing the number of the requests. // 1) "dir" // 2) "dir/" // 3) "dir_$folder$" // static int get_object_attribute(const char* path, struct stat* pstbuf, headers_t* pmeta, bool overcheck, bool* pisforce, bool add_no_truncate_cache) { int result = -1; struct stat tmpstbuf; struct stat* pstat = pstbuf ? pstbuf : &tmpstbuf; headers_t tmpHead; headers_t* pheader = pmeta ? pmeta : &tmpHead; string strpath; S3fsCurl s3fscurl; bool forcedir = false; string::size_type Pos; S3FS_PRN_DBG("[path=%s]", path); if(!path || '\0' == path[0]){ return -ENOENT; } memset(pstat, 0, sizeof(struct stat)); if(0 == strcmp(path, "/") || 0 == strcmp(path, ".")){ pstat->st_nlink = 1; // see fuse faq pstat->st_mode = mp_mode; pstat->st_uid = is_s3fs_uid ? s3fs_uid : mp_uid; pstat->st_gid = is_s3fs_gid ? s3fs_gid : mp_gid; return 0; } // Check cache. pisforce = (NULL != pisforce ? 
pisforce : &forcedir); (*pisforce) = false; strpath = path; if(support_compat_dir && overcheck && string::npos != (Pos = strpath.find("_$folder$", 0))){ strpath = strpath.substr(0, Pos); strpath += "/"; } if(StatCache::getStatCacheData()->GetStat(strpath, pstat, pheader, overcheck, pisforce)){ StatCache::getStatCacheData()->ChangeNoTruncateFlag(strpath, add_no_truncate_cache); return 0; } if(StatCache::getStatCacheData()->IsNoObjectCache(strpath)){ // there is the path in the cache for no object, it is no object. return -ENOENT; } // At first, check path strpath = path; result = s3fscurl.HeadRequest(strpath.c_str(), (*pheader)); s3fscurl.DestroyCurlHandle(); // if not found target path object, do over checking if(0 != result){ if(overcheck){ // when support_compat_dir is disabled, strpath maybe have "_$folder$". if('/' != strpath[strpath.length() - 1] && string::npos == strpath.find("_$folder$", 0)){ // now path is "object", do check "object/" for over checking strpath += "/"; result = s3fscurl.HeadRequest(strpath.c_str(), (*pheader)); s3fscurl.DestroyCurlHandle(); } if(support_compat_dir && 0 != result){ // now path is "object/", do check "object_$folder$" for over checking strpath = strpath.substr(0, strpath.length() - 1); strpath += "_$folder$"; result = s3fscurl.HeadRequest(strpath.c_str(), (*pheader)); s3fscurl.DestroyCurlHandle(); if(0 != result){ // cut "_$folder$" for over checking "no dir object" after here if(string::npos != (Pos = strpath.find("_$folder$", 0))){ strpath = strpath.substr(0, Pos); } } } } if(support_compat_dir && 0 != result && string::npos == strpath.find("_$folder$", 0)){ // now path is "object" or "object/", do check "no dir object" which is not object but has only children. if('/' == strpath[strpath.length() - 1]){ strpath = strpath.substr(0, strpath.length() - 1); } if(-ENOTEMPTY == directory_empty(strpath.c_str())){ // found "no dir object". strpath += "/"; *pisforce = true; result = 0; } } }else{ if(support_compat_dir && '/' != strpath[strpath.length() - 1] && string::npos == strpath.find("_$folder$", 0) && is_need_check_obj_detail(*pheader)){ // check a case of that "object" does not have attribute and "object" is possible to be directory. if(-ENOTEMPTY == directory_empty(strpath.c_str())){ // found "no dir object". strpath += "/"; *pisforce = true; result = 0; } } } if(0 != result){ // finally, "path" object did not find. Add no object cache. strpath = path; // reset original StatCache::getStatCacheData()->AddNoObjectCache(strpath); return result; } // if path has "_$folder$", need to cut it. if(string::npos != (Pos = strpath.find("_$folder$", 0))){ strpath = strpath.substr(0, Pos); strpath += "/"; } // Set into cache // // [NOTE] // When add_no_truncate_cache is true, the stats is always cached. // This cached stats is only removed by DelStat(). // This is necessary for the case to access the attribute of opened file. // (ex. getxattr() is called while writing to the opened file.) // if(add_no_truncate_cache || 0 != StatCache::getStatCacheData()->GetCacheSize()){ // add into stat cache if(!StatCache::getStatCacheData()->AddStat(strpath, (*pheader), forcedir, add_no_truncate_cache)){ S3FS_PRN_ERR("failed adding stat cache [path=%s]", strpath.c_str()); return -ENOENT; } if(!StatCache::getStatCacheData()->GetStat(strpath, pstat, pheader, overcheck, pisforce)){ // There is not in cache.(why?) -> retry to convert. 
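            // [NOTE]
            // AddStat() can succeed and GetStat() can still miss here, for
            // example when the cache is at capacity and the fresh entry is
            // evicted immediately, or when another thread truncates the
            // cache in between. Falling back to convert_header_to_stat()
            // keeps the result correct without depending on cache
            // residency; the conversion maps response headers onto struct
            // stat roughly as follows:
            //
            //     x-amz-meta-mode  -> st_mode
            //     x-amz-meta-uid   -> st_uid
            //     x-amz-meta-gid   -> st_gid
            //     x-amz-meta-mtime -> st_mtime
            //     Content-Length   -> st_size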
if(!convert_header_to_stat(strpath.c_str(), (*pheader), pstat, forcedir)){ S3FS_PRN_ERR("failed convert headers to stat[path=%s]", strpath.c_str()); return -ENOENT; } } }else{ // cache size is Zero -> only convert. if(!convert_header_to_stat(strpath.c_str(), (*pheader), pstat, forcedir)){ S3FS_PRN_ERR("failed convert headers to stat[path=%s]", strpath.c_str()); return -ENOENT; } } return 0; } // // Check the object uid and gid for write/read/execute. // The param "mask" is as same as access() function. // If there is not a target file, this function returns -ENOENT. // If the target file can be accessed, the result always is 0. // // path: the target object path // mask: bit field(F_OK, R_OK, W_OK, X_OK) like access(). // stat: NULL or the pointer of struct stat. // static int check_object_access(const char* path, int mask, struct stat* pstbuf) { int result; struct stat st; struct stat* pst = (pstbuf ? pstbuf : &st); struct fuse_context* pcxt; S3FS_PRN_DBG("[path=%s]", path); if(NULL == (pcxt = fuse_get_context())){ return -EIO; } if(0 != (result = get_object_attribute(path, pst))){ // If there is not the target file(object), result is -ENOENT. return result; } if(0 == pcxt->uid){ // root is allowed all accessing. return 0; } if(is_s3fs_uid && s3fs_uid == pcxt->uid){ // "uid" user is allowed all accessing. return 0; } if(F_OK == mask){ // if there is a file, always return allowed. return 0; } // for "uid", "gid" option uid_t obj_uid = (is_s3fs_uid ? s3fs_uid : pst->st_uid); gid_t obj_gid = (is_s3fs_gid ? s3fs_gid : pst->st_gid); // compare file mode and uid/gid + mask. mode_t mode; mode_t base_mask = S_IRWXO; if(is_s3fs_umask){ // If umask is set, all object attributes set ~umask. mode = ((S_IRWXU | S_IRWXG | S_IRWXO) & ~s3fs_umask); }else{ mode = pst->st_mode; } if(pcxt->uid == obj_uid){ base_mask |= S_IRWXU; } if(pcxt->gid == obj_gid){ base_mask |= S_IRWXG; } if(1 == is_uid_include_group(pcxt->uid, obj_gid)){ base_mask |= S_IRWXG; } mode &= base_mask; if(X_OK == (mask & X_OK)){ if(0 == (mode & (S_IXUSR | S_IXGRP | S_IXOTH))){ return -EPERM; } } if(W_OK == (mask & W_OK)){ if(0 == (mode & (S_IWUSR | S_IWGRP | S_IWOTH))){ return -EACCES; } } if(R_OK == (mask & R_OK)){ if(0 == (mode & (S_IRUSR | S_IRGRP | S_IROTH))){ return -EACCES; } } if(0 == mode){ return -EACCES; } return 0; } static int check_object_owner(const char* path, struct stat* pstbuf) { int result; struct stat st; struct stat* pst = (pstbuf ? pstbuf : &st); struct fuse_context* pcxt; S3FS_PRN_DBG("[path=%s]", path); if(NULL == (pcxt = fuse_get_context())){ return -EIO; } if(0 != (result = get_object_attribute(path, pst))){ // If there is not the target file(object), result is -ENOENT. return result; } // check owner if(0 == pcxt->uid){ // root is allowed all accessing. return 0; } if(is_s3fs_uid && s3fs_uid == pcxt->uid){ // "uid" user is allowed all accessing. return 0; } if(pcxt->uid == pst->st_uid){ return 0; } return -EPERM; } // // Check accessing the parent directories of the object by uid and gid. // static int check_parent_object_access(const char* path, int mask) { string parent; int result; S3FS_PRN_DBG("[path=%s]", path); if(0 == strcmp(path, "/") || 0 == strcmp(path, ".")){ // path is mount point. 
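        // [NOTE]
        // The mount point has no parent object inside the bucket and its
        // permissions were already validated at startup (see
        // set_mountpoint_attribute()), so the check can succeed at once.
        // For any other path, the loop below walks every ancestor with
        // X_OK (search permission), e.g. for "/a/b/c":
        //
        //     check_object_access("/a/b", X_OK, NULL)
        //     check_object_access("/a",   X_OK, NULL)
        //     check_object_access("/",    X_OK, NULL)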
return 0; } if(X_OK == (mask & X_OK)){ for(parent = mydirname(path); !parent.empty(); parent = mydirname(parent)){ if(parent == "."){ parent = "/"; } if(0 != (result = check_object_access(parent.c_str(), X_OK, NULL))){ return result; } if(parent == "/" || parent == "."){ break; } } } mask = (mask & ~X_OK); if(0 != mask){ parent = mydirname(path); if(parent == "."){ parent = "/"; } if(0 != (result = check_object_access(parent.c_str(), mask, NULL))){ return result; } } return 0; } // // ssevalue is MD5 for SSE-C type, or KMS id for SSE-KMS // bool get_object_sse_type(const char* path, sse_type_t& ssetype, string& ssevalue) { if(!path){ return false; } headers_t meta; if(0 != get_object_attribute(path, NULL, &meta)){ S3FS_PRN_ERR("Failed to get object(%s) headers", path); return false; } ssetype = SSE_DISABLE; ssevalue.erase(); for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ string key = (*iter).first; if(0 == strcasecmp(key.c_str(), "x-amz-server-side-encryption") && 0 == strcasecmp((*iter).second.c_str(), "AES256")){ ssetype = SSE_S3; }else if(0 == strcasecmp(key.c_str(), "x-amz-server-side-encryption-aws-kms-key-id")){ ssetype = SSE_KMS; ssevalue = (*iter).second; }else if(0 == strcasecmp(key.c_str(), "x-amz-server-side-encryption-customer-key-md5")){ ssetype = SSE_C; ssevalue = (*iter).second; } } return true; } static FdEntity* get_local_fent(const char* path, bool is_load) { struct stat stobj; FdEntity* ent; headers_t meta; S3FS_PRN_INFO2("[path=%s]", path); if(0 != get_object_attribute(path, &stobj, &meta)){ return NULL; } // open time_t mtime = (!S_ISREG(stobj.st_mode) || S_ISLNK(stobj.st_mode)) ? -1 : stobj.st_mtime; bool force_tmpfile = S_ISREG(stobj.st_mode) ? false : true; if(NULL == (ent = FdManager::get()->Open(path, &meta, stobj.st_size, mtime, force_tmpfile, true))){ S3FS_PRN_ERR("Could not open file. errno(%d)", errno); return NULL; } // load if(is_load && !ent->OpenAndLoadAll(&meta)){ S3FS_PRN_ERR("Could not load file. errno(%d)", errno); FdManager::get()->Close(ent); return NULL; } return ent; } /** * create or update s3 meta * ow_sse_flg is for over writing sse header by use_sse option. * @return fuse return code */ static int put_headers(const char* path, headers_t& meta, bool is_copy) { int result; S3fsCurl s3fscurl(true); struct stat buf; S3FS_PRN_INFO2("[path=%s]", path); // files larger than 5GB must be modified via the multipart interface // *** If there is not target object(a case of move command), // get_object_attribute() returns error with initializing buf. 
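    // [NOTE]
    // Metadata is updated by copying the object onto itself with
    // x-amz-metadata-directive: REPLACE. A single PUT Object - Copy
    // request is limited to 5GB on S3, so an object of FIVE_GB or more
    // must go through the multipart copy path below; since nocopyapi and
    // nomultipart both rule that path out, such objects fail with -EFBIG.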
(void)get_object_attribute(path, &buf); if(buf.st_size >= FIVE_GB){ // multipart if(nocopyapi || nomultipart){ return -EFBIG; // File too large } if(0 != (result = s3fscurl.MultipartHeadRequest(path, buf.st_size, meta, is_copy))){ return result; } }else{ if(0 != (result = s3fscurl.PutHeadRequest(path, meta, is_copy))){ return result; } } // [NOTE] // if path is 'dir/', it does not have cache(could not open file for directory stat) // if('/' != path[strlen(path) - 1]){ FdEntity* ent = NULL; if(NULL == (ent = FdManager::get()->ExistOpen(path, -1, !FdManager::IsCacheDir()))){ // no opened fd if(FdManager::IsCacheDir()){ // create cache file if be needed ent = FdManager::get()->Open(path, &meta, buf.st_size, -1, false, true); } } if(ent){ time_t mtime = get_mtime(meta); ent->SetMtime(mtime); FdManager::get()->Close(ent); } } return 0; } static int s3fs_getattr(const char* _path, struct stat* stbuf) { WTF8_ENCODE(path) int result; S3FS_PRN_INFO("[path=%s]", path); // check parent directory attribute. if(0 != (result = check_parent_object_access(path, X_OK))){ return result; } if(0 != (result = check_object_access(path, F_OK, stbuf))){ return result; } // If has already opened fd, the st_size should be instead. // (See: Issue 241) if(stbuf){ FdEntity* ent; if(NULL != (ent = FdManager::get()->ExistOpen(path))){ struct stat tmpstbuf; if(ent->GetStats(tmpstbuf)){ stbuf->st_size = tmpstbuf.st_size; } FdManager::get()->Close(ent); } stbuf->st_blksize = 4096; stbuf->st_blocks = get_blocks(stbuf->st_size); S3FS_PRN_DBG("[path=%s] uid=%u, gid=%u, mode=%04o", path, (unsigned int)(stbuf->st_uid), (unsigned int)(stbuf->st_gid), stbuf->st_mode); } S3FS_MALLOCTRIM(0); return result; } static int s3fs_readlink(const char* _path, char* buf, size_t size) { if(!_path || !buf || 0 == size){ return 0; } WTF8_ENCODE(path) string strValue; // check symblic link cache if(!StatCache::getStatCacheData()->GetSymlink(string(path), strValue)){ // not found in cache, then open the path FdEntity* ent; if(NULL == (ent = get_local_fent(path))){ S3FS_PRN_ERR("could not get fent(file=%s)", path); return -EIO; } // Get size off_t readsize; if(!ent->GetSize(readsize)){ S3FS_PRN_ERR("could not get file size(file=%s)", path); FdManager::get()->Close(ent); return -EIO; } if(static_cast(size) <= readsize){ readsize = size - 1; } // Read ssize_t ressize; if(0 > (ressize = ent->Read(buf, 0, readsize))){ S3FS_PRN_ERR("could not read file(file=%s, ressize=%jd)", path, (intmax_t)ressize); FdManager::get()->Close(ent); return static_cast(ressize); } buf[ressize] = '\0'; // close FdManager::get()->Close(ent); // check buf if it has space words. strValue = trim(string(buf)); // decode wtf8. This will always be shorter if(use_wtf8){ strValue = s3fs_wtf8_decode(strValue); } // add symblic link cache if(!StatCache::getStatCacheData()->AddSymlink(string(path), strValue)){ S3FS_PRN_ERR("failed to add symbolic link cache for %s", path); } } // copy result strncpy(buf, strValue.c_str(), size); S3FS_MALLOCTRIM(0); return 0; } static int do_create_bucket() { S3FS_PRN_INFO2("/"); FILE* ptmpfp; int tmpfd; if(endpoint == "us-east-1"){ ptmpfp = NULL; tmpfd = -1; }else{ if(NULL == (ptmpfp = tmpfile()) || -1 == (tmpfd = fileno(ptmpfp)) || 0 >= fprintf(ptmpfp, "\n" " %s\n" "", endpoint.c_str()) || 0 != fflush(ptmpfp) || -1 == fseek(ptmpfp, 0L, SEEK_SET)){ S3FS_PRN_ERR("failed to create temporary file. err(%d)", errno); if(ptmpfp){ fclose(ptmpfp); } return (0 == errno ? 
-EIO : -errno); } } headers_t meta; S3fsCurl s3fscurl(true); int res = s3fscurl.PutRequest("/", meta, tmpfd); if(res < 0){ long responseCode = s3fscurl.GetLastResponseCode(); if((responseCode == 400 || responseCode == 403) && S3fsCurl::IsSignatureV4()){ S3FS_PRN_ERR("Could not connect, so retry to connect by signature version 2."); S3fsCurl::SetSignatureV4(false); // retry to check s3fscurl.DestroyCurlHandle(); res = s3fscurl.PutRequest("/", meta, tmpfd); }else if(responseCode == 409){ // bucket already exists res = 0; } } if(ptmpfp != NULL){ fclose(ptmpfp); } return res; } // common function for creation of a plain object static int create_file_object(const char* path, mode_t mode, uid_t uid, gid_t gid) { S3FS_PRN_INFO2("[path=%s][mode=%04o]", path, mode); time_t now = time(NULL); headers_t meta; meta["Content-Type"] = S3fsCurl::LookupMimeType(string(path)); meta["x-amz-meta-uid"] = str(uid); meta["x-amz-meta-gid"] = str(gid); meta["x-amz-meta-mode"] = str(mode); meta["x-amz-meta-ctime"] = str(now); meta["x-amz-meta-mtime"] = str(now); S3fsCurl s3fscurl(true); return s3fscurl.PutRequest(path, meta, -1); // fd=-1 means for creating zero byte object. } static int s3fs_mknod(const char *_path, mode_t mode, dev_t rdev) { WTF8_ENCODE(path) int result; struct fuse_context* pcxt; S3FS_PRN_INFO("[path=%s][mode=%04o][dev=%ju]", path, mode, (uintmax_t)rdev); if(NULL == (pcxt = fuse_get_context())){ return -EIO; } if(0 != (result = create_file_object(path, mode, pcxt->uid, pcxt->gid))){ S3FS_PRN_ERR("could not create object for special file(result=%d)", result); return result; } StatCache::getStatCacheData()->DelStat(path); S3FS_MALLOCTRIM(0); return result; } static int s3fs_create(const char* _path, mode_t mode, struct fuse_file_info* fi) { WTF8_ENCODE(path) int result; struct fuse_context* pcxt; S3FS_PRN_INFO("[path=%s][mode=%04o][flags=0x%x]", path, mode, fi->flags); if(NULL == (pcxt = fuse_get_context())){ return -EIO; } // check parent directory attribute. if(0 != (result = check_parent_object_access(path, X_OK))){ return result; } result = check_object_access(path, W_OK, NULL); if(-ENOENT == result){ if(0 != (result = check_parent_object_access(path, W_OK))){ return result; } }else if(0 != result){ return result; } result = create_file_object(path, mode, pcxt->uid, pcxt->gid); StatCache::getStatCacheData()->DelStat(path); if(result != 0){ return result; } FdEntity* ent; headers_t meta; get_object_attribute(path, NULL, &meta, true, NULL, true); // no truncate cache if(NULL == (ent = FdManager::get()->Open(path, &meta, 0, -1, false, true))){ StatCache::getStatCacheData()->DelStat(path); return -EIO; } fi->fh = ent->GetFd(); S3FS_MALLOCTRIM(0); return 0; } static int create_directory_object(const char* path, mode_t mode, time_t time, uid_t uid, gid_t gid) { S3FS_PRN_INFO1("[path=%s][mode=%04o][time=%lld][uid=%u][gid=%u]", path, mode, static_cast(time), (unsigned int)uid, (unsigned int)gid); if(!path || '\0' == path[0]){ return -1; } string tpath = path; if('/' != tpath[tpath.length() - 1]){ tpath += "/"; } headers_t meta; meta["x-amz-meta-uid"] = str(uid); meta["x-amz-meta-gid"] = str(gid); meta["x-amz-meta-mode"] = str(mode); meta["x-amz-meta-ctime"] = str(time); meta["x-amz-meta-mtime"] = str(time); S3fsCurl s3fscurl; return s3fscurl.PutRequest(tpath.c_str(), meta, -1); // fd=-1 means for creating zero byte object. 
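    // [NOTE]
    // A directory is stored as a zero byte object whose key ends with
    // "/", with its attributes kept in user metadata. For example,
    // mkdir("/a/b", 0750) by uid/gid 1000 issues roughly:
    //
    //     PUT /a/b/
    //     x-amz-meta-mode:  488      (decimal for 0750)
    //     x-amz-meta-uid:   1000
    //     x-amz-meta-gid:   1000
    //     x-amz-meta-ctime: <epoch seconds>
    //     x-amz-meta-mtime: <epoch seconds>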
} static int s3fs_mkdir(const char* _path, mode_t mode) { WTF8_ENCODE(path) int result; struct fuse_context* pcxt; S3FS_PRN_INFO("[path=%s][mode=%04o]", path, mode); if(NULL == (pcxt = fuse_get_context())){ return -EIO; } // check parent directory attribute. if(0 != (result = check_parent_object_access(path, W_OK | X_OK))){ return result; } if(-ENOENT != (result = check_object_access(path, F_OK, NULL))){ if(0 == result){ result = -EEXIST; } return result; } result = create_directory_object(path, mode, time(NULL), pcxt->uid, pcxt->gid); StatCache::getStatCacheData()->DelStat(path); S3FS_MALLOCTRIM(0); return result; } static int s3fs_unlink(const char* _path) { WTF8_ENCODE(path) int result; S3FS_PRN_INFO("[path=%s]", path); if(0 != (result = check_parent_object_access(path, W_OK | X_OK))){ return result; } S3fsCurl s3fscurl; result = s3fscurl.DeleteRequest(path); FdManager::DeleteCacheFile(path); StatCache::getStatCacheData()->DelStat(path); StatCache::getStatCacheData()->DelSymlink(path); S3FS_MALLOCTRIM(0); return result; } static int directory_empty(const char* path) { int result; S3ObjList head; if((result = list_bucket(path, head, "/", true)) != 0){ S3FS_PRN_ERR("list_bucket returns error."); return result; } if(!head.IsEmpty()){ return -ENOTEMPTY; } return 0; } static int s3fs_rmdir(const char* _path) { WTF8_ENCODE(path) int result; string strpath; struct stat stbuf; S3FS_PRN_INFO("[path=%s]", path); if(0 != (result = check_parent_object_access(path, W_OK | X_OK))){ return result; } // directory must be empty if(directory_empty(path) != 0){ return -ENOTEMPTY; } strpath = path; if('/' != strpath[strpath.length() - 1]){ strpath += "/"; } S3fsCurl s3fscurl; result = s3fscurl.DeleteRequest(strpath.c_str()); s3fscurl.DestroyCurlHandle(); StatCache::getStatCacheData()->DelStat(strpath.c_str()); // double check for old version(before 1.63) // The old version makes "dir" object, newer version makes "dir/". // A case, there is only "dir", the first removing object is "dir/". // Then "dir/" is not exists, but curl_delete returns 0. // So need to check "dir" and should be removed it. if('/' == strpath[strpath.length() - 1]){ strpath = strpath.substr(0, strpath.length() - 1); } if(0 == get_object_attribute(strpath.c_str(), &stbuf, NULL, false)){ if(S_ISDIR(stbuf.st_mode)){ // Found "dir" object. result = s3fscurl.DeleteRequest(strpath.c_str()); s3fscurl.DestroyCurlHandle(); StatCache::getStatCacheData()->DelStat(strpath.c_str()); } } // If there is no "dir" and "dir/" object(this case is made by s3cmd/s3sync), // the cache key is "dir/". So we get error only once(delete "dir/"). // check for "_$folder$" object. // This processing is necessary for other S3 clients compatibility. 
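    // [NOTE]
    // Clients that predate the "dir/" convention mark a directory "dir"
    // with a placeholder object named "dir_$folder$". If that object
    // were left behind, the directory would reappear on the next
    // listing, so it is removed here as well (only when compatibility
    // directory support is enabled, see support_compat_dir).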
if(is_special_name_folder_object(strpath.c_str())){ strpath += "_$folder$"; result = s3fscurl.DeleteRequest(strpath.c_str()); } S3FS_MALLOCTRIM(0); return result; } static int s3fs_symlink(const char* _from, const char* _to) { WTF8_ENCODE(from) WTF8_ENCODE(to) int result; struct fuse_context* pcxt; S3FS_PRN_INFO("[from=%s][to=%s]", from, to); if(NULL == (pcxt = fuse_get_context())){ return -EIO; } if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){ return result; } if(-ENOENT != (result = check_object_access(to, F_OK, NULL))){ if(0 == result){ result = -EEXIST; } return result; } time_t now = time(NULL); headers_t headers; headers["Content-Type"] = string("application/octet-stream"); // Static headers["x-amz-meta-mode"] = str(S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO); headers["x-amz-meta-ctime"] = str(now); headers["x-amz-meta-mtime"] = str(now); headers["x-amz-meta-uid"] = str(pcxt->uid); headers["x-amz-meta-gid"] = str(pcxt->gid); // open tmpfile FdEntity* ent; if(NULL == (ent = FdManager::get()->Open(to, &headers, 0, -1, true, true))){ S3FS_PRN_ERR("could not open tmpfile(errno=%d)", errno); return -errno; } // write(without space words) string strFrom = trim(string(from)); ssize_t from_size = static_cast(strFrom.length()); if(from_size != ent->Write(strFrom.c_str(), 0, from_size)){ S3FS_PRN_ERR("could not write tmpfile(errno=%d)", errno); FdManager::get()->Close(ent); return -errno; } // upload if(0 != (result = ent->Flush(true))){ S3FS_PRN_WARN("could not upload tmpfile(result=%d)", result); } FdManager::get()->Close(ent); StatCache::getStatCacheData()->DelStat(to); if(!StatCache::getStatCacheData()->AddSymlink(string(to), strFrom)){ S3FS_PRN_ERR("failed to add symbolic link cache for %s", to); } S3FS_MALLOCTRIM(0); return result; } static int rename_object(const char* from, const char* to) { int result; string s3_realpath; headers_t meta; S3FS_PRN_INFO1("[from=%s][to=%s]", from , to); if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){ // not permit writing "to" object parent dir. return result; } if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){ // not permit removing "from" object parent dir. return result; } if(0 != (result = get_object_attribute(from, NULL, &meta))){ return result; } s3_realpath = get_realpath(from); meta["x-amz-copy-source"] = urlEncode(service_path + bucket + s3_realpath); meta["Content-Type"] = S3fsCurl::LookupMimeType(string(to)); meta["x-amz-metadata-directive"] = "REPLACE"; if(0 != (result = put_headers(to, meta, true))){ return result; } FdManager::get()->Rename(from, to); result = s3fs_unlink(from); StatCache::getStatCacheData()->DelStat(to); return result; } static int rename_object_nocopy(const char* from, const char* to) { int result; S3FS_PRN_INFO1("[from=%s][to=%s]", from , to); if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){ // not permit writing "to" object parent dir. return result; } if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){ // not permit removing "from" object parent dir. 
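        // [NOTE]
        // This nocopy variant is used when the server side copy API is
        // ruled out (nocopyapi/norenameapi options): it downloads the
        // whole object into the local file cache and re-uploads it under
        // the new name, so its cost grows with the object size, whereas
        // rename_object() is a metadata only server side copy.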
return result; } // open & load FdEntity* ent; if(NULL == (ent = get_local_fent(from, true))){ S3FS_PRN_ERR("could not open and read file(%s)", from); return -EIO; } // Set header if(!ent->SetContentType(to)){ S3FS_PRN_ERR("could not set content-type for %s", to); return -EIO; } // upload if(0 != (result = ent->RowFlush(to, true))){ S3FS_PRN_ERR("could not upload file(%s): result=%d", to, result); FdManager::get()->Close(ent); return result; } FdManager::get()->Close(ent); // Remove file result = s3fs_unlink(from); // Stats StatCache::getStatCacheData()->DelStat(to); StatCache::getStatCacheData()->DelStat(from); return result; } static int rename_large_object(const char* from, const char* to) { int result; struct stat buf; headers_t meta; S3FS_PRN_INFO1("[from=%s][to=%s]", from , to); if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){ // not permit writing "to" object parent dir. return result; } if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){ // not permit removing "from" object parent dir. return result; } if(0 != (result = get_object_attribute(from, &buf, &meta, false))){ return result; } S3fsCurl s3fscurl(true); if(0 != (result = s3fscurl.MultipartRenameRequest(from, to, meta, buf.st_size))){ return result; } s3fscurl.DestroyCurlHandle(); StatCache::getStatCacheData()->DelStat(to); return s3fs_unlink(from); } static int clone_directory_object(const char* from, const char* to) { int result = -1; struct stat stbuf; S3FS_PRN_INFO1("[from=%s][to=%s]", from, to); // get target's attributes if(0 != (result = get_object_attribute(from, &stbuf))){ return result; } result = create_directory_object(to, stbuf.st_mode, stbuf.st_mtime, stbuf.st_uid, stbuf.st_gid); StatCache::getStatCacheData()->DelStat(to); return result; } static int rename_directory(const char* from, const char* to) { S3ObjList head; s3obj_list_t headlist; string strfrom = from ? from : ""; // from is without "/". string strto = to ? to : ""; // to is without "/" too. string basepath = strfrom + "/"; string newpath; // should be from name(not used) string nowcache; // now cache path(not used) dirtype DirType; bool normdir; MVNODE* mn_head = NULL; MVNODE* mn_tail = NULL; MVNODE* mn_cur; struct stat stbuf; int result; bool is_dir; S3FS_PRN_INFO1("[from=%s][to=%s]", from, to); // // Initiate and Add base directory into MVNODE struct. // strto += "/"; if(0 == chk_dir_object_type(from, newpath, strfrom, nowcache, NULL, &DirType) && DIRTYPE_UNKNOWN != DirType){ if(DIRTYPE_NOOBJ != DirType){ normdir = false; }else{ normdir = true; strfrom = from; // from directory is not removed, but from directory attr is needed. } if(NULL == (add_mvnode(&mn_head, &mn_tail, strfrom.c_str(), strto.c_str(), true, normdir))){ return -ENOMEM; } }else{ // Something wrong about "from" directory. } // // get a list of all the objects // // No delimiter is specified, the result(head) is all object keys. // (CommonPrefixes is empty, but all object is listed in Key.) if(0 != (result = list_bucket(basepath.c_str(), head, NULL))){ S3FS_PRN_ERR("list_bucket returns error."); return result; } head.GetNameList(headlist); // get name without "/". S3ObjList::MakeHierarchizedList(headlist, false); // add hierarchized dir. s3obj_list_t::const_iterator liter; for(liter = headlist.begin(); headlist.end() != liter; ++liter){ // make "from" and "to" object name. string from_name = basepath + (*liter); string to_name = strto + (*liter); string etag = head.GetETag((*liter).c_str()); // Check subdirectory. 
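        // [NOTE]
        // rename_directory() proceeds in passes over the MVNODE list
        // being built in this loop: (1) list and classify every key
        // under the source prefix, (2) clone all directory objects to
        // the new prefix, (3) copy or re-upload every regular file,
        // then (4) remove the old directory objects bottom up, so that
        // children disappear before their parents.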
StatCache::getStatCacheData()->HasStat(from_name, etag.c_str()); // Check ETag if(0 != get_object_attribute(from_name.c_str(), &stbuf, NULL)){ S3FS_PRN_WARN("failed to get %s object attribute.", from_name.c_str()); continue; } if(S_ISDIR(stbuf.st_mode)){ is_dir = true; if(0 != chk_dir_object_type(from_name.c_str(), newpath, from_name, nowcache, NULL, &DirType) || DIRTYPE_UNKNOWN == DirType){ S3FS_PRN_WARN("failed to get %s%s object directory type.", basepath.c_str(), (*liter).c_str()); continue; } if(DIRTYPE_NOOBJ != DirType){ normdir = false; }else{ normdir = true; from_name = basepath + (*liter); // from directory is not removed, but from directory attr is needed. } }else{ is_dir = false; normdir = false; } // push this one onto the stack if(NULL == add_mvnode(&mn_head, &mn_tail, from_name.c_str(), to_name.c_str(), is_dir, normdir)){ return -ENOMEM; } } // // rename // // rename directory objects. for(mn_cur = mn_head; mn_cur; mn_cur = mn_cur->next){ if(mn_cur->is_dir && mn_cur->old_path && '\0' != mn_cur->old_path[0]){ if(0 != (result = clone_directory_object(mn_cur->old_path, mn_cur->new_path))){ S3FS_PRN_ERR("clone_directory_object returned an error(%d)", result); free_mvnodes(mn_head); return -EIO; } } } // iterate over the list - copy the files with rename_object // does a safe copy - copies first and then deletes old for(mn_cur = mn_head; mn_cur; mn_cur = mn_cur->next){ if(!mn_cur->is_dir){ // TODO: call s3fs_rename instead? if(!nocopyapi && !norenameapi){ result = rename_object(mn_cur->old_path, mn_cur->new_path); }else{ result = rename_object_nocopy(mn_cur->old_path, mn_cur->new_path); } if(0 != result){ S3FS_PRN_ERR("rename_object returned an error(%d)", result); free_mvnodes(mn_head); return -EIO; } } } // Iterate over old the directories, bottoms up and remove for(mn_cur = mn_tail; mn_cur; mn_cur = mn_cur->prev){ if(mn_cur->is_dir && mn_cur->old_path && '\0' != mn_cur->old_path[0]){ if(!(mn_cur->is_normdir)){ if(0 != (result = s3fs_rmdir(mn_cur->old_path))){ S3FS_PRN_ERR("s3fs_rmdir returned an error(%d)", result); free_mvnodes(mn_head); return -EIO; } }else{ // cache clear. StatCache::getStatCacheData()->DelStat(mn_cur->old_path); } } } free_mvnodes(mn_head); return 0; } static int s3fs_rename(const char* _from, const char* _to) { WTF8_ENCODE(from) WTF8_ENCODE(to) struct stat buf; int result; S3FS_PRN_INFO("[from=%s][to=%s]", from, to); if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){ // not permit writing "to" object parent dir. return result; } if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){ // not permit removing "from" object parent dir. 
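        // [NOTE]
        // After the permission checks, the rename is dispatched by type
        // and size: directories go through rename_directory(), regular
        // files of singlepart_copy_limit (default 512MB) or more use a
        // multipart server side copy, and smaller files use a single
        // copy request (or a download/re-upload when the copy API is
        // disabled by nocopyapi/norenameapi).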
return result; } if(0 != (result = get_object_attribute(from, &buf, NULL))){ return result; } // flush pending writes if file is open FdEntity *entity = FdManager::get()->ExistOpen(from); if(entity != NULL){ if(0 != (result = entity->Flush(true))){ S3FS_PRN_ERR("could not upload file(%s): result=%d", to, result); return result; } StatCache::getStatCacheData()->DelStat(from); FdManager::get()->Close(entity); entity = NULL; } // files larger than 5GB must be modified via the multipart interface if(S_ISDIR(buf.st_mode)){ result = rename_directory(from, to); }else if(!nomultipart && buf.st_size >= singlepart_copy_limit){ result = rename_large_object(from, to); }else{ if(!nocopyapi && !norenameapi){ result = rename_object(from, to); }else{ result = rename_object_nocopy(from, to); } } S3FS_MALLOCTRIM(0); return result; } static int s3fs_link(const char* _from, const char* _to) { WTF8_ENCODE(from) WTF8_ENCODE(to) S3FS_PRN_INFO("[from=%s][to=%s]", from, to); return -ENOTSUP; } static int s3fs_chmod(const char* _path, mode_t mode) { WTF8_ENCODE(path) int result; string strpath; string newpath; string nowcache; headers_t meta; struct stat stbuf; dirtype nDirType = DIRTYPE_UNKNOWN; S3FS_PRN_INFO("[path=%s][mode=%04o]", path, mode); if(0 == strcmp(path, "/")){ S3FS_PRN_ERR("Could not change mode for mount point."); return -EIO; } if(0 != (result = check_parent_object_access(path, X_OK))){ return result; } if(0 != (result = check_object_owner(path, &stbuf))){ return result; } if(S_ISDIR(stbuf.st_mode)){ result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType); }else{ strpath = path; nowcache = strpath; result = get_object_attribute(strpath.c_str(), NULL, &meta); } if(0 != result){ return result; } if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){ // Should rebuild directory object(except new type) // Need to remove old dir("dir" etc) and make new dir("dir/") // At first, remove directory old object if(0 != (result = remove_old_type_dir(strpath, nDirType))){ return result; } StatCache::getStatCacheData()->DelStat(nowcache); // Make new directory object("dir/") if(0 != (result = create_directory_object(newpath.c_str(), mode, stbuf.st_mtime, stbuf.st_uid, stbuf.st_gid))){ return result; } }else{ // normal object or directory object of newer version meta["x-amz-meta-ctime"] = str(time(NULL)); meta["x-amz-meta-mode"] = str(mode); meta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str())); meta["x-amz-metadata-directive"] = "REPLACE"; if(put_headers(strpath.c_str(), meta, true) != 0){ return -EIO; } StatCache::getStatCacheData()->DelStat(nowcache); // check opened file handle. // // If we have already opened file handle, should set mode to it. // And new mode is set when the file handle is closed. // FdEntity* ent; if(NULL != (ent = FdManager::get()->ExistOpen(path))){ ent->UpdateCtime(); ent->SetMode(mode); // Set new mode to opened fd. 
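            // [NOTE]
            // Storing the new mode on the open FdEntity matters: without
            // it, a later flush of pending writes would re-upload the
            // object with the stale x-amz-meta-mode from its cached
            // headers and silently undo this chmod.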
FdManager::get()->Close(ent); } } S3FS_MALLOCTRIM(0); return 0; } static int s3fs_chmod_nocopy(const char* _path, mode_t mode) { WTF8_ENCODE(path) int result; string strpath; string newpath; string nowcache; struct stat stbuf; dirtype nDirType = DIRTYPE_UNKNOWN; S3FS_PRN_INFO1("[path=%s][mode=%04o]", path, mode); if(0 == strcmp(path, "/")){ S3FS_PRN_ERR("Could not change mode for mount point."); return -EIO; } if(0 != (result = check_parent_object_access(path, X_OK))){ return result; } if(0 != (result = check_object_owner(path, &stbuf))){ return result; } // Get attributes if(S_ISDIR(stbuf.st_mode)){ result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType); }else{ strpath = path; nowcache = strpath; result = get_object_attribute(strpath.c_str(), NULL, NULL); } if(0 != result){ return result; } if(S_ISDIR(stbuf.st_mode)){ // Should rebuild all directory object // Need to remove old dir("dir" etc) and make new dir("dir/") // At first, remove directory old object if(0 != (result = remove_old_type_dir(strpath, nDirType))){ return result; } StatCache::getStatCacheData()->DelStat(nowcache); // Make new directory object("dir/") if(0 != (result = create_directory_object(newpath.c_str(), mode, stbuf.st_mtime, stbuf.st_uid, stbuf.st_gid))){ return result; } }else{ // normal object or directory object of newer version // open & load FdEntity* ent; if(NULL == (ent = get_local_fent(strpath.c_str(), true))){ S3FS_PRN_ERR("could not open and read file(%s)", strpath.c_str()); return -EIO; } ent->SetCtime(time(NULL)); // Change file mode ent->SetMode(mode); // upload if(0 != (result = ent->Flush(true))){ S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result); FdManager::get()->Close(ent); return result; } FdManager::get()->Close(ent); StatCache::getStatCacheData()->DelStat(nowcache); } S3FS_MALLOCTRIM(0); return result; } static int s3fs_chown(const char* _path, uid_t uid, gid_t gid) { WTF8_ENCODE(path) int result; string strpath; string newpath; string nowcache; headers_t meta; struct stat stbuf; dirtype nDirType = DIRTYPE_UNKNOWN; S3FS_PRN_INFO("[path=%s][uid=%u][gid=%u]", path, (unsigned int)uid, (unsigned int)gid); if(0 == strcmp(path, "/")){ S3FS_PRN_ERR("Could not change owner for mount point."); return -EIO; } if(0 != (result = check_parent_object_access(path, X_OK))){ return result; } if(0 != (result = check_object_owner(path, &stbuf))){ return result; } if((uid_t)(-1) == uid){ uid = stbuf.st_uid; } if((gid_t)(-1) == gid){ gid = stbuf.st_gid; } if(S_ISDIR(stbuf.st_mode)){ result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType); }else{ strpath = path; nowcache = strpath; result = get_object_attribute(strpath.c_str(), NULL, &meta); } if(0 != result){ return result; } if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){ // Should rebuild directory object(except new type) // Need to remove old dir("dir" etc) and make new dir("dir/") // At first, remove directory old object if(0 != (result = remove_old_type_dir(strpath, nDirType))){ return result; } StatCache::getStatCacheData()->DelStat(nowcache); // Make new directory object("dir/") if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, stbuf.st_mtime, uid, gid))){ return result; } }else{ meta["x-amz-meta-ctime"] = str(time(NULL)); meta["x-amz-meta-uid"] = str(uid); meta["x-amz-meta-gid"] = str(gid); meta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str())); meta["x-amz-metadata-directive"] = "REPLACE"; 
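        // [NOTE]
        // This is the usual S3 idiom for changing metadata in place:
        // the object is copied onto itself (x-amz-copy-source points at
        // its own key) with x-amz-metadata-directive: REPLACE and the
        // new x-amz-meta-uid/x-amz-meta-gid values, so no object data
        // is transferred. The (uid_t)-1/(gid_t)-1 checks above follow
        // chown(2): -1 means "leave this id unchanged".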
    if(put_headers(strpath.c_str(), meta, true) != 0){
      return -EIO;
    }
    StatCache::getStatCacheData()->DelStat(nowcache);
  }
  S3FS_MALLOCTRIM(0);

  return 0;
}

static int s3fs_chown_nocopy(const char* _path, uid_t uid, gid_t gid)
{
  WTF8_ENCODE(path)
  int result;
  string strpath;
  string newpath;
  string nowcache;
  struct stat stbuf;
  dirtype nDirType = DIRTYPE_UNKNOWN;

  S3FS_PRN_INFO1("[path=%s][uid=%u][gid=%u]", path, (unsigned int)uid, (unsigned int)gid);

  if(0 == strcmp(path, "/")){
    S3FS_PRN_ERR("Could not change owner for mount point.");
    return -EIO;
  }
  if(0 != (result = check_parent_object_access(path, X_OK))){
    return result;
  }
  if(0 != (result = check_object_owner(path, &stbuf))){
    return result;
  }

  if((uid_t)(-1) == uid){
    uid = stbuf.st_uid;
  }
  if((gid_t)(-1) == gid){
    gid = stbuf.st_gid;
  }

  // Get attributes
  if(S_ISDIR(stbuf.st_mode)){
    result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType);
  }else{
    strpath  = path;
    nowcache = strpath;
    result   = get_object_attribute(strpath.c_str(), NULL, NULL);
  }
  if(0 != result){
    return result;
  }

  if(S_ISDIR(stbuf.st_mode)){
    // Should rebuild the whole directory object
    // Need to remove the old dir("dir" etc) and make a new dir("dir/")

    // At first, remove the old directory object
    if(0 != (result = remove_old_type_dir(strpath, nDirType))){
      return result;
    }
    StatCache::getStatCacheData()->DelStat(nowcache);

    // Make new directory object("dir/")
    if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, stbuf.st_mtime, uid, gid))){
      return result;
    }
  }else{
    // normal object or directory object of newer version

    // open & load
    FdEntity* ent;
    if(NULL == (ent = get_local_fent(strpath.c_str(), true))){
      S3FS_PRN_ERR("could not open and read file(%s)", strpath.c_str());
      return -EIO;
    }
    ent->SetCtime(time(NULL));

    // Change owner
    ent->SetUId(uid);
    ent->SetGId(gid);

    // upload
    if(0 != (result = ent->Flush(true))){
      S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result);
      FdManager::get()->Close(ent);
      return result;
    }
    FdManager::get()->Close(ent);

    StatCache::getStatCacheData()->DelStat(nowcache);
  }
  S3FS_MALLOCTRIM(0);

  return result;
}

static int s3fs_utimens(const char* _path, const struct timespec ts[2])
{
  WTF8_ENCODE(path)
  int result;
  string strpath;
  string newpath;
  string nowcache;
  headers_t meta;
  struct stat stbuf;
  dirtype nDirType = DIRTYPE_UNKNOWN;

  S3FS_PRN_INFO("[path=%s][mtime=%lld]", path, static_cast<long long>(ts[1].tv_sec));

  if(0 == strcmp(path, "/")){
    S3FS_PRN_ERR("Could not change mtime for mount point.");
    return -EIO;
  }
  if(0 != (result = check_parent_object_access(path, X_OK))){
    return result;
  }
  if(0 != (result = check_object_access(path, W_OK, &stbuf))){
    if(0 != check_object_owner(path, &stbuf)){
      return result;
    }
  }

  if(S_ISDIR(stbuf.st_mode)){
    result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType);
  }else{
    strpath  = path;
    nowcache = strpath;
    result   = get_object_attribute(strpath.c_str(), NULL, &meta);
  }
  if(0 != result){
    return result;
  }

  if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){
    // Should rebuild the directory object(except new type)
    // Need to remove the old dir("dir" etc) and make a new dir("dir/")

    // At first, remove the old directory object
    if(0 != (result = remove_old_type_dir(strpath, nDirType))){
      return result;
    }
    StatCache::getStatCacheData()->DelStat(nowcache);

    // Make new directory object("dir/")
    if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, ts[1].tv_sec, stbuf.st_uid, stbuf.st_gid))){
      return result;
    }
  }else{
    meta["x-amz-meta-mtime"]         = str(ts[1].tv_sec);
    meta["x-amz-copy-source"]        = urlEncode(service_path + bucket + get_realpath(strpath.c_str()));
    meta["x-amz-metadata-directive"] = "REPLACE";

    if(put_headers(strpath.c_str(), meta, true) != 0){
      return -EIO;
    }
    StatCache::getStatCacheData()->DelStat(nowcache);
  }
  S3FS_MALLOCTRIM(0);

  return 0;
}

static int s3fs_utimens_nocopy(const char* _path, const struct timespec ts[2])
{
  WTF8_ENCODE(path)
  int result;
  string strpath;
  string newpath;
  string nowcache;
  struct stat stbuf;
  dirtype nDirType = DIRTYPE_UNKNOWN;

  S3FS_PRN_INFO1("[path=%s][mtime=%lld]", path, static_cast<long long>(ts[1].tv_sec));

  if(0 == strcmp(path, "/")){
    S3FS_PRN_ERR("Could not change mtime for mount point.");
    return -EIO;
  }
  if(0 != (result = check_parent_object_access(path, X_OK))){
    return result;
  }
  if(0 != (result = check_object_access(path, W_OK, &stbuf))){
    if(0 != check_object_owner(path, &stbuf)){
      return result;
    }
  }

  // Get attributes
  if(S_ISDIR(stbuf.st_mode)){
    result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType);
  }else{
    strpath  = path;
    nowcache = strpath;
    result   = get_object_attribute(strpath.c_str(), NULL, NULL);
  }
  if(0 != result){
    return result;
  }

  if(S_ISDIR(stbuf.st_mode)){
    // Should rebuild the whole directory object
    // Need to remove the old dir("dir" etc) and make a new dir("dir/")

    // At first, remove the old directory object
    if(0 != (result = remove_old_type_dir(strpath, nDirType))){
      return result;
    }
    StatCache::getStatCacheData()->DelStat(nowcache);

    // Make new directory object("dir/")
    if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, ts[1].tv_sec, stbuf.st_uid, stbuf.st_gid))){
      return result;
    }
  }else{
    // normal object or directory object of newer version

    // open & load
    FdEntity* ent;
    if(NULL == (ent = get_local_fent(strpath.c_str(), true))){
      S3FS_PRN_ERR("could not open and read file(%s)", strpath.c_str());
      return -EIO;
    }

    // set mtime
    if(0 != (result = ent->SetMtime(ts[1].tv_sec))){
      S3FS_PRN_ERR("could not set mtime to file(%s): result=%d", strpath.c_str(), result);
      FdManager::get()->Close(ent);
      return result;
    }

    // upload
    if(0 != (result = ent->Flush(true))){
      S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result);
      FdManager::get()->Close(ent);
      return result;
    }
    FdManager::get()->Close(ent);

    StatCache::getStatCacheData()->DelStat(nowcache);
  }
  S3FS_MALLOCTRIM(0);

  return result;
}

static int s3fs_truncate(const char* _path, off_t size)
{
  WTF8_ENCODE(path)
  int result;
  headers_t meta;
  FdEntity* ent = NULL;

  S3FS_PRN_INFO("[path=%s][size=%lld]", path, static_cast<long long>(size));

  if(size < 0){
    size = 0;
  }

  if(0 != (result = check_parent_object_access(path, X_OK))){
    return result;
  }
  if(0 != (result = check_object_access(path, W_OK, NULL))){
    return result;
  }

  // Get file information
  if(0 == (result = get_object_attribute(path, NULL, &meta))){
    // Exists -> Get file(with size)
    if(NULL == (ent = FdManager::get()->Open(path, &meta, size, -1, false, true))){
      S3FS_PRN_ERR("could not open file(%s): errno=%d", path, errno);
      return -EIO;
    }
    if(0 != (result = ent->Load(0, size))){
      S3FS_PRN_ERR("could not download file(%s): result=%d", path, result);
      FdManager::get()->Close(ent);
      return result;
    }
  }else{
    // Not found -> Make tmpfile(with size)

    struct fuse_context* pcxt;
    if(NULL == (pcxt = fuse_get_context())){
      return -EIO;
    }
    time_t now = time(NULL);
    meta["Content-Type"]     = string("application/octet-stream"); // Static
    // a truncated object is a regular file, so mark it S_IFREG
    // (this previously used S_IFLNK, which mislabeled the object as a symlink)
    meta["x-amz-meta-mode"]  = str(S_IFREG | S_IRWXU | S_IRWXG | S_IRWXO);
    meta["x-amz-meta-ctime"] = str(now);
    meta["x-amz-meta-mtime"] = str(now);
    meta["x-amz-meta-uid"]   = str(pcxt->uid);
    meta["x-amz-meta-gid"]   =
                               str(pcxt->gid);

    if(NULL == (ent = FdManager::get()->Open(path, &meta, size, -1, true, true))){
      S3FS_PRN_ERR("could not open file(%s): errno=%d", path, errno);
      return -EIO;
    }
  }

  // upload
  if(0 != (result = ent->Flush(true))){
    S3FS_PRN_ERR("could not upload file(%s): result=%d", path, result);
    FdManager::get()->Close(ent);
    return result;
  }
  FdManager::get()->Close(ent);

  StatCache::getStatCacheData()->DelStat(path);
  S3FS_MALLOCTRIM(0);

  return result;
}

static int s3fs_open(const char* _path, struct fuse_file_info* fi)
{
  WTF8_ENCODE(path)
  int result;
  struct stat st;
  bool needs_flush = false;

  S3FS_PRN_INFO("[path=%s][flags=0x%x]", path, fi->flags);

  // Drop the cached stat so a fresh one is fetched.
  // (If the object's stat has changed, this refreshes it, so s3fs always
  // works from a current stat whenever it opens the object.)
  if(StatCache::getStatCacheData()->HasStat(path)){
    // flush any dirty data so that subsequent stat gets correct size
    if((result = s3fs_flush(_path, fi)) != 0){
      S3FS_PRN_ERR("could not flush(%s): result=%d", path, result);
    }
    StatCache::getStatCacheData()->DelStat(path);
  }

  int mask = (O_RDONLY != (fi->flags & O_ACCMODE) ? W_OK : R_OK);
  if(0 != (result = check_parent_object_access(path, X_OK))){
    return result;
  }

  result = check_object_access(path, mask, &st);
  if(-ENOENT == result){
    if(0 != (result = check_parent_object_access(path, W_OK))){
      return result;
    }
  }else if(0 != result){
    return result;
  }

  if((unsigned int)fi->flags & O_TRUNC){
    if(0 != st.st_size){
      st.st_size = 0;
      needs_flush = true;
    }
  }
  if(!S_ISREG(st.st_mode) || S_ISLNK(st.st_mode)){
    st.st_mtime = -1;
  }

  FdEntity*   ent;
  headers_t   meta;
  get_object_attribute(path, NULL, &meta, true, NULL, true);    // no truncate cache
  if(NULL == (ent = FdManager::get()->Open(path, &meta, st.st_size, st.st_mtime, false, true))){
    StatCache::getStatCacheData()->DelStat(path);
    return -EIO;
  }

  if (needs_flush){
    if(0 != (result = ent->RowFlush(path, true))){
      S3FS_PRN_ERR("could not upload file(%s): result=%d", path, result);
      FdManager::get()->Close(ent);
      StatCache::getStatCacheData()->DelStat(path);
      return result;
    }
  }

  fi->fh = ent->GetFd();
  S3FS_MALLOCTRIM(0);

  return 0;
}

static int s3fs_read(const char* _path, char* buf, size_t size, off_t offset, struct fuse_file_info* fi)
{
  WTF8_ENCODE(path)
  ssize_t res;

  S3FS_PRN_DBG("[path=%s][size=%zu][offset=%lld][fd=%llu]", path, size, static_cast<long long>(offset), (unsigned long long)(fi->fh));

  FdEntity* ent;
  if(NULL == (ent = FdManager::get()->ExistOpen(path, static_cast<int>(fi->fh)))){
    S3FS_PRN_ERR("could not find opened fd(%s)", path);
    return -EIO;
  }
  if(ent->GetFd() != static_cast<int>(fi->fh)){
    S3FS_PRN_WARN("different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fi->fh));
  }

  // check real file size
  off_t realsize = 0;
  if(!ent->GetSize(realsize) || 0 == realsize){
    S3FS_PRN_DBG("file size is 0, so break to read.");
    FdManager::get()->Close(ent);
    return 0;
  }

  if(0 > (res = ent->Read(buf, offset, size, false))){
    S3FS_PRN_WARN("failed to read file(%s). result=%jd", path, (intmax_t)res);
  }
  FdManager::get()->Close(ent);

  return static_cast<int>(res);
}

static int s3fs_write(const char* _path, const char* buf, size_t size, off_t offset, struct fuse_file_info* fi)
{
  WTF8_ENCODE(path)
  ssize_t res;

  S3FS_PRN_DBG("[path=%s][size=%zu][offset=%lld][fd=%llu]", path, size, static_cast<long long>(offset), (unsigned long long)(fi->fh));

  FdEntity* ent;
  if(NULL == (ent = FdManager::get()->ExistOpen(path, static_cast<int>(fi->fh)))){
    S3FS_PRN_ERR("could not find opened fd(%s)", path);
    return -EIO;
  }
  if(ent->GetFd() != static_cast<int>(fi->fh)){
    S3FS_PRN_WARN("different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fi->fh));
  }
  if(0 > (res = ent->Write(buf, offset, size))){
    S3FS_PRN_WARN("failed to write file(%s). result=%jd", path, (intmax_t)res);
  }
  FdManager::get()->Close(ent);

  return static_cast<int>(res);
}

static int s3fs_statfs(const char* _path, struct statvfs* stbuf)
{
  // WTF8_ENCODE(path)
  // Report a fixed 256TiB of capacity:
  // f_blocks(0x1000000) * f_bsize(0x1000000 = 16MiB) = 2^48 bytes = 256TiB
  stbuf->f_bsize   = 0x1000000;
  stbuf->f_blocks  = 0x1000000;
  stbuf->f_bfree   = 0x1000000;
  stbuf->f_bavail  = 0x1000000;
  stbuf->f_namemax = NAME_MAX;
  return 0;
}

static int s3fs_flush(const char* _path, struct fuse_file_info* fi)
{
  WTF8_ENCODE(path)
  int result;

  S3FS_PRN_INFO("[path=%s][fd=%llu]", path, (unsigned long long)(fi->fh));

  int mask = (O_RDONLY != (fi->flags & O_ACCMODE) ? W_OK : R_OK);
  if(0 != (result = check_parent_object_access(path, X_OK))){
    return result;
  }
  result = check_object_access(path, mask, NULL);
  if(-ENOENT == result){
    if(0 != (result = check_parent_object_access(path, W_OK))){
      return result;
    }
  }else if(0 != result){
    return result;
  }

  FdEntity* ent;
  if(NULL != (ent = FdManager::get()->ExistOpen(path, static_cast<int>(fi->fh)))){
    ent->UpdateMtime();
    result = ent->Flush(false);
    FdManager::get()->Close(ent);
  }
  S3FS_MALLOCTRIM(0);

  return result;
}

// [NOTICE]
// fi->fh is assumed to be a valid file descriptor.
//
static int s3fs_fsync(const char* _path, int datasync, struct fuse_file_info* fi)
{
  WTF8_ENCODE(path)
  int result = 0;

  S3FS_PRN_INFO("[path=%s][fd=%llu]", path, (unsigned long long)(fi->fh));

  FdEntity* ent;
  if(NULL != (ent = FdManager::get()->ExistOpen(path, static_cast<int>(fi->fh)))){
    if(0 == datasync){
      ent->UpdateMtime();
    }
    result = ent->Flush(false);
    FdManager::get()->Close(ent);
  }
  S3FS_MALLOCTRIM(0);

  // Issue 320: Delete stat cache entry because st_size may have changed.
  StatCache::getStatCacheData()->DelStat(path);

  return result;
}

static int s3fs_release(const char* _path, struct fuse_file_info* fi)
{
  WTF8_ENCODE(path)
  S3FS_PRN_INFO("[path=%s][fd=%llu]", path, (unsigned long long)(fi->fh));

  // [NOTE]
  // All opened files' stats are cached with the no-truncate flag.
  // Thus we unset it here.
  StatCache::getStatCacheData()->ChangeNoTruncateFlag(string(path), false);

  // [NOTICE]
  // At first, we remove the stat cache entry.
  // FUSE does not wait for a response from the "release" function :-(
  // and runs the next command before this function returns,
  // so we must delete the stat cache entry ASAP.
  //
  if((fi->flags & O_RDWR) || (fi->flags & O_WRONLY)){
    StatCache::getStatCacheData()->DelStat(path);
  }

  FdEntity* ent;
  if(NULL == (ent = FdManager::get()->GetFdEntity(path, static_cast<int>(fi->fh)))){
    S3FS_PRN_ERR("could not find fd(file=%s)", path);
    return -EIO;
  }
  if(ent->GetFd() != static_cast<int>(fi->fh)){
    S3FS_PRN_WARN("different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fi->fh));
  }

  // Close twice: once for the implicit reference taken by GetFdEntity,
  // and again for the release itself.
  ent->Close();
  FdManager::get()->Close(ent);

  // check - for debug
  if(IS_S3FS_LOG_DBG()){
    if(NULL != (ent = FdManager::get()->GetFdEntity(path, static_cast<int>(fi->fh)))){
      S3FS_PRN_WARN("file(%s),fd(%d) is still opened.", path, ent->GetFd());
    }
  }
  S3FS_MALLOCTRIM(0);

  return 0;
}

static int s3fs_opendir(const char* _path, struct fuse_file_info* fi)
{
  WTF8_ENCODE(path)
  int result;
  int mask = (O_RDONLY != (fi->flags & O_ACCMODE) ? W_OK : R_OK) | X_OK;

  S3FS_PRN_INFO("[path=%s][flags=0x%x]", path, fi->flags);

  if(0 == (result = check_object_access(path, mask, NULL))){
    result = check_parent_object_access(path, mask);
  }
  S3FS_MALLOCTRIM(0);

  return result;
}

static bool multi_head_callback(S3fsCurl* s3fscurl)
{
  if(!s3fscurl){
    return false;
  }
  string saved_path = s3fscurl->GetSpacialSavedPath();
  if(!StatCache::getStatCacheData()->AddStat(saved_path, *(s3fscurl->GetResponseHeaders()))){
    S3FS_PRN_ERR("failed adding stat cache [path=%s]", saved_path.c_str());
    return false;
  }
  return true;
}

static S3fsCurl* multi_head_retry_callback(S3fsCurl* s3fscurl)
{
  if(!s3fscurl){
    return NULL;
  }
  int ssec_key_pos = s3fscurl->GetLastPreHeadSeecKeyPos();
  int retry_count  = s3fscurl->GetMultipartRetryCount();

  // Retry with the next SSE-C key.
  // If all SSE-C keys are exhausted, count it as one master retry.
  ssec_key_pos = (ssec_key_pos < 0 ? 0 : ssec_key_pos + 1);
  if(0 == S3fsCurl::GetSseKeyCount() || S3fsCurl::GetSseKeyCount() <= ssec_key_pos){
    if(s3fscurl->IsOverMultipartRetryCount()){
      S3FS_PRN_ERR("Over retry count(%d) limit(%s).", s3fscurl->GetMultipartRetryCount(), s3fscurl->GetSpacialSavedPath().c_str());
      return NULL;
    }
    ssec_key_pos = -1;
    retry_count++;
  }

  S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe());
  string path       = s3fscurl->GetPath();
  string base_path  = s3fscurl->GetBasePath();
  string saved_path = s3fscurl->GetSpacialSavedPath();

  if(!newcurl->PreHeadRequest(path, base_path, saved_path, ssec_key_pos)){
    S3FS_PRN_ERR("Could not duplicate curl object(%s).", saved_path.c_str());
    delete newcurl;
    return NULL;
  }
  newcurl->SetMultipartRetryCount(retry_count);

  return newcurl;
}

static int readdir_multi_head(const char* path, const S3ObjList& head, void* buf, fuse_fill_dir_t filler)
{
  S3fsMultiCurl curlmulti(S3fsCurl::GetMaxMultiRequest());
  s3obj_list_t  headlist;
  s3obj_list_t  fillerlist;
  int           result = 0;

  // Make base path list (before logging, so the logged size is meaningful).
  head.GetNameList(headlist, true, false);  // get name with "/".

  S3FS_PRN_INFO1("[path=%s][list=%zu]", path, headlist.size());

  // Initialize S3fsMultiCurl
  curlmulti.SetSuccessCallback(multi_head_callback);
  curlmulti.SetRetryCallback(multi_head_retry_callback);

  s3obj_list_t::iterator iter;

  fillerlist.clear();
  // Make single head request(with max).
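  // [NOTE]
  // A sketch of the loop below, assuming a listing that returned "file1"
  // and "subdir/" (names hypothetical): one HEAD request is queued per
  // name, the stat cache is keyed by the listed name (directories keep
  // their trailing slash, e.g. "dir/subdir/"), while the path later used
  // for the FUSE filler has that trailing slash stripped (e.g. "dir/subdir").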
  for(iter = headlist.begin(); headlist.end() != iter; iter = headlist.erase(iter)){
    string disppath = path + (*iter);
    string etag     = head.GetETag((*iter).c_str());

    string fillpath = disppath;
    if('/' == disppath[disppath.length() - 1]){
      fillpath = fillpath.substr(0, fillpath.length() -1);
    }
    fillerlist.push_back(fillpath);

    if(StatCache::getStatCacheData()->HasStat(disppath, etag.c_str())){
      continue;
    }

    // First check for directory, starting with "not SSE-C".
    // If the check fails and SSE-C mode is enabled, the retry callback
    // rechecks with "SSE-C".
    S3fsCurl* s3fscurl = new S3fsCurl();
    if(!s3fscurl->PreHeadRequest(disppath, (*iter), disppath)){    // target path = cache key path.(ex "dir/")
      S3FS_PRN_WARN("Could not make curl object for head request(%s).", disppath.c_str());
      delete s3fscurl;
      continue;
    }

    if(!curlmulti.SetS3fsCurlObject(s3fscurl)){
      S3FS_PRN_WARN("Could not make curl object into multi curl(%s).", disppath.c_str());
      delete s3fscurl;
      continue;
    }
  }

  // Multi request
  if(0 != (result = curlmulti.Request())){
    // If the result is -EIO, some error occurred. This case includes
    // objects that are encrypted(SSE) when s3fs does not have the keys,
    // so set result to 0 in order to continue the process.
    if(-EIO == result){
      S3FS_PRN_WARN("error occurred in multi request(errno=%d), but continue...", result);
      result = 0;
    }else{
      S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result);
      return result;
    }
  }

  // Populate the fuse buffer.
  // This is the best place to do it, because the stat cache may hold
  // fewer entries than the directory has files.
  //
  for(iter = fillerlist.begin(); fillerlist.end() != iter; ++iter){
    struct stat st;
    bool in_cache = StatCache::getStatCacheData()->GetStat((*iter), &st);
    string bpath = mybasename((*iter));
    if(use_wtf8){
      bpath = s3fs_wtf8_decode(bpath);
    }
    if(in_cache){
      filler(buf, bpath.c_str(), &st, 0);
    }else{
      S3FS_PRN_INFO2("Could not find %s file in stat cache.", (*iter).c_str());
      filler(buf, bpath.c_str(), 0, 0);
    }
  }

  return result;
}

static int s3fs_readdir(const char* _path, void* buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info* fi)
{
  WTF8_ENCODE(path)
  S3ObjList head;
  int result;

  S3FS_PRN_INFO("[path=%s]", path);

  if(0 != (result = check_object_access(path, X_OK, NULL))){
    return result;
  }

  // get a list of all the objects
  if((result = list_bucket(path, head, "/")) != 0){
    S3FS_PRN_ERR("list_bucket returns error(%d).", result);
    return result;
  }

  // force to add "." and ".." name.
  filler(buf, ".", 0, 0);
  filler(buf, "..", 0, 0);
  if(head.IsEmpty()){
    return 0;
  }

  // Send multi head request for stats caching.
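  // [NOTE]
  // Illustrative end-to-end flow for "ls /mnt/bucket/dir" (path names
  // hypothetical): list_bucket() below pages through the bucket listing
  // with delimiter "/" to collect the children of "dir/", then
  // readdir_multi_head() HEADs each child in parallel to warm the stat
  // cache before the entries are handed to the FUSE filler.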
  string strpath = path;
  if(strcmp(path, "/") != 0){
    strpath += "/";
  }
  if(0 != (result = readdir_multi_head(strpath.c_str(), head, buf, filler))){
    S3FS_PRN_ERR("readdir_multi_head returns error(%d).", result);
  }
  S3FS_MALLOCTRIM(0);

  return result;
}

static int list_bucket(const char* path, S3ObjList& head, const char* delimiter, bool check_content_only)
{
  string    s3_realpath;
  string    query_delimiter;
  string    query_prefix;
  string    query_maxkey;
  string    next_marker;
  bool      truncated = true;
  S3fsCurl  s3fscurl;
  xmlDocPtr doc;

  S3FS_PRN_INFO1("[path=%s]", path);

  if(delimiter && 0 < strlen(delimiter)){
    query_delimiter += "delimiter=";
    query_delimiter += delimiter;
    query_delimiter += "&";
  }

  query_prefix += "&prefix=";
  s3_realpath = get_realpath(path);
  if(0 == s3_realpath.length() || '/' != s3_realpath[s3_realpath.length() - 1]){
    // last word must be "/"
    query_prefix += urlEncode(s3_realpath.substr(1) + "/");
  }else{
    query_prefix += urlEncode(s3_realpath.substr(1));
  }
  if (check_content_only){
    // Just need to know if there are child objects in dir
    // For dir with children, expect "dir/" and "dir/child"
    query_maxkey += "max-keys=2";
  }else{
    query_maxkey += "max-keys=" + str(max_keys_list_object);
  }

  while(truncated){
    string each_query = query_delimiter;
    if(!next_marker.empty()){
      each_query += "marker=" + urlEncode(next_marker) + "&";
      next_marker = "";
    }
    each_query += query_maxkey;
    each_query += query_prefix;

    // request
    int result;
    if(0 != (result = s3fscurl.ListBucketRequest(path, each_query.c_str()))){
      S3FS_PRN_ERR("ListBucketRequest returns with error.");
      return result;
    }
    BodyData* body = s3fscurl.GetBodyData();

    // xmlDocPtr
    if(NULL == (doc = xmlReadMemory(body->str(), static_cast<int>(body->size()), "", NULL, 0))){
      S3FS_PRN_ERR("xmlReadMemory returns with error.");
      return -1;
    }
    if(0 != append_objects_from_xml(path, doc, head)){
      S3FS_PRN_ERR("append_objects_from_xml returns with error.");
      xmlFreeDoc(doc);
      return -1;
    }
    if(true == (truncated = is_truncated(doc))){
      xmlChar* tmpch = get_next_marker(doc);
      if(tmpch){
        next_marker = (char*)tmpch;
        xmlFree(tmpch);
      }else{
        // If "delimiter" was not specified, S3 does not return "NextMarker".
        // In that case, the last name seen can be used as the next marker.
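        // Illustrative continuation request (values hypothetical):
        //   GET /?delimiter=/&marker=dir/file0999&max-keys=1000&prefix=dir/
        // The marker comes from <NextMarker> when S3 returns it, otherwise
        // from the last key of the previous page, as computed below.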
        //
        string lastname;
        if(!head.GetLastName(lastname)){
          S3FS_PRN_WARN("Could not find next marker, thus break loop.");
          truncated = false;
        }else{
          next_marker = s3_realpath.substr(1);
          if(0 == s3_realpath.length() || '/' != s3_realpath[s3_realpath.length() - 1]){
            next_marker += "/";
          }
          next_marker += lastname;
        }
      }
    }
    S3FS_XMLFREEDOC(doc);

    // reset(initialize) curl object
    s3fscurl.DestroyCurlHandle();

    if(check_content_only){
      break;
    }
  }
  S3FS_MALLOCTRIM(0);

  return 0;
}

static const char* c_strErrorObjectName = "FILE or SUBDIR in DIR";

static int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx,
       const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head)
{
  xmlXPathObjectPtr contents_xp;
  xmlNodeSetPtr     content_nodes;

  if(NULL == (contents_xp = xmlXPathEvalExpression((xmlChar*)ex_contents, ctx))){
    S3FS_PRN_ERR("xmlXPathEvalExpression returns null.");
    return -1;
  }
  if(xmlXPathNodeSetIsEmpty(contents_xp->nodesetval)){
    S3FS_PRN_DBG("contents_xp->nodesetval is empty.");
    S3FS_XMLXPATHFREEOBJECT(contents_xp);
    return 0;
  }
  content_nodes = contents_xp->nodesetval;

  bool   is_dir;
  string stretag;
  int    i;
  for(i = 0; i < content_nodes->nodeNr; i++){
    ctx->node = content_nodes->nodeTab[i];

    // object name
    xmlXPathObjectPtr key;
    if(NULL == (key = xmlXPathEvalExpression((xmlChar*)ex_key, ctx))){
      S3FS_PRN_WARN("key is null. but continue.");
      continue;
    }
    if(xmlXPathNodeSetIsEmpty(key->nodesetval)){
      S3FS_PRN_WARN("node is empty. but continue.");
      xmlXPathFreeObject(key);
      continue;
    }
    xmlNodeSetPtr key_nodes = key->nodesetval;
    char* name = get_object_name(doc, key_nodes->nodeTab[0]->xmlChildrenNode, path);

    if(!name){
      S3FS_PRN_WARN("name is something wrong. but continue.");

    }else if((const char*)name != c_strErrorObjectName){
      is_dir  = isCPrefix ? true : false;
      stretag = "";

      if(!isCPrefix && ex_etag){
        // Get ETag
        xmlXPathObjectPtr ETag;
        if(NULL != (ETag = xmlXPathEvalExpression((xmlChar*)ex_etag, ctx))){
          if(xmlXPathNodeSetIsEmpty(ETag->nodesetval)){
            S3FS_PRN_INFO("ETag->nodesetval is empty.");
          }else{
            xmlNodeSetPtr etag_nodes = ETag->nodesetval;
            xmlChar* petag = xmlNodeListGetString(doc, etag_nodes->nodeTab[0]->xmlChildrenNode, 1);
            if(petag){
              stretag = (char*)petag;
              xmlFree(petag);
            }
          }
          xmlXPathFreeObject(ETag);
        }
      }
      if(!head.insert(name, (0 < stretag.length() ? stretag.c_str() : NULL), is_dir)){
        S3FS_PRN_ERR("insert_object returns with error.");
        xmlXPathFreeObject(key);
        xmlXPathFreeObject(contents_xp);
        free(name);
        S3FS_MALLOCTRIM(0);
        return -1;
      }
      free(name);
    }else{
      S3FS_PRN_DBG("name is file or subdir in dir. but continue.");
    }
    xmlXPathFreeObject(key);
  }
  S3FS_XMLXPATHFREEOBJECT(contents_xp);

  return 0;
}

static bool GetXmlNsUrl(xmlDocPtr doc, string& nsurl)
{
  static time_t tmLast = 0;  // cache for 60 sec.
  static string strNs;
  bool result = false;

  if(!doc){
    return false;
  }
  if((tmLast + 60) < time(NULL)){
    // refresh
    tmLast = time(NULL);
    strNs  = "";
    xmlNodePtr pRootNode = xmlDocGetRootElement(doc);
    if(pRootNode){
      xmlNsPtr* nslist = xmlGetNsList(doc, pRootNode);
      if(nslist){
        if(nslist[0] && nslist[0]->href){
          strNs = (const char*)(nslist[0]->href);
        }
        S3FS_XMLFREE(nslist);
      }
    }
  }
  if(!strNs.empty()){
    nsurl  = strNs;
    result = true;
  }
  return result;
}

static int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head)
{
  string xmlnsurl;
  string ex_contents = "//";
  string ex_key;
  string ex_cprefix  = "//";
  string ex_prefix;
  string ex_etag;

  if(!doc){
    return -1;
  }

  // If there is no <Prefix> element, use the path instead of it.
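  // [NOTE]
  // A sketch of the ListBucketResult body this function walks (abridged,
  // key names illustrative):
  //
  //   <ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  //     <Prefix>dir/</Prefix>
  //     <Contents><Key>dir/file1</Key><ETag>"abc..."</ETag></Contents>
  //     <CommonPrefixes><Prefix>dir/subdir/</Prefix></CommonPrefixes>
  //   </ListBucketResult>
  //
  // <Contents> entries become files (with ETag) and <CommonPrefixes>
  // entries become directories; the xpath strings assembled below
  // select exactly these elements.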
xmlChar* pprefix = get_prefix(doc); string prefix = (pprefix ? (char*)pprefix : path ? path : ""); if(pprefix){ xmlFree(pprefix); } xmlXPathContextPtr ctx = xmlXPathNewContext(doc); if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){ xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str()); ex_contents+= "s3:"; ex_key += "s3:"; ex_cprefix += "s3:"; ex_prefix += "s3:"; ex_etag += "s3:"; } ex_contents+= "Contents"; ex_key += "Key"; ex_cprefix += "CommonPrefixes"; ex_prefix += "Prefix"; ex_etag += "ETag"; if(-1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_contents.c_str(), ex_key.c_str(), ex_etag.c_str(), 0, head) || -1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_cprefix.c_str(), ex_prefix.c_str(), NULL, 1, head) ) { S3FS_PRN_ERR("append_objects_from_xml_ex returns with error."); S3FS_XMLXPATHFREECONTEXT(ctx); return -1; } S3FS_XMLXPATHFREECONTEXT(ctx); return 0; } static xmlChar* get_base_exp(xmlDocPtr doc, const char* exp) { xmlXPathObjectPtr marker_xp; string xmlnsurl; string exp_string; if(!doc){ return NULL; } xmlXPathContextPtr ctx = xmlXPathNewContext(doc); if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){ xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str()); exp_string = "/s3:ListBucketResult/s3:"; } else { exp_string = "/ListBucketResult/"; } exp_string += exp; if(NULL == (marker_xp = xmlXPathEvalExpression((xmlChar *)exp_string.c_str(), ctx))){ xmlXPathFreeContext(ctx); return NULL; } if(xmlXPathNodeSetIsEmpty(marker_xp->nodesetval)){ S3FS_PRN_ERR("marker_xp->nodesetval is empty."); xmlXPathFreeObject(marker_xp); xmlXPathFreeContext(ctx); return NULL; } xmlNodeSetPtr nodes = marker_xp->nodesetval; xmlChar* result = xmlNodeListGetString(doc, nodes->nodeTab[0]->xmlChildrenNode, 1); xmlXPathFreeObject(marker_xp); xmlXPathFreeContext(ctx); return result; } static xmlChar* get_prefix(xmlDocPtr doc) { return get_base_exp(doc, "Prefix"); } static xmlChar* get_next_marker(xmlDocPtr doc) { return get_base_exp(doc, "NextMarker"); } static bool is_truncated(xmlDocPtr doc) { bool result = false; xmlChar* strTruncate = get_base_exp(doc, "IsTruncated"); if(!strTruncate){ return false; } if(0 == strcasecmp((const char*)strTruncate, "true")){ result = true; } xmlFree(strTruncate); return result; } // return: the pointer to object name on allocated memory. // the pointer to "c_strErrorObjectName".(not allocated) // NULL(a case of something error occurred) static char* get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path) { // Get full path xmlChar* fullpath = xmlNodeListGetString(doc, node, 1); if(!fullpath){ S3FS_PRN_ERR("could not get object full path name.."); return NULL; } // basepath(path) is as same as fullpath. if(0 == strcmp((char*)fullpath, path)){ xmlFree(fullpath); return (char*)c_strErrorObjectName; } // Make dir path and filename string strdirpath = mydirname(string((char*)fullpath)); string strmybpath = mybasename(string((char*)fullpath)); const char* dirpath = strdirpath.c_str(); const char* mybname = strmybpath.c_str(); const char* basepath= (path && '/' == path[0]) ? &path[1] : path; xmlFree(fullpath); if(!mybname || '\0' == mybname[0]){ return NULL; } // check subdir & file in subdir if(dirpath && 0 < strlen(dirpath)){ // case of "/" if(0 == strcmp(mybname, "/") && 0 == strcmp(dirpath, "/")){ return (char*)c_strErrorObjectName; } // case of "." if(0 == strcmp(mybname, ".") && 0 == strcmp(dirpath, ".")){ return (char*)c_strErrorObjectName; } // case of ".." 
if(0 == strcmp(mybname, "..") && 0 == strcmp(dirpath, ".")){ return (char*)c_strErrorObjectName; } // case of "name" if(0 == strcmp(dirpath, ".")){ // OK return strdup(mybname); }else{ if(basepath && 0 == strcmp(dirpath, basepath)){ // OK return strdup(mybname); }else if(basepath && 0 < strlen(basepath) && '/' == basepath[strlen(basepath) - 1] && 0 == strncmp(dirpath, basepath, strlen(basepath) - 1)){ string withdirname; if(strlen(dirpath) > strlen(basepath)){ withdirname = &dirpath[strlen(basepath)]; } if(0 < withdirname.length() && '/' != withdirname[withdirname.length() - 1]){ withdirname += "/"; } withdirname += mybname; return strdup(withdirname.c_str()); } } } // case of something wrong return (char*)c_strErrorObjectName; } static int remote_mountpath_exists(const char* path) { struct stat stbuf; S3FS_PRN_INFO1("[path=%s]", path); // getattr will prefix the path with the remote mountpoint if(0 != get_object_attribute("/", &stbuf, NULL)){ return -1; } if(!S_ISDIR(stbuf.st_mode)){ return -1; } return 0; } static void free_xattrs(xattrs_t& xattrs) { for(xattrs_t::iterator iter = xattrs.begin(); iter != xattrs.end(); ++iter){ delete iter->second; } xattrs.clear(); } static bool parse_xattr_keyval(const std::string& xattrpair, string& key, PXATTRVAL& pval) { // parse key and value size_t pos; string tmpval; if(string::npos == (pos = xattrpair.find_first_of(':'))){ S3FS_PRN_ERR("one of xattr pair(%s) is wrong format.", xattrpair.c_str()); return false; } key = xattrpair.substr(0, pos); tmpval = xattrpair.substr(pos + 1); if(!takeout_str_dquart(key) || !takeout_str_dquart(tmpval)){ S3FS_PRN_ERR("one of xattr pair(%s) is wrong format.", xattrpair.c_str()); return false; } pval = new XATTRVAL; pval->length = 0; pval->pvalue = s3fs_decode64(tmpval.c_str(), &pval->length); return true; } static size_t parse_xattrs(const std::string& strxattrs, xattrs_t& xattrs) { xattrs.clear(); // decode string jsonxattrs = urlDecode(strxattrs); // get from "{" to "}" string restxattrs; { size_t startpos; size_t endpos = string::npos; if(string::npos != (startpos = jsonxattrs.find_first_of('{'))){ endpos = jsonxattrs.find_last_of('}'); } if(startpos == string::npos || endpos == string::npos || endpos <= startpos){ S3FS_PRN_WARN("xattr header(%s) is not json format.", jsonxattrs.c_str()); return 0; } restxattrs = jsonxattrs.substr(startpos + 1, endpos - (startpos + 1)); } // parse each key:val for(size_t pair_nextpos = restxattrs.find_first_of(','); 0 < restxattrs.length(); restxattrs = (pair_nextpos != string::npos ? restxattrs.substr(pair_nextpos + 1) : string("")), pair_nextpos = restxattrs.find_first_of(',')){ string pair = pair_nextpos != string::npos ? restxattrs.substr(0, pair_nextpos) : restxattrs; string key; PXATTRVAL pval = NULL; if(!parse_xattr_keyval(pair, key, pval)){ // something format error, so skip this. 
continue; } xattrs[key] = pval; } return xattrs.size(); } static std::string build_xattrs(const xattrs_t& xattrs) { string strxattrs("{"); bool is_set = false; for(xattrs_t::const_iterator iter = xattrs.begin(); iter != xattrs.end(); ++iter){ if(is_set){ strxattrs += ','; }else{ is_set = true; } strxattrs += '\"'; strxattrs += iter->first; strxattrs += "\":\""; if(iter->second){ char* base64val = s3fs_base64((iter->second)->pvalue, (iter->second)->length); if(base64val){ strxattrs += base64val; delete[] base64val; } } strxattrs += '\"'; } strxattrs += '}'; strxattrs = urlEncode(strxattrs); return strxattrs; } static int set_xattrs_to_header(headers_t& meta, const char* name, const char* value, size_t size, int flags) { string strxattrs; xattrs_t xattrs; headers_t::iterator iter; if(meta.end() == (iter = meta.find("x-amz-meta-xattr"))){ #if defined(XATTR_REPLACE) if(XATTR_REPLACE == (flags & XATTR_REPLACE)){ // there is no xattr header but flags is replace, so failure. return -ENOATTR; } #endif }else{ #if defined(XATTR_CREATE) if(XATTR_CREATE == (flags & XATTR_CREATE)){ // found xattr header but flags is only creating, so failure. return -EEXIST; } #endif strxattrs = iter->second; } // get map as xattrs_t parse_xattrs(strxattrs, xattrs); // add name(do not care overwrite and empty name/value) xattrs_t::iterator xiter; if(xattrs.end() != (xiter = xattrs.find(string(name)))){ // found same head. free value. delete xiter->second; } PXATTRVAL pval = new XATTRVAL; pval->length = size; if(0 < size){ pval->pvalue = new unsigned char[size]; memcpy(pval->pvalue, value, size); }else{ pval->pvalue = NULL; } xattrs[string(name)] = pval; // build new strxattrs(not encoded) and set it to headers_t meta["x-amz-meta-xattr"] = build_xattrs(xattrs); free_xattrs(xattrs); return 0; } #if defined(__APPLE__) static int s3fs_setxattr(const char* path, const char* name, const char* value, size_t size, int flags, uint32_t position) #else static int s3fs_setxattr(const char* path, const char* name, const char* value, size_t size, int flags) #endif { S3FS_PRN_INFO("[path=%s][name=%s][value=%p][size=%zu][flags=0x%x]", path, name, value, size, flags); if((value && 0 == size) || (!value && 0 < size)){ S3FS_PRN_ERR("Wrong parameter: value(%p), size(%zu)", value, size); return 0; } #if defined(__APPLE__) if (position != 0) { // No resource fork support return -EINVAL; } #endif int result; string strpath; string newpath; string nowcache; headers_t meta; struct stat stbuf; dirtype nDirType = DIRTYPE_UNKNOWN; if(0 == strcmp(path, "/")){ S3FS_PRN_ERR("Could not change mode for mount point."); return -EIO; } if(0 != (result = check_parent_object_access(path, X_OK))){ return result; } if(0 != (result = check_object_owner(path, &stbuf))){ return result; } if(S_ISDIR(stbuf.st_mode)){ result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType); }else{ strpath = path; nowcache = strpath; result = get_object_attribute(strpath.c_str(), NULL, &meta); } if(0 != result){ return result; } // make new header_t if(0 != (result = set_xattrs_to_header(meta, name, value, size, flags))){ return result; } if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){ // Should rebuild directory object(except new type) // Need to remove old dir("dir" etc) and make new dir("dir/") // At first, remove directory old object if(0 != (result = remove_old_type_dir(strpath, nDirType))){ return result; } StatCache::getStatCacheData()->DelStat(nowcache); // Make new directory object("dir/") if(0 != (result = 
create_directory_object(newpath.c_str(), stbuf.st_mode, stbuf.st_mtime, stbuf.st_uid, stbuf.st_gid))){
      return result;
    }

    // need to set xattr header for directory.
    strpath  = newpath;
    nowcache = strpath;
  }

  // set xattr all object
  meta["x-amz-meta-ctime"]         = str(time(NULL));
  meta["x-amz-copy-source"]        = urlEncode(service_path + bucket + get_realpath(strpath.c_str()));
  meta["x-amz-metadata-directive"] = "REPLACE";

  if(0 != put_headers(strpath.c_str(), meta, true)){
    return -EIO;
  }
  StatCache::getStatCacheData()->DelStat(nowcache);

  return 0;
}

#if defined(__APPLE__)
static int s3fs_getxattr(const char* path, const char* name, char* value, size_t size, uint32_t position)
#else
static int s3fs_getxattr(const char* path, const char* name, char* value, size_t size)
#endif
{
  S3FS_PRN_INFO("[path=%s][name=%s][value=%p][size=%zu]", path, name, value, size);

  if(!path || !name){
    return -EIO;
  }

#if defined(__APPLE__)
  if (position != 0) {
    // No resource fork support
    return -EINVAL;
  }
#endif

  int       result;
  headers_t meta;
  xattrs_t  xattrs;

  // check parent directory attribute.
  if(0 != (result = check_parent_object_access(path, X_OK))){
    return result;
  }

  // get headers
  if(0 != (result = get_object_attribute(path, NULL, &meta))){
    return result;
  }

  // get xattrs
  headers_t::iterator hiter = meta.find("x-amz-meta-xattr");
  if(meta.end() == hiter){
    // object does not have xattrs
    return -ENOATTR;
  }
  string strxattrs = hiter->second;

  parse_xattrs(strxattrs, xattrs);

  // search name
  string             strname = name;
  xattrs_t::iterator xiter   = xattrs.find(strname);
  if(xattrs.end() == xiter){
    // not found name in xattrs
    free_xattrs(xattrs);
    return -ENOATTR;
  }

  // decode
  size_t         length = 0;
  unsigned char* pvalue = NULL;
  if(NULL != xiter->second){
    length = xiter->second->length;
    pvalue = xiter->second->pvalue;
  }

  if(0 < size){
    if(static_cast<size_t>(size) < length){
      // over buffer size
      free_xattrs(xattrs);
      return -ERANGE;
    }
    if(pvalue){
      memcpy(value, pvalue, length);
    }
  }
  free_xattrs(xattrs);

  return static_cast<int>(length);
}

static int s3fs_listxattr(const char* path, char* list, size_t size)
{
  S3FS_PRN_INFO("[path=%s][list=%p][size=%zu]", path, list, size);

  if(!path){
    return -EIO;
  }

  int       result;
  headers_t meta;
  xattrs_t  xattrs;

  // check parent directory attribute.
if(0 != (result = check_parent_object_access(path, X_OK))){ return result; } // get headers if(0 != (result = get_object_attribute(path, NULL, &meta))){ return result; } // get xattrs headers_t::iterator iter; if(meta.end() == (iter = meta.find("x-amz-meta-xattr"))){ // object does not have xattrs return 0; } string strxattrs = iter->second; parse_xattrs(strxattrs, xattrs); // calculate total name length size_t total = 0; for(xattrs_t::const_iterator xiter = xattrs.begin(); xiter != xattrs.end(); ++xiter){ if(0 < xiter->first.length()){ total += xiter->first.length() + 1; } } if(0 == total){ free_xattrs(xattrs); return 0; } // check parameters if(0 == size){ free_xattrs(xattrs); return total; } if(!list || size < total){ free_xattrs(xattrs); return -ERANGE; } // copy to list char* setpos = list; for(xattrs_t::const_iterator xiter = xattrs.begin(); xiter != xattrs.end(); ++xiter){ if(0 < xiter->first.length()){ strcpy(setpos, xiter->first.c_str()); setpos = &setpos[strlen(setpos) + 1]; } } free_xattrs(xattrs); return total; } static int s3fs_removexattr(const char* path, const char* name) { S3FS_PRN_INFO("[path=%s][name=%s]", path, name); if(!path || !name){ return -EIO; } int result; string strpath; string newpath; string nowcache; headers_t meta; xattrs_t xattrs; struct stat stbuf; dirtype nDirType = DIRTYPE_UNKNOWN; if(0 == strcmp(path, "/")){ S3FS_PRN_ERR("Could not change mode for mount point."); return -EIO; } if(0 != (result = check_parent_object_access(path, X_OK))){ return result; } if(0 != (result = check_object_owner(path, &stbuf))){ return result; } if(S_ISDIR(stbuf.st_mode)){ result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType); }else{ strpath = path; nowcache = strpath; result = get_object_attribute(strpath.c_str(), NULL, &meta); } if(0 != result){ return result; } // get xattrs headers_t::iterator hiter = meta.find("x-amz-meta-xattr"); if(meta.end() == hiter){ // object does not have xattrs return -ENOATTR; } string strxattrs = hiter->second; parse_xattrs(strxattrs, xattrs); // check name xattrs string strname = name; xattrs_t::iterator xiter = xattrs.find(strname); if(xattrs.end() == xiter){ free_xattrs(xattrs); return -ENOATTR; } // make new header_t after deleting name xattr delete xiter->second; xattrs.erase(xiter); // build new xattr if(!xattrs.empty()){ meta["x-amz-meta-xattr"] = build_xattrs(xattrs); }else{ meta.erase("x-amz-meta-xattr"); } if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){ // Should rebuild directory object(except new type) // Need to remove old dir("dir" etc) and make new dir("dir/") // At first, remove directory old object if(0 != (result = remove_old_type_dir(strpath, nDirType))){ return result; } StatCache::getStatCacheData()->DelStat(nowcache); // Make new directory object("dir/") if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, stbuf.st_mtime, stbuf.st_uid, stbuf.st_gid))){ free_xattrs(xattrs); return result; } // need to set xattr header for directory. strpath = newpath; nowcache = strpath; } // set xattr all object meta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str())); meta["x-amz-metadata-directive"] = "REPLACE"; if(0 != put_headers(strpath.c_str(), meta, true)){ free_xattrs(xattrs); return -EIO; } StatCache::getStatCacheData()->DelStat(nowcache); free_xattrs(xattrs); return 0; } // s3fs_init calls this function to exit cleanly from the fuse event loop. 
// // There's no way to pass an exit status to the high-level event loop API, so // this function stores the exit value in a global for main() static void s3fs_exit_fuseloop(int exit_status) { S3FS_PRN_ERR("Exiting FUSE event loop due to errors\n"); s3fs_init_deferred_exit_status = exit_status; struct fuse_context *ctx = fuse_get_context(); if (NULL != ctx) { fuse_exit(ctx->fuse); } } static void* s3fs_init(struct fuse_conn_info* conn) { S3FS_PRN_INIT_INFO("init v%s(commit:%s) with %s", VERSION, COMMIT_HASH_VAL, s3fs_crypt_lib_name()); // cache(remove cache dirs at first) if(is_remove_cache && (!CacheFileStat::DeleteCacheFileStatDirectory() || !FdManager::DeleteCacheDirectory())){ S3FS_PRN_DBG("Could not initialize cache directory."); } // check loading IAM role name if(load_iamrole){ // load IAM role name from http://169.254.169.254/latest/meta-data/iam/security-credentials // S3fsCurl s3fscurl; if(!s3fscurl.LoadIAMRoleFromMetaData()){ S3FS_PRN_CRIT("could not load IAM role name from meta data."); s3fs_exit_fuseloop(EXIT_FAILURE); return NULL; } S3FS_PRN_INFO("loaded IAM role name = %s", S3fsCurl::GetIAMRole()); } if (create_bucket){ int result = do_create_bucket(); if(result != 0){ s3fs_exit_fuseloop(result); return NULL; } } // Check Bucket { int result; if(EXIT_SUCCESS != (result = s3fs_check_service())){ s3fs_exit_fuseloop(result); return NULL; } } // Investigate system capabilities #ifndef __APPLE__ if((unsigned int)conn->capable & FUSE_CAP_ATOMIC_O_TRUNC){ conn->want |= FUSE_CAP_ATOMIC_O_TRUNC; } #endif if((unsigned int)conn->capable & FUSE_CAP_BIG_WRITES){ conn->want |= FUSE_CAP_BIG_WRITES; } return NULL; } static void s3fs_destroy(void*) { S3FS_PRN_INFO("destroy"); // cache(remove at last) if(is_remove_cache && (!CacheFileStat::DeleteCacheFileStatDirectory() || !FdManager::DeleteCacheDirectory())){ S3FS_PRN_WARN("Could not remove cache directory."); } } static int s3fs_access(const char* path, int mask) { S3FS_PRN_INFO("[path=%s][mask=%s%s%s%s]", path, ((mask & R_OK) == R_OK) ? "R_OK " : "", ((mask & W_OK) == W_OK) ? "W_OK " : "", ((mask & X_OK) == X_OK) ? "X_OK " : "", (mask == F_OK) ? 
"F_OK" : ""); int result = check_object_access(path, mask, NULL); S3FS_MALLOCTRIM(0); return result; } static xmlChar* get_exp_value_xml(xmlDocPtr doc, xmlXPathContextPtr ctx, const char* exp_key) { if(!doc || !ctx || !exp_key){ return NULL; } xmlXPathObjectPtr exp; xmlNodeSetPtr exp_nodes; xmlChar* exp_value; // search exp_key tag if(NULL == (exp = xmlXPathEvalExpression((xmlChar*)exp_key, ctx))){ S3FS_PRN_ERR("Could not find key(%s).", exp_key); return NULL; } if(xmlXPathNodeSetIsEmpty(exp->nodesetval)){ S3FS_PRN_ERR("Key(%s) node is empty.", exp_key); S3FS_XMLXPATHFREEOBJECT(exp); return NULL; } // get exp_key value & set in struct exp_nodes = exp->nodesetval; if(NULL == (exp_value = xmlNodeListGetString(doc, exp_nodes->nodeTab[0]->xmlChildrenNode, 1))){ S3FS_PRN_ERR("Key(%s) value is empty.", exp_key); S3FS_XMLXPATHFREEOBJECT(exp); return NULL; } S3FS_XMLXPATHFREEOBJECT(exp); return exp_value; } static void print_incomp_mpu_list(incomp_mpu_list_t& list) { printf("\n"); printf("Lists the parts that have been uploaded for a specific multipart upload.\n"); printf("\n"); if(!list.empty()){ printf("---------------------------------------------------------------\n"); int cnt = 0; for(incomp_mpu_list_t::iterator iter = list.begin(); iter != list.end(); ++iter, ++cnt){ printf(" Path : %s\n", (*iter).key.c_str()); printf(" UploadId : %s\n", (*iter).id.c_str()); printf(" Date : %s\n", (*iter).date.c_str()); printf("\n"); } printf("---------------------------------------------------------------\n"); }else{ printf("There is no list.\n"); } } static bool abort_incomp_mpu_list(incomp_mpu_list_t& list, time_t abort_time) { if(list.empty()){ return true; } time_t now_time = time(NULL); // do removing. S3fsCurl s3fscurl; bool result = true; for(incomp_mpu_list_t::iterator iter = list.begin(); iter != list.end(); ++iter){ const char* tpath = (*iter).key.c_str(); string upload_id = (*iter).id; if(0 != abort_time){ // abort_time is 0, it means all. 
      time_t date = 0;
      if(!get_unixtime_from_iso8601((*iter).date.c_str(), date)){
        S3FS_PRN_DBG("date format is not ISO 8601 for %s multipart uploading object, skip this.", tpath);
        continue;
      }
      if(now_time <= (date + abort_time)){
        continue;
      }
    }

    if(0 != s3fscurl.AbortMultipartUpload(tpath, upload_id)){
      S3FS_PRN_EXIT("Failed to remove %s multipart uploading object.", tpath);
      result = false;
    }else{
      printf("Succeeded in removing %s multipart uploading object.\n", tpath);
    }

    // reset(initialize) curl object
    s3fscurl.DestroyCurlHandle();
  }

  return result;
}

static bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list)
{
  if(!doc){
    return false;
  }

  xmlXPathContextPtr ctx = xmlXPathNewContext(doc);

  string xmlnsurl;
  string ex_upload = "//";
  string ex_key;
  string ex_id;
  string ex_date;

  if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){
    xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str());
    ex_upload += "s3:";
    ex_key    += "s3:";
    ex_id     += "s3:";
    ex_date   += "s3:";
  }
  ex_upload += "Upload";
  ex_key    += "Key";
  ex_id     += "UploadId";
  ex_date   += "Initiated";

  // get "Upload" Tags
  xmlXPathObjectPtr upload_xp;
  if(NULL == (upload_xp = xmlXPathEvalExpression((xmlChar*)ex_upload.c_str(), ctx))){
    S3FS_PRN_ERR("xmlXPathEvalExpression returns null.");
    return false;
  }
  if(xmlXPathNodeSetIsEmpty(upload_xp->nodesetval)){
    S3FS_PRN_INFO("upload_xp->nodesetval is empty.");
    S3FS_XMLXPATHFREEOBJECT(upload_xp);
    S3FS_XMLXPATHFREECONTEXT(ctx);
    return true;
  }

  // Make list
  int           cnt;
  xmlNodeSetPtr upload_nodes;
  list.clear();
  for(cnt = 0, upload_nodes = upload_xp->nodesetval; cnt < upload_nodes->nodeNr; cnt++){
    ctx->node = upload_nodes->nodeTab[cnt];

    INCOMP_MPU_INFO part;
    xmlChar*        ex_value;

    // search "Key" tag
    if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_key.c_str()))){
      continue;
    }
    if('/' != *((char*)ex_value)){
      part.key = "/";
    }else{
      part.key = "";
    }
    part.key += (char*)ex_value;
    S3FS_XMLFREE(ex_value);

    // search "UploadId" tag
    if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_id.c_str()))){
      continue;
    }
    part.id = (char*)ex_value;
    S3FS_XMLFREE(ex_value);

    // search "Initiated" tag
    if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_date.c_str()))){
      continue;
    }
    part.date = (char*)ex_value;
    S3FS_XMLFREE(ex_value);

    list.push_back(part);
  }

  S3FS_XMLXPATHFREEOBJECT(upload_xp);
  S3FS_XMLXPATHFREECONTEXT(ctx);

  return true;
}

static int s3fs_utility_processing(time_t abort_time)
{
  if(NO_UTILITY_MODE == utility_mode){
    return EXIT_FAILURE;
  }
  printf("\n*** s3fs run as utility mode.\n\n");

  S3fsCurl s3fscurl;
  string   body;
  int      result = EXIT_SUCCESS;
  if(0 != s3fscurl.MultipartListRequest(body)){
    S3FS_PRN_EXIT("Could not list multipart uploads; either the request failed or there is no incomplete multipart uploaded object in the bucket.\n");
    result = EXIT_FAILURE;
  }else{
    // parse result(incomplete multipart upload information)
    S3FS_PRN_DBG("response body = {\n%s\n}", body.c_str());

    xmlDocPtr doc;
    if(NULL == (doc = xmlReadMemory(body.c_str(), static_cast<int>(body.size()), "", NULL, 0))){
      S3FS_PRN_DBG("xmlReadMemory exited with error.");
      result = EXIT_FAILURE;

    }else{
      // make incomplete uploads list
      incomp_mpu_list_t list;
      if(!get_incomp_mpu_list(doc, list)){
        S3FS_PRN_DBG("get_incomp_mpu_list exited with error.");
        result = EXIT_FAILURE;

      }else{
        if(INCOMP_TYPE_LIST == utility_mode){
          // print list
          print_incomp_mpu_list(list);
        }else if(INCOMP_TYPE_ABORT == utility_mode){
          // remove
          if(!abort_incomp_mpu_list(list, abort_time)){
            S3FS_PRN_DBG("an error occurred during removal process.");
            result = EXIT_FAILURE;
          }
        }
      }
      S3FS_XMLFREEDOC(doc);
    }
  }

  // ssl
  s3fs_destroy_global_ssl();

  return result;
}

//
// If calling with the wrong region, s3fs gets the following error body with a 400 error code.
// "<Error>
//    <Code>AuthorizationHeaderMalformed</Code>
//    <Message>The authorization header is malformed; the region 'us-east-1' is wrong; expecting 'ap-northeast-1'</Message>
//    <Region>ap-northeast-1</Region>
//    <RequestId>...</RequestId>
//    <HostId>...</HostId>
//  </Error>"
//
// This is cheap code, but it lets s3fs discover the correct region automatically.
//
static bool check_region_error(const char* pbody, size_t len, string& expectregion)
{
  if(!pbody){
    return false;
  }

  std::string code;
  if(!simple_parse_xml(pbody, len, "Code", code) || code != "AuthorizationHeaderMalformed"){
    return false;
  }

  if(!simple_parse_xml(pbody, len, "Region", expectregion)){
    return false;
  }

  return true;
}

static int s3fs_check_service()
{
  S3FS_PRN_INFO("check services.");

  // At first time for access S3, we check IAM role if it sets.
  if(!S3fsCurl::CheckIAMCredentialUpdate()){
    S3FS_PRN_CRIT("Failed to check IAM role name(%s).", S3fsCurl::GetIAMRole());
    return EXIT_FAILURE;
  }

  S3fsCurl s3fscurl;
  int      res;
  if(0 > (res = s3fscurl.CheckBucket())){
    // get response code
    long responseCode = s3fscurl.GetLastResponseCode();

    // check wrong endpoint, and automatically switch endpoint
    if(300 <= responseCode && responseCode < 500){

      // check region error(for putting message or retrying)
      BodyData* body = s3fscurl.GetBodyData();
      string    expectregion;
      if(check_region_error(body->str(), body->size(), expectregion)){
        // [NOTE]
        // If the endpoint is not specified(using us-east-1 region) and
        // an error is encountered accessing a different region, we
        // will retry the check on the expected region.
        // see) https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
        //
        if(is_specified_endpoint){
          const char* tmp_expect_ep = expectregion.c_str();
          S3FS_PRN_CRIT("The bucket region is not '%s', it is actually '%s'. You should specify the 'endpoint=%s' option.", endpoint.c_str(), tmp_expect_ep, tmp_expect_ep);

        }else{
          // current endpoint is wrong, so try to connect to the expected region.
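          // For example (illustrative): with the default host
          // "https://s3.amazonaws.com" and an expected region of
          // "ap-northeast-1", the host below becomes
          // "https://s3-ap-northeast-1.amazonaws.com" before the retry.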
S3FS_PRN_CRIT("Failed to connect region '%s'(default), so retry to connect region '%s'.", endpoint.c_str(), expectregion.c_str()); endpoint = expectregion; if(S3fsCurl::IsSignatureV4()){ if(host == "http://s3.amazonaws.com"){ host = "http://s3-" + endpoint + ".amazonaws.com"; }else if(host == "https://s3.amazonaws.com"){ host = "https://s3-" + endpoint + ".amazonaws.com"; } } // retry to check with new endpoint s3fscurl.DestroyCurlHandle(); res = s3fscurl.CheckBucket(); responseCode = s3fscurl.GetLastResponseCode(); } } } // try signature v2 if(0 > res && (responseCode == 400 || responseCode == 403) && S3fsCurl::IsSignatureV4()){ // switch sigv2 S3FS_PRN_CRIT("Failed to connect by sigv4, so retry to connect by signature version 2."); S3fsCurl::SetSignatureV4(false); // retry to check with sigv2 s3fscurl.DestroyCurlHandle(); res = s3fscurl.CheckBucket(); responseCode = s3fscurl.GetLastResponseCode(); } // check errors(after retrying) if(0 > res && responseCode != 200 && responseCode != 301){ if(responseCode == 400){ S3FS_PRN_CRIT("Bad Request(host=%s) - result of checking service.", host.c_str()); }else if(responseCode == 403){ S3FS_PRN_CRIT("invalid credentials(host=%s) - result of checking service.", host.c_str()); }else if(responseCode == 404){ S3FS_PRN_CRIT("bucket not found(host=%s) - result of checking service.", host.c_str()); }else{ // another error S3FS_PRN_CRIT("unable to connect(host=%s) - result of checking service.", host.c_str()); } return EXIT_FAILURE; } } s3fscurl.DestroyCurlHandle(); // make sure remote mountpath exists and is a directory if(!mount_prefix.empty()){ if(remote_mountpath_exists(mount_prefix.c_str()) != 0){ S3FS_PRN_CRIT("remote mountpath %s not found.", mount_prefix.c_str()); return EXIT_FAILURE; } } S3FS_MALLOCTRIM(0); return EXIT_SUCCESS; } // // Read and Parse passwd file // // The line of the password file is one of the following formats: // (1) "accesskey:secretkey" : AWS format for default(all) access key/secret key // (2) "bucket:accesskey:secretkey" : AWS format for bucket's access key/secret key // (3) "key=value" : Content-dependent KeyValue contents // // This function sets result into bucketkvmap_t, it bucket name and key&value mapping. // If bucket name is empty(1 or 3 format), bucket name for mapping is set "\t" or "". // // Return: 1 - OK(could parse and set mapping etc.) 
// 0 - NG(could not read any value) // -1 - Should shutdown immediately // static int parse_passwd_file(bucketkvmap_t& resmap) { string line; size_t first_pos; readline_t linelist; readline_t::iterator iter; // open passwd file ifstream PF(passwd_file.c_str()); if(!PF.good()){ S3FS_PRN_EXIT("could not open passwd file : %s", passwd_file.c_str()); return -1; } // read each line while(getline(PF, line)){ line = trim(line); if(line.empty()){ continue; } if('#' == line[0]){ continue; } if(string::npos != line.find_first_of(" \t")){ S3FS_PRN_EXIT("invalid line in passwd file, found whitespace character."); return -1; } if('[' == line[0]){ S3FS_PRN_EXIT("invalid line in passwd file, found a bracket \"[\" character."); return -1; } linelist.push_back(line); } // read '=' type kvmap_t kv; for(iter = linelist.begin(); iter != linelist.end(); ++iter){ first_pos = iter->find_first_of("="); if(first_pos == string::npos){ continue; } // formatted by "key=val" string key = trim(iter->substr(0, first_pos)); string val = trim(iter->substr(first_pos + 1, string::npos)); if(key.empty()){ continue; } if(kv.end() != kv.find(key)){ S3FS_PRN_WARN("same key name(%s) found in passwd file, skip this.", key.c_str()); continue; } kv[key] = val; } // set special key name resmap[string(keyval_fields_type)] = kv; // read ':' type for(iter = linelist.begin(); iter != linelist.end(); ++iter){ first_pos = iter->find_first_of(":"); size_t last_pos = iter->find_last_of(":"); if(first_pos == string::npos){ continue; } string bucketname; string accesskey; string secret; if(first_pos != last_pos){ // formatted by "bucket:accesskey:secretkey" bucketname = trim(iter->substr(0, first_pos)); accesskey = trim(iter->substr(first_pos + 1, last_pos - first_pos - 1)); secret = trim(iter->substr(last_pos + 1, string::npos)); }else{ // formatted by "accesskey:secretkey" bucketname = allbucket_fields_type; accesskey = trim(iter->substr(0, first_pos)); secret = trim(iter->substr(first_pos + 1, string::npos)); } if(resmap.end() != resmap.find(bucketname)){ S3FS_PRN_EXIT("there are multiple entries for the same bucket(%s) in the passwd file.", (bucketname.empty() ? "default" : bucketname.c_str())); return -1; } kv.clear(); kv[string(aws_accesskeyid)] = accesskey; kv[string(aws_secretkey)] = secret; resmap[bucketname] = kv; } return (resmap.empty() ? 0 : 1); } // // Return: 1 - OK(could read and set accesskey etc.) 
// 0 - NG(could not read) // -1 - Should shutdown immediately // static int check_for_aws_format(const kvmap_t& kvmap) { string str1(aws_accesskeyid); string str2(aws_secretkey); if(kvmap.empty()){ return 0; } kvmap_t::const_iterator str1_it = kvmap.find(str1); kvmap_t::const_iterator str2_it = kvmap.find(str2); if(kvmap.end() == str1_it && kvmap.end() == str2_it){ return 0; } if(kvmap.end() == str1_it || kvmap.end() == str2_it){ S3FS_PRN_EXIT("AWSAccesskey or AWSSecretkey is not specified."); return -1; } if(!S3fsCurl::SetAccessKey(str1_it->second.c_str(), str2_it->second.c_str())){ S3FS_PRN_EXIT("failed to set access key/secret key."); return -1; } return 1; } // // check_passwd_file_perms // // expect that global passwd_file variable contains // a non-empty value and is readable by the current user // // Check for too permissive access to the file // help save users from themselves via a security hole // // only two options: return or error out // static int check_passwd_file_perms() { struct stat info; // let's get the file info if(stat(passwd_file.c_str(), &info) != 0){ S3FS_PRN_EXIT("unexpected error from stat(%s).", passwd_file.c_str()); return EXIT_FAILURE; } // return error if any file has others permissions if( (info.st_mode & S_IROTH) || (info.st_mode & S_IWOTH) || (info.st_mode & S_IXOTH)) { S3FS_PRN_EXIT("credentials file %s should not have others permissions.", passwd_file.c_str()); return EXIT_FAILURE; } // Any local file should not have any group permissions // /etc/passwd-s3fs can have group permissions if(passwd_file != "/etc/passwd-s3fs"){ if( (info.st_mode & S_IRGRP) || (info.st_mode & S_IWGRP) || (info.st_mode & S_IXGRP)) { S3FS_PRN_EXIT("credentials file %s should not have group permissions.", passwd_file.c_str()); return EXIT_FAILURE; } }else{ // "/etc/passwd-s3fs" does not allow group write. 
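  // For example (illustrative): a personal credentials file should be
  // chmod 600 (rw-------). Mode 640 is rejected here for a local file,
  // while /etc/passwd-s3fs may be 640, since for it only group write
  // (and any execute bit) is refused.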
if((info.st_mode & S_IWGRP)){ S3FS_PRN_EXIT("credentials file %s should not have group writable permissions.", passwd_file.c_str()); return EXIT_FAILURE; } } if((info.st_mode & S_IXUSR) || (info.st_mode & S_IXGRP)){ S3FS_PRN_EXIT("credentials file %s should not have executable permissions.", passwd_file.c_str()); return EXIT_FAILURE; } return EXIT_SUCCESS; } static int read_aws_credentials_file(const std::string &filename) { // open passwd file ifstream PF(filename.c_str()); if(!PF.good()){ return -1; } string profile; string accesskey; string secret; string session_token; // read each line string line; while(getline(PF, line)){ line = trim(line); if(line.empty()){ continue; } if('#' == line[0]){ continue; } if(line.size() > 2 && line[0] == '[' && line[line.size() - 1] == ']') { if(profile == aws_profile){ break; } profile = line.substr(1, line.size() - 2); accesskey.clear(); secret.clear(); session_token.clear(); } size_t pos = line.find_first_of('='); if(pos == string::npos){ continue; } string key = trim(line.substr(0, pos)); string value = trim(line.substr(pos + 1, string::npos)); if(key == "aws_access_key_id"){ accesskey = value; }else if(key == "aws_secret_access_key"){ secret = value; }else if(key == "aws_session_token"){ session_token = value; } } if(profile != aws_profile){ return EXIT_FAILURE; } if (session_token.empty()) { if (is_use_session_token) { S3FS_PRN_EXIT("AWS session token was expected but wasn't provided in aws/credentials file for profile: %s.", aws_profile.c_str()); return EXIT_FAILURE; } if(!S3fsCurl::SetAccessKey(accesskey.c_str(), secret.c_str())){ S3FS_PRN_EXIT("failed to set internal data for access key/secret key from aws credential file."); return EXIT_FAILURE; } } else { if (!S3fsCurl::SetAccessKeyWithSessionToken(accesskey.c_str(), secret.c_str(), session_token.c_str())) { S3FS_PRN_EXIT("session token is invalid."); return EXIT_FAILURE; } } return EXIT_SUCCESS; } // // read_passwd_file // // Support for per bucket credentials // // Format for the credentials file: // [bucket:]AccessKeyId:SecretAccessKey // // Lines beginning with # are considered comments // and ignored, as are empty lines // // Uncommented lines without the ":" character are flagged as // an error, so are lines with spaces or tabs // // only one default key pair is allowed, but not required // static int read_passwd_file() { bucketkvmap_t bucketmap; kvmap_t keyval; int result; // if you got here, the password file // exists and is readable by the // current user, check for permissions if(EXIT_SUCCESS != check_passwd_file_perms()){ return EXIT_FAILURE; } // // parse passwd file // result = parse_passwd_file(bucketmap); if(-1 == result){ return EXIT_FAILURE; } // // check key=value type format. 
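  // A hedged example of the file being interpreted here (keys are the
  // standard AWS documentation examples, not real credentials):
  //
  //   # default credentials (format 1)
  //   AKIAIOSFODNN7EXAMPLE:wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
  //   # per-bucket credentials (format 2)
  //   mybucket:AKIAIOSFODNN7EXAMPLE:wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
  //
  // parse_passwd_file() stores format 1 under the "all buckets" key and
  // format 2 under "mybucket"; the lookup below prefers the bucket entry.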
// bucketkvmap_t::iterator it = bucketmap.find(keyval_fields_type); if(bucketmap.end() != it){ // aws format result = check_for_aws_format(it->second); if(-1 == result){ return EXIT_FAILURE; }else if(1 == result){ // success to set return EXIT_SUCCESS; } } string bucket_key = allbucket_fields_type; if(!bucket.empty() && bucketmap.end() != bucketmap.find(bucket)){ bucket_key = bucket; } it = bucketmap.find(bucket_key); if(bucketmap.end() == it){ S3FS_PRN_EXIT("Not found access key/secret key in passwd file."); return EXIT_FAILURE; } keyval = it->second; kvmap_t::iterator aws_accesskeyid_it = keyval.find(aws_accesskeyid); kvmap_t::iterator aws_secretkey_it = keyval.find(aws_secretkey); if(keyval.end() == aws_accesskeyid_it || keyval.end() == aws_secretkey_it){ S3FS_PRN_EXIT("Not found access key/secret key in passwd file."); return EXIT_FAILURE; } if(!S3fsCurl::SetAccessKey(aws_accesskeyid_it->second.c_str(), aws_secretkey_it->second.c_str())){ S3FS_PRN_EXIT("failed to set internal data for access key/secret key from passwd file."); return EXIT_FAILURE; } return EXIT_SUCCESS; } // // get_access_keys // // called only when were are not mounting a // public bucket // // Here is the order precedence for getting the // keys: // // 1 - from the command line (security risk) // 2 - from a password file specified on the command line // 3 - from environment variables // 3a - from the AWS_CREDENTIAL_FILE environment variable // 3b - from ${HOME}/.aws/credentials // 4 - from the users ~/.passwd-s3fs // 5 - from /etc/passwd-s3fs // static int get_access_keys() { // should be redundant if(S3fsCurl::IsPublicBucket()){ return EXIT_SUCCESS; } // access key loading is deferred if(load_iamrole || is_ecs){ return EXIT_SUCCESS; } // 1 - keys specified on the command line if(S3fsCurl::IsSetAccessKeys()){ return EXIT_SUCCESS; } // 2 - was specified on the command line if(!passwd_file.empty()){ ifstream PF(passwd_file.c_str()); if(PF.good()){ PF.close(); return read_passwd_file(); }else{ S3FS_PRN_EXIT("specified passwd_file is not readable."); return EXIT_FAILURE; } } // 3 - environment variables char* AWSACCESSKEYID = getenv("AWSACCESSKEYID"); char* AWSSECRETACCESSKEY = getenv("AWSSECRETACCESSKEY"); char* AWSSESSIONTOKEN = getenv("AWSSESSIONTOKEN"); if(AWSACCESSKEYID != NULL || AWSSECRETACCESSKEY != NULL){ if( (AWSACCESSKEYID == NULL && AWSSECRETACCESSKEY != NULL) || (AWSACCESSKEYID != NULL && AWSSECRETACCESSKEY == NULL) ){ S3FS_PRN_EXIT("if environment variable AWSACCESSKEYID is set then AWSSECRETACCESSKEY must be set too."); return EXIT_FAILURE; } S3FS_PRN_INFO2("access key from env variables"); if (AWSSESSIONTOKEN != NULL) { S3FS_PRN_INFO2("session token is available"); if (!S3fsCurl::SetAccessKeyWithSessionToken(AWSACCESSKEYID, AWSSECRETACCESSKEY, AWSSESSIONTOKEN)) { S3FS_PRN_EXIT("session token is invalid."); return EXIT_FAILURE; } } else { S3FS_PRN_INFO2("session token is not available"); if (is_use_session_token) { S3FS_PRN_EXIT("environment variable AWSSESSIONTOKEN is expected to be set."); return EXIT_FAILURE; } } if(!S3fsCurl::SetAccessKey(AWSACCESSKEYID, AWSSECRETACCESSKEY)){ S3FS_PRN_EXIT("if one access key is specified, both keys need to be specified."); return EXIT_FAILURE; } return EXIT_SUCCESS; } // 3a - from the AWS_CREDENTIAL_FILE environment variable char * AWS_CREDENTIAL_FILE; AWS_CREDENTIAL_FILE = getenv("AWS_CREDENTIAL_FILE"); if(AWS_CREDENTIAL_FILE != NULL){ passwd_file.assign(AWS_CREDENTIAL_FILE); if(!passwd_file.empty()){ ifstream PF(passwd_file.c_str()); if(PF.good()){ PF.close(); return 
read_passwd_file(); }else{ S3FS_PRN_EXIT("AWS_CREDENTIAL_FILE: \"%s\" is not readable.", passwd_file.c_str()); return EXIT_FAILURE; } } } // 3b - check ${HOME}/.aws/credentials std::string aws_credentials = std::string(getpwuid(getuid())->pw_dir) + "/.aws/credentials"; if(read_aws_credentials_file(aws_credentials) == EXIT_SUCCESS) { return EXIT_SUCCESS; }else if(aws_profile != "default"){ S3FS_PRN_EXIT("Could not find profile: %s in file: %s", aws_profile.c_str(), aws_credentials.c_str()); return EXIT_FAILURE; } // 4 - from the default location in the users home directory char * HOME; HOME = getenv ("HOME"); if(HOME != NULL){ passwd_file.assign(HOME); passwd_file.append("/.passwd-s3fs"); ifstream PF(passwd_file.c_str()); if(PF.good()){ PF.close(); if(EXIT_SUCCESS != read_passwd_file()){ return EXIT_FAILURE; } // It is possible that the user's file was there but // contained no key pairs i.e. commented out // in that case, go look in the final location if(S3fsCurl::IsSetAccessKeys()){ return EXIT_SUCCESS; } } } // 5 - from the system default location passwd_file.assign("/etc/passwd-s3fs"); ifstream PF(passwd_file.c_str()); if(PF.good()){ PF.close(); return read_passwd_file(); } S3FS_PRN_EXIT("could not determine how to establish security credentials."); return EXIT_FAILURE; } // // Check & Set attributes for mount point. // static bool set_mountpoint_attribute(struct stat& mpst) { mp_uid = geteuid(); mp_gid = getegid(); mp_mode = S_IFDIR | (allow_other ? (is_mp_umask ? (~mp_umask & (S_IRWXU | S_IRWXG | S_IRWXO)) : (S_IRWXU | S_IRWXG | S_IRWXO)) : S_IRWXU); S3FS_PRN_INFO2("PROC(uid=%u, gid=%u) - MountPoint(uid=%u, gid=%u, mode=%04o)", (unsigned int)mp_uid, (unsigned int)mp_gid, (unsigned int)(mpst.st_uid), (unsigned int)(mpst.st_gid), mpst.st_mode); // check owner if(0 == mp_uid || mpst.st_uid == mp_uid){ return true; } // check group permission if(mpst.st_gid == mp_gid || 1 == is_uid_include_group(mp_uid, mpst.st_gid)){ if(S_IRWXG == (mpst.st_mode & S_IRWXG)){ return true; } } // check other permission if(S_IRWXO == (mpst.st_mode & S_IRWXO)){ return true; } return false; } // // Set bucket and mount_prefix based on passed bucket name. // static int set_bucket(const char* arg) { char *bucket_name = (char*)arg; if(strstr(arg, ":")){ if(strstr(arg, "://")){ S3FS_PRN_EXIT("bucket name and path(\"%s\") is wrong, it must be \"bucket[:/path]\".", arg); return -1; } bucket = strtok(bucket_name, ":"); char* pmount_prefix = strtok(NULL, ""); if(pmount_prefix){ if(0 == strlen(pmount_prefix) || '/' != pmount_prefix[0]){ S3FS_PRN_EXIT("path(%s) must be prefix \"/\".", pmount_prefix); return -1; } mount_prefix = pmount_prefix; // remove trailing slash if(mount_prefix.at(mount_prefix.size() - 1) == '/'){ mount_prefix = mount_prefix.substr(0, mount_prefix.size() - 1); } } }else{ bucket = arg; } return 0; } // This is repeatedly called by the fuse option parser // if the key is equal to FUSE_OPT_KEY_OPT, it's an option passed in prefixed by // '-' or '--' e.g.: -f -d -ousecache=/tmp // // if the key is equal to FUSE_OPT_KEY_NONOPT, it's either the bucket name // or the mountpoint. 
// The bucket name will always come before the mountpoint
//
static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_args* outargs)
{
    int ret;
    if(key == FUSE_OPT_KEY_NONOPT){
        // the first NONOPT option is the bucket name
        if(bucket.empty()){
            if ((ret = set_bucket(arg))){
                return ret;
            }
            return 0;
        } else if (!strcmp(arg, "s3fs")) {
            return 0;
        }

        // the second NONOPT option is the mountpoint(not utility mode)
        if(mountpoint.empty() && NO_UTILITY_MODE == utility_mode){
            // save the mountpoint and do some basic error checking
            mountpoint = arg;
            struct stat stbuf;

            if(stat(arg, &stbuf) == -1){
                S3FS_PRN_EXIT("unable to access MOUNTPOINT %s: %s", mountpoint.c_str(), strerror(errno));
                return -1;
            }
            if(!(S_ISDIR(stbuf.st_mode))){
                S3FS_PRN_EXIT("MOUNTPOINT: %s is not a directory.", mountpoint.c_str());
                return -1;
            }
            if(!set_mountpoint_attribute(stbuf)){
                S3FS_PRN_EXIT("MOUNTPOINT: %s permission denied.", mountpoint.c_str());
                return -1;
            }

            if(!nonempty){
                struct dirent *ent;
                DIR *dp = opendir(mountpoint.c_str());
                if(dp == NULL){
                    S3FS_PRN_EXIT("failed to open MOUNTPOINT: %s: %s", mountpoint.c_str(), strerror(errno));
                    return -1;
                }
                while((ent = readdir(dp)) != NULL){
                    if(strcmp(ent->d_name, ".") != 0 && strcmp(ent->d_name, "..") != 0){
                        closedir(dp);
                        S3FS_PRN_EXIT("MOUNTPOINT directory %s is not empty. if you are sure this is safe, you can use the 'nonempty' mount option.", mountpoint.c_str());
                        return -1;
                    }
                }
                closedir(dp);
            }
            return 1;
        }

        // Unknown option
        if(NO_UTILITY_MODE == utility_mode){
            S3FS_PRN_EXIT("specified unknown third option(%s).", arg);
        }else{
            S3FS_PRN_EXIT("specified unknown second option(%s). you don't need to specify a second option(mountpoint) for utility mode(-u).", arg);
        }
        return -1;

    }else if(key == FUSE_OPT_KEY_OPT){
        if(0 == STR2NCMP(arg, "uid=")){
            s3fs_uid = get_uid(strchr(arg, '=') + sizeof(char));
            if(0 != geteuid() && 0 == s3fs_uid){
                S3FS_PRN_EXIT("only the root user can specify uid=0.");
                return -1;
            }
            is_s3fs_uid = true;
            return 1; // continue for fuse option
        }
        if(0 == STR2NCMP(arg, "gid=")){
            s3fs_gid = get_gid(strchr(arg, '=') + sizeof(char));
            if(0 != getegid() && 0 == s3fs_gid){
                S3FS_PRN_EXIT("only the root user can specify gid=0.");
                return -1;
            }
            is_s3fs_gid = true;
            return 1; // continue for fuse option
        }
        if(0 == STR2NCMP(arg, "umask=")){
            s3fs_umask = s3fs_strtoofft(strchr(arg, '=') + sizeof(char), /*base=*/ 8);
            s3fs_umask &= (S_IRWXU | S_IRWXG | S_IRWXO);
            is_s3fs_umask = true;
            return 1; // continue for fuse option
        }
        if(0 == strcmp(arg, "allow_other")){
            allow_other = true;
            return 1; // continue for fuse option
        }
        if(0 == STR2NCMP(arg, "mp_umask=")){
            mp_umask = s3fs_strtoofft(strchr(arg, '=') + sizeof(char), /*base=*/ 8);
            mp_umask &= (S_IRWXU | S_IRWXG | S_IRWXO);
            is_mp_umask = true;
            return 0;
        }
        if(0 == STR2NCMP(arg, "default_acl=")){
            const char* acl_string = strchr(arg, '=') + sizeof(char);
            acl_t acl = string_to_acl(acl_string);
            if(acl == INVALID_ACL){
                S3FS_PRN_EXIT("unknown value for default_acl: %s", acl_string);
                return -1;
            }
            S3fsCurl::SetDefaultAcl(acl);
            return 0;
        }
        if(0 == STR2NCMP(arg, "retries=")){
            S3fsCurl::SetRetries(static_cast<int>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char))));
            return 0;
        }
        if(0 == STR2NCMP(arg, "use_cache=")){
            FdManager::SetCacheDir(strchr(arg, '=') + sizeof(char));
            return 0;
        }
        if(0 == STR2NCMP(arg, "check_cache_dir_exist")){
            FdManager::SetCheckCacheDirExist(true);
            return 0;
        }
        if(0 == strcmp(arg, "del_cache")){
            is_remove_cache = true;
            return 0;
        }
        if(0 == STR2NCMP(arg, "multireq_max=")){
            int maxreq = static_cast<int>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
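            // An illustrative use (the value shown is not the default):
            //   s3fs mybucket /mnt/s3 -o multireq_max=30
            // bounds how many requests a single multi-request operation issues in parallel.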
S3fsCurl::SetMaxMultiRequest(maxreq); return 0; } if(0 == strcmp(arg, "nonempty")){ nonempty = true; return 1; // need to continue for fuse. } if(0 == strcmp(arg, "nomultipart")){ nomultipart = true; return 0; } // old format for storage_class if(0 == strcmp(arg, "use_rrs") || 0 == STR2NCMP(arg, "use_rrs=")){ off_t rrs = 1; // for an old format. if(0 == STR2NCMP(arg, "use_rrs=")){ rrs = s3fs_strtoofft(strchr(arg, '=') + sizeof(char)); } if(0 == rrs){ S3fsCurl::SetStorageClass(STANDARD); }else if(1 == rrs){ S3fsCurl::SetStorageClass(REDUCED_REDUNDANCY); }else{ S3FS_PRN_EXIT("poorly formed argument to option: use_rrs"); return -1; } return 0; } if(0 == STR2NCMP(arg, "storage_class=")){ const char *storage_class = strchr(arg, '=') + sizeof(char); if(0 == strcmp(storage_class, "standard")){ S3fsCurl::SetStorageClass(STANDARD); }else if(0 == strcmp(storage_class, "standard_ia")){ S3fsCurl::SetStorageClass(STANDARD_IA); }else if(0 == strcmp(storage_class, "onezone_ia")){ S3fsCurl::SetStorageClass(ONEZONE_IA); }else if(0 == strcmp(storage_class, "reduced_redundancy")){ S3fsCurl::SetStorageClass(REDUCED_REDUNDANCY); }else if(0 == strcmp(storage_class, "intelligent_tiering")){ S3fsCurl::SetStorageClass(INTELLIGENT_TIERING); }else{ S3FS_PRN_EXIT("unknown value for storage_class: %s", storage_class); return -1; } return 0; } // // [NOTE] // use_sse Set Server Side Encrypting type to SSE-S3 // use_sse=1 // use_sse=file Set Server Side Encrypting type to Custom key(SSE-C) and load custom keys // use_sse=custom(c):file // use_sse=custom(c) Set Server Side Encrypting type to Custom key(SSE-C) // use_sse=kmsid(k):kms-key-id Set Server Side Encrypting type to AWS Key Management key id(SSE-KMS) and load KMS id // use_sse=kmsid(k) Set Server Side Encrypting type to AWS Key Management key id(SSE-KMS) // // load_sse_c=file Load Server Side Encrypting custom keys // // AWSSSECKEYS Loading Environment for Server Side Encrypting custom keys // AWSSSEKMSID Loading Environment for Server Side Encrypting Key id // if(0 == STR2NCMP(arg, "use_sse")){ if(0 == strcmp(arg, "use_sse") || 0 == strcmp(arg, "use_sse=1")){ // use_sse=1 is old type parameter // sse type is SSE_S3 if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseS3Type()){ S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment."); return -1; } S3fsCurl::SetSseType(SSE_S3); }else if(0 == strcmp(arg, "use_sse=kmsid") || 0 == strcmp(arg, "use_sse=k")){ // sse type is SSE_KMS with out kmsid(expecting id is loaded by environment) if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseKmsType()){ S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment."); return -1; } if(!S3fsCurl::IsSetSseKmsId()){ S3FS_PRN_EXIT("use_sse=kms but not loaded kms id by environment."); return -1; } S3fsCurl::SetSseType(SSE_KMS); }else if(0 == STR2NCMP(arg, "use_sse=kmsid:") || 0 == STR2NCMP(arg, "use_sse=k:")){ // sse type is SSE_KMS with kmsid if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseKmsType()){ S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment."); return -1; } const char* kmsid; if(0 == STR2NCMP(arg, "use_sse=kmsid:")){ kmsid = &arg[strlen("use_sse=kmsid:")]; }else{ kmsid = &arg[strlen("use_sse=k:")]; } if(!S3fsCurl::SetSseKmsid(kmsid)){ S3FS_PRN_EXIT("failed to load use_sse kms id."); return -1; } S3fsCurl::SetSseType(SSE_KMS); }else if(0 == strcmp(arg, "use_sse=custom") || 0 == strcmp(arg, "use_sse=c")){ // sse type is SSE_C with out custom keys(expecting keys are loaded by environment 
or load_sse_c option)
            if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseCType()){
                S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment.");
                return -1;
            }
            // [NOTE]
            // do not check whether the custom keys exist here.
            //
            S3fsCurl::SetSseType(SSE_C);

        }else if(0 == STR2NCMP(arg, "use_sse=custom:") || 0 == STR2NCMP(arg, "use_sse=c:")){
            // sse type is SSE_C with custom keys
            if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseCType()){
                S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment.");
                return -1;
            }
            const char* ssecfile;
            if(0 == STR2NCMP(arg, "use_sse=custom:")){
                ssecfile = &arg[strlen("use_sse=custom:")];
            }else{
                ssecfile = &arg[strlen("use_sse=c:")];
            }
            if(!S3fsCurl::SetSseCKeys(ssecfile)){
                S3FS_PRN_EXIT("failed to load use_sse custom key file(%s).", ssecfile);
                return -1;
            }
            S3fsCurl::SetSseType(SSE_C);

        }else if(0 == strcmp(arg, "use_sse=")){
            // this type is old style(parameter is custom key file path)
            // SSE_C with custom keys.
            const char* ssecfile = &arg[strlen("use_sse=")];
            if(!S3fsCurl::SetSseCKeys(ssecfile)){
                S3FS_PRN_EXIT("failed to load use_sse custom key file(%s).", ssecfile);
                return -1;
            }
            S3fsCurl::SetSseType(SSE_C);

        }else{
            // never come here.
            S3FS_PRN_EXIT("invalid use_sse option.");
            return -1;
        }
        return 0;
    }
    // [NOTE]
    // Only load the SSE custom keys here; this can be set without setting the SSE type.
    if(0 == STR2NCMP(arg, "load_sse_c=")){
        const char* ssecfile = &arg[strlen("load_sse_c=")];
        if(!S3fsCurl::SetSseCKeys(ssecfile)){
            S3FS_PRN_EXIT("failed to load use_sse custom key file(%s).", ssecfile);
            return -1;
        }
        return 0;
    }
    if(0 == STR2NCMP(arg, "ssl_verify_hostname=")){
        long sslvh = static_cast<long>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
        if(-1 == S3fsCurl::SetSslVerifyHostname(sslvh)){
            S3FS_PRN_EXIT("poorly formed argument to option: ssl_verify_hostname.");
            return -1;
        }
        return 0;
    }
    if(0 == STR2NCMP(arg, "passwd_file=")){
        passwd_file = strchr(arg, '=') + sizeof(char);
        return 0;
    }
    if(0 == strcmp(arg, "ibm_iam_auth")){
        S3fsCurl::SetIsIBMIAMAuth(true);
        S3fsCurl::SetIAMCredentialsURL("https://iam.bluemix.net/oidc/token");
        S3fsCurl::SetIAMTokenField("access_token");
        S3fsCurl::SetIAMExpiryField("expiration");
        S3fsCurl::SetIAMFieldCount(2);
        is_ibm_iam_auth = true;
        return 0;
    }
    if (0 == STR2NCMP(arg, "use_session_token")) {
        is_use_session_token = true;
    }
    if(0 == STR2NCMP(arg, "ibm_iam_endpoint=")){
        std::string endpoint_url;
        std::string iam_endpoint = strchr(arg, '=') + sizeof(char);
        // Check url for http / https protocol string
        if((iam_endpoint.compare(0, 8, "https://") != 0) && (iam_endpoint.compare(0, 7, "http://") != 0)) {
            S3FS_PRN_EXIT("option ibm_iam_endpoint has invalid format, missing http / https protocol");
            return -1;
        }
        endpoint_url = iam_endpoint + "/oidc/token";
        S3fsCurl::SetIAMCredentialsURL(endpoint_url.c_str());
        return 0;
    }
    if(0 == strcmp(arg, "ecs")){
        if (is_ibm_iam_auth) {
            S3FS_PRN_EXIT("option ecs cannot be used in conjunction with ibm");
            return -1;
        }
        S3fsCurl::SetIsECS(true);
        S3fsCurl::SetIAMCredentialsURL("http://169.254.170.2");
        S3fsCurl::SetIAMFieldCount(5);
        is_ecs = true;
        return 0;
    }
    if(0 == STR2NCMP(arg, "iam_role")){
        if (is_ecs || is_ibm_iam_auth) {
            S3FS_PRN_EXIT("option iam_role cannot be used in conjunction with ecs or ibm");
            return -1;
        }
        if(0 == strcmp(arg, "iam_role") || 0 == strcmp(arg, "iam_role=auto")){
            // loading IAM role name in s3fs_init(), because we need to wait for curl to be initialized.
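            // For example (illustrative), both forms defer discovery of the
            // role name until s3fs_init():
            //   s3fs mybucket /mnt/s3 -o iam_role
            //   s3fs mybucket /mnt/s3 -o iam_role=auto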
            //
            load_iamrole = true;
            return 0;
        }else if(0 == STR2NCMP(arg, "iam_role=")){
            const char* role = strchr(arg, '=') + sizeof(char);
            S3fsCurl::SetIAMRole(role);
            load_iamrole = false;
            return 0;
        }
    }
    if(0 == STR2NCMP(arg, "profile=")){
        aws_profile = strchr(arg, '=') + sizeof(char);
        return 0;
    }
    if(0 == STR2NCMP(arg, "public_bucket=")){
        off_t pubbucket = s3fs_strtoofft(strchr(arg, '=') + sizeof(char));
        if(1 == pubbucket){
            S3fsCurl::SetPublicBucket(true);
            // [NOTE]
            // if the bucket is public(without credentials), S3 does not allow the copy api.
            // so s3fs sets nocopyapi mode.
            //
            nocopyapi = true;
        }else if(0 == pubbucket){
            S3fsCurl::SetPublicBucket(false);
        }else{
            S3FS_PRN_EXIT("poorly formed argument to option: public_bucket.");
            return -1;
        }
        return 0;
    }
    if(0 == STR2NCMP(arg, "bucket=")){
        std::string bname = strchr(arg, '=') + sizeof(char);
        if ((ret = set_bucket(bname.c_str()))){
            return ret;
        }
        return 0;
    }
    if(0 == strcmp(arg, "no_check_certificate")){
        S3fsCurl::SetCheckCertificate(false);
        return 0;
    }
    if(0 == STR2NCMP(arg, "connect_timeout=")){
        long contimeout = static_cast<long>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
        S3fsCurl::SetConnectTimeout(contimeout);
        return 0;
    }
    if(0 == STR2NCMP(arg, "readwrite_timeout=")){
        time_t rwtimeout = static_cast<time_t>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
        S3fsCurl::SetReadwriteTimeout(rwtimeout);
        return 0;
    }
    if(0 == STR2NCMP(arg, "list_object_max_keys=")){
        int max_keys = static_cast<int>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
        if(max_keys < 1000){
            S3FS_PRN_EXIT("argument should be at least 1000: list_object_max_keys");
            return -1;
        }
        max_keys_list_object = max_keys;
        return 0;
    }
    if(0 == STR2NCMP(arg, "max_stat_cache_size=")){
        unsigned long cache_size = static_cast<unsigned long>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
        StatCache::getStatCacheData()->SetCacheSize(cache_size);
        return 0;
    }
    if(0 == STR2NCMP(arg, "stat_cache_expire=")){
        time_t expr_time = static_cast<time_t>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
        StatCache::getStatCacheData()->SetExpireTime(expr_time);
        return 0;
    }
    // [NOTE]
    // This option is kept for compatibility with old versions.
if(0 == STR2NCMP(arg, "stat_cache_interval_expire=")){ time_t expr_time = static_cast(s3fs_strtoofft(strchr(arg, '=') + sizeof(char))); StatCache::getStatCacheData()->SetExpireTime(expr_time, true); return 0; } if(0 == strcmp(arg, "enable_noobj_cache")){ StatCache::getStatCacheData()->EnableCacheNoObject(); return 0; } if(0 == strcmp(arg, "nodnscache")){ S3fsCurl::SetDnsCache(false); return 0; } if(0 == strcmp(arg, "nosscache")){ S3fsCurl::SetSslSessionCache(false); return 0; } if(0 == STR2NCMP(arg, "parallel_count=") || 0 == STR2NCMP(arg, "parallel_upload=")){ int maxpara = static_cast(s3fs_strtoofft(strchr(arg, '=') + sizeof(char))); if(0 >= maxpara){ S3FS_PRN_EXIT("argument should be over 1: parallel_count"); return -1; } S3fsCurl::SetMaxParallelCount(maxpara); return 0; } if(0 == STR2NCMP(arg, "fd_page_size=")){ S3FS_PRN_ERR("option fd_page_size is no longer supported, so skip this option."); return 0; } if(0 == STR2NCMP(arg, "multipart_size=")){ off_t size = static_cast(s3fs_strtoofft(strchr(arg, '=') + sizeof(char))); if(!S3fsCurl::SetMultipartSize(size)){ S3FS_PRN_EXIT("multipart_size option must be at least 5 MB."); return -1; } return 0; } if(0 == STR2NCMP(arg, "ensure_diskfree=")){ off_t dfsize = s3fs_strtoofft(strchr(arg, '=') + sizeof(char)) * 1024 * 1024; if(dfsize < S3fsCurl::GetMultipartSize()){ S3FS_PRN_WARN("specified size to ensure disk free space is smaller than multipart size, so set multipart size to it."); dfsize = S3fsCurl::GetMultipartSize(); } FdManager::SetEnsureFreeDiskSpace(dfsize); return 0; } if(0 == STR2NCMP(arg, "singlepart_copy_limit=")){ singlepart_copy_limit = static_cast(s3fs_strtoofft(strchr(arg, '=') + sizeof(char))) * 1024; return 0; } if(0 == STR2NCMP(arg, "ahbe_conf=")){ string ahbe_conf = strchr(arg, '=') + sizeof(char); if(!AdditionalHeader::get()->Load(ahbe_conf.c_str())){ S3FS_PRN_EXIT("failed to load ahbe_conf file(%s).", ahbe_conf.c_str()); return -1; } AdditionalHeader::get()->Dump(); return 0; } if(0 == strcmp(arg, "noxmlns")){ noxmlns = true; return 0; } if(0 == strcmp(arg, "nomixupload")){ FdEntity::SetNoMixMultipart(); return 0; } if(0 == strcmp(arg, "nocopyapi")){ nocopyapi = true; return 0; } if(0 == strcmp(arg, "norenameapi")){ norenameapi = true; return 0; } if(0 == strcmp(arg, "complement_stat")){ complement_stat = true; return 0; } if(0 == strcmp(arg, "notsup_compat_dir")){ support_compat_dir = false; return 0; } if(0 == strcmp(arg, "enable_content_md5")){ S3fsCurl::SetContentMd5(true); return 0; } if(0 == STR2NCMP(arg, "host=")){ host = strchr(arg, '=') + sizeof(char); return 0; } if(0 == STR2NCMP(arg, "servicepath=")){ service_path = strchr(arg, '=') + sizeof(char); return 0; } if(0 == STR2NCMP(arg, "url=")){ host = strchr(arg, '=') + sizeof(char); // strip the trailing '/', if any, off the end of the host // string size_t found, length; found = host.find_last_of('/'); length = host.length(); while(found == (length - 1) && length > 0){ host.erase(found); found = host.find_last_of('/'); length = host.length(); } // Check url for http / https protocol string if((host.compare(0, 8, "https://") != 0) && (host.compare(0, 7, "http://") != 0)) { S3FS_PRN_EXIT("option url has invalid format, missing http / https protocol"); return -1; } return 0; } if(0 == strcmp(arg, "sigv2")){ S3fsCurl::SetSignatureV4(false); return 0; } if(0 == strcmp(arg, "createbucket")){ create_bucket = true; return 0; } if(0 == STR2NCMP(arg, "endpoint=")){ endpoint = strchr(arg, '=') + sizeof(char); is_specified_endpoint = true; return 0; } if(0 == strcmp(arg, 
"use_path_request_style")){ pathrequeststyle = true; return 0; } if(0 == STR2NCMP(arg, "noua")){ S3fsCurl::SetUserAgentFlag(false); return 0; } if(0 == strcmp(arg, "use_xattr")){ is_use_xattr = true; return 0; }else if(0 == STR2NCMP(arg, "use_xattr=")){ const char* strflag = strchr(arg, '=') + sizeof(char); if(0 == strcmp(strflag, "1")){ is_use_xattr = true; }else if(0 == strcmp(strflag, "0")){ is_use_xattr = false; }else{ S3FS_PRN_EXIT("option use_xattr has unknown parameter(%s).", strflag); return -1; } return 0; } if(0 == STR2NCMP(arg, "cipher_suites=")){ cipher_suites = strchr(arg, '=') + sizeof(char); return 0; } if(0 == STR2NCMP(arg, "instance_name=")){ instance_name = strchr(arg, '=') + sizeof(char); instance_name = "[" + instance_name + "]"; return 0; } // // debug option for s3fs // if(0 == STR2NCMP(arg, "dbglevel=")){ const char* strlevel = strchr(arg, '=') + sizeof(char); if(0 == strcasecmp(strlevel, "silent") || 0 == strcasecmp(strlevel, "critical") || 0 == strcasecmp(strlevel, "crit")){ set_s3fs_log_level(S3FS_LOG_CRIT); }else if(0 == strcasecmp(strlevel, "error") || 0 == strcasecmp(strlevel, "err")){ set_s3fs_log_level(S3FS_LOG_ERR); }else if(0 == strcasecmp(strlevel, "wan") || 0 == strcasecmp(strlevel, "warn") || 0 == strcasecmp(strlevel, "warning")){ set_s3fs_log_level(S3FS_LOG_WARN); }else if(0 == strcasecmp(strlevel, "inf") || 0 == strcasecmp(strlevel, "info") || 0 == strcasecmp(strlevel, "information")){ set_s3fs_log_level(S3FS_LOG_INFO); }else if(0 == strcasecmp(strlevel, "dbg") || 0 == strcasecmp(strlevel, "debug")){ set_s3fs_log_level(S3FS_LOG_DBG); }else{ S3FS_PRN_EXIT("option dbglevel has unknown parameter(%s).", strlevel); return -1; } return 0; } // // debug option // // debug_level is S3FS_LOG_INFO, after second -d is passed to fuse. // if(0 == strcmp(arg, "-d") || 0 == strcmp(arg, "--debug")){ if(!IS_S3FS_LOG_INFO() && !IS_S3FS_LOG_DBG()){ set_s3fs_log_level(S3FS_LOG_INFO); return 0; } if(0 == strcmp(arg, "--debug")){ // fuse doesn't understand "--debug", but it understands -d. // but we can't pass -d back to fuse. return 0; } } // "f2" is not used no more. // (set S3FS_LOG_DBG) if(0 == strcmp(arg, "f2")){ set_s3fs_log_level(S3FS_LOG_DBG); return 0; } if(0 == strcmp(arg, "curldbg")){ S3fsCurl::SetVerbose(true); return 0; } if(0 == STR2NCMP(arg, "accessKeyId=")){ S3FS_PRN_EXIT("option accessKeyId is no longer supported."); return -1; } if(0 == STR2NCMP(arg, "secretAccessKey=")){ S3FS_PRN_EXIT("option secretAccessKey is no longer supported."); return -1; } if(0 == strcmp(arg, "use_wtf8")){ use_wtf8 = true; return 0; } if(0 == strcmp(arg, "requester_pays")){ S3fsCurl::SetRequesterPays(true); return 0; } // [NOTE] // following option will be discarding, because these are not for fuse. // (Referenced sshfs.c) // if(0 == strcmp(arg, "auto") || 0 == strcmp(arg, "noauto") || 0 == strcmp(arg, "user") || 0 == strcmp(arg, "nouser") || 0 == strcmp(arg, "users") || 0 == strcmp(arg, "_netdev")) { return 0; } } return 1; } int main(int argc, char* argv[]) { int ch; int fuse_res; int option_index = 0; struct fuse_operations s3fs_oper; time_t incomp_abort_time = (24 * 60 * 60); static const struct option long_opts[] = { {"help", no_argument, NULL, 'h'}, {"version", no_argument, 0, 0}, {"debug", no_argument, NULL, 'd'}, {"incomplete-mpu-list", no_argument, NULL, 'u'}, {"incomplete-mpu-abort", optional_argument, NULL, 'a'}, // 'a' is only identifier and is not option. 
{NULL, 0, NULL, 0} }; // init syslog(default CRIT) openlog("s3fs", LOG_PID | LOG_ODELAY | LOG_NOWAIT, LOG_USER); set_s3fs_log_level(debug_level); // init xml2 xmlInitParser(); LIBXML_TEST_VERSION init_sysconf_vars(); // get program name - emulate basename program_name.assign(argv[0]); size_t found = program_name.find_last_of('/'); if(found != string::npos){ program_name.replace(0, found+1, ""); } while((ch = getopt_long(argc, argv, "dho:fsu", long_opts, &option_index)) != -1){ switch(ch){ case 0: if(strcmp(long_opts[option_index].name, "version") == 0){ show_version(); exit(EXIT_SUCCESS); } break; case 'h': show_help(); exit(EXIT_SUCCESS); case 'o': break; case 'd': break; case 'f': foreground = true; break; case 's': break; case 'u': // --incomplete-mpu-list if(NO_UTILITY_MODE != utility_mode){ S3FS_PRN_EXIT("already utility mode option is specified."); exit(EXIT_FAILURE); } utility_mode = INCOMP_TYPE_LIST; break; case 'a': // --incomplete-mpu-abort if(NO_UTILITY_MODE != utility_mode){ S3FS_PRN_EXIT("already utility mode option is specified."); exit(EXIT_FAILURE); } utility_mode = INCOMP_TYPE_ABORT; // check expire argument if(NULL != optarg && 0 == strcasecmp(optarg, "all")){ // all is 0s incomp_abort_time = 0; }else if(NULL != optarg){ if(!convert_unixtime_from_option_arg(optarg, incomp_abort_time)){ S3FS_PRN_EXIT("--incomplete-mpu-abort option argument is wrong."); exit(EXIT_FAILURE); } } // if optarg is null, incomp_abort_time is 24H(default) break; default: exit(EXIT_FAILURE); } } // Load SSE environment if(!S3fsCurl::LoadEnvSse()){ S3FS_PRN_EXIT("something wrong about SSE environment."); exit(EXIT_FAILURE); } // ssl init if(!s3fs_init_global_ssl()){ S3FS_PRN_EXIT("could not initialize for ssl libraries."); exit(EXIT_FAILURE); } // init curl if(!S3fsCurl::InitS3fsCurl("/etc/mime.types")){ S3FS_PRN_EXIT("Could not initiate curl library."); s3fs_destroy_global_ssl(); exit(EXIT_FAILURE); } // clear this structure memset(&s3fs_oper, 0, sizeof(s3fs_oper)); // This is the fuse-style parser for the arguments // after which the bucket name and mountpoint names // should have been set struct fuse_args custom_args = FUSE_ARGS_INIT(argc, argv); if(0 != fuse_opt_parse(&custom_args, NULL, NULL, my_fuse_opt_proc)){ S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); exit(EXIT_FAILURE); } // [NOTE] // exclusive option check here. 
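// For example (illustrative), the first check below rejects this combination:
//   s3fs mybucket /mnt/s3 -o storage_class=reduced_redundancy -o use_sse
//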
// if(REDUCED_REDUNDANCY == S3fsCurl::GetStorageClass() && !S3fsCurl::IsSseDisable()){ S3FS_PRN_EXIT("use_sse option could not be specified with storage class reduced_redundancy."); S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); exit(EXIT_FAILURE); } if(!S3fsCurl::FinalCheckSse()){ S3FS_PRN_EXIT("something wrong about SSE options."); S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); exit(EXIT_FAILURE); } // The first plain argument is the bucket if(bucket.empty()){ S3FS_PRN_EXIT("missing BUCKET argument."); show_usage(); S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); exit(EXIT_FAILURE); } // bucket names cannot contain upper case characters in virtual-hosted style if((!pathrequeststyle) && (lower(bucket) != bucket)){ S3FS_PRN_EXIT("BUCKET %s, name not compatible with virtual-hosted style.", bucket.c_str()); S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); exit(EXIT_FAILURE); } // check bucket name for illegal characters found = bucket.find_first_of("/:\\;!@#$%^&*?|+="); if(found != string::npos){ S3FS_PRN_EXIT("BUCKET %s -- bucket name contains an illegal character.", bucket.c_str()); S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); exit(EXIT_FAILURE); } if(!pathrequeststyle && STR2NCMP(host.c_str(), "https://") == 0 && bucket.find_first_of('.') != string::npos) { S3FS_PRN_EXIT("BUCKET %s -- cannot mount bucket with . while using HTTPS without use_path_request_style", bucket.c_str()); S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); exit(EXIT_FAILURE); } // The second plain argument is the mountpoint // if the option was given, we all ready checked for a // readable, non-empty directory, this checks determines // if the mountpoint option was ever supplied if(NO_UTILITY_MODE == utility_mode){ if(mountpoint.empty()){ S3FS_PRN_EXIT("missing MOUNTPOINT argument."); show_usage(); S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); exit(EXIT_FAILURE); } } // error checking of command line arguments for compatibility if(S3fsCurl::IsPublicBucket() && S3fsCurl::IsSetAccessKeys()){ S3FS_PRN_EXIT("specifying both public_bucket and the access keys options is invalid."); S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); exit(EXIT_FAILURE); } if(!passwd_file.empty() && S3fsCurl::IsSetAccessKeys()){ S3FS_PRN_EXIT("specifying both passwd_file and the access keys options is invalid."); S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); exit(EXIT_FAILURE); } if(!S3fsCurl::IsPublicBucket() && !load_iamrole && !is_ecs){ if(EXIT_SUCCESS != get_access_keys()){ S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); exit(EXIT_FAILURE); } if(!S3fsCurl::IsSetAccessKeys()){ S3FS_PRN_EXIT("could not establish security credentials, check documentation."); S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); exit(EXIT_FAILURE); } // More error checking on the access key pair can be done // like checking for appropriate lengths and characters } // check cache dir permission if(!FdManager::CheckCacheDirExist() || !FdManager::CheckCacheTopDir() || !CacheFileStat::CheckCacheFileStatTopDir()){ S3FS_PRN_EXIT("could not allow cache directory permission, check permission of cache directories."); S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); exit(EXIT_FAILURE); } // check IBM IAM requirements if(is_ibm_iam_auth){ // check that default ACL is either public-read or private acl_t defaultACL = S3fsCurl::GetDefaultAcl(); if(defaultACL != PRIVATE && defaultACL != PUBLIC_READ){ S3FS_PRN_EXIT("can only use 'public-read' or 'private' ACL while using 
ibm_iam_auth"); S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); exit(EXIT_FAILURE); } if(create_bucket && !S3fsCurl::IsSetAccessKeyID()){ S3FS_PRN_EXIT("missing service instance ID for bucket creation"); S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); exit(EXIT_FAILURE); } } // set user agent S3fsCurl::InitUserAgent(); // There's room for more command line error checking // Check to see if the bucket name contains periods and https (SSL) is // being used. This is a known limitation: // https://docs.amazonwebservices.com/AmazonS3/latest/dev/ // The Developers Guide suggests that either use HTTP of for us to write // our own certificate verification logic. // For now, this will be unsupported unless we get a request for it to // be supported. In that case, we have a couple of options: // - implement a command line option that bypasses the verify host // but doesn't bypass verifying the certificate // - write our own host verification (this might be complex) // See issue #128strncasecmp /* if(1 == S3fsCurl::GetSslVerifyHostname()){ found = bucket.find_first_of("."); if(found != string::npos){ found = host.find("https:"); if(found != string::npos){ S3FS_PRN_EXIT("Using https and a bucket name with periods is unsupported."); exit(1); } } } */ if(NO_UTILITY_MODE != utility_mode){ int exitcode = s3fs_utility_processing(incomp_abort_time); S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); exit(exitcode); } // Check multipart / copy api for mix multipart uploading if(nomultipart || nocopyapi || norenameapi){ FdEntity::SetNoMixMultipart(); } // check free disk space if(!FdManager::IsSafeDiskSpace(NULL, S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount())){ S3FS_PRN_EXIT("There is no enough disk space for used as cache(or temporary) directory by s3fs."); S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); exit(EXIT_FAILURE); } s3fs_oper.getattr = s3fs_getattr; s3fs_oper.readlink = s3fs_readlink; s3fs_oper.mknod = s3fs_mknod; s3fs_oper.mkdir = s3fs_mkdir; s3fs_oper.unlink = s3fs_unlink; s3fs_oper.rmdir = s3fs_rmdir; s3fs_oper.symlink = s3fs_symlink; s3fs_oper.rename = s3fs_rename; s3fs_oper.link = s3fs_link; if(!nocopyapi){ s3fs_oper.chmod = s3fs_chmod; s3fs_oper.chown = s3fs_chown; s3fs_oper.utimens = s3fs_utimens; }else{ s3fs_oper.chmod = s3fs_chmod_nocopy; s3fs_oper.chown = s3fs_chown_nocopy; s3fs_oper.utimens = s3fs_utimens_nocopy; } s3fs_oper.truncate = s3fs_truncate; s3fs_oper.open = s3fs_open; s3fs_oper.read = s3fs_read; s3fs_oper.write = s3fs_write; s3fs_oper.statfs = s3fs_statfs; s3fs_oper.flush = s3fs_flush; s3fs_oper.fsync = s3fs_fsync; s3fs_oper.release = s3fs_release; s3fs_oper.opendir = s3fs_opendir; s3fs_oper.readdir = s3fs_readdir; s3fs_oper.init = s3fs_init; s3fs_oper.destroy = s3fs_destroy; s3fs_oper.access = s3fs_access; s3fs_oper.create = s3fs_create; // extended attributes if(is_use_xattr){ s3fs_oper.setxattr = s3fs_setxattr; s3fs_oper.getxattr = s3fs_getxattr; s3fs_oper.listxattr = s3fs_listxattr; s3fs_oper.removexattr = s3fs_removexattr; } // set signal handler for debugging if(!set_s3fs_usr2_handler()){ S3FS_PRN_EXIT("could not set signal handler for SIGUSR2."); S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); exit(EXIT_FAILURE); } // now passing things off to fuse, fuse will finish evaluating the command line args fuse_res = fuse_main(custom_args.argc, custom_args.argv, &s3fs_oper, NULL); fuse_opt_free_args(&custom_args); // Destroy curl if(!S3fsCurl::DestroyS3fsCurl()){ S3FS_PRN_WARN("Could not release curl library."); } 
s3fs_destroy_global_ssl(); // cleanup xml2 xmlCleanupParser(); S3FS_MALLOCTRIM(0); exit(fuse_res); } /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ s3fs-fuse-1.86/src/s3fs.h000066400000000000000000000053521361654130000151100ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Randy Rizun * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef S3FS_S3_H_ #define S3FS_S3_H_ #define FUSE_USE_VERSION 26 static const int64_t FIVE_GB = 5LL * 1024LL * 1024LL * 1024LL; #include #define S3FS_FUSE_EXIT() \ do{ \ struct fuse_context* pcxt = fuse_get_context(); \ if(pcxt){ \ fuse_exit(pcxt->fuse); \ } \ }while(0) // [NOTE] // s3fs use many small allocated chunk in heap area for stats // cache and parsing xml, etc. The OS may decide that giving // this little memory back to the kernel will cause too much // overhead and delay the operation. // Address of gratitude, this workaround quotes a document of // libxml2.( http://xmlsoft.org/xmlmem.html ) // // When valgrind is used to test memory leak of s3fs, a large // amount of chunk may be reported. You can check the memory // release accurately by defining the S3FS_MALLOC_TRIM flag // and building it. Also, when executing s3fs, you can define // the MMAP_THRESHOLD environment variable and check more // accurate memory leak.( see, man 3 free ) // #ifdef S3FS_MALLOC_TRIM #ifdef HAVE_MALLOC_TRIM #include #define S3FS_MALLOCTRIM(pad) malloc_trim(pad) #else // HAVE_MALLOC_TRIM #define S3FS_MALLOCTRIM(pad) #endif // HAVE_MALLOC_TRIM #else // S3FS_MALLOC_TRIM #define S3FS_MALLOCTRIM(pad) #endif // S3FS_MALLOC_TRIM #define S3FS_XMLFREEDOC(doc) \ do{ \ xmlFreeDoc(doc); \ S3FS_MALLOCTRIM(0); \ }while(0) #define S3FS_XMLFREE(ptr) \ do{ \ xmlFree(ptr); \ S3FS_MALLOCTRIM(0); \ }while(0) #define S3FS_XMLXPATHFREECONTEXT(ctx) \ do{ \ xmlXPathFreeContext(ctx); \ S3FS_MALLOCTRIM(0); \ }while(0) #define S3FS_XMLXPATHFREEOBJECT(obj) \ do{ \ xmlXPathFreeObject(obj); \ S3FS_MALLOCTRIM(0); \ }while(0) #endif // S3FS_S3_H_ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ s3fs-fuse-1.86/src/s3fs_auth.h000066400000000000000000000043321361654130000161260ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Randy Rizun * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef S3FS_AUTH_H_ #define S3FS_AUTH_H_ #include #include //------------------------------------------------------------------- // Utility functions for Authentication //------------------------------------------------------------------- // // in common_auth.cpp // std::string s3fs_get_content_md5(int fd); std::string s3fs_md5sum(int fd, off_t start, ssize_t size); std::string s3fs_sha256sum(int fd, off_t start, ssize_t size); // // in xxxxxx_auth.cpp // const char* s3fs_crypt_lib_name(void); bool s3fs_init_global_ssl(void); bool s3fs_destroy_global_ssl(void); bool s3fs_init_crypt_mutex(void); bool s3fs_destroy_crypt_mutex(void); bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen); bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen); size_t get_md5_digest_length(void); unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size); bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen); size_t get_sha256_digest_length(void); unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size); #endif // S3FS_AUTH_H_ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ s3fs-fuse-1.86/src/s3fs_util.cpp000066400000000000000000001427421361654130000165050ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Takeshi Nakatani * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #include "s3fs_util.h" #include "string_util.h" #include "s3fs.h" #include "s3fs_auth.h" using namespace std; //------------------------------------------------------------------- // Global variables //------------------------------------------------------------------- std::string mount_prefix; static size_t max_password_size; static size_t max_group_name_length; //------------------------------------------------------------------- // Utility //------------------------------------------------------------------- string get_realpath(const char *path) { string realpath = mount_prefix; realpath += path; return realpath; } //------------------------------------------------------------------- // Class S3ObjList //------------------------------------------------------------------- // New class S3ObjList is base on old s3_object struct. // This class is for S3 compatible clients. // // If name is terminated by "/", it is forced dir type. // If name is terminated by "_$folder$", it is forced dir type. // If is_dir is true and name is not terminated by "/", the name is added "/". // bool S3ObjList::insert(const char* name, const char* etag, bool is_dir) { if(!name || '\0' == name[0]){ return false; } s3obj_t::iterator iter; string newname; string orgname = name; // Normalization string::size_type pos = orgname.find("_$folder$"); if(string::npos != pos){ newname = orgname.substr(0, pos); is_dir = true; }else{ newname = orgname; } if(is_dir){ if('/' != newname[newname.length() - 1]){ newname += "/"; } }else{ if('/' == newname[newname.length() - 1]){ is_dir = true; } } // Check derived name object. if(is_dir){ string chkname = newname.substr(0, newname.length() - 1); if(objects.end() != (iter = objects.find(chkname))){ // found "dir" object --> remove it. objects.erase(iter); } }else{ string chkname = newname + "/"; if(objects.end() != (iter = objects.find(chkname))){ // found "dir/" object --> not add new object. // and add normalization return insert_normalized(orgname.c_str(), chkname.c_str(), true); } } // Add object if(objects.end() != (iter = objects.find(newname))){ // Found same object --> update information. 
(*iter).second.normalname.erase(); (*iter).second.orgname = orgname; (*iter).second.is_dir = is_dir; if(etag){ (*iter).second.etag = string(etag); // over write } }else{ // add new object s3obj_entry newobject; newobject.orgname = orgname; newobject.is_dir = is_dir; if(etag){ newobject.etag = etag; } objects[newname] = newobject; } // add normalization return insert_normalized(orgname.c_str(), newname.c_str(), is_dir); } bool S3ObjList::insert_normalized(const char* name, const char* normalized, bool is_dir) { if(!name || '\0' == name[0] || !normalized || '\0' == normalized[0]){ return false; } if(0 == strcmp(name, normalized)){ return true; } s3obj_t::iterator iter; if(objects.end() != (iter = objects.find(name))){ // found name --> over write iter->second.orgname.erase(); iter->second.etag.erase(); iter->second.normalname = normalized; iter->second.is_dir = is_dir; }else{ // not found --> add new object s3obj_entry newobject; newobject.normalname = normalized; newobject.is_dir = is_dir; objects[name] = newobject; } return true; } const s3obj_entry* S3ObjList::GetS3Obj(const char* name) const { s3obj_t::const_iterator iter; if(!name || '\0' == name[0]){ return NULL; } if(objects.end() == (iter = objects.find(name))){ return NULL; } return &((*iter).second); } string S3ObjList::GetOrgName(const char* name) const { const s3obj_entry* ps3obj; if(!name || '\0' == name[0]){ return string(""); } if(NULL == (ps3obj = GetS3Obj(name))){ return string(""); } return ps3obj->orgname; } string S3ObjList::GetNormalizedName(const char* name) const { const s3obj_entry* ps3obj; if(!name || '\0' == name[0]){ return string(""); } if(NULL == (ps3obj = GetS3Obj(name))){ return string(""); } if(0 == (ps3obj->normalname).length()){ return string(name); } return ps3obj->normalname; } string S3ObjList::GetETag(const char* name) const { const s3obj_entry* ps3obj; if(!name || '\0' == name[0]){ return string(""); } if(NULL == (ps3obj = GetS3Obj(name))){ return string(""); } return ps3obj->etag; } bool S3ObjList::IsDir(const char* name) const { const s3obj_entry* ps3obj; if(NULL == (ps3obj = GetS3Obj(name))){ return false; } return ps3obj->is_dir; } bool S3ObjList::GetLastName(std::string& lastname) const { bool result = false; lastname = ""; for(s3obj_t::const_iterator iter = objects.begin(); iter != objects.end(); ++iter){ if((*iter).second.orgname.length()){ if(0 > strcmp(lastname.c_str(), (*iter).second.orgname.c_str())){ lastname = (*iter).second.orgname; result = true; } }else{ if(0 > strcmp(lastname.c_str(), (*iter).second.normalname.c_str())){ lastname = (*iter).second.normalname; result = true; } } } return result; } bool S3ObjList::GetNameList(s3obj_list_t& list, bool OnlyNormalized, bool CutSlash) const { s3obj_t::const_iterator iter; for(iter = objects.begin(); objects.end() != iter; ++iter){ if(OnlyNormalized && 0 != (*iter).second.normalname.length()){ continue; } string name = (*iter).first; if(CutSlash && 1 < name.length() && '/' == name[name.length() - 1]){ // only "/" string is skipped this. 
name = name.substr(0, name.length() - 1);
        }
        list.push_back(name);
    }
    return true;
}

typedef std::map<std::string, bool> s3obj_h_t;

bool S3ObjList::MakeHierarchizedList(s3obj_list_t& list, bool haveSlash)
{
    s3obj_h_t h_map;
    s3obj_h_t::iterator hiter;
    s3obj_list_t::const_iterator liter;

    for(liter = list.begin(); list.end() != liter; ++liter){
        string strtmp = (*liter);
        if(1 < strtmp.length() && '/' == strtmp[strtmp.length() - 1]){
            strtmp = strtmp.substr(0, strtmp.length() - 1);
        }
        h_map[strtmp] = true;

        // check hierarchized directory
        for(string::size_type pos = strtmp.find_last_of('/'); string::npos != pos; pos = strtmp.find_last_of('/')){
            strtmp = strtmp.substr(0, pos);
            if(0 == strtmp.length() || "/" == strtmp){
                break;
            }
            if(h_map.end() == h_map.find(strtmp)){
                // not found
                h_map[strtmp] = false;
            }
        }
    }

    // check map and add lost hierarchized directory.
    for(hiter = h_map.begin(); hiter != h_map.end(); ++hiter){
        if(false == (*hiter).second){
            // add hierarchized directory.
            string strtmp = (*hiter).first;
            if(haveSlash){
                strtmp += "/";
            }
            list.push_back(strtmp);
        }
    }
    return true;
}

//-------------------------------------------------------------------
// Utility functions for moving objects
//-------------------------------------------------------------------
MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, bool normdir)
{
    MVNODE *p;
    char *p_old_path;
    char *p_new_path;

    p = new MVNODE();

    if(NULL == (p_old_path = strdup(old_path))){
        delete p;
        printf("create_mvnode: could not allocate memory for p_old_path\n");
        S3FS_FUSE_EXIT();
        return NULL;
    }
    if(NULL == (p_new_path = strdup(new_path))){
        delete p;
        free(p_old_path);
        printf("create_mvnode: could not allocate memory for p_new_path\n");
        S3FS_FUSE_EXIT();
        return NULL;
    }
    p->old_path   = p_old_path;
    p->new_path   = p_new_path;
    p->is_dir     = is_dir;
    p->is_normdir = normdir;
    p->prev       = NULL;
    p->next       = NULL;
    return p;
}

//
// Add sorted MVNODE data(Ascending order)
//
MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const char *new_path, bool is_dir, bool normdir)
{
    if(!head || !tail){
        return NULL;
    }
    MVNODE* cur;
    MVNODE* mvnew;
    for(cur = *head; cur; cur = cur->next){
        if(cur->is_dir == is_dir){
            int nResult = strcmp(cur->old_path, old_path);
            if(0 == nResult){
                // Found same old_path.
                return cur;
            }else if(0 > nResult){
                // next check.
                // ex: cur("abc"), mvnew("abcd")
                // ex: cur("abc"), mvnew("abd")
                continue;
            }else{
                // Add into before cur-pos.
                // ex: cur("abc"), mvnew("ab")
                // ex: cur("abc"), mvnew("abb")
                if(NULL == (mvnew = create_mvnode(old_path, new_path, is_dir, normdir))){
                    return NULL;
                }
                if(cur->prev){
                    (cur->prev)->next = mvnew;
                }else{
                    *head = mvnew;
                }
                mvnew->prev = cur->prev;
                mvnew->next = cur;
                cur->prev   = mvnew;
                return mvnew;
            }
        }
    }
    // Add into tail.
if(NULL == (mvnew = create_mvnode(old_path, new_path, is_dir, normdir))){ return NULL; } mvnew->prev = (*tail); if(*tail){ (*tail)->next = mvnew; } (*tail) = mvnew; if(!(*head)){ (*head) = mvnew; } return mvnew; } void free_mvnodes(MVNODE *head) { MVNODE *my_head; MVNODE *next; for(my_head = head, next = NULL; my_head; my_head = next){ next = my_head->next; free(my_head->old_path); free(my_head->new_path); delete my_head; } } //------------------------------------------------------------------- // Class AutoLock //------------------------------------------------------------------- AutoLock::AutoLock(pthread_mutex_t* pmutex, Type type) : auto_mutex(pmutex) { if (type == ALREADY_LOCKED) { is_lock_acquired = false; } else if (type == NO_WAIT) { int res = pthread_mutex_trylock(auto_mutex); if(res == 0){ is_lock_acquired = true; }else if(res == EBUSY){ is_lock_acquired = false; }else{ S3FS_PRN_CRIT("pthread_mutex_trylock returned: %d", res); abort(); } } else { int res = pthread_mutex_lock(auto_mutex); if(res == 0){ is_lock_acquired = true; }else{ S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res); abort(); } } } bool AutoLock::isLockAcquired() const { return is_lock_acquired; } AutoLock::~AutoLock() { if (is_lock_acquired) { pthread_mutex_unlock(auto_mutex); } } void init_sysconf_vars() { // SUSv4tc1 says the following about _SC_GETGR_R_SIZE_MAX and // _SC_GETPW_R_SIZE_MAX: // Note that sysconf(_SC_GETGR_R_SIZE_MAX) may return -1 if // there is no hard limit on the size of the buffer needed to // store all the groups returned. long res = sysconf(_SC_GETPW_R_SIZE_MAX); if(0 > res){ if (errno != 0){ S3FS_PRN_WARN("could not get max pw length."); abort(); } res = 1024; // default initial length } max_password_size = res; res = sysconf(_SC_GETGR_R_SIZE_MAX); if(0 > res) { if (errno != 0) { S3FS_PRN_ERR("could not get max name length."); abort(); } res = 1024; // default initial length } max_group_name_length = res; } //------------------------------------------------------------------- // Utility for UID/GID //------------------------------------------------------------------- // get user name from uid string get_username(uid_t uid) { size_t maxlen = max_password_size; int result; char* pbuf; struct passwd pwinfo; struct passwd* ppwinfo = NULL; // make buffer pbuf = new char[maxlen]; // get pw information while(ERANGE == (result = getpwuid_r(uid, &pwinfo, pbuf, maxlen, &ppwinfo))){ delete[] pbuf; maxlen *= 2; pbuf = new char[maxlen]; } if(0 != result){ S3FS_PRN_ERR("could not get pw information(%d).", result); delete[] pbuf; return string(""); } // check pw if(NULL == ppwinfo){ delete[] pbuf; return string(""); } string name = SAFESTRPTR(ppwinfo->pw_name); delete[] pbuf; return name; } int is_uid_include_group(uid_t uid, gid_t gid) { size_t maxlen = max_group_name_length; int result; char* pbuf; struct group ginfo; struct group* pginfo = NULL; // make buffer pbuf = new char[maxlen]; // get group information while(ERANGE == (result = getgrgid_r(gid, &ginfo, pbuf, maxlen, &pginfo))){ delete[] pbuf; maxlen *= 2; pbuf = new char[maxlen]; } if(0 != result){ S3FS_PRN_ERR("could not get group information(%d).", result); delete[] pbuf; return -result; } // check group if(NULL == pginfo){ // there is not gid in group. delete[] pbuf; return -EINVAL; } string username = get_username(uid); char** ppgr_mem; for(ppgr_mem = pginfo->gr_mem; ppgr_mem && *ppgr_mem; ppgr_mem++){ if(username == *ppgr_mem){ // Found username in group. 
delete[] pbuf; return 1; } } delete[] pbuf; return 0; } //------------------------------------------------------------------- // Utility for file and directory //------------------------------------------------------------------- // safe variant of dirname // dirname clobbers path so let it operate on a tmp copy string mydirname(const char* path) { if(!path || '\0' == path[0]){ return string(""); } return mydirname(string(path)); } string mydirname(const string& path) { return string(dirname((char*)path.c_str())); } // safe variant of basename // basename clobbers path so let it operate on a tmp copy string mybasename(const char* path) { if(!path || '\0' == path[0]){ return string(""); } return mybasename(string(path)); } string mybasename(const string& path) { return string(basename((char*)path.c_str())); } // mkdir --parents int mkdirp(const string& path, mode_t mode) { string base; string component; istringstream ss(path); while (getline(ss, component, '/')) { base += "/" + component; struct stat st; if(0 == stat(base.c_str(), &st)){ if(!S_ISDIR(st.st_mode)){ return EPERM; } }else{ if(0 != mkdir(base.c_str(), mode) && errno != EEXIST){ return errno; } } } return 0; } // get existed directory path string get_exist_directory_path(const string& path) { string existed("/"); // "/" is existed. string base; string component; istringstream ss(path); while (getline(ss, component, '/')) { if(base != "/"){ base += "/"; } base += component; struct stat st; if(0 == stat(base.c_str(), &st) && S_ISDIR(st.st_mode)){ existed = base; }else{ break; } } return existed; } bool check_exist_dir_permission(const char* dirpath) { if(!dirpath || '\0' == dirpath[0]){ return false; } // exists struct stat st; if(0 != stat(dirpath, &st)){ if(ENOENT == errno){ // dir does not exist return true; } if(EACCES == errno){ // could not access directory return false; } // something error occurred return false; } // check type if(!S_ISDIR(st.st_mode)){ // path is not directory return false; } // check permission uid_t myuid = geteuid(); if(myuid == st.st_uid){ if(S_IRWXU != (st.st_mode & S_IRWXU)){ return false; } }else{ if(1 == is_uid_include_group(myuid, st.st_gid)){ if(S_IRWXG != (st.st_mode & S_IRWXG)){ return false; } }else{ if(S_IRWXO != (st.st_mode & S_IRWXO)){ return false; } } } return true; } bool delete_files_in_dir(const char* dir, bool is_remove_own) { DIR* dp; struct dirent* dent; if(NULL == (dp = opendir(dir))){ S3FS_PRN_ERR("could not open dir(%s) - errno(%d)", dir, errno); return false; } for(dent = readdir(dp); dent; dent = readdir(dp)){ if(0 == strcmp(dent->d_name, "..") || 0 == strcmp(dent->d_name, ".")){ continue; } string fullpath = dir; fullpath += "/"; fullpath += dent->d_name; struct stat st; if(0 != lstat(fullpath.c_str(), &st)){ S3FS_PRN_ERR("could not get stats of file(%s) - errno(%d)", fullpath.c_str(), errno); closedir(dp); return false; } if(S_ISDIR(st.st_mode)){ // dir -> Reentrant if(!delete_files_in_dir(fullpath.c_str(), true)){ S3FS_PRN_ERR("could not remove sub dir(%s) - errno(%d)", fullpath.c_str(), errno); closedir(dp); return false; } }else{ if(0 != unlink(fullpath.c_str())){ S3FS_PRN_ERR("could not remove file(%s) - errno(%d)", fullpath.c_str(), errno); closedir(dp); return false; } } } closedir(dp); if(is_remove_own && 0 != rmdir(dir)){ S3FS_PRN_ERR("could not remove dir(%s) - errno(%d)", dir, errno); return false; } return true; } //------------------------------------------------------------------- // Utility functions for convert 
//-------------------------------------------------------------------
time_t get_mtime(const char *str)
{
    // [NOTE]
    // rclone may set x-amz-meta-mtime to a floating point value with
    // sub-second (ns) precision. s3fs truncates anything after the
    // decimal point and works in whole seconds to cope with this.
    //
    string strmtime;
    if(str && '\0' != *str){
        strmtime = str;
        string::size_type pos = strmtime.find('.', 0);
        if(string::npos != pos){
            strmtime = strmtime.substr(0, pos);
        }
    }
    return static_cast<time_t>(s3fs_strtoofft(strmtime.c_str()));
}

static time_t get_time(headers_t& meta, bool overcheck, const char *header)
{
    headers_t::const_iterator iter;
    if(meta.end() == (iter = meta.find(header))){
        if(overcheck){
            return get_lastmodified(meta);
        }
        return 0;
    }
    return get_mtime((*iter).second.c_str());
}

time_t get_mtime(headers_t& meta, bool overcheck)
{
    return get_time(meta, overcheck, "x-amz-meta-mtime");
}

time_t get_ctime(headers_t& meta, bool overcheck)
{
    return get_time(meta, overcheck, "x-amz-meta-ctime");
}

off_t get_size(const char *s)
{
    return s3fs_strtoofft(s);
}

off_t get_size(headers_t& meta)
{
    headers_t::const_iterator iter = meta.find("Content-Length");
    if(meta.end() == iter){
        return 0;
    }
    return get_size((*iter).second.c_str());
}

mode_t get_mode(const char *s)
{
    return static_cast<mode_t>(s3fs_strtoofft(s));
}

mode_t get_mode(headers_t& meta, const char* path, bool checkdir, bool forcedir)
{
    mode_t mode     = 0;
    bool   isS3sync = false;
    headers_t::const_iterator iter;

    if(meta.end() != (iter = meta.find("x-amz-meta-mode"))){
        mode = get_mode((*iter).second.c_str());
    }else if(meta.end() != (iter = meta.find("x-amz-meta-permissions"))){ // for s3sync
        mode = get_mode((*iter).second.c_str());
        isS3sync = true;
    }else{
        // If another tool creates an object without permissions, default to owner
        // read-write and group readable.
        mode = path[strlen(path) - 1] == '/' ? 0750 : 0640;
    }

    // Check the bitmask: if the file-type bits (S_IFMT) are all zero, fill in a
    // regular type (S_IFDIR or S_IFREG); otherwise return mode unmodified so that
    // S_IFIFO, S_IFSOCK, S_IFCHR, S_IFLNK and S_IFBLK devices can be processed
    // properly by fuse.
    if(!(mode & S_IFMT)){
        if(!isS3sync){
            if(checkdir){
                if(forcedir){
                    mode |= S_IFDIR;
                }else{
                    if(meta.end() != (iter = meta.find("Content-Type"))){
                        string strConType = (*iter).second;
                        // Leave just the mime type, remove any optional parameters (eg charset)
                        string::size_type pos = strConType.find(';');
                        if(string::npos != pos){
                            strConType = strConType.substr(0, pos);
                        }
                        if(strConType == "application/x-directory" || strConType == "httpd/unix-directory"){
                            // Nextcloud uses this MIME type for directory objects when mounting a bucket as external storage
                            mode |= S_IFDIR;
                        }else if(path && 0 < strlen(path) && '/' == path[strlen(path) - 1]){
                            if(strConType == "binary/octet-stream" || strConType == "application/octet-stream"){
                                mode |= S_IFDIR;
                            }else{
                                if(complement_stat){
                                    // When complementing a missing stat mode: an object whose name
                                    // ends with '/', whose content type is text/plain and whose size
                                    // is 0 or 1 should be treated as a directory.
                                    off_t size = get_size(meta);
                                    if(strConType == "text/plain" && (0 == size || 1 == size)){
                                        mode |= S_IFDIR;
                                    }else{
                                        mode |= S_IFREG;
                                    }
                                }else{
                                    mode |= S_IFREG;
                                }
                            }
                        }else{
                            mode |= S_IFREG;
                        }
                    }else{
                        mode |= S_IFREG;
                    }
                }
            }
            // When complementing a missing stat mode: if the mode has no permission
            // bits set at all, add a minimal read permission for the owner.
            if(complement_stat && 0 == (mode & (S_IRWXU | S_IRWXG | S_IRWXO))){
                mode |= (S_IRUSR | (0 == (mode & S_IFDIR) ? 0 : S_IXUSR));
            }
        }else{
            if(!checkdir){
                // cut dir/reg flag.
                mode &= ~S_IFDIR;
                mode &= ~S_IFREG;
            }
        }
    }
    return mode;
}

uid_t get_uid(const char *s)
{
    return static_cast<uid_t>(s3fs_strtoofft(s));
}

uid_t get_uid(headers_t& meta)
{
    headers_t::const_iterator iter;
    if(meta.end() != (iter = meta.find("x-amz-meta-uid"))){
        return get_uid((*iter).second.c_str());
    }else if(meta.end() != (iter = meta.find("x-amz-meta-owner"))){ // for s3sync
        return get_uid((*iter).second.c_str());
    }else{
        return geteuid();
    }
}

gid_t get_gid(const char *s)
{
    return static_cast<gid_t>(s3fs_strtoofft(s));
}

gid_t get_gid(headers_t& meta)
{
    headers_t::const_iterator iter;
    if(meta.end() != (iter = meta.find("x-amz-meta-gid"))){
        return get_gid((*iter).second.c_str());
    }else if(meta.end() != (iter = meta.find("x-amz-meta-group"))){ // for s3sync
        return get_gid((*iter).second.c_str());
    }else{
        return getegid();
    }
}

blkcnt_t get_blocks(off_t size)
{
    return size / 512 + 1;
}

time_t cvtIAMExpireStringToTime(const char* s)
{
    struct tm tm;
    if(!s){
        return 0L;
    }
    memset(&tm, 0, sizeof(struct tm));
    strptime(s, "%Y-%m-%dT%H:%M:%S", &tm);
    return timegm(&tm); // GMT
}

time_t get_lastmodified(const char* s)
{
    struct tm tm;
    if(!s){
        return 0L;
    }
    memset(&tm, 0, sizeof(struct tm));
    strptime(s, "%a, %d %b %Y %H:%M:%S %Z", &tm);
    return timegm(&tm); // GMT
}

time_t get_lastmodified(headers_t& meta)
{
    headers_t::const_iterator iter = meta.find("Last-Modified");
    if(meta.end() == iter){
        return 0;
    }
    return get_lastmodified((*iter).second.c_str());
}

//
// Returns whether the object needs a detailed check.
// If this function returns true, the object could be a directory and the
// caller should check further (by searching for sub objects).
//
bool is_need_check_obj_detail(headers_t& meta)
{
    headers_t::const_iterator iter;

    // a directory object has a Content-Length of 0.
    if(0 != get_size(meta)){
        return false;
    }
    // if the object has any x-amz-meta information, no further check is needed.
    if(meta.end() != meta.find("x-amz-meta-mode")  ||
       meta.end() != meta.find("x-amz-meta-mtime") ||
       meta.end() != meta.find("x-amz-meta-uid")   ||
       meta.end() != meta.find("x-amz-meta-gid")   ||
       meta.end() != meta.find("x-amz-meta-owner") ||
       meta.end() != meta.find("x-amz-meta-group") ||
       meta.end() != meta.find("x-amz-meta-permissions") )
    {
        return false;
    }
    // if there is no Content-Type, or Content-Type is "application/x-directory",
    // no further check is needed.
if(meta.end() == (iter = meta.find("Content-Type"))){ return false; } if("application/x-directory" == (*iter).second){ return false; } return true; } bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value) { bool result = false; if(!data || !key){ return false; } value.clear(); xmlDocPtr doc; if(NULL == (doc = xmlReadMemory(data, len, "", NULL, 0))){ return false; } if(NULL == doc->children){ S3FS_XMLFREEDOC(doc); return false; } for(xmlNodePtr cur_node = doc->children->children; NULL != cur_node; cur_node = cur_node->next){ // For DEBUG // string cur_node_name(reinterpret_cast(cur_node->name)); // printf("cur_node_name: %s\n", cur_node_name.c_str()); if(XML_ELEMENT_NODE == cur_node->type){ string elementName = reinterpret_cast(cur_node->name); // For DEBUG // printf("elementName: %s\n", elementName.c_str()); if(cur_node->children){ if(XML_TEXT_NODE == cur_node->children->type){ if(elementName == key) { value = reinterpret_cast(cur_node->children->content); result = true; break; } } } } } S3FS_XMLFREEDOC(doc); return result; } //------------------------------------------------------------------- // Help //------------------------------------------------------------------- void show_usage () { printf("Usage: %s BUCKET:[PATH] MOUNTPOINT [OPTION]...\n", program_name.c_str()); } void show_help () { show_usage(); printf( "\n" "Mount an Amazon S3 bucket as a file system.\n" "\n" "Usage:\n" " mounting\n" " s3fs bucket[:/path] mountpoint [options]\n" " s3fs mountpoint [options (must specify bucket= option)]\n" "\n" " unmounting\n" " umount mountpoint\n" "\n" " General forms for s3fs and FUSE/mount options:\n" " -o opt[,opt...]\n" " -o opt [-o opt] ...\n" "\n" " utility mode (remove interrupted multipart uploading objects)\n" " s3fs --incomplete-mpu-list (-u) bucket\n" " s3fs --incomplete-mpu-abort[=all | =] bucket\n" "\n" "s3fs Options:\n" "\n" " Most s3fs options are given in the form where \"opt\" is:\n" "\n" " =\n" "\n" " bucket\n" " - if it is not specified bucket name (and path) in command line,\n" " must specify this option after -o option for bucket name.\n" "\n" " default_acl (default=\"private\")\n" " - the default canned acl to apply to all written s3 objects,\n" " e.g., private, public-read. see\n" " https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl\n" " for the full list of canned acls\n" "\n" " retries (default=\"5\")\n" " - number of times to retry a failed S3 transaction\n" "\n" " use_cache (default=\"\" which means disabled)\n" " - local folder to use for local file cache\n" "\n" " check_cache_dir_exist (default is disable)\n" " - if use_cache is set, check if the cache directory exists.\n" " If this option is not specified, it will be created at runtime\n" " when the cache directory does not exist.\n" "\n" " del_cache (delete local file cache)\n" " - delete local file cache when s3fs starts and exits.\n" "\n" " storage_class (default=\"standard\")\n" " - store object with specified storage class. Possible values:\n" " standard, standard_ia, onezone_ia, reduced_redundancy and intelligent_tiering.\n" "\n" " use_rrs (default is disable)\n" " - use Amazon's Reduced Redundancy Storage.\n" " this option can not be specified with use_sse.\n" " (can specify use_rrs=1 for old version)\n" " this option has been replaced by new storage_class option.\n" "\n" " use_sse (default is disable)\n" " - Specify three type Amazon's Server-Site Encryption: SSE-S3,\n" " SSE-C or SSE-KMS. 
SSE-S3 uses Amazon S3-managed encryption\n" " keys, SSE-C uses customer-provided encryption keys, and\n" " SSE-KMS uses the master key which you manage in AWS KMS.\n" " You can specify \"use_sse\" or \"use_sse=1\" enables SSE-S3\n" " type (use_sse=1 is old type parameter).\n" " Case of setting SSE-C, you can specify \"use_sse=custom\",\n" " \"use_sse=custom:\" or\n" " \"use_sse=\" (only \n" " specified is old type parameter). You can use \"c\" for\n" " short \"custom\".\n" " The custom key file must be 600 permission. The file can\n" " have some lines, each line is one SSE-C key. The first line\n" " in file is used as Customer-Provided Encryption Keys for\n" " uploading and changing headers etc. If there are some keys\n" " after first line, those are used downloading object which\n" " are encrypted by not first key. So that, you can keep all\n" " SSE-C keys in file, that is SSE-C key history.\n" " If you specify \"custom\" (\"c\") without file path, you\n" " need to set custom key by load_sse_c option or AWSSSECKEYS\n" " environment. (AWSSSECKEYS environment has some SSE-C keys\n" " with \":\" separator.) This option is used to decide the\n" " SSE type. So that if you do not want to encrypt a object\n" " object at uploading, but you need to decrypt encrypted\n" " object at downloading, you can use load_sse_c option instead\n" " of this option.\n" " For setting SSE-KMS, specify \"use_sse=kmsid\" or\n" " \"use_sse=kmsid:\". You can use \"k\" for short \"kmsid\".\n" " If you san specify SSE-KMS type with your in AWS\n" " KMS, you can set it after \"kmsid:\" (or \"k:\"). If you\n" " specify only \"kmsid\" (\"k\"), you need to set AWSSSEKMSID\n" " environment which value is . You must be careful\n" " about that you can not use the KMS id which is not same EC2\n" " region.\n" "\n" " load_sse_c - specify SSE-C keys\n" " Specify the custom-provided encryption keys file path for decrypting\n" " at downloading.\n" " If you use the custom-provided encryption key at uploading, you\n" " specify with \"use_sse=custom\". The file has many lines, one line\n" " means one custom key. So that you can keep all SSE-C keys in file,\n" " that is SSE-C key history. 
AWSSSECKEYS environment is as same as this\n" " file contents.\n" "\n" " public_bucket (default=\"\" which means disabled)\n" " - anonymously mount a public bucket when set to 1, ignores the \n" " $HOME/.passwd-s3fs and /etc/passwd-s3fs files.\n" " S3 does not allow copy object api for anonymous users, then\n" " s3fs sets nocopyapi option automatically when public_bucket=1\n" " option is specified.\n" "\n" " passwd_file (default=\"\")\n" " - specify which s3fs password file to use\n" "\n" " ahbe_conf (default=\"\" which means disabled)\n" " - This option specifies the configuration file path which\n" " file is the additional HTTP header by file (object) extension.\n" " The configuration file format is below:\n" " -----------\n" " line = [file suffix or regex] HTTP-header [HTTP-values]\n" " file suffix = file (object) suffix, if this field is empty,\n" " it means \"reg:(.*)\".(=all object).\n" " regex = regular expression to match the file (object) path.\n" " this type starts with \"reg:\" prefix.\n" " HTTP-header = additional HTTP header name\n" " HTTP-values = additional HTTP header value\n" " -----------\n" " Sample:\n" " -----------\n" " .gz Content-Encoding gzip\n" " .Z Content-Encoding compress\n" " reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2\n" " -----------\n" " A sample configuration file is uploaded in \"test\" directory.\n" " If you specify this option for set \"Content-Encoding\" HTTP \n" " header, please take care for RFC 2616.\n" "\n" " profile (default=\"default\")\n" " - Choose a profile from ${HOME}/.aws/credentials to authenticate\n" " against S3. Note that this format matches the AWS CLI format and\n" " differs from the s3fs passwd format.\n" "\n" " connect_timeout (default=\"300\" seconds)\n" " - time to wait for connection before giving up\n" "\n" " readwrite_timeout (default=\"120\" seconds)\n" " - time to wait between read/write activity before giving up\n" "\n" " list_object_max_keys (default=\"1000\")\n" " - specify the maximum number of keys returned by S3 list object\n" " API. The default is 1000. you can set this value to 1000 or more.\n" "\n" " max_stat_cache_size (default=\"100,000\" entries (about 40MB))\n" " - maximum number of entries in the stat cache, and this maximum is\n" " also treated as the number of symbolic link cache.\n" "\n" " stat_cache_expire (default is no expire)\n" " - specify expire time (seconds) for entries in the stat cache.\n" " This expire time indicates the time since stat cached. and this\n" " is also set to the expire time of the symbolic link cache.\n" "\n" " stat_cache_interval_expire (default is no expire)\n" " - specify expire time (seconds) for entries in the stat cache(and\n" " symbolic link cache).\n" " This expire time is based on the time from the last access time\n" " of the stat cache. This option is exclusive with stat_cache_expire,\n" " and is left for compatibility with older versions.\n" "\n" " enable_noobj_cache (default is disable)\n" " - enable cache entries for the object which does not exist.\n" " s3fs always has to check whether file (or sub directory) exists \n" " under object (path) when s3fs does some command, since s3fs has \n" " recognized a directory which does not exist and has files or \n" " sub directories under itself. 
It increases ListBucket request \n" " and makes performance bad.\n" " You can specify this option for performance, s3fs memorizes \n" " in stat cache that the object (file or directory) does not exist.\n" "\n" " no_check_certificate\n" " - server certificate won't be checked against the available \n" " certificate authorities.\n" "\n" " ssl_verify_hostname (default=\"2\")\n" " - When 0, do not verify the SSL certificate against the hostname.\n" "\n" " nodnscache (disable dns cache)\n" " - s3fs is always using dns cache, this option make dns cache disable.\n" "\n" " nosscache (disable ssl session cache)\n" " - s3fs is always using ssl session cache, this option make ssl \n" " session cache disable.\n" "\n" " multireq_max (default=\"20\")\n" " - maximum number of parallel request for listing objects.\n" "\n" " parallel_count (default=\"5\")\n" " - number of parallel request for uploading big objects.\n" " s3fs uploads large object (over 20MB) by multipart post request, \n" " and sends parallel requests.\n" " This option limits parallel request count which s3fs requests \n" " at once. It is necessary to set this value depending on a CPU \n" " and a network band.\n" "\n" " multipart_size (default=\"10\")\n" " - part size, in MB, for each multipart request.\n" " The minimum value is 5 MB and the maximum value is 5 GB.\n" "\n" " ensure_diskfree (default 0)\n" " - sets MB to ensure disk free space. This option means the\n" " threshold of free space size on disk which is used for the\n" " cache file by s3fs. s3fs makes file for\n" " downloading, uploading and caching files. If the disk free\n" " space is smaller than this value, s3fs do not use diskspace\n" " as possible in exchange for the performance.\n" "\n" " singlepart_copy_limit (default=\"512\")\n" " - maximum size, in MB, of a single-part copy before trying \n" " multipart copy.\n" "\n" " host (default=\"https://s3.amazonaws.com\")\n" " - Set a non-Amazon host, e.g., https://example.com.\n" "\n" " servicepath (default=\"/\")\n" " - Set a service path when the non-Amazon host requires a prefix.\n" "\n" " url (default=\"https://s3.amazonaws.com\")\n" " - sets the url to use to access Amazon S3. If you want to use HTTP,\n" " then you can set \"url=http://s3.amazonaws.com\".\n" " If you do not use https, please specify the URL with the url\n" " option.\n" "\n" " endpoint (default=\"us-east-1\")\n" " - sets the endpoint to use on signature version 4\n" " If this option is not specified, s3fs uses \"us-east-1\" region as\n" " the default. If the s3fs could not connect to the region specified\n" " by this option, s3fs could not run. But if you do not specify this\n" " option, and if you can not connect with the default region, s3fs\n" " will retry to automatically connect to the other region. So s3fs\n" " can know the correct region name, because s3fs can find it in an\n" " error from the S3 server.\n" "\n" " sigv2 (default is signature version 4)\n" " - sets signing AWS requests by using Signature Version 2\n" "\n" " mp_umask (default is \"0000\")\n" " - sets umask for the mount point directory.\n" " If allow_other option is not set, s3fs allows access to the mount\n" " point only to the owner. In the opposite case s3fs allows access\n" " to all users as the default. But if you set the allow_other with\n" " this option, you can control the permissions of the\n" " mount point by this option like umask.\n" "\n" " umask (default is \"0000\")\n" " - sets umask for files under the mountpoint. 
This can allow\n" " users other than the mounting user to read and write to files\n" " that they did not create.\n" "\n" " nomultipart (disable multipart uploads)\n" "\n" " enable_content_md5 (default is disable)\n" " Allow S3 server to check data integrity of uploads via the\n" " Content-MD5 header. This can add CPU overhead to transfers.\n" "\n" " ecs (default is disable)\n" " - This option instructs s3fs to query the ECS container credential\n" " metadata address instead of the instance metadata address.\n" "\n" " iam_role (default is no IAM role)\n" " - This option requires the IAM role name or \"auto\". If you specify\n" " \"auto\", s3fs will automatically use the IAM role names that are set\n" " to an instance. If you specify this option without any argument, it\n" " is the same as that you have specified the \"auto\".\n" "\n" " ibm_iam_auth (default is not using IBM IAM authentication)\n" " - This option instructs s3fs to use IBM IAM authentication.\n" " In this mode, the AWSAccessKey and AWSSecretKey will be used as\n" " IBM's Service-Instance-ID and APIKey, respectively.\n" "\n" " ibm_iam_endpoint (default is https://iam.bluemix.net)\n" " - sets the URL to use for IBM IAM authentication.\n" "\n" " use_xattr (default is not handling the extended attribute)\n" " Enable to handle the extended attribute (xattrs).\n" " If you set this option, you can use the extended attribute.\n" " For example, encfs and ecryptfs need to support the extended attribute.\n" " Notice: if s3fs handles the extended attribute, s3fs can not work to\n" " copy command with preserve=mode.\n" "\n" " noxmlns (disable registering xml name space)\n" " disable registering xml name space for response of \n" " ListBucketResult and ListVersionsResult etc. Default name \n" " space is looked up from \"http://s3.amazonaws.com/doc/2006-03-01\".\n" " This option should not be specified now, because s3fs looks up\n" " xmlns automatically after v1.66.\n" "\n" " nomixupload (disable copy in multipart uploads)\n" " Disable to use PUT (copy api) when multipart uploading large size objects.\n" " By default, when doing multipart upload, the range of unchanged data\n" " will use PUT (copy api) whenever possible.\n" " When nocopyapi or norenameapi is specified, use of PUT (copy api) is\n" " invalidated even if this option is not specified.\n" "\n" " nocopyapi (for other incomplete compatibility object storage)\n" " For a distributed object storage which is compatibility S3\n" " API without PUT (copy api).\n" " If you set this option, s3fs do not use PUT with \n" " \"x-amz-copy-source\" (copy api). Because traffic is increased\n" " 2-3 times by this option, we do not recommend this.\n" "\n" " norenameapi (for other incomplete compatibility object storage)\n" " For a distributed object storage which is compatibility S3\n" " API without PUT (copy api).\n" " This option is a subset of nocopyapi option. The nocopyapi\n" " option does not use copy-api for all command (ex. chmod, chown,\n" " touch, mv, etc), but this option does not use copy-api for\n" " only rename command (ex. mv). 
If this option is specified with\n" " nocopyapi, then s3fs ignores it.\n" "\n" " use_path_request_style (use legacy API calling style)\n" " Enable compatibility with S3-like APIs which do not support\n" " the virtual-host request style, by using the older path request\n" " style.\n" "\n" " noua (suppress User-Agent header)\n" " Usually s3fs outputs of the User-Agent in \"s3fs/ (commit\n" " hash ; )\" format.\n" " If this option is specified, s3fs suppresses the output of the\n" " User-Agent.\n" "\n" " cipher_suites\n" " Customize the list of TLS cipher suites.\n" " Expects a colon separated list of cipher suite names.\n" " A list of available cipher suites, depending on your TLS engine,\n" " can be found on the CURL library documentation:\n" " https://curl.haxx.se/docs/ssl-ciphers.html\n" "\n" " instance_name - The instance name of the current s3fs mountpoint.\n" " This name will be added to logging messages and user agent headers sent by s3fs.\n" "\n" " complement_stat (complement lack of file/directory mode)\n" " s3fs complements lack of information about file/directory mode\n" " if a file or a directory object does not have x-amz-meta-mode\n" " header. As default, s3fs does not complements stat information\n" " for a object, then the object will not be able to be allowed to\n" " list/modify.\n" "\n" " notsup_compat_dir (not support compatibility directory types)\n" " As a default, s3fs supports objects of the directory type as\n" " much as possible and recognizes them as directories.\n" " Objects that can be recognized as directory objects are \"dir/\",\n" " \"dir\", \"dir_$folder$\", and there is a file object that does\n" " not have a directory object but contains that directory path.\n" " s3fs needs redundant communication to support all these\n" " directory types. The object as the directory created by s3fs\n" " is \"dir/\". By restricting s3fs to recognize only \"dir/\" as\n" " a directory, communication traffic can be reduced. This option\n" " is used to give this restriction to s3fs.\n" " However, if there is a directory object other than \"dir/\" in\n" " the bucket, specifying this option is not recommended. s3fs may\n" " not be able to recognize the object correctly if an object\n" " created by s3fs exists in the bucket.\n" " Please use this option when the directory in the bucket is\n" " only \"dir/\" object.\n" "\n" " use_wtf8 - support arbitrary file system encoding.\n" " S3 requires all object names to be valid utf-8. But some\n" " clients, notably Windows NFS clients, use their own encoding.\n" " This option re-encodes invalid utf-8 object names into valid\n" " utf-8 by mapping offending codes into a 'private' codepage of the\n" " Unicode set.\n" " Useful on clients not using utf-8 as their file system encoding.\n" "\n" " use_session_token - indicate that session token should be provided.\n" " If credentials are provided by environment variables this switch\n" " forces presence check of AWSSESSIONTOKEN variable.\n" " Otherwise an error is returned." "\n" " requester_pays (default is disable)\n" " This option instructs s3fs to enable requests involving\n" " Requester Pays buckets.\n" " It includes the 'x-amz-request-payer=requester' entry in the\n" " request header." "\n" " dbglevel (default=\"crit\")\n" " Set the debug message level. set value as crit (critical), err\n" " (error), warn (warning), info (information) to debug level.\n" " default debug level is critical. If s3fs run with \"-d\" option,\n" " the debug level is set information. 
When s3fs catch the signal\n" " SIGUSR2, the debug level is bumpup.\n" "\n" " curldbg - put curl debug message\n" " Put the debug message from libcurl when this option is specified.\n" "\n" "FUSE/mount Options:\n" "\n" " Most of the generic mount options described in 'man mount' are\n" " supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime,\n" " noatime, sync async, dirsync). Filesystems are mounted with\n" " '-onodev,nosuid' by default, which can only be overridden by a\n" " privileged user.\n" " \n" " There are many FUSE specific mount options that can be specified.\n" " e.g. allow_other See the FUSE's README for the full set.\n" "\n" "Utility mode Options:\n" "\n" " -u, --incomplete-mpu-list\n" " Lists multipart incomplete objects uploaded to the specified\n" " bucket.\n" " --incomplete-mpu-abort (=all or =)\n" " Delete the multipart incomplete object uploaded to the specified\n" " bucket.\n" " If \"all\" is specified for this option, all multipart incomplete\n" " objects will be deleted. If you specify no argument as an option,\n" " objects older than 24 hours (24H) will be deleted (This is the\n" " default value). You can specify an optional date format. It can\n" " be specified as year, month, day, hour, minute, second, and it is\n" " expressed as \"Y\", \"M\", \"D\", \"h\", \"m\", \"s\" respectively.\n" " For example, \"1Y6M10D12h30m30s\".\n" "\n" "Miscellaneous Options:\n" "\n" " -h, --help Output this help.\n" " --version Output version info.\n" " -d --debug Turn on DEBUG messages to syslog. Specifying -d\n" " twice turns on FUSE debug messages to STDOUT.\n" " -f FUSE foreground option - do not run as daemon.\n" " -s FUSE singlethreaded option\n" " disable multi-threaded operation\n" "\n" "\n" "s3fs home page: \n" ); } void show_version() { printf( "Amazon Simple Storage Service File System V%s (commit:%s) with %s\n" "Copyright (C) 2010 Randy Rizun \n" "License GPL2: GNU GPL version 2 \n" "This is free software: you are free to change and redistribute it.\n" "There is NO WARRANTY, to the extent permitted by law.\n", VERSION, COMMIT_HASH_VAL, s3fs_crypt_lib_name()); } /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ s3fs-fuse-1.86/src/s3fs_util.h000066400000000000000000000114331361654130000161420ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Randy Rizun * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef S3FS_S3FS_UTIL_H_ #define S3FS_S3FS_UTIL_H_ //------------------------------------------------------------------- // Typedef //------------------------------------------------------------------- // // Struct // struct s3obj_entry{ std::string normalname; // normalized name: if empty, object is normalized name. 
std::string orgname; // original name: if empty, object is original name. std::string etag; bool is_dir; s3obj_entry() : is_dir(false) {} }; typedef std::map s3obj_t; typedef std::list s3obj_list_t; // // Class // class S3ObjList { private: s3obj_t objects; private: bool insert_normalized(const char* name, const char* normalized, bool is_dir); const s3obj_entry* GetS3Obj(const char* name) const; s3obj_t::const_iterator begin(void) const { return objects.begin(); } s3obj_t::const_iterator end(void) const { return objects.end(); } public: S3ObjList() {} ~S3ObjList() {} bool IsEmpty(void) const { return objects.empty(); } bool insert(const char* name, const char* etag = NULL, bool is_dir = false); std::string GetOrgName(const char* name) const; std::string GetNormalizedName(const char* name) const; std::string GetETag(const char* name) const; bool IsDir(const char* name) const; bool GetNameList(s3obj_list_t& list, bool OnlyNormalized = true, bool CutSlash = true) const; bool GetLastName(std::string& lastname) const; static bool MakeHierarchizedList(s3obj_list_t& list, bool haveSlash); }; typedef struct mvnode { char *old_path; char *new_path; bool is_dir; bool is_normdir; struct mvnode *prev; struct mvnode *next; } MVNODE; class AutoLock { public: enum Type { NO_WAIT = 1, ALREADY_LOCKED = 2, NONE = 0 }; explicit AutoLock(pthread_mutex_t* pmutex, Type type = NONE); bool isLockAcquired() const; ~AutoLock(); private: pthread_mutex_t* const auto_mutex; bool is_lock_acquired; }; //------------------------------------------------------------------- // Functions //------------------------------------------------------------------- std::string get_realpath(const char *path); MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, bool normdir = false); MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const char *new_path, bool is_dir, bool normdir = false); void free_mvnodes(MVNODE *head); void init_sysconf_vars(); std::string get_username(uid_t uid); int is_uid_include_group(uid_t uid, gid_t gid); std::string mydirname(const char* path); std::string mydirname(const std::string& path); std::string mybasename(const char* path); std::string mybasename(const std::string& path); int mkdirp(const std::string& path, mode_t mode); std::string get_exist_directory_path(const std::string& path); bool check_exist_dir_permission(const char* dirpath); bool delete_files_in_dir(const char* dir, bool is_remove_own); time_t get_mtime(const char *s); time_t get_mtime(headers_t& meta, bool overcheck = true); time_t get_ctime(headers_t& meta, bool overcheck = true); off_t get_size(const char *s); off_t get_size(headers_t& meta); mode_t get_mode(const char *s); mode_t get_mode(headers_t& meta, const char* path = NULL, bool checkdir = false, bool forcedir = false); uid_t get_uid(const char *s); uid_t get_uid(headers_t& meta); gid_t get_gid(const char *s); gid_t get_gid(headers_t& meta); blkcnt_t get_blocks(off_t size); time_t cvtIAMExpireStringToTime(const char* s); time_t get_lastmodified(const char* s); time_t get_lastmodified(headers_t& meta); bool is_need_check_obj_detail(headers_t& meta); bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value); void show_usage(void); void show_help(void); void show_version(void); #endif // S3FS_S3FS_UTIL_H_ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ 
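/*
 * A minimal usage sketch for the AutoLock helper declared above (hypothetical
 * caller, not part of the s3fs sources; assumes <pthread.h> and s3fs_util.h):
 *
 *     static pthread_mutex_t stat_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *     void update_shared_state()
 *     {
 *         AutoLock lock(&stat_lock);              // blocks until acquired
 *         // ... touch shared data; released automatically in ~AutoLock
 *     }
 *
 *     bool try_update_shared_state()
 *     {
 *         AutoLock lock(&stat_lock, AutoLock::NO_WAIT);
 *         if(!lock.isLockAcquired()){
 *             return false;                       // somebody else holds it
 *         }
 *         // ... critical section
 *         return true;
 *     }
 */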
s3fs-fuse-1.86/src/string_util.cpp000066400000000000000000000357501361654130000171350ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Randy Rizun * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #include "string_util.h" using namespace std; template std::string str(T value) { std::ostringstream s; s << value; return s.str(); } template std::string str(short value); template std::string str(unsigned short value); template std::string str(int value); template std::string str(unsigned int value); template std::string str(long value); template std::string str(unsigned long value); template std::string str(long long value); template std::string str(unsigned long long value); static const char hexAlphabet[] = "0123456789ABCDEF"; // replacement for C++11 std::stoll off_t s3fs_strtoofft(const char* str, int base) { errno = 0; char *temp; long long result = strtoll(str, &temp, base); if(temp == str || *temp != '\0'){ throw std::invalid_argument("s3fs_strtoofft"); } if((result == LLONG_MIN || result == LLONG_MAX) && errno == ERANGE){ throw std::out_of_range("s3fs_strtoofft"); } return result; } string lower(string s) { // change each character of the string to lower case for(size_t i = 0; i < s.length(); i++){ s[i] = tolower(s[i]); } return s; } string trim_left(const string &s, const string &t /* = SPACES */) { string d(s); return d.erase(0, s.find_first_not_of(t)); } string trim_right(const string &s, const string &t /* = SPACES */) { string d(s); string::size_type i(d.find_last_not_of(t)); if(i == string::npos){ return ""; }else{ return d.erase(d.find_last_not_of(t) + 1); } } string trim(const string &s, const string &t /* = SPACES */) { return trim_left(trim_right(s, t), t); } /** * urlEncode a fuse path, * taking into special consideration "/", * otherwise regular urlEncode. */ string urlEncode(const string &s) { string result; for (size_t i = 0; i < s.length(); ++i) { char c = s[i]; if (c == '/' // Note- special case for fuse paths... || c == '.' || c == '-' || c == '_' || c == '~' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9')) { result += c; } else { result += "%"; result += hexAlphabet[static_cast(c) / 16]; result += hexAlphabet[static_cast(c) % 16]; } } return result; } /** * urlEncode a fuse path, * taking into special consideration "/", * otherwise regular urlEncode. */ string urlEncode2(const string &s) { string result; for (size_t i = 0; i < s.length(); ++i) { char c = s[i]; if (c == '=' // Note- special case for fuse paths... || c == '&' // Note- special case for s3... || c == '%' || c == '.' 
|| c == '-' || c == '_' || c == '~' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9')) { result += c; } else { result += "%"; result += hexAlphabet[static_cast(c) / 16]; result += hexAlphabet[static_cast(c) % 16]; } } return result; } string urlDecode(const string& s) { string result; for(size_t i = 0; i < s.length(); ++i){ if(s[i] != '%'){ result += s[i]; }else{ int ch = 0; if(s.length() <= ++i){ break; // wrong format. } ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00; if(s.length() <= ++i){ break; // wrong format. } ch *= 16; ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00; result += static_cast(ch); } } return result; } bool takeout_str_dquart(string& str) { size_t pos; // '"' for start if(string::npos != (pos = str.find_first_of('\"'))){ str = str.substr(pos + 1); // '"' for end if(string::npos == (pos = str.find_last_of('\"'))){ return false; } str = str.substr(0, pos); if(string::npos != str.find_first_of('\"')){ return false; } } return true; } // // ex. target="http://......?keyword=value&..." // bool get_keyword_value(string& target, const char* keyword, string& value) { if(!keyword){ return false; } size_t spos; size_t epos; if(string::npos == (spos = target.find(keyword))){ return false; } spos += strlen(keyword); if('=' != target.at(spos)){ return false; } spos++; if(string::npos == (epos = target.find('&', spos))){ value = target.substr(spos); }else{ value = target.substr(spos, (epos - spos)); } return true; } /** * Returns the current date * in a format suitable for a HTTP request header. */ string get_date_rfc850() { char buf[100]; time_t t = time(NULL); struct tm res; strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S GMT", gmtime_r(&t, &res)); return buf; } void get_date_sigv3(string& date, string& date8601) { time_t tm = time(NULL); date = get_date_string(tm); date8601 = get_date_iso8601(tm); } string get_date_string(time_t tm) { char buf[100]; struct tm res; strftime(buf, sizeof(buf), "%Y%m%d", gmtime_r(&tm, &res)); return buf; } string get_date_iso8601(time_t tm) { char buf[100]; struct tm res; strftime(buf, sizeof(buf), "%Y%m%dT%H%M%SZ", gmtime_r(&tm, &res)); return buf; } bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime) { if(!pdate){ return false; } struct tm tm; char* prest = strptime(pdate, "%Y-%m-%dT%T", &tm); if(prest == pdate){ // wrong format return false; } unixtime = mktime(&tm); return true; } // // Convert to unixtime from string which formatted by following: // "12Y12M12D12h12m12s", "86400s", "9h30m", etc // bool convert_unixtime_from_option_arg(const char* argv, time_t& unixtime) { if(!argv){ return false; } unixtime = 0; const char* ptmp; int last_unit_type = 0; // unit flag. 
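    // Worked example (annotation): "9h30m" accumulates 9*3600 + 30*60 = 34200
    // seconds. Units must appear in descending Y, M, D, h, m, s order, each at
    // most once, and the string must end with a unit character.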
bool is_last_number; time_t tmptime; for(ptmp = argv, is_last_number = true, tmptime = 0; ptmp && *ptmp; ++ptmp){ if('0' <= *ptmp && *ptmp <= '9'){ tmptime *= 10; tmptime += static_cast(*ptmp - '0'); is_last_number = true; }else if(is_last_number){ if('Y' == *ptmp && 1 > last_unit_type){ unixtime += (tmptime * (60 * 60 * 24 * 365)); // average 365 day / year last_unit_type = 1; }else if('M' == *ptmp && 2 > last_unit_type){ unixtime += (tmptime * (60 * 60 * 24 * 30)); // average 30 day / month last_unit_type = 2; }else if('D' == *ptmp && 3 > last_unit_type){ unixtime += (tmptime * (60 * 60 * 24)); last_unit_type = 3; }else if('h' == *ptmp && 4 > last_unit_type){ unixtime += (tmptime * (60 * 60)); last_unit_type = 4; }else if('m' == *ptmp && 5 > last_unit_type){ unixtime += (tmptime * 60); last_unit_type = 5; }else if('s' == *ptmp && 6 > last_unit_type){ unixtime += tmptime; last_unit_type = 6; }else{ return false; } tmptime = 0; is_last_number = false; }else{ return false; } } if(is_last_number){ return false; } return true; } std::string s3fs_hex(const unsigned char* input, size_t length) { std::string hex; for(size_t pos = 0; pos < length; ++pos){ char hexbuf[3]; snprintf(hexbuf, 3, "%02x", input[pos]); hex += hexbuf; } return hex; } char* s3fs_base64(const unsigned char* input, size_t length) { static const char* base = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="; char* result; if(!input || 0 == length){ return NULL; } result = new char[((length / 3) + 1) * 4 + 1]; unsigned char parts[4]; size_t rpos; size_t wpos; for(rpos = 0, wpos = 0; rpos < length; rpos += 3){ parts[0] = (input[rpos] & 0xfc) >> 2; parts[1] = ((input[rpos] & 0x03) << 4) | ((((rpos + 1) < length ? input[rpos + 1] : 0x00) & 0xf0) >> 4); parts[2] = (rpos + 1) < length ? (((input[rpos + 1] & 0x0f) << 2) | ((((rpos + 2) < length ? input[rpos + 2] : 0x00) & 0xc0) >> 6)) : 0x40; parts[3] = (rpos + 2) < length ? (input[rpos + 2] & 0x3f) : 0x40; result[wpos++] = base[parts[0]]; result[wpos++] = base[parts[1]]; result[wpos++] = base[parts[2]]; result[wpos++] = base[parts[3]]; } result[wpos] = '\0'; return result; } inline unsigned char char_decode64(const char ch) { unsigned char by; if('A' <= ch && ch <= 'Z'){ // A - Z by = static_cast(ch - 'A'); }else if('a' <= ch && ch <= 'z'){ // a - z by = static_cast(ch - 'a' + 26); }else if('0' <= ch && ch <= '9'){ // 0 - 9 by = static_cast(ch - '0' + 52); }else if('+' == ch){ // + by = 62; }else if('/' == ch){ // / by = 63; }else if('=' == ch){ // = by = 64; }else{ // something wrong by = UCHAR_MAX; } return by; } unsigned char* s3fs_decode64(const char* input, size_t* plength) { unsigned char* result; if(!input || 0 == strlen(input) || !plength){ return NULL; } result = new unsigned char[strlen(input) + 1]; unsigned char parts[4]; size_t input_len = strlen(input); size_t rpos; size_t wpos; for(rpos = 0, wpos = 0; rpos < input_len; rpos += 4){ parts[0] = char_decode64(input[rpos]); parts[1] = (rpos + 1) < input_len ? char_decode64(input[rpos + 1]) : 64; parts[2] = (rpos + 2) < input_len ? char_decode64(input[rpos + 2]) : 64; parts[3] = (rpos + 3) < input_len ? char_decode64(input[rpos + 3]) : 64; result[wpos++] = ((parts[0] << 2) & 0xfc) | ((parts[1] >> 4) & 0x03); if(64 == parts[2]){ break; } result[wpos++] = ((parts[1] << 4) & 0xf0) | ((parts[2] >> 2) & 0x0f); if(64 == parts[3]){ break; } result[wpos++] = ((parts[2] << 6) & 0xc0) | (parts[3] & 0x3f); } result[wpos] = '\0'; *plength = wpos; return result; } /* * detect and rewrite invalid utf8. 
We take invalid bytes
 * and encode them into a private region of the unicode
 * space. This is sometimes known as wtf8, wobbly transformation format.
 * It is necessary because S3 validates the utf8 used for identifiers for
 * correctness, while some clients may provide invalid utf, notably
 * windows using cp1252.
 */

// Base location for the transform. The range 0xE000 - 0xF8FF
// is a private use area, so we use the start of this range.
static unsigned int escape_base = 0xe000;

// encode bytes into wobbly utf8.
// 'result' can be null. returns true if transform was needed.
bool s3fs_wtf8_encode(const char *s, string *result)
{
    bool invalid = false;

    // Pass valid utf8 code through
    for (; *s; s++) {
        const unsigned char c = *s;

        // single byte encoding
        if (c <= 0x7f) {
            if (result) {
                *result += c;
            }
            continue;
        }

        // otherwise, it must be one of the valid start bytes
        if ( c >= 0xc2 && c <= 0xf5 ) {
            // two byte encoding
            // don't need bounds check, string is zero terminated
            if ((c & 0xe0) == 0xc0 && (s[1] & 0xc0) == 0x80) {
                // all two byte encodings starting higher than c1 are valid
                if (result) {
                    *result += c;
                    *result += *(++s);
                }
                continue;
            }
            // three byte encoding
            if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) {
                const unsigned code = ((c & 0x0f) << 12) | ((s[1] & 0x3f) << 6) | (s[2] & 0x3f);
                // surrogate code points occupy 0xd800 - 0xdfff and are not valid utf8
                if (code >= 0x800 && ! (code >= 0xd800 && code <= 0xdfff)) {
                    // not overlong and not a surrogate pair
                    if (result) {
                        *result += c;
                        *result += *(++s);
                        *result += *(++s);
                    }
                    continue;
                }
            }
            // four byte encoding
            if ((c & 0xf8) == 0xf0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80 && (s[3] & 0xc0) == 0x80) {
                const unsigned code = ((c & 0x07) << 18) | ((s[1] & 0x3f) << 12) | ((s[2] & 0x3f) << 6) | (s[3] & 0x3f);
                if (code >= 0x10000 && code <= 0x10ffff) {
                    // not overlong and in defined unicode space
                    if (result) {
                        *result += c;
                        *result += *(++s);
                        *result += *(++s);
                        *result += *(++s);
                    }
                    continue;
                }
            }
        }

        // printf("invalid %02x at %d\n", c, i);
        // Invalid utf8 code. Convert it to a private two byte area of unicode
        // e.g. the e000 - f8ff area. This will be a three byte encoding
        invalid = true;
        if (result) {
            unsigned escape = escape_base + c;
            *result += static_cast<char>(0xe0 | ((escape >> 12) & 0x0f));
            *result += static_cast<char>(0x80 | ((escape >> 06) & 0x3f));
            *result += static_cast<char>(0x80 | ((escape >> 00) & 0x3f));
        }
    }
    return invalid;
}

string s3fs_wtf8_encode(const string &s)
{
    string result;
    s3fs_wtf8_encode(s.c_str(), &result);
    return result;
}

// The reverse operation: turn encoded bytes back into their original values.
// The code assumes that we map to a three-byte code point.
bool s3fs_wtf8_decode(const char *s, string *result) { bool encoded = false; for (; *s; s++) { unsigned char c = *s; // look for a three byte tuple matching our encoding code if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) { unsigned code = (c & 0x0f) << 12; code |= (s[1] & 0x3f) << 6; code |= (s[2] & 0x3f) << 0; if (code >= escape_base && code <= escape_base + 0xff) { // convert back encoded = true; if(result){ *result += static_cast(code - escape_base); } s+=2; continue; } } if (result) { *result += c; } } return encoded; } string s3fs_wtf8_decode(const string &s) { string result; s3fs_wtf8_decode(s.c_str(), &result); return result; } /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ s3fs-fuse-1.86/src/string_util.h000066400000000000000000000054241361654130000165750ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2007 Randy Rizun * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef S3FS_STRING_UTIL_H_ #define S3FS_STRING_UTIL_H_ /* * A collection of string utilities for manipulating URLs and HTTP responses. */ #include #include #include #include static const std::string SPACES = " \t\r\n"; static inline int STR2NCMP(const char *str1, const char *str2) { return strncmp(str1, str2, strlen(str2)); } template std::string str(T value); // Convert string to off_t. Throws std::invalid_argument and std::out_of_range on bad input. 
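// e.g. s3fs_strtoofft("42") == 42 and s3fs_strtoofft("deadbeef", /*base=*/ 16) == 3735928559;
// with the default base of 0, strtoll()-style "0x" (hex) and leading-zero (octal) prefixes apply.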
off_t s3fs_strtoofft(const char* str, int base = 0); std::string trim_left(const std::string &s, const std::string &t = SPACES); std::string trim_right(const std::string &s, const std::string &t = SPACES); std::string trim(const std::string &s, const std::string &t = SPACES); std::string lower(std::string s); std::string get_date_rfc850(void); void get_date_sigv3(std::string& date, std::string& date8601); std::string get_date_string(time_t tm); std::string get_date_iso8601(time_t tm); bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime); bool convert_unixtime_from_option_arg(const char* argv, time_t& unixtime); std::string urlEncode(const std::string &s); std::string urlEncode2(const std::string &s); std::string urlDecode(const std::string& s); bool takeout_str_dquart(std::string& str); bool get_keyword_value(std::string& target, const char* keyword, std::string& value); std::string s3fs_hex(const unsigned char* input, size_t length); char* s3fs_base64(const unsigned char* input, size_t length); unsigned char* s3fs_decode64(const char* input, size_t* plength); bool s3fs_wtf8_encode(const char *s, std::string *result); std::string s3fs_wtf8_encode(const std::string &s); bool s3fs_wtf8_decode(const char *s, std::string *result); std::string s3fs_wtf8_decode(const std::string &s); #endif // S3FS_STRING_UTIL_H_ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ s3fs-fuse-1.86/src/test_string_util.cpp000066400000000000000000000120741361654130000201660ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2014 Andrew Gaul * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ #include #include #include #include "string_util.h" #include "test_util.h" void test_trim() { ASSERT_EQUALS(std::string("1234"), trim(" 1234 ")); ASSERT_EQUALS(std::string("1234"), trim("1234 ")); ASSERT_EQUALS(std::string("1234"), trim(" 1234")); ASSERT_EQUALS(std::string("1234"), trim("1234")); ASSERT_EQUALS(std::string("1234 "), trim_left(" 1234 ")); ASSERT_EQUALS(std::string("1234 "), trim_left("1234 ")); ASSERT_EQUALS(std::string("1234"), trim_left(" 1234")); ASSERT_EQUALS(std::string("1234"), trim_left("1234")); ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234 ")); ASSERT_EQUALS(std::string("1234"), trim_right("1234 ")); ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234")); ASSERT_EQUALS(std::string("1234"), trim_right("1234")); ASSERT_EQUALS(std::string("0"), str(0)); ASSERT_EQUALS(std::string("1"), str(1)); ASSERT_EQUALS(std::string("-1"), str(-1)); ASSERT_EQUALS(std::string("9223372036854775807"), str(std::numeric_limits::max())); ASSERT_EQUALS(std::string("-9223372036854775808"), str(std::numeric_limits::min())); ASSERT_EQUALS(std::string("0"), str(std::numeric_limits::min())); ASSERT_EQUALS(std::string("18446744073709551615"), str(std::numeric_limits::max())); } void test_base64() { size_t len; ASSERT_STREQUALS(s3fs_base64(NULL, 0), NULL); ASSERT_STREQUALS(reinterpret_cast(s3fs_decode64(NULL, &len)), NULL); ASSERT_STREQUALS(s3fs_base64(reinterpret_cast(""), 0), NULL); ASSERT_STREQUALS(reinterpret_cast(s3fs_decode64("", &len)), NULL); ASSERT_STREQUALS(s3fs_base64(reinterpret_cast("1"), 1), "MQ=="); ASSERT_STREQUALS(reinterpret_cast(s3fs_decode64("MQ==", &len)), "1"); ASSERT_EQUALS(len, static_cast(1)); ASSERT_STREQUALS(s3fs_base64(reinterpret_cast("12"), 2), "MTI="); ASSERT_STREQUALS(reinterpret_cast(s3fs_decode64("MTI=", &len)), "12"); ASSERT_EQUALS(len, static_cast(2)); ASSERT_STREQUALS(s3fs_base64(reinterpret_cast("123"), 3), "MTIz"); ASSERT_STREQUALS(reinterpret_cast(s3fs_decode64("MTIz", &len)), "123"); ASSERT_EQUALS(len, static_cast(3)); ASSERT_STREQUALS(s3fs_base64(reinterpret_cast("1234"), 4), "MTIzNA=="); ASSERT_STREQUALS(reinterpret_cast(s3fs_decode64("MTIzNA==", &len)), "1234"); ASSERT_EQUALS(len, static_cast(4)); // TODO: invalid input } void test_strtoofft() { ASSERT_EQUALS(s3fs_strtoofft("0"), static_cast(0L)); ASSERT_EQUALS(s3fs_strtoofft("9"), static_cast(9L)); try{ s3fs_strtoofft("A"); abort(); }catch(std::exception &e){ // expected } ASSERT_EQUALS(s3fs_strtoofft("A", /*base=*/ 16), static_cast(10L)); ASSERT_EQUALS(s3fs_strtoofft("F", /*base=*/ 16), static_cast(15L)); ASSERT_EQUALS(s3fs_strtoofft("a", /*base=*/ 16), static_cast(10L)); ASSERT_EQUALS(s3fs_strtoofft("f", /*base=*/ 16), static_cast(15L)); ASSERT_EQUALS(s3fs_strtoofft("deadbeef", /*base=*/ 16), static_cast(3735928559L)); } void test_wtf8_encoding() { std::string ascii("normal string"); std::string utf8("Hyld\xc3\xbdpi \xc3\xbej\xc3\xb3\xc3\xb0""f\xc3\xa9lagsins vex \xc3\xbar k\xc3\xa6rkomnu b\xc3\xb6li \xc3\xad \xc3\xa1st"); std::string cp1252("Hyld\xfdpi \xfej\xf3\xf0""f\xe9lagsins vex \xfar k\xe6rkomnu b\xf6li \xed \xe1st"); std::string broken = utf8; broken[14] = 0x97; std::string mixed = ascii + utf8 + cp1252; ASSERT_EQUALS(s3fs_wtf8_encode(ascii), ascii); ASSERT_EQUALS(s3fs_wtf8_decode(ascii), ascii); ASSERT_EQUALS(s3fs_wtf8_encode(utf8), utf8); ASSERT_EQUALS(s3fs_wtf8_decode(utf8), utf8); ASSERT_NEQUALS(s3fs_wtf8_encode(cp1252), cp1252); ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(cp1252)), cp1252); ASSERT_NEQUALS(s3fs_wtf8_encode(broken), broken); 
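    // Invalid input bytes change under encoding, but an encode/decode round
    // trip must reproduce the original byte sequence exactly.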
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(broken)), broken); ASSERT_NEQUALS(s3fs_wtf8_encode(mixed), mixed); ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(mixed)), mixed); } int main(int argc, char *argv[]) { test_trim(); test_base64(); test_strtoofft(); test_wtf8_encoding(); return 0; } s3fs-fuse-1.86/src/test_util.h000066400000000000000000000055441361654130000162510ustar00rootroot00000000000000/* * s3fs - FUSE-based file system backed by Amazon S3 * * Copyright(C) 2014 Andrew Gaul * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include #include #include template void assert_equals(const T &x, const T &y, const char *file, int line) { if (x != y) { std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl; std::cerr << std::endl; std::exit(1); } } template <> void assert_equals(const std::string &x, const std::string &y, const char *file, int line) { if (x != y) { std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl; for (unsigned i=0; i void assert_nequals(const T &x, const T &y, const char *file, int line) { if (x == y) { std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl; std::exit(1); } } template <> void assert_nequals(const std::string &x, const std::string &y, const char *file, int line) { if (x == y) { std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl; for (unsigned i=0; i # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. ###################################################################### TESTS=small-integration-test.sh EXTRA_DIST = \ integration-test-common.sh \ require-root.sh \ small-integration-test.sh \ mergedir.sh \ sample_delcache.sh \ sample_ahbe.conf testdir = test s3fs-fuse-1.86/test/filter-suite-log.sh000077500000000000000000000113411361654130000177760ustar00rootroot00000000000000#!/bin/bash # # s3fs - FUSE-based file system backed by Amazon S3 # # Copyright 2007-2008 Randy Rizun # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # func_usage() { echo "" echo "Usage: $1 [-h] " echo " -h print help" echo " log file path path for test-suite.log" echo "" } PRGNAME=`basename $0` SCRIPTDIR=`dirname $0` S3FSDIR=`cd ${SCRIPTDIR}/..; pwd` TOPDIR=`cd ${S3FSDIR}/test; pwd` SUITELOG="${TOPDIR}/test-suite.log" TMP_LINENO_FILE="/tmp/.lineno.tmp" while [ $# -ne 0 ]; do if [ "X$1" = "X" ]; then break elif [ "X$1" = "X-h" -o "X$1" = "X-H" -o "X$1" = "X--help" -o "X$1" = "X--HELP" ]; then func_usage ${PRGNAME} exit 0 else SUITELOG=$1 fi shift done if [ ! -f ${SUITELOG} ]; then echo "[ERROR] not found ${SUITELOG} log file." exit 1 fi # # Extract keyword line numbers and types # # 0 : normal line # 1 : start line for one small test(specified in integration-test-main.sh) # 2 : passed line of end of one small test(specified in test-utils.sh) # 3 : failed line of end of one small test(specified in test-utils.sh) # grep -n -e 'test_.*: ".*"' -o -e 'test_.* passed' -o -e 'test_.* failed' ${SUITELOG} 2>/dev/null | sed 's/:test_.*: ".*"/ 1/g' | sed 's/:test_.* passed/ 2/g' | sed 's/:test_.* failed/ 3/g' > ${TMP_LINENO_FILE} # # Loop for printing result # prev_line_type=0 prev_line_number=1 while read line; do # line is " " number_type=($line) head_line_cnt=`expr ${number_type[0]} - 1` tail_line_cnt=`expr ${number_type[0]} - ${prev_line_number}` if [ ${number_type[1]} -eq 2 ]; then echo "" fi if [ ${prev_line_type} -eq 1 ]; then if [ ${number_type[1]} -eq 2 ]; then # if passed, cut s3fs information messages head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]' elif [ ${number_type[1]} -eq 3 ]; then # if failed, print all head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%' else # there is start keyword but not end keyword, so print all head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%' fi elif [ ${prev_line_type} -eq 2 -o ${prev_line_type} -eq 3 ]; then if [ ${number_type[1]} -eq 2 -o ${number_type[1]} -eq 3 ]; then # previous is end of chmpx, but this type is end of chmpx without start keyword. then print all head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%' else # this area is not from start to end, cut s3fs information messages head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]' fi else if [ ${number_type[1]} -eq 2 -o ${number_type[1]} -eq 3 ]; then # previous is normal, but this type is end of chmpx without start keyword. 
then print all head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%' else # this area is normal, cut s3fs information messages head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]' fi fi if [ ${number_type[1]} -eq 3 ]; then echo "" fi prev_line_type=${number_type[1]} prev_line_number=${number_type[0]} done < ${TMP_LINENO_FILE} # # Print rest lines # file_line_cnt=`wc -l ${SUITELOG} | awk '{print $1}'` tail_line_cnt=`expr ${file_line_cnt} - ${prev_line_number}` if [ ${prev_line_type} -eq 1 ]; then tail -${tail_line_cnt} ${SUITELOG} | grep -v -e '[0-9]\+\%' else tail -${tail_line_cnt} ${SUITELOG} | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]' fi # # Remove temp file # rm -f ${TMP_LINENO_FILE} exit 0 # # Local variables: # tab-width: 4 # c-basic-offset: 4 # End: # vim600: noet sw=4 ts=4 fdm=marker # vim<600: noet sw=4 ts=4 # s3fs-fuse-1.86/test/integration-test-common.sh000066400000000000000000000164251361654130000213760ustar00rootroot00000000000000#!/bin/bash # # Common code for starting an s3fs-fuse mountpoint and an S3Proxy instance # to run tests against S3Proxy locally. # # To run against an Amazon S3 or other S3 provider, specify the following # environment variables: # # S3FS_CREDENTIALS_FILE=keyfile s3fs format key file # TEST_BUCKET_1=bucketname Name of bucket to use # S3PROXY_BINARY="" Specify empty string to skip S3Proxy start # S3_URL="https://s3.amazonaws.com" Specify Amazon AWS as the S3 provider # # Example of running against Amazon S3 using a bucket named "bucket: # # S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="https://s3.amazonaws.com" ./small-integration-test.sh # # To change the s3fs-fuse debug level: # # DBGLEVEL=debug ./small-integration-test.sh # # To stop and wait after the mount point is up for manual interaction. This allows you to # explore the mounted file system exactly as it would have been started for the test case # # INTERACT=1 DBGLEVEL=debug ./small-integration-test.sh # # Run all of the tests from the makefile # # S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="https://s3.amazonaws.com" make check # # Run the tests with request auth turned off in both S3Proxy and s3fs-fuse. This can be # useful for poking around with plain old curl # # PUBLIC=1 INTERACT=1 ./small-integration-test.sh # # A valgrind tool can be specified # eg: VALGRIND="--tool=memcheck --leak-check=full" ./small-integration-test.sh set -o errexit set -o pipefail S3FS=../src/s3fs # Allow these defaulted values to be overridden : ${S3_URL:="https://127.0.0.1:8080"} : ${S3FS_CREDENTIALS_FILE:="passwd-s3fs"} : ${TEST_BUCKET_1:="s3fs-integration-test"} export TEST_BUCKET_1 export S3_URL export TEST_SCRIPT_DIR=`pwd` export TEST_BUCKET_MOUNT_POINT_1=${TEST_BUCKET_1} S3PROXY_VERSION="1.7.0" S3PROXY_BINARY=${S3PROXY_BINARY-"s3proxy-${S3PROXY_VERSION}"} if [ ! -f "$S3FS_CREDENTIALS_FILE" ] then echo "Missing credentials file: $S3FS_CREDENTIALS_FILE" exit 1 fi chmod 600 "$S3FS_CREDENTIALS_FILE" if [ ! -d $TEST_BUCKET_MOUNT_POINT_1 ] then mkdir -p $TEST_BUCKET_MOUNT_POINT_1 fi # This function execute the function parameters $1 times # before giving up, with 1 second delays. function retry { set +o errexit N=$1; shift; status=0 for i in $(seq $N); do echo "Trying: $*" "$@" status=$? 
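        # Note: a zero exit status means the wrapped command succeeded, so the
        # check below breaks out of the retry loop; any non-zero status sleeps
        # one second and tries again, up to the N attempts given as $1.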
if [ $status == 0 ]; then break fi sleep 1 echo "Retrying: $*" done if [ $status != 0 ]; then echo "timeout waiting for $*" fi set -o errexit return $status } # Proxy is not started if S3PROXY_BINARY is an empty string # PUBLIC unset: use s3proxy.conf # PUBLIC=1: use s3proxy-noauth.conf (no request signing) # function start_s3proxy { if [ -n "${PUBLIC}" ]; then S3PROXY_CONFIG="s3proxy-noauth.conf" else S3PROXY_CONFIG="s3proxy.conf" fi if [ -n "${S3PROXY_BINARY}" ] then if [ ! -e "${S3PROXY_BINARY}" ]; then wget "https://github.com/andrewgaul/s3proxy/releases/download/s3proxy-${S3PROXY_VERSION}/s3proxy" \ --quiet -O "${S3PROXY_BINARY}" chmod +x "${S3PROXY_BINARY}" fi stdbuf -oL -eL java -jar "$S3PROXY_BINARY" --properties $S3PROXY_CONFIG & S3PROXY_PID=$! # wait for S3Proxy to start for i in $(seq 30); do if exec 3<>"/dev/tcp/127.0.0.1/8080"; then exec 3<&- # Close for read exec 3>&- # Close for write break fi sleep 1 done fi } function stop_s3proxy { if [ -n "${S3PROXY_PID}" ] then kill $S3PROXY_PID fi } # Mount the bucket, function arguments passed to s3fs in addition to # a set of common arguments. function start_s3fs { # Public bucket if PUBLIC is set if [ -n "${PUBLIC}" ]; then AUTH_OPT="-o public_bucket=1" else AUTH_OPT="-o passwd_file=${S3FS_CREDENTIALS_FILE}" fi # If VALGRIND is set, pass it as options to valgrind. # start valgrind-listener in another shell. # eg: VALGRIND="--tool=memcheck --leak-check=full" ./small-integration-test.sh # Start valgrind-listener (default port is 1500) if [ -n "${VALGRIND}" ]; then VALGRIND_EXEC="valgrind ${VALGRIND} --log-socket=127.0.1.1" fi # On OSX only, we need to specify the direct_io and auto_cache flag. if [ `uname` = "Darwin" ]; then DIRECT_IO_OPT="-o direct_io -o auto_cache" else DIRECT_IO_OPT="" fi # Common s3fs options: # # TODO: Allow all these options to be overridden with env variables # # use_path_request_style # The test env doesn't have virtual hosts # createbucket # S3Proxy always starts with no buckets, this tests the s3fs-fuse # automatic bucket creation path. # $AUTH_OPT # Will be either "-o public_bucket=1" # or # "-o passwd_file=${S3FS_CREDENTIALS_FILE}" # dbglevel # error by default. override with DBGLEVEL env variable # -f # Keep s3fs in foreground instead of daemonizing # # subshell with set -x to log exact invocation of s3fs-fuse ( set -x stdbuf -oL -eL \ ${VALGRIND_EXEC} ${S3FS} \ $TEST_BUCKET_1 \ $TEST_BUCKET_MOUNT_POINT_1 \ -o use_path_request_style \ -o url=${S3_URL} \ -o no_check_certificate \ -o ssl_verify_hostname=0 \ -o use_xattr=1 \ -o createbucket \ ${AUTH_OPT} \ ${DIRECT_IO_OPT} \ -o stat_cache_expire=1 \ -o stat_cache_interval_expire=1 \ -o dbglevel=${DBGLEVEL:=info} \ -o retries=3 \ -f \ "${@}" | stdbuf -oL -eL sed $SED_BUFFER_FLAG "s/^/s3fs: /" & ) if [ `uname` = "Darwin" ]; then set +o errexit TRYCOUNT=0 while [ $TRYCOUNT -le 20 ]; do df | grep -q $TEST_BUCKET_MOUNT_POINT_1 if [ $? -eq 0 ]; then break; fi sleep 1 TRYCOUNT=`expr ${TRYCOUNT} + 1` done if [ $? 
-ne 0 ]; then exit 1 fi set -o errexit else retry 20 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts || exit 1 fi # Quick way to start system up for manual testing with options under test if [[ -n ${INTERACT} ]]; then echo "Mountpoint $TEST_BUCKET_MOUNT_POINT_1 is ready" echo "control-C to quit" sleep infinity exit 0 fi } function stop_s3fs { # Retry in case file system is in use if [ `uname` = "Darwin" ]; then if df | grep -q $TEST_BUCKET_MOUNT_POINT_1; then retry 10 df | grep -q $TEST_BUCKET_MOUNT_POINT_1 && umount $TEST_BUCKET_MOUNT_POINT_1 fi else if grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts; then retry 10 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts && fusermount -u $TEST_BUCKET_MOUNT_POINT_1 fi fi } # trap handlers do not stack. If a test sets its own, the new handler should call common_exit_handler function common_exit_handler { stop_s3fs stop_s3proxy } trap common_exit_handler EXIT s3fs-fuse-1.86/test/integration-test-main.sh000077500000000000000000000526111361654130000210320ustar00rootroot00000000000000#!/bin/bash set -o errexit set -o pipefail source test-utils.sh function test_append_file { describe "Testing append to file ..." TEST_INPUT="echo ${TEST_TEXT} to ${TEST_TEXT_FILE}" # Write a small test file for x in `seq 1 $TEST_TEXT_FILE_LENGTH` do echo $TEST_INPUT done > ${TEST_TEXT_FILE} check_file_size "${TEST_TEXT_FILE}" $(($TEST_TEXT_FILE_LENGTH * $(echo $TEST_INPUT | wc -c))) rm_test_file } function test_truncate_file { describe "Testing truncate file ..." # Write a small test file echo "${TEST_TEXT}" > ${TEST_TEXT_FILE} # Truncate file to 0 length. This should trigger open(path, O_RDWR | O_TRUNC...) : > ${TEST_TEXT_FILE} check_file_size "${TEST_TEXT_FILE}" 0 rm_test_file } function test_truncate_empty_file { describe "Testing truncate empty file ..." # Write an empty test file touch ${TEST_TEXT_FILE} # Truncate the file to 1024 length t_size=1024 truncate ${TEST_TEXT_FILE} -s $t_size check_file_size "${TEST_TEXT_FILE}" $t_size rm_test_file } function test_mv_file { describe "Testing mv file function ..." # if the rename file exists, delete it if [ -e $ALT_TEST_TEXT_FILE ] then rm $ALT_TEST_TEXT_FILE fi if [ -e $ALT_TEST_TEXT_FILE ] then echo "Could not delete file ${ALT_TEST_TEXT_FILE}, it still exists" return 1 fi # create the test file again mk_test_file # save file length ALT_TEXT_LENGTH=`wc -c $TEST_TEXT_FILE | awk '{print $1}'` #rename the test file mv $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE if [ ! -e $ALT_TEST_TEXT_FILE ] then echo "Could not move file" return 1 fi #check the renamed file content-type if [ -f "/etc/mime.types" ] then check_content_type "$1/$ALT_TEST_TEXT_FILE" "text/plain" fi # Check the contents of the alt file ALT_FILE_LENGTH=`wc -c $ALT_TEST_TEXT_FILE | awk '{print $1}'` if [ "$ALT_FILE_LENGTH" -ne "$ALT_TEXT_LENGTH" ] then echo "moved file length is not as expected expected: $ALT_TEXT_LENGTH got: $ALT_FILE_LENGTH" return 1 fi # clean up rm_test_file $ALT_TEST_TEXT_FILE } function test_mv_empty_directory { describe "Testing mv directory function ..." if [ -e $TEST_DIR ]; then echo "Unexpected, this file/directory exists: ${TEST_DIR}" return 1 fi mk_test_dir mv ${TEST_DIR} ${TEST_DIR}_rename if [ ! -d "${TEST_DIR}_rename" ]; then echo "Directory ${TEST_DIR} was not renamed" return 1 fi rmdir ${TEST_DIR}_rename if [ -e "${TEST_DIR}_rename" ]; then echo "Could not remove the test directory, it still exists: ${TEST_DIR}_rename" return 1 fi } function test_mv_nonempty_directory { describe "Testing mv directory function ..." 
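    # Note: S3 itself has no rename operation, so an "mv" of a non-empty
    # directory is expected to be implemented by s3fs as a server-side copy of
    # every child object to the new key prefix followed by a delete of the old
    # keys; this test exercises that whole path end-to-end.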
if [ -e $TEST_DIR ]; then echo "Unexpected, this file/directory exists: ${TEST_DIR}" return 1 fi mk_test_dir touch ${TEST_DIR}/file mv ${TEST_DIR} ${TEST_DIR}_rename if [ ! -d "${TEST_DIR}_rename" ]; then echo "Directory ${TEST_DIR} was not renamed" return 1 fi rm -r ${TEST_DIR}_rename if [ -e "${TEST_DIR}_rename" ]; then echo "Could not remove the test directory, it still exists: ${TEST_DIR}_rename" return 1 fi } function test_redirects { describe "Testing redirects ..." mk_test_file ABCDEF CONTENT=`cat $TEST_TEXT_FILE` if [ "${CONTENT}" != "ABCDEF" ]; then echo "CONTENT read is unexpected, got ${CONTENT}, expected ABCDEF" return 1 fi echo XYZ > $TEST_TEXT_FILE CONTENT=`cat $TEST_TEXT_FILE` if [ ${CONTENT} != "XYZ" ]; then echo "CONTENT read is unexpected, got ${CONTENT}, expected XYZ" return 1 fi echo 123456 >> $TEST_TEXT_FILE LINE1=`sed -n '1,1p' $TEST_TEXT_FILE` LINE2=`sed -n '2,2p' $TEST_TEXT_FILE` if [ ${LINE1} != "XYZ" ]; then echo "LINE1 was not as expected, got ${LINE1}, expected XYZ" return 1 fi if [ ${LINE2} != "123456" ]; then echo "LINE2 was not as expected, got ${LINE2}, expected 123456" return 1 fi # clean up rm_test_file } function test_mkdir_rmdir { describe "Testing creation/removal of a directory" if [ -e $TEST_DIR ]; then echo "Unexpected, this file/directory exists: ${TEST_DIR}" return 1 fi mk_test_dir rm_test_dir } function test_chmod { describe "Testing chmod file function ..." # create the test file again mk_test_file if [ `uname` = "Darwin" ]; then ORIGINAL_PERMISSIONS=$(stat -f "%p" $TEST_TEXT_FILE) else ORIGINAL_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE) fi chmod 777 $TEST_TEXT_FILE; # if they're the same, we have a problem. if [ `uname` = "Darwin" ]; then CHANGED_PERMISSIONS=$(stat -f "%p" $TEST_TEXT_FILE) else CHANGED_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE) fi if [ $CHANGED_PERMISSIONS == $ORIGINAL_PERMISSIONS ] then echo "Could not modify $TEST_TEXT_FILE permissions" return 1 fi # clean up rm_test_file } function test_chown { describe "Testing chown file function ..." # create the test file again mk_test_file if [ `uname` = "Darwin" ]; then ORIGINAL_PERMISSIONS=$(stat -f "%u:%g" $TEST_TEXT_FILE) else ORIGINAL_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE) fi # [NOTE] # Prevents test interruptions due to permission errors, etc. # If the chown command fails, an error will occur with the # following judgment statement. So skip the chown command error. # '|| true' was added due to a problem with Travis CI and MacOS # and ensure_diskfree option. # chown 1000:1000 $TEST_TEXT_FILE || true # if they're the same, we have a problem. 
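    # Note: s3fs keeps ownership in object metadata (the x-amz-meta-uid and
    # x-amz-meta-gid headers) rather than in a real inode, so a successful
    # chown here has to round-trip through an S3 metadata update.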
if [ `uname` = "Darwin" ]; then CHANGED_PERMISSIONS=$(stat -f "%u:%g" $TEST_TEXT_FILE) else CHANGED_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE) fi if [ $CHANGED_PERMISSIONS == $ORIGINAL_PERMISSIONS ] then if [ $ORIGINAL_PERMISSIONS == "1000:1000" ] then echo "Could not be strict check because original file permission 1000:1000" else echo "Could not modify $TEST_TEXT_FILE ownership($ORIGINAL_PERMISSIONS to 1000:1000)" return 1 fi fi # clean up rm_test_file } function test_list { describe "Testing list" mk_test_file mk_test_dir file_cnt=$(ls -1 | wc -l) if [ $file_cnt != 2 ]; then echo "Expected 2 file but got $file_cnt" return 1 fi rm_test_file rm_test_dir } function test_remove_nonempty_directory { describe "Testing removing a non-empty directory" mk_test_dir touch "${TEST_DIR}/file" ( set +o pipefail rmdir "${TEST_DIR}" 2>&1 | grep -q "Directory not empty" ) rm "${TEST_DIR}/file" rm_test_dir } function test_external_modification { describe "Test external modification to an object" echo "old" > ${TEST_TEXT_FILE} OBJECT_NAME="$(basename $PWD)/${TEST_TEXT_FILE}" sleep 2 echo "new new" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}" cmp ${TEST_TEXT_FILE} <(echo "new new") rm -f ${TEST_TEXT_FILE} } function test_read_external_object() { describe "create objects via aws CLI and read via s3fs" OBJECT_NAME="$(basename $PWD)/${TEST_TEXT_FILE}" sleep 3 echo "test" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}" cmp ${TEST_TEXT_FILE} <(echo "test") rm -f ${TEST_TEXT_FILE} } function test_rename_before_close { describe "Testing rename before close ..." ( echo foo mv $TEST_TEXT_FILE ${TEST_TEXT_FILE}.new ) > $TEST_TEXT_FILE if ! cmp <(echo foo) ${TEST_TEXT_FILE}.new; then echo "rename before close failed" return 1 fi rm_test_file ${TEST_TEXT_FILE}.new rm -f ${TEST_TEXT_FILE} } function test_multipart_upload { describe "Testing multi-part upload ..." dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1 dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1 # Verify contents of file echo "Comparing test file" if ! cmp "/tmp/${BIG_FILE}" "${BIG_FILE}" then return 1 fi rm -f "/tmp/${BIG_FILE}" rm_test_file "${BIG_FILE}" } function test_multipart_copy { describe "Testing multi-part copy ..." dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1 dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1 mv "${BIG_FILE}" "${BIG_FILE}-copy" # Verify contents of file echo "Comparing test file" if ! cmp "/tmp/${BIG_FILE}" "${BIG_FILE}-copy" then return 1 fi #check the renamed file content-type check_content_type "$1/${BIG_FILE}-copy" "application/octet-stream" rm -f "/tmp/${BIG_FILE}" rm_test_file "${BIG_FILE}-copy" } function test_multipart_mix { describe "Testing multi-part mix ..." if [ `uname` = "Darwin" ]; then cat /dev/null > $BIG_FILE fi dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH seek=0 count=1 dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH seek=0 count=1 # (1) Edit the middle of an existing file # modify directly(seek 7.5MB offset) # In the case of nomultipart and nocopyapi, # it makes no sense, but copying files is because it leaves no cache. 
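    # Note on the arithmetic below: with bs=4, dd's "seek" counts 4-byte
    # blocks, so MODIFY_START_BLOCK=$((15*1024*1024/2/4)) places the 16-byte
    # write at byte offset 15*1024*1024/2 = 7864320 (7.5MB), i.e. in the
    # middle of the 25MB test file.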
# cp /tmp/${BIG_FILE} /tmp/${BIG_FILE}-mix cp ${BIG_FILE} ${BIG_FILE}-mix MODIFY_START_BLOCK=$((15*1024*1024/2/4)) echo -n "0123456789ABCDEF" | dd of="${BIG_FILE}-mix" bs=4 count=4 seek=$MODIFY_START_BLOCK conv=notrunc echo -n "0123456789ABCDEF" | dd of="/tmp/${BIG_FILE}-mix" bs=4 count=4 seek=$MODIFY_START_BLOCK conv=notrunc # Verify contents of file echo "Comparing test file (1)" if ! cmp "/tmp/${BIG_FILE}-mix" "${BIG_FILE}-mix" then return 1 fi # (2) Write to an area larger than the size of the existing file # modify directly(over file end offset) # cp /tmp/${BIG_FILE} /tmp/${BIG_FILE}-mix cp ${BIG_FILE} ${BIG_FILE}-mix OVER_FILE_BLOCK_POS=$((26*1024*1024/4)) echo -n "0123456789ABCDEF" | dd of="${BIG_FILE}-mix" bs=4 count=4 seek=$OVER_FILE_BLOCK_POS conv=notrunc echo -n "0123456789ABCDEF" | dd of="/tmp/${BIG_FILE}-mix" bs=4 count=4 seek=$OVER_FILE_BLOCK_POS conv=notrunc # Verify contents of file echo "Comparing test file (2)" if ! cmp "/tmp/${BIG_FILE}-mix" "${BIG_FILE}-mix" then return 1 fi # (3) Writing from the 0th byte # cp /tmp/${BIG_FILE} /tmp/${BIG_FILE}-mix cp ${BIG_FILE} ${BIG_FILE}-mix echo -n "0123456789ABCDEF" | dd of="${BIG_FILE}-mix" bs=4 count=4 seek=0 conv=notrunc echo -n "0123456789ABCDEF" | dd of="/tmp/${BIG_FILE}-mix" bs=4 count=4 seek=0 conv=notrunc # Verify contents of file echo "Comparing test file (3)" if ! cmp "/tmp/${BIG_FILE}-mix" "${BIG_FILE}-mix" then return 1 fi # (4) Write to the area within 5MB from the top # modify directly(seek 1MB offset) # cp /tmp/${BIG_FILE} /tmp/${BIG_FILE}-mix cp ${BIG_FILE} ${BIG_FILE}-mix MODIFY_START_BLOCK=$((1*1024*1024)) echo -n "0123456789ABCDEF" | dd of="${BIG_FILE}-mix" bs=4 count=4 seek=$MODIFY_START_BLOCK conv=notrunc echo -n "0123456789ABCDEF" | dd of="/tmp/${BIG_FILE}-mix" bs=4 count=4 seek=$MODIFY_START_BLOCK conv=notrunc # Verify contents of file echo "Comparing test file (4)" if ! cmp "/tmp/${BIG_FILE}-mix" "${BIG_FILE}-mix" then return 1 fi rm -f "/tmp/${BIG_FILE}" rm -f "/tmp/${BIG_FILE}-mix" rm_test_file "${BIG_FILE}" rm_test_file "${BIG_FILE}-mix" } function test_special_characters { describe "Testing special characters ..." ( set +o pipefail ls 'special' 2>&1 | grep -q 'No such file or directory' ls 'special?' 2>&1 | grep -q 'No such file or directory' ls 'special*' 2>&1 | grep -q 'No such file or directory' ls 'special~' 2>&1 | grep -q 'No such file or directory' ls 'specialµ' 2>&1 | grep -q 'No such file or directory' ) mkdir "TOYOTA TRUCK 8.2.2" } function test_symlink { describe "Testing symlinks ..." rm -f $TEST_TEXT_FILE rm -f $ALT_TEST_TEXT_FILE echo foo > $TEST_TEXT_FILE ln -s $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE cmp $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE rm -f $TEST_TEXT_FILE [ -L $ALT_TEST_TEXT_FILE ] [ ! -f $ALT_TEST_TEXT_FILE ] rm -f $ALT_TEST_TEXT_FILE } function test_extended_attributes { describe "Testing extended attributes ..." rm -f $TEST_TEXT_FILE touch $TEST_TEXT_FILE # set value set_xattr key1 value1 $TEST_TEXT_FILE get_xattr key1 $TEST_TEXT_FILE | grep -q '^value1$' # append value set_xattr key2 value2 $TEST_TEXT_FILE get_xattr key1 $TEST_TEXT_FILE | grep -q '^value1$' get_xattr key2 $TEST_TEXT_FILE | grep -q '^value2$' # remove value del_xattr key1 $TEST_TEXT_FILE ! get_xattr key1 $TEST_TEXT_FILE get_xattr key2 $TEST_TEXT_FILE | grep -q '^value2$' rm_test_file } function test_mtime_file { describe "Testing mtime preservation function ..." 
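    # Note: s3fs stores the modification time in the x-amz-meta-mtime header,
    # so "cp -p" can only preserve the timestamp if that header survives the
    # copy; the comparison below verifies exactly that.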
# if the rename file exists, delete it if [ -e $ALT_TEST_TEXT_FILE -o -L $ALT_TEST_TEXT_FILE ] then rm $ALT_TEST_TEXT_FILE fi if [ -e $ALT_TEST_TEXT_FILE ] then echo "Could not delete file ${ALT_TEST_TEXT_FILE}, it still exists" return 1 fi # create the test file again mk_test_file sleep 2 # allow for some time to pass to compare the timestamps between test & alt #copy the test file with preserve mode cp -p $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE testmtime=`get_mtime $TEST_TEXT_FILE` altmtime=`get_mtime $ALT_TEST_TEXT_FILE` if [ "$testmtime" -ne "$altmtime" ] then echo "File times do not match: $testmtime != $altmtime" return 1 fi rm_test_file rm_test_file $ALT_TEST_TEXT_FILE } function test_update_time() { describe "Testing update time function ..." # create the test mk_test_file mtime=`get_ctime $TEST_TEXT_FILE` ctime=`get_mtime $TEST_TEXT_FILE` sleep 2 chmod +x $TEST_TEXT_FILE ctime2=`get_ctime $TEST_TEXT_FILE` mtime2=`get_mtime $TEST_TEXT_FILE` if [ $ctime -eq $ctime2 -o $mtime -ne $mtime2 ]; then echo "Expected updated ctime: $ctime != $ctime2 and same mtime: $mtime == $mtime2" return 1 fi sleep 2 chown $UID:$UID $TEST_TEXT_FILE; ctime3=`get_ctime $TEST_TEXT_FILE` mtime3=`get_mtime $TEST_TEXT_FILE` if [ $ctime2 -eq $ctime3 -o $mtime2 -ne $mtime3 ]; then echo "Expected updated ctime: $ctime2 != $ctime3 and same mtime: $mtime2 == $mtime3" return 1 fi sleep 2 set_xattr key value $TEST_TEXT_FILE ctime4=`get_ctime $TEST_TEXT_FILE` mtime4=`get_mtime $TEST_TEXT_FILE` if [ $ctime3 -eq $ctime4 -o $mtime3 -ne $mtime4 ]; then echo "Expected updated ctime: $ctime3 != $ctime4 and same mtime: $mtime3 == $mtime4" return 1 fi sleep 2 echo foo >> $TEST_TEXT_FILE ctime5=`get_ctime $TEST_TEXT_FILE` mtime5=`get_mtime $TEST_TEXT_FILE` if [ $ctime4 -eq $ctime5 -o $mtime4 -eq $mtime5 ]; then echo "Expected updated ctime: $ctime4 != $ctime5 and updated mtime: $mtime4 != $mtime5" return 1 fi rm_test_file } function test_rm_rf_dir { describe "Test that rm -rf will remove directory with contents" # Create a dir with some files and directories mkdir dir1 mkdir dir1/dir2 touch dir1/file1 touch dir1/dir2/file2 # Remove the dir with recursive rm rm -rf dir1 if [ -e dir1 ]; then echo "rm -rf did not remove $PWD/dir1" return 1 fi } function test_copy_file { describe "Test simple copy" dd if=/dev/urandom of=/tmp/simple_file bs=1024 count=1 cp /tmp/simple_file copied_simple_file cmp /tmp/simple_file copied_simple_file rm_test_file /tmp/simple_file rm_test_file copied_simple_file } function test_write_after_seek_ahead { describe "Test writes succeed after a seek ahead" dd if=/dev/zero of=testfile seek=1 count=1 bs=1024 rm_test_file testfile } function test_overwrite_existing_file_range { describe "Test overwrite range succeeds" dd if=<(seq 1000) of=${TEST_TEXT_FILE} dd if=/dev/zero of=${TEST_TEXT_FILE} seek=1 count=1 bs=1024 conv=notrunc cmp ${TEST_TEXT_FILE} <( seq 1000 | head -c 1024 dd if=/dev/zero count=1 bs=1024 seq 1000 | tail -c +2049 ) rm_test_file } function test_concurrency { describe "Test concurrent updates to a directory" for i in `seq 5`; do echo foo > $i; done for process in `seq 10`; do for i in `seq 5`; do file=$(ls `seq 5` | sed -n "$(($RANDOM % 5 + 1))p") cat $file >/dev/null || true rm -f $file echo foo > $file || true done & done wait rm -f `seq 5` } function test_concurrent_writes { describe "Test concurrent updates to a file" dd if=/dev/urandom of=${TEST_TEXT_FILE} bs=$BIG_FILE_LENGTH count=1 for process in `seq 10`; do dd if=/dev/zero of=${TEST_TEXT_FILE} seek=$(($RANDOM % $BIG_FILE_LENGTH)) count=1 
bs=1024 conv=notrunc & done wait rm_test_file } function test_open_second_fd { describe "read from an open fd" rm_test_file second_fd_file RESULT=$( (echo foo ; wc -c < second_fd_file >&2) 2>& 1>second_fd_file) if [ "$RESULT" -ne 4 ]; then echo "size mismatch, expected: 4, was: ${RESULT}" return 1 fi rm_test_file second_fd_file } function test_write_multiple_offsets { describe "test writing to multiple offsets" ../../write_multiple_offsets.py ${TEST_TEXT_FILE} rm_test_file ${TEST_TEXT_FILE} } function test_clean_up_cache() { describe "Test clean up cache" dir="many_files" count=25 mkdir -p $dir for x in $(seq $count); do dd if=/dev/urandom of=$dir/file-$x bs=10485760 count=1 done file_cnt=$(ls $dir | wc -l) if [ $file_cnt != $count ]; then echo "Expected $count files but got $file_cnt" rm -rf $dir return 1 fi CACHE_DISK_AVAIL_SIZE=`get_disk_avail_size $CACHE_DIR` if [ "$CACHE_DISK_AVAIL_SIZE" -lt "$ENSURE_DISKFREE_SIZE" ];then echo "Cache disk avail size:$CACHE_DISK_AVAIL_SIZE less than ensure_diskfree size:$ENSURE_DISKFREE_SIZE" rm -rf $dir return 1 fi rm -rf $dir } function test_content_type() { describe "Test Content-Type detection" DIR_NAME="$(basename $PWD)" touch "test.txt" CONTENT_TYPE=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "${DIR_NAME}/test.txt" | grep "ContentType") if [ `uname` = "Darwin" ]; then if ! echo $CONTENT_TYPE | grep -q "application/octet-stream"; then echo "Unexpected Content-Type(MacOS): $CONTENT_TYPE" return 1; fi else if ! echo $CONTENT_TYPE | grep -q "text/plain"; then echo "Unexpected Content-Type: $CONTENT_TYPE" return 1; fi fi touch "test.jpg" CONTENT_TYPE=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "${DIR_NAME}/test.jpg" | grep "ContentType") if [ `uname` = "Darwin" ]; then if ! echo $CONTENT_TYPE | grep -q "application/octet-stream"; then echo "Unexpected Content-Type(MacOS): $CONTENT_TYPE" return 1; fi else if ! echo $CONTENT_TYPE | grep -q "image/jpeg"; then echo "Unexpected Content-Type: $CONTENT_TYPE" return 1; fi fi touch "test.bin" CONTENT_TYPE=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "${DIR_NAME}/test.bin" | grep "ContentType") if ! echo $CONTENT_TYPE | grep -q "application/octet-stream"; then echo "Unexpected Content-Type: $CONTENT_TYPE" return 1; fi mkdir "test.dir" CONTENT_TYPE=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "${DIR_NAME}/test.dir/" | grep "ContentType") if ! echo $CONTENT_TYPE | grep -q "application/x-directory"; then echo "Unexpected Content-Type: $CONTENT_TYPE" return 1; fi } function add_all_tests { if `ps -ef | grep -v grep | grep s3fs | grep -q ensure_diskfree` && ! 
`uname | grep -q Darwin`; then add_tests test_clean_up_cache fi add_tests test_append_file add_tests test_truncate_file add_tests test_truncate_empty_file add_tests test_mv_file add_tests test_mv_empty_directory add_tests test_mv_nonempty_directory add_tests test_redirects add_tests test_mkdir_rmdir add_tests test_chmod add_tests test_chown add_tests test_list add_tests test_remove_nonempty_directory add_tests test_external_modification add_tests test_read_external_object add_tests test_rename_before_close add_tests test_multipart_upload add_tests test_multipart_copy add_tests test_multipart_mix add_tests test_special_characters add_tests test_symlink add_tests test_extended_attributes add_tests test_mtime_file add_tests test_update_time add_tests test_rm_rf_dir add_tests test_copy_file add_tests test_write_after_seek_ahead add_tests test_overwrite_existing_file_range add_tests test_concurrency add_tests test_concurrent_writes add_tests test_open_second_fd add_tests test_write_multiple_offsets add_tests test_content_type } init_suite add_all_tests run_suite s3fs-fuse-1.86/test/keystore.jks000066400000000000000000000043031361654130000166220ustar00rootroot00000000000000[binary Java keystore used for S3Proxy's TLS listener; unprintable content omitted] s3fs-fuse-1.86/test/mergedir.sh000077500000000000000000000104031361654130000163770ustar00rootroot00000000000000#!/bin/sh # # Merge old-format directory objects into the new format. # For s3fs after v1.64 # ### ### UsageFunction ### UsageFunction() { echo "Usage: $1 [-h] [-y] [-all] <base directory>" echo " -h print usage" echo " -y no confirm" echo " -all force all directories" echo " Without -all, this script only merges directories made by" echo " other S3 clients. If -all is specified, it merges all" echo " directories, including those made by old s3fs versions." echo "" } ### Check parameters WHOAMI=`whoami` OWNNAME=`basename $0` AUTOYES="no" ALLYES="no" DIRPARAM="" while [ "$1" != "" ]; do if [ "X$1" = "X-help" -o "X$1" = "X-h" -o "X$1" = "X-H" ]; then UsageFunction $OWNNAME exit 0 elif [ "X$1" = "X-y" -o "X$1" = "X-Y" ]; then AUTOYES="yes" elif [ "X$1" = "X-all" -o "X$1" = "X-ALL" ]; then ALLYES="yes" else if [ "X$DIRPARAM" != "X" ]; then echo "*** Input error." echo "" UsageFunction $OWNNAME exit 1 fi DIRPARAM=$1 fi shift done if [ "X$DIRPARAM" = "X" ]; then echo "*** Input error." echo "" UsageFunction $OWNNAME exit 1 fi if [ "$WHOAMI" != "root" ]; then echo "" echo "Warning: you are running this script as $WHOAMI; it should be run as root." 
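# Note: root is recommended because the merge loop below runs chown with
# owner:group values taken from the directory listing, which an unprivileged
# user is generally not permitted to do.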
echo "" fi ### Caution echo "#############################################################################" echo "[CAUTION]" echo "This program merges directory objects made by s3fs versions older than 1.64," echo "or made by other S3 client applications." echo "This program may have bugs which are not fixed yet." echo "Please run this program at your own risk." echo "#############################################################################" echo "" DATE=`date +'%Y%m%d-%H%M%S'` LOGFILE="$OWNNAME-$DATE.log" echo -n "Start to merge directory object... [$DIRPARAM]" echo "# Start to merge directory object... [$DIRPARAM]" >> $LOGFILE echo -n "# DATE : " >> $LOGFILE echo `date` >> $LOGFILE echo -n "# BASEDIR : " >> $LOGFILE echo `pwd` >> $LOGFILE echo -n "# TARGET PATH : " >> $LOGFILE echo $DIRPARAM >> $LOGFILE echo "" >> $LOGFILE if [ "$AUTOYES" = "yes" ]; then echo "(no confirmation)" else echo "" fi echo "" ### Get Directory list DIRLIST=`find $DIRPARAM -type d -print | grep -v ^\.$` # # Main loop # for DIR in $DIRLIST; do ### Skip "." and ".." directories BASENAME=`basename $DIR` if [ "$BASENAME" = "." -o "$BASENAME" = ".." ]; then continue fi if [ "$ALLYES" = "no" ]; then ### Only merge "d---------" directories. ### Other clients make a directory object "dir/" which does not have ### the "x-amz-meta-mode" attribute; such directories appear as ### "d---------" and are the merge targets. DIRPERMIT=`ls -ld --time-style=+'%Y%m%d%H%M' $DIR | awk '{print $1}'` if [ "$DIRPERMIT" != "d---------" ]; then continue fi fi ### Confirm ANSWER="" if [ "$AUTOYES" = "yes" ]; then ANSWER="y" fi while [ "X$ANSWER" != "XY" -a "X$ANSWER" != "Xy" -a "X$ANSWER" != "XN" -a "X$ANSWER" != "Xn" ]; do echo -n "Do you want to merge $DIR? (y/n): " read ANSWER done if [ "X$ANSWER" != "XY" -a "X$ANSWER" != "Xy" ]; then continue fi ### Do CHOWN=`ls -ld --time-style=+'%Y%m%d%H%M' $DIR | awk '{print $3":"$4" "$7}'` CHMOD=`ls -ld --time-style=+'%Y%m%d%H%M' $DIR | awk '{print $7}'` TOUCH=`ls -ld --time-style=+'%Y%m%d%H%M' $DIR | awk '{print $6" "$7}'` echo -n "*** Merge $DIR : " echo -n " $DIR : " >> $LOGFILE chmod 755 $CHMOD > /dev/null 2>&1 RESULT=$? if [ $RESULT -ne 0 ]; then echo "Failed(chmod)" echo "Failed(chmod)" >> $LOGFILE continue fi chown $CHOWN > /dev/null 2>&1 RESULT=$? if [ $RESULT -ne 0 ]; then echo "Failed(chown)" echo "Failed(chown)" >> $LOGFILE continue fi touch -t $TOUCH > /dev/null 2>&1 RESULT=$? if [ $RESULT -ne 0 ]; then echo "Failed(touch)" echo "Failed(touch)" >> $LOGFILE continue fi echo "Succeeded" echo "Succeeded" >> $LOGFILE done echo "" echo "" >> $LOGFILE echo "Finished." 
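# Example invocation (hypothetical mount point), merging every directory
# without prompting:
#   cd /mnt/s3bucket && /path/to/mergedir.sh -y -all .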
echo -n "# Finished : " >> $LOGFILE echo `date` >> $LOGFILE # # END # s3fs-fuse-1.86/test/passwd-s3fs000066400000000000000000000000401361654130000163360ustar00rootroot00000000000000local-identity:local-credential s3fs-fuse-1.86/test/require-root.sh000077500000000000000000000001471361654130000172420ustar00rootroot00000000000000#!/bin/bash -e if [[ $EUID -ne 0 ]] then echo "This test script must be run as root" 1>&2 exit 1 fi s3fs-fuse-1.86/test/s3proxy.conf000066400000000000000000000004761361654130000165510ustar00rootroot00000000000000s3proxy.secure-endpoint=https://127.0.0.1:8080 s3proxy.authorization=aws-v2-or-v4 s3proxy.identity=local-identity s3proxy.credential=local-credential s3proxy.keystore-path=keystore.jks s3proxy.keystore-password=password jclouds.provider=transient jclouds.identity=remote-identity jclouds.credential=remote-credential s3fs-fuse-1.86/test/sample_ahbe.conf000066400000000000000000000036171361654130000173620ustar00rootroot00000000000000# S3FS: Sample ahbe_conf parameter file. # # This file is configuration file for additional header by extension(ahbe). # s3fs loads this file at starting. # # Format: # line = [file suffix or regex] HTTP-header [HTTP-header-values] # file suffix = file(object) suffix, if this field is empty, # it means "reg:(.*)".(=all object). # regex = regular expression to match the file(object) path. # this type starts with "reg:" prefix. # HTTP-header = additional HTTP header name # HTTP-header-values = additional HTTP header value # # # # Verification is done in the order in which they are described in the file. # That order is very important. # # Example: # " Content-Encoding gzip" --> all object # ".gz Content-Encoding gzip" --> only ".gz" extension file # "reg:^/DIR/(.*).t2$ Content-Encoding text2" --> "/DIR/*.t2" extension file # # Notice: # If you need to set all object, you can specify without "suffix" or regex # type "reg:(.*)". Then all of object(file) is added additional header. # If you have this configuration file for Content-Encoding, you should # know about RFC 2616. # # "The default (identity) encoding; the use of no transformation # whatsoever. This content-coding is used only in the Accept- # Encoding header, and SHOULD NOT be used in the Content-Encoding # header." # # file suffix type .gz Content-Encoding gzip .Z Content-Encoding compress .bz2 Content-Encoding bzip2 .svgz Content-Encoding gzip .svg.gz Content-Encoding gzip .tgz Content-Encoding gzip .tar.gz Content-Encoding gzip .taz Content-Encoding gzip .tz Content-Encoding gzip .tbz2 Content-Encoding gzip gz.js Content-Encoding gzip # regex type(test) reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2 s3fs-fuse-1.86/test/sample_delcache.sh000077500000000000000000000047131361654130000177010ustar00rootroot00000000000000#!/bin/sh # # This is unsupport sample deleting cache files script. # So s3fs's local cache files(stats and objects) grow up, # you need to delete these. # This script deletes these files with total size limit # by sorted atime of files. # You can modify this script for your system. # # [Usage] script [-silent] # func_usage() { echo "" echo "Usage: $1 [-silent]" echo " $1 -h" echo "Sample: $1 mybucket /tmp/s3fs/cache 1073741824" echo "" echo " bucket name = bucket name which specified s3fs option" echo " cache path = cache directory path which specified by" echo " use_cache s3fs option." echo " limit size = limit for total cache files size." 
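    # (for reference: the sample limit 1073741824 shown above is
    #  1024*1024*1024 bytes = 1 GiB)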
echo " specify by BYTE" echo " -silent = silent mode" echo "" } PRGNAME=`basename $0` if [ "X$1" = "X-h" -o "X$1" = "X-H" ]; then func_usage $PRGNAME exit 0 fi if [ "X$1" = "X" -o "X$2" = "X" -o "X$3" = "X" ]; then func_usage $PRGNAME exit 1 fi BUCKET=$1 CDIR="$2" LIMIT=$3 SILENT=0 if [ "X$4" = "X-silent" ]; then SILENT=1 fi FILES_CDIR="${CDIR}/${BUCKET}" STATS_CDIR="${CDIR}/.${BUCKET}.stat" CURRENT_CACHE_SIZE=`du -sb "$FILES_CDIR" | awk '{print $1}'` # # Check total size # if [ $LIMIT -ge $CURRENT_CACHE_SIZE ]; then if [ $SILENT -ne 1 ]; then echo "$FILES_CDIR ($CURRENT_CACHE_SIZE) is below allowed $LIMIT" fi exit 0 fi # # Remove loop # TMP_ATIME=0 TMP_STATS="" TMP_CFILE="" # # Make file list by sorted access time # find "$STATS_CDIR" -type f -exec stat -c "%X:%n" "{}" \; | sort | while read part do echo Looking at $part TMP_ATIME=`echo "$part" | cut -d: -f1` TMP_STATS="`echo "$part" | cut -d: -f2`" TMP_CFILE=`echo "$TMP_STATS" | sed s/\.$BUCKET\.stat/$BUCKET/` if [ `stat -c %X "$TMP_STATS"` -eq $TMP_ATIME ]; then rm -f "$TMP_STATS" "$TMP_CFILE" > /dev/null 2>&1 if [ $? -ne 0 ]; then if [ $SILENT -ne 1 ]; then echo "ERROR: Could not remove files($TMP_STATS,$TMP_CFILE)" fi exit 1 else if [ $SILENT -ne 1 ]; then echo "remove file: $TMP_CFILE $TMP_STATS" fi fi fi if [ $LIMIT -ge `du -sb "$FILES_CDIR" | awk '{print $1}'` ]; then if [ $SILENT -ne 1 ]; then echo "finish removing files" fi break fi done if [ $SILENT -ne 1 ]; then TOTAL_SIZE=`du -sb "$FILES_CDIR" | awk '{print $1}'` echo "Finish: $FILES_CDIR total size is $TOTAL_SIZE" fi exit 0 # # End # s3fs-fuse-1.86/test/small-integration-test.sh000077500000000000000000000025511361654130000212140ustar00rootroot00000000000000#!/bin/bash # # Test s3fs-fuse file system operations with # set -o errexit set -o pipefail # Require root REQUIRE_ROOT=require-root.sh #source $REQUIRE_ROOT source integration-test-common.sh CACHE_DIR="/tmp/s3fs-cache" rm -rf "${CACHE_DIR}" mkdir "${CACHE_DIR}" #reserve 200MB for data cache source test-utils.sh CACHE_DISK_AVAIL_SIZE=`get_disk_avail_size $CACHE_DIR` if [ `uname` = "Darwin" ]; then # [FIXME] # Only on MacOS, there are cases where process or system # other than the s3fs cache uses disk space. # We can imagine that this is caused by Timemachine, but # there is no workaround, so s3fs cache size is set +1gb # for error bypass. # ENSURE_DISKFREE_SIZE=$((CACHE_DISK_AVAIL_SIZE - 1200)) else ENSURE_DISKFREE_SIZE=$((CACHE_DISK_AVAIL_SIZE - 200)) fi export CACHE_DIR export ENSURE_DISKFREE_SIZE FLAGS=( "use_cache=${CACHE_DIR} -o ensure_diskfree=${ENSURE_DISKFREE_SIZE}" enable_content_md5 enable_noobj_cache nocopyapi nomultipart notsup_compat_dir sigv2 singlepart_copy_limit=$((10 * 1024)) # limit size to exercise multipart code paths #use_sse # TODO: S3Proxy does not support SSE ) start_s3proxy for flag in "${FLAGS[@]}"; do echo "testing s3fs flag: $flag" start_s3fs -o $flag ./integration-test-main.sh stop_s3fs done stop_s3proxy echo "$0: tests complete." 
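# A minimal sketch of how a new case could plug into the harness defined in
# test-utils.sh below; "test_example" and its file name are hypothetical,
# while describe, check_file_size, rm_test_file and add_tests are the real
# helpers:
#
#   function test_example {
#       describe "Testing a trivial write ..."
#       echo -n "12345" > example-file.txt
#       check_file_size example-file.txt 5
#       rm_test_file example-file.txt
#   }
#   add_tests test_example   # registered before run_suite executes it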
s3fs-fuse-1.86/test/test-utils.sh000066400000000000000000000136761361654130000167260ustar00rootroot00000000000000#!/bin/bash #### Test utils set -o errexit set -o pipefail # Configuration TEST_TEXT="HELLO WORLD" TEST_TEXT_FILE=test-s3fs.txt TEST_DIR=testdir ALT_TEST_TEXT_FILE=test-s3fs-ALT.txt TEST_TEXT_FILE_LENGTH=15 BIG_FILE=big-file-s3fs.txt BIG_FILE_LENGTH=$((25 * 1024 * 1024)) export RUN_DIR if [ `uname` = "Darwin" ]; then export SED_BUFFER_FLAG="-l" else export SED_BUFFER_FLAG="--unbuffered" fi function get_xattr() { if [ `uname` = "Darwin" ]; then xattr -p "$1" "$2" else getfattr -n "$1" --only-values "$2" fi } function set_xattr() { if [ `uname` = "Darwin" ]; then xattr -w "$1" "$2" "$3" else setfattr -n "$1" -v "$2" "$3" fi } function del_xattr() { if [ `uname` = "Darwin" ]; then xattr -d "$1" "$2" else setfattr -x "$1" "$2" fi } function get_size() { if [ `uname` = "Darwin" ]; then stat -f "%z" "$1" else stat -c %s "$1" fi } function check_file_size() { FILE_NAME="$1" EXPECTED_SIZE="$2" # Verify file size via metadata size=$(get_size ${FILE_NAME}) if [ $size -ne $EXPECTED_SIZE ] then echo "error: expected ${FILE_NAME} to be $EXPECTED_SIZE length, got $size" return 1 fi # Verify file size via data size=$(cat ${FILE_NAME} | wc -c) if [ $size -ne $EXPECTED_SIZE ] then echo "error: expected ${FILE_NAME} to be $EXPECTED_SIZE length, got $size" return 1 fi } function mk_test_file { if [ $# == 0 ]; then TEXT=$TEST_TEXT else TEXT=$1 fi echo $TEXT > $TEST_TEXT_FILE if [ ! -e $TEST_TEXT_FILE ] then echo "Could not create file ${TEST_TEXT_FILE}, it does not exist" exit 1 fi # wait & check BASE_TEXT_LENGTH=`echo $TEXT | wc -c | awk '{print $1}'` TRY_COUNT=10 while true; do MK_TEXT_LENGTH=`wc -c $TEST_TEXT_FILE | awk '{print $1}'` if [ $BASE_TEXT_LENGTH -eq $MK_TEXT_LENGTH ]; then break fi TRY_COUNT=`expr $TRY_COUNT - 1` if [ $TRY_COUNT -le 0 ]; then echo "Could not create file ${TEST_TEXT_FILE}, its file size is wrong" exit 1 fi sleep 1 done } function rm_test_file { if [ $# == 0 ]; then FILE=$TEST_TEXT_FILE else FILE=$1 fi rm -f $FILE if [ -e $FILE ] then echo "Could not cleanup file ${FILE}" exit 1 fi } function mk_test_dir { mkdir ${TEST_DIR} if [ ! -d ${TEST_DIR} ]; then echo "Directory ${TEST_DIR} was not created" exit 1 fi } function rm_test_dir { rmdir ${TEST_DIR} if [ -e $TEST_DIR ]; then echo "Could not remove the test directory, it still exists: ${TEST_DIR}" exit 1 fi } # Create and cd to a unique directory for this test run # Sets RUN_DIR to the name of the created directory function cd_run_dir { if [ "$TEST_BUCKET_MOUNT_POINT_1" == "" ]; then echo "TEST_BUCKET_MOUNT_POINT_1 variable not set" exit 1 fi RUN_DIR=${TEST_BUCKET_MOUNT_POINT_1}/${1} mkdir -p ${RUN_DIR} cd ${RUN_DIR} } function clean_run_dir { if [ -d ${RUN_DIR} ]; then rm -rf ${RUN_DIR} || echo "Error removing ${RUN_DIR}" fi } # Resets test suite function init_suite { TEST_LIST=() TEST_FAILED_LIST=() TEST_PASSED_LIST=() } # Report a passing test case # report_pass TEST_NAME function report_pass { echo "$1 passed" TEST_PASSED_LIST+=($1) } # Report a failing test case # report_fail TEST_NAME function report_fail { echo "$1 failed" TEST_FAILED_LIST+=($1) } # Add tests to the suite # add_tests TEST_NAME... function add_tests { TEST_LIST+=("$@") } # Log test name and description # describe [DESCRIPTION] function describe { echo "${FUNCNAME[1]}: \"$*\"" } # Runs each test in a suite and summarizes results. The list of # tests added by add_tests() is called with CWD set to a tmp # directory in the bucket. 
An attempt to clean this directory is # made after the test run. function run_suite { orig_dir=$PWD key_prefix="testrun-$RANDOM" cd_run_dir $key_prefix for t in "${TEST_LIST[@]}"; do # The following sequence runs tests in a subshell to allow continuation # on test failure, but still allowing errexit to be in effect during # the test. # # See: # https://groups.google.com/d/msg/gnu.bash.bug/NCK_0GmIv2M/dkeZ9MFhPOIJ # Other ways of trying to capture the return value will also disable # errexit in the function due to bash... compliance with POSIX? set +o errexit (set -o errexit; $t $key_prefix) if [[ $? == 0 ]]; then report_pass $t else report_fail $t fi set -o errexit done cd ${orig_dir} clean_run_dir for t in "${TEST_PASSED_LIST[@]}"; do echo "PASS: $t" done for t in "${TEST_FAILED_LIST[@]}"; do echo "FAIL: $t" done passed=${#TEST_PASSED_LIST[@]} failed=${#TEST_FAILED_LIST[@]} echo "SUMMARY for $0: $passed tests passed. $failed tests failed." if [[ $failed != 0 ]]; then return 1 else return 0 fi } function get_ctime() { if [ `uname` = "Darwin" ]; then stat -f "%c" "$1" else stat -c %Z "$1" fi } function get_mtime() { if [ `uname` = "Darwin" ]; then stat -f "%m" "$1" else stat -c %Y "$1" fi } function check_content_type() { INFO_STR=`aws_cli s3api head-object --bucket ${TEST_BUCKET_1} --key $1` if [[ "${INFO_STR}" != *"$2"* ]] then echo "moved file content-type is not as expected expected:$2 got:${INFO_STR}" exit 1 fi } function get_disk_avail_size() { DISK_AVAIL_SIZE=`BLOCKSIZE=$((1024 * 1024)) df $1 | awk '{print $4}' | tail -n 1` echo ${DISK_AVAIL_SIZE} } function aws_cli() { AWS_ACCESS_KEY_ID=local-identity AWS_SECRET_ACCESS_KEY=local-credential aws $* --endpoint-url "${S3_URL}" --no-verify-ssl } s3fs-fuse-1.86/test/write_multiple_offsets.py000077500000000000000000000005671361654130000214270ustar00rootroot00000000000000#!/usr/bin/env python3 import os import sys filename = sys.argv[1] data = bytes('a', 'utf-8') fd = os.open(filename, os.O_CREAT | os.O_TRUNC | os.O_WRONLY) try: os.pwrite(fd, data, 1024) os.pwrite(fd, data, 16 * 1024 * 1024) os.pwrite(fd, data, 18 * 1024 * 1024) finally: os.close(fd) stat = os.lstat(filename) assert stat.st_size == 18 * 1024 * 1024 + 1
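# A rough shell equivalent of write_multiple_offsets.py above, handy for
# poking at sparse writes by hand; "sparse.bin" is a hypothetical name and
# "stat -c %s" is the GNU coreutils form (macOS would use "stat -f %z"):
#   dd if=/dev/zero of=sparse.bin bs=1 count=1 seek=$((18 * 1024 * 1024)) conv=notrunc
#   test "$(stat -c %s sparse.bin)" -eq $((18 * 1024 * 1024 + 1))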