LVM2.2.02.176/Makefile.in

#
# Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
# Copyright (C) 2004-2015 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@

SUBDIRS = conf daemons include lib libdaemon libdm man scripts tools

ifeq ("@UDEV_RULES@", "yes")
  SUBDIRS += udev
endif

ifeq ("@INTL@", "yes")
  SUBDIRS += po
endif

ifeq ("@APPLIB@", "yes")
  SUBDIRS += liblvm
endif

ifeq ("@PYTHON_BINDINGS@", "yes")
  SUBDIRS += python
endif

ifeq ($(MAKECMDGOALS),clean)
  SUBDIRS += test
endif

# FIXME Should use intermediate Makefiles here!
ifeq ($(MAKECMDGOALS),distclean)
  SUBDIRS = conf include man test scripts \
	libdaemon lib tools daemons libdm \
	udev po liblvm python \
	unit-tests/datastruct unit-tests/mm unit-tests/regex
tools.distclean: test.distclean
endif

DISTCLEAN_DIRS += lcov_reports*
DISTCLEAN_TARGETS += config.cache config.log config.status make.tmpl

include make.tmpl

libdm: include
libdaemon: include
lib: libdm libdaemon
liblvm: lib
daemons: lib libdaemon tools
tools: lib libdaemon device-mapper
po: tools daemons
man: tools
all_man: tools
scripts: liblvm libdm

lib.device-mapper: include.device-mapper
libdm.device-mapper: include.device-mapper
liblvm.device-mapper: include.device-mapper
daemons.device-mapper: libdm.device-mapper
tools.device-mapper: libdm.device-mapper
scripts.device-mapper: include.device-mapper
device-mapper: tools.device-mapper daemons.device-mapper man.device-mapper

ifeq ("@INTL@", "yes")
lib.pofile: include.pofile
tools.pofile: lib.pofile
daemons.pofile: lib.pofile
po.pofile: tools.pofile daemons.pofile
pofile: po.pofile
endif

ifeq ("@PYTHON_BINDINGS@", "yes")
python: liblvm
endif

ifneq ("$(CFLOW_CMD)", "")
tools.cflow: libdm.cflow lib.cflow
daemons.cflow: tools.cflow
cflow: include.cflow
endif

ifneq ("@CSCOPE_CMD@", "")
cscope.out:
	@CSCOPE_CMD@ -b -R -s$(top_srcdir)
all: cscope.out
endif
DISTCLEAN_TARGETS += cscope.out

CLEAN_DIRS += autom4te.cache

check check_system check_cluster check_local check_lvmetad check_lvmpolld check_lvmlockd_test check_lvmlockd_dlm check_lvmlockd_sanlock unit: all
	$(MAKE) -C test $(@)

conf.generate man.generate: tools

# how to use parenthesis in makefiles
leftparen:=(
LVM_VER := $(firstword $(subst $(leftparen), ,$(LVM_VERSION)))
VER := LVM2.$(LVM_VER)
# release file name
FILE_VER := $(VER).tgz
CLEAN_TARGETS += $(FILE_VER)
CLEAN_DIRS += $(rpmbuilddir)

dist:
	@echo "Generating $(FILE_VER)";\
	(cd $(top_srcdir); git ls-tree -r HEAD --name-only | xargs tar --transform "s,^,$(VER)/," -c) | gzip >$(FILE_VER)

rpm: dist
	$(RM) -r $(rpmbuilddir)/SOURCES
	$(MKDIR_P) $(rpmbuilddir)/SOURCES
	$(LN_S) -f $(abs_top_builddir)/$(FILE_VER) $(rpmbuilddir)/SOURCES
	$(LN_S) -f $(abs_top_srcdir)/spec/build.inc $(rpmbuilddir)/SOURCES
	$(LN_S) -f $(abs_top_srcdir)/spec/macros.inc $(rpmbuilddir)/SOURCES
	$(LN_S) -f $(abs_top_srcdir)/spec/packages.inc $(rpmbuilddir)/SOURCES
	DM_VER=$$(cut -d- -f1 $(top_srcdir)/VERSION_DM);\
	GIT_VER=$$(cd $(top_srcdir); git describe | cut -d- --output-delimiter=. -f2,3 || echo 0);\
	sed -e "s,\(device_mapper_version\) [0-9.]*$$,\1 $$DM_VER," \
	    -e "s,^\(Version:[^0-9%]*\)[0-9.]*$$,\1 $(LVM_VER)," \
	    -e "s,^\(Release:[^0-9%]*\)[0-9.]\+,\1 $$GIT_VER," \
	    $(top_srcdir)/spec/source.inc >$(rpmbuilddir)/SOURCES/source.inc
	rpmbuild -v --define "_topdir $(rpmbuilddir)" -ba $(top_srcdir)/spec/lvm2.spec

generate: conf.generate man.generate
	$(MAKE) -C conf generate
	$(MAKE) -C man generate

all_man:
	$(MAKE) -C man all_man

install_system_dirs:
	$(INSTALL_DIR) $(DESTDIR)$(DEFAULT_SYS_DIR)
	$(INSTALL_ROOT_DIR) $(DESTDIR)$(DEFAULT_ARCHIVE_DIR)
	$(INSTALL_ROOT_DIR) $(DESTDIR)$(DEFAULT_BACKUP_DIR)
	$(INSTALL_ROOT_DIR) $(DESTDIR)$(DEFAULT_CACHE_DIR)
	$(INSTALL_ROOT_DIR) $(DESTDIR)$(DEFAULT_LOCK_DIR)
	$(INSTALL_ROOT_DIR) $(DESTDIR)$(DEFAULT_RUN_DIR)
	$(INSTALL_ROOT_DATA) /dev/null $(DESTDIR)$(DEFAULT_CACHE_DIR)/.cache

install_initscripts:
	$(MAKE) -C scripts install_initscripts

install_systemd_generators:
	$(MAKE) -C scripts install_systemd_generators
	$(MAKE) -C man install_systemd_generators

install_systemd_units:
	$(MAKE) -C scripts install_systemd_units

install_all_man:
	$(MAKE) -C man install_all_man

ifeq ("@PYTHON_BINDINGS@", "yes")
install_python_bindings:
	$(MAKE) -C liblvm/python install_python_bindings
endif

install_tmpfiles_configuration:
	$(MAKE) -C scripts install_tmpfiles_configuration

LCOV_TRACES = libdm.info lib.info liblvm.info tools.info \
	libdaemon/client.info libdaemon/server.info \
	daemons/clvmd.info \
	daemons/dmeventd.info \
	daemons/lvmetad.info \
	daemons/lvmlockd.info \
	daemons/lvmpolld.info

CLEAN_TARGETS += $(LCOV_TRACES)

ifneq ("$(LCOV)", "")
.PHONY: lcov-reset lcov lcov-dated $(LCOV_TRACES)

ifeq ($(MAKECMDGOALS),lcov-dated)
LCOV_REPORTS_DIR := lcov_reports-$(shell date +%Y%m%d%k%M%S)
lcov-dated: lcov
else
LCOV_REPORTS_DIR := lcov_reports
endif

lcov-reset:
	$(LCOV) --zerocounters $(addprefix -d , $(basename $(LCOV_TRACES)))

# maybe use subdirs processing to create tracefiles...
$(LCOV_TRACES):
	$(LCOV) -b $(basename $@) -d $(basename $@) \
		--ignore-errors source -c -o - | $(SED) \
		-e "s/\(dmeventd_lvm.[ch]\)/plugins\/lvm2\/\1/" \
		-e "s/dmeventd_\(mirror\|snapshot\|thin\|raid\)\.c/plugins\/\1\/dmeventd_\1\.c/" \
		>$@

ifneq ("$(GENHTML)", "")
lcov: $(LCOV_TRACES)
	$(RM) -r $(LCOV_REPORTS_DIR)
	$(MKDIR_P) $(LCOV_REPORTS_DIR)
	for i in $(LCOV_TRACES); do \
		test -s $$i -a $$(wc -w <$$i) -ge 100 && lc="$$lc $$i"; \
	done; \
	test -z "$$lc" || $(GENHTML) -p @abs_top_builddir@ \
		-o $(LCOV_REPORTS_DIR) $$lc
endif
endif

ifeq ("$(TESTING)", "yes")

# testing and report generation
RUBY=ruby1.9 -Ireport-generators/lib -Ireport-generators/test

.PHONY: unit-test ruby-test test-programs

# FIXME: put dependencies on libdm and liblvm
# FIXME: Should be handled by Makefiles in subdirs, not here at top level.
test-programs:
	cd unit-tests/regex && $(MAKE)
	cd unit-tests/datastruct && $(MAKE)
	cd unit-tests/mm && $(MAKE)

unit-test: test-programs
	$(RUBY) report-generators/unit_test.rb $(shell find . -name TESTS)
	$(RUBY) report-generators/title_page.rb

memcheck: test-programs
	$(RUBY) report-generators/memcheck.rb $(shell find . -name TESTS)
	$(RUBY) report-generators/title_page.rb

ruby-test:
	$(RUBY) report-generators/test/ts.rb
endif

ifneq ($(shell which ctags),)
.PHONY: tags
tags:
	test -z "$(shell find $(top_srcdir) -type f -name '*.[ch]' -newer tags 2>/dev/null | head -1)" || $(RM) tags
	test -f tags || find $(top_srcdir) -maxdepth 5 -type f -name '*.[ch]' -exec ctags -a '{}' +

CLEAN_TARGETS += tags
endif

LVM2.2.02.176/po/Makefile.in

#
# Copyright (C) 2004 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@

LANGS=de
#TARGETS=$(LANGS:%=lvm2_%.mo) $(LANGS:%=dm_%.mo)

DM_POSOURCES = $(top_builddir)/tools/dmsetup.pot $(top_builddir)/libdm/*.pot \
	$(top_builddir)/libdm/*/*.pot

LVM_POSOURCES = $(top_builddir)/tools/*.pot $(top_builddir)/lib/*/*.pot

include $(top_builddir)/make.tmpl

lvm2.po: Makefile $(LVM_POSOURCES)
	@echo Compiling string table
	@xgettext -C -F --keyword=print_log --keyword=log_debug \
	  --keyword=log_info --keyword=_ --keyword=N_ \
	  --keyword=log_notice --keyword=log_warn --keyword=log_err \
	  --keyword=log_fatal --keyword=log_debug --keyword=log_error \
	  --keyword=log_print --keyword=log_verbose \
	  --keyword=log_very_verbose -d - \
	  $(LVM_POSOURCES) > $@

device-mapper.po: Makefile $(DM_POSOURCES)
	@echo Compiling string table
	@xgettext -C -F --keyword=dm_log --keyword=log_debug \
	  --keyword=log_info --keyword=_ --keyword=N_ \
	  --keyword=log_notice --keyword=log_warn --keyword=log_err \
	  --keyword=log_fatal --keyword=log_debug --keyword=log_error \
	  --keyword=log_print --keyword=log_verbose \
	  --keyword=log_very_verbose -d - \
	  $(DM_POSOURCES) > $@

pofile: lvm2.po device-mapper.po

# FIXME
install: $(TARGETS)
	@echo Installing translation files in $(localedir)
	@( \
	for lang in $(LANGS); do \
		$(INSTALL_DATA) -D $$lang.mo \
			$(localedir)/$$lang/LC_MESSAGES/lvm2.mo;\
	done; \
	)
	@( \
	for lang in $(LANGS); do \
		$(INSTALL_DATA) -D $$lang.mo \
			$(localedir)/$$lang/LC_MESSAGES/device-mapper.mo;\
	done; \
	)

LVM2.2.02.176/po/pogen.h

/*
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * Macros to change log messages into a format that xgettext can handle.
 *
 * Note that different PRI* definitions lead to different strings for
 * different architectures.
 */

#define print_log(level, dm_errno, file, line, format, args...) print_log(format, args)
#define dm_log(level, file, line, format, args...) dm_log(format, args)
#define dm_log_with_errno(level, dm_errno, file, line, format, args...) \
	dm_log(level, file, line, format, args)

LVM2.2.02.176/po/de.po

# Dummy test file
msgid ""
msgstr ""
"PO-Revision-Date: 2004-02-13 20:35+0000\n"
"Last-Translator: Nobody \n"
"Language-Team: LANGUAGE \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=ISO-8859-15\n"
"Content-Transfer-Encoding: 8bit\n"

LVM2.2.02.176/po/lvm2.po

# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# FIRST AUTHOR , YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2007-04-27 21:46+0100\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME \n"
"Language-Team: LANGUAGE \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=CHARSET\n"
"Content-Transfer-Encoding: 8bit\n"

#: activate/activate.c:44
msgid "LVM1 proc global snprintf failed"
msgstr ""

#: activate/activate.c:63
msgid "module string allocation failed"
msgstr ""

#: activate/activate.c:74 activate/activate.c:91 activate/activate.c:109
#: activate/activate.c:364 activate/activate.c:417 activate/activate.c:438
#: activate/activate.c:445 activate/activate.c:492 activate/activate.c:495
#: activate/activate.c:514 activate/activate.c:520 activate/activate.c:523
#: activate/activate.c:536 activate/activate.c:548 activate/activate.c:561
#: activate/activate.c:564 activate/activate.c:576 activate/activate.c:579
#: activate/activate.c:591 activate/activate.c:594 activate/activate.c:606
#: activate/activate.c:609 activate/activate.c:764 activate/activate.c:768
#: activate/activate.c:776 activate/activate.c:785 activate/activate.c:791
#: activate/activate.c:836 activate/activate.c:848 activate/activate.c:882
#: activate/activate.c:894 activate/activate.c:953 activate/activate.c:967
#: activate/activate.c:996 activate/dev_manager.c:104
#: activate/dev_manager.c:130 activate/dev_manager.c:139
#: activate/dev_manager.c:142 activate/dev_manager.c:168
#: activate/dev_manager.c:176 activate/dev_manager.c:250
#: activate/dev_manager.c:258 activate/dev_manager.c:261
#: activate/dev_manager.c:339 activate/dev_manager.c:347
#: activate/dev_manager.c:350 activate/dev_manager.c:379
#: activate/dev_manager.c:434 activate/dev_manager.c:439
#: activate/dev_manager.c:452 activate/dev_manager.c:489
#: activate/dev_manager.c:492 activate/dev_manager.c:500
#: activate/dev_manager.c:523 activate/dev_manager.c:535
#: activate/dev_manager.c:611 activate/dev_manager.c:628
#: activate/dev_manager.c:631 activate/dev_manager.c:654
#: activate/dev_manager.c:658 activate/dev_manager.c:661
#: activate/dev_manager.c:664 activate/dev_manager.c:682
#: activate/dev_manager.c:689 activate/dev_manager.c:698
#: activate/dev_manager.c:737 activate/dev_manager.c:757
#: activate/dev_manager.c:760 activate/dev_manager.c:780
#: activate/dev_manager.c:783 activate/dev_manager.c:788
#: activate/dev_manager.c:842 activate/dev_manager.c:851
#: activate/dev_manager.c:854 activate/dev_manager.c:860
#: activate/dev_manager.c:866 activate/dev_manager.c:869
#: activate/dev_manager.c:871 activate/dev_manager.c:877
#: activate/dev_manager.c:891 activate/dev_manager.c:894
#: activate/dev_manager.c:920 activate/dev_manager.c:929
#: activate/dev_manager.c:996 activate/dev_manager.c:1010
#: activate/dev_manager.c:1018 activate/dev_manager.c:1025
#: activate/dev_manager.c:1030
activate/dev_manager.c:1038 #: activate/dev_manager.c:1044 activate/dev_manager.c:1048 #: activate/dev_manager.c:1052 activate/dev_manager.c:1075 #: activate/dev_manager.c:1138 activate/fs.c:179 activate/fs.c:229 #: activate/fs.c:236 activate/fs.c:243 activate/fs.c:246 activate/fs.c:320 #: archiver.c:68 archiver.c:75 archiver.c:87 archiver.c:163 archiver.c:236 #: archiver.c:286 archiver.c:303 archiver.c:345 archiver.c:350 #: cache/lvmcache.c:486 cache/lvmcache.c:490 cache/lvmcache.c:704 #: cache/lvmcache.c:724 cache/lvmcache.c:750 cache/lvmcache.c:810 #: commands/toolcontext.c:276 commands/toolcontext.c:295 #: commands/toolcontext.c:302 commands/toolcontext.c:379 #: commands/toolcontext.c:394 commands/toolcontext.c:418 #: commands/toolcontext.c:469 commands/toolcontext.c:685 #: commands/toolcontext.c:781 config/config.c:148 config/config.c:161 #: config/config.c:176 config/config.c:194 config/config.c:215 #: config/config.c:235 config/config.c:282 config/config.c:285 #: config/config.c:467 config/config.c:485 config/config.c:490 #: config/config.c:500 config/config.c:514 config/config.c:530 #: config/config.c:586 config/config.c:777 datastruct/btree.c:90 #: datastruct/str_list.c:24 datastruct/str_list.c:38 datastruct/str_list.c:47 #: datastruct/str_list.c:77 device/dev-cache.c:240 device/dev-cache.c:253 #: device/dev-cache.c:298 device/dev-cache.c:302 device/dev-cache.c:373 #: device/dev-cache.c:404 device/dev-cache.c:443 device/dev-cache.c:511 #: device/dev-cache.c:547 device/dev-cache.c:552 device/dev-cache.c:567 #: device/dev-io.c:174 device/dev-io.c:204 device/dev-io.c:358 #: device/dev-io.c:556 device/dev-io.c:606 device/dev-io.c:624 #: device/dev-io.c:643 device/dev-io.c:671 device/dev-md.c:41 #: device/dev-md.c:49 device/dev-md.c:66 device/device.c:61 device/device.c:66 #: device/device.c:90 display/display.c:243 display/display.c:274 #: display/display.c:333 display/display.c:379 display/display.c:605 #: display/display.c:641 error/errseg.c:101 filters/filter-composite.c:54 #: filters/filter-persistent.c:46 filters/filter-persistent.c:110 #: filters/filter-persistent.c:114 filters/filter-persistent.c:117 #: filters/filter-persistent.c:197 filters/filter-persistent.c:299 #: filters/filter-persistent.c:305 filters/filter-persistent.c:316 #: filters/filter-regex.c:74 filters/filter-regex.c:101 #: filters/filter-regex.c:119 filters/filter-regex.c:142 #: filters/filter-regex.c:196 filters/filter-regex.c:201 #: filters/filter-regex.c:206 filters/filter-regex.c:209 #: filters/filter-sysfs.c:288 filters/filter.c:278 format1/disk-rep.c:221 #: format1/disk-rep.c:233 format1/disk-rep.c:238 format1/disk-rep.c:257 #: format1/disk-rep.c:260 format1/disk-rep.c:291 format1/disk-rep.c:294 #: format1/disk-rep.c:313 format1/disk-rep.c:316 format1/disk-rep.c:334 #: format1/disk-rep.c:351 format1/disk-rep.c:361 format1/disk-rep.c:421 #: format1/disk-rep.c:428 format1/disk-rep.c:522 format1/disk-rep.c:547 #: format1/disk-rep.c:563 format1/disk-rep.c:591 format1/disk-rep.c:609 #: format1/disk-rep.c:646 format1/disk-rep.c:711 format1/disk-rep.c:718 #: format1/disk-rep.c:734 format1/format1.c:134 format1/format1.c:137 #: format1/format1.c:149 format1/format1.c:154 format1/format1.c:157 #: format1/format1.c:160 format1/format1.c:163 format1/format1.c:166 #: format1/format1.c:171 format1/format1.c:186 format1/format1.c:195 #: format1/format1.c:198 format1/format1.c:213 format1/format1.c:227 #: format1/format1.c:245 format1/format1.c:256 format1/format1.c:271 #: format1/format1.c:297 format1/format1.c:302 
format1/format1.c:307 #: format1/format1.c:312 format1/format1.c:348 format1/format1.c:394 #: format1/format1.c:410 format1/format1.c:415 format1/format1.c:421 #: format1/format1.c:431 format1/format1.c:477 format1/format1.c:498 #: format1/format1.c:507 format1/format1.c:551 format1/import-export.c:63 #: format1/import-export.c:118 format1/import-export.c:151 #: format1/import-export.c:168 format1/import-export.c:185 #: format1/import-export.c:193 format1/import-export.c:228 #: format1/import-export.c:233 format1/import-export.c:238 #: format1/import-export.c:316 format1/import-export.c:448 #: format1/import-export.c:453 format1/import-export.c:474 #: format1/import-export.c:481 format1/import-export.c:503 #: format1/import-export.c:524 format1/import-export.c:529 #: format1/import-export.c:538 format1/import-export.c:548 #: format1/import-export.c:558 format1/import-export.c:563 #: format1/import-export.c:666 format1/import-export.c:714 #: format1/import-extents.c:63 format1/import-extents.c:68 #: format1/import-extents.c:71 format1/import-extents.c:122 #: format1/import-extents.c:193 format1/import-extents.c:220 #: format1/import-extents.c:235 format1/import-extents.c:284 #: format1/import-extents.c:314 format1/import-extents.c:338 #: format1/import-extents.c:354 format1/import-extents.c:369 #: format1/layout.c:126 format1/lvm1-label.c:75 format1/vg_number.c:37 #: format1/vg_number.c:42 format_pool/disk_rep.c:49 format_pool/disk_rep.c:102 #: format_pool/disk_rep.c:256 format_pool/disk_rep.c:358 #: format_pool/disk_rep.c:368 format_pool/disk_rep.c:373 #: format_pool/format_pool.c:132 format_pool/format_pool.c:137 #: format_pool/format_pool.c:142 format_pool/format_pool.c:152 #: format_pool/format_pool.c:161 format_pool/format_pool.c:166 #: format_pool/format_pool.c:186 format_pool/format_pool.c:195 #: format_pool/format_pool.c:201 format_pool/format_pool.c:231 #: format_pool/format_pool.c:236 format_pool/format_pool.c:246 #: format_pool/format_pool.c:251 format_pool/import_export.c:93 #: format_pool/import_export.c:180 format_pool/import_export.c:218 #: format_pool/import_export.c:232 format_pool/import_export.c:256 #: format_pool/import_export.c:276 format_pool/import_export.c:304 #: format_pool/import_export.c:309 format_text/archive.c:117 #: format_text/archive.c:138 format_text/archive.c:165 #: format_text/archive.c:258 format_text/archive.c:274 #: format_text/archive.c:350 format_text/archive.c:370 #: format_text/archiver.c:82 format_text/archiver.c:89 #: format_text/archiver.c:101 format_text/archiver.c:189 #: format_text/archiver.c:267 format_text/archiver.c:317 #: format_text/archiver.c:334 format_text/archiver.c:376 #: format_text/archiver.c:381 format_text/export.c:138 #: format_text/export.c:198 format_text/export.c:206 format_text/export.c:293 #: format_text/export.c:294 format_text/export.c:295 format_text/export.c:296 #: format_text/export.c:298 format_text/export.c:299 format_text/export.c:300 #: format_text/export.c:303 format_text/export.c:313 format_text/export.c:317 #: format_text/export.c:319 format_text/export.c:322 format_text/export.c:325 #: format_text/export.c:329 format_text/export.c:332 format_text/export.c:336 #: format_text/export.c:340 format_text/export.c:343 format_text/export.c:344 #: format_text/export.c:348 format_text/export.c:349 format_text/export.c:373 #: format_text/export.c:380 format_text/export.c:384 format_text/export.c:385 #: format_text/export.c:389 format_text/export.c:393 format_text/export.c:395 #: format_text/export.c:398 
format_text/export.c:401 format_text/export.c:404 #: format_text/export.c:408 format_text/export.c:411 format_text/export.c:415 #: format_text/export.c:419 format_text/export.c:422 format_text/export.c:427 #: format_text/export.c:431 format_text/export.c:440 format_text/export.c:443 #: format_text/export.c:446 format_text/export.c:450 format_text/export.c:451 #: format_text/export.c:455 format_text/export.c:458 format_text/export.c:463 #: format_text/export.c:468 format_text/export.c:479 format_text/export.c:481 #: format_text/export.c:488 format_text/export.c:492 format_text/export.c:497 #: format_text/export.c:508 format_text/export.c:518 format_text/export.c:519 #: format_text/export.c:524 format_text/export.c:528 format_text/export.c:531 #: format_text/export.c:534 format_text/export.c:538 format_text/export.c:541 #: format_text/export.c:545 format_text/export.c:549 format_text/export.c:551 #: format_text/export.c:553 format_text/export.c:554 format_text/export.c:555 #: format_text/export.c:560 format_text/export.c:566 format_text/export.c:581 #: format_text/export.c:591 format_text/export.c:600 format_text/export.c:606 #: format_text/export.c:624 format_text/export.c:627 format_text/export.c:634 #: format_text/export.c:637 format_text/export.c:640 format_text/export.c:652 #: format_text/export.c:657 format_text/export.c:660 format_text/export.c:665 #: format_text/export.c:667 format_text/export.c:669 format_text/export.c:671 #: format_text/export.c:673 format_text/export.c:677 format_text/export.c:680 #: format_text/export.c:702 format_text/export.c:729 format_text/export.c:747 #: format_text/flags.c:94 format_text/flags.c:138 #: format_text/format-text.c:158 format_text/format-text.c:161 #: format_text/format-text.c:195 format_text/format-text.c:199 #: format_text/format-text.c:238 format_text/format-text.c:295 #: format_text/format-text.c:346 format_text/format-text.c:378 #: format_text/format-text.c:420 format_text/format-text.c:425 #: format_text/format-text.c:433 format_text/format-text.c:451 #: format_text/format-text.c:456 format_text/format-text.c:481 #: format_text/format-text.c:494 format_text/format-text.c:542 #: format_text/format-text.c:547 format_text/format-text.c:587 #: format_text/format-text.c:601 format_text/format-text.c:619 #: format_text/format-text.c:650 format_text/format-text.c:700 #: format_text/format-text.c:757 format_text/format-text.c:762 #: format_text/format-text.c:785 format_text/format-text.c:799 #: format_text/format-text.c:1059 format_text/format-text.c:1064 #: format_text/format-text.c:1072 format_text/format-text.c:1082 #: format_text/format-text.c:1103 format_text/format-text.c:1107 #: format_text/format-text.c:1113 format_text/format-text.c:1125 #: format_text/format-text.c:1309 format_text/format-text.c:1365 #: format_text/format-text.c:1370 format_text/format-text.c:1380 #: format_text/format-text.c:1382 format_text/format-text.c:1390 #: format_text/format-text.c:1430 format_text/format-text.c:1436 #: format_text/format-text.c:1621 format_text/format-text.c:1627 #: format_text/format-text.c:1666 format_text/format-text.c:1711 #: format_text/format-text.c:1730 format_text/format-text.c:1746 #: format_text/format-text.c:1751 format_text/format-text.c:1765 #: format_text/format-text.c:1777 format_text/format-text.c:1783 #: format_text/format-text.c:1813 format_text/format-text.c:1818 #: format_text/format-text.c:1823 format_text/format-text.c:1832 #: format_text/format-text.c:1935 format_text/import.c:47 #: format_text/import.c:52 
format_text/import.c:63 format_text/import.c:98 #: format_text/import.c:115 format_text/import_vsn1.c:123 #: format_text/import_vsn1.c:134 format_text/import_vsn1.c:167 #: format_text/import_vsn1.c:237 format_text/import_vsn1.c:303 #: format_text/import_vsn1.c:309 format_text/import_vsn1.c:322 #: format_text/import_vsn1.c:387 format_text/import_vsn1.c:429 #: format_text/import_vsn1.c:457 format_text/import_vsn1.c:465 #: format_text/import_vsn1.c:482 format_text/import_vsn1.c:489 #: format_text/import_vsn1.c:518 format_text/import_vsn1.c:576 #: format_text/import_vsn1.c:629 format_text/import_vsn1.c:654 #: format_text/import_vsn1.c:664 format_text/import_vsn1.c:667 #: format_text/import_vsn1.c:735 format_text/import_vsn1.c:846 #: format_text/tags.c:28 format_text/tags.c:35 format_text/tags.c:42 #: format_text/tags.c:48 format_text/tags.c:67 format_text/text_label.c:210 #: format_text/text_label.c:246 label/label.c:90 label/label.c:207 #: label/label.c:258 label/label.c:274 label/label.c:284 label/label.c:291 #: label/label.c:321 label/label.c:329 label/label.c:341 label/label.c:360 #: label/label.c:364 label/label.c:370 locking/cluster_locking.c:85 #: locking/cluster_locking.c:420 locking/cluster_locking.c:432 #: locking/cluster_locking.c:436 locking/external_locking.c:77 lvchange.c:57 #: lvchange.c:99 lvchange.c:116 lvchange.c:122 lvchange.c:136 lvchange.c:143 #: lvchange.c:150 lvchange.c:268 lvchange.c:282 lvchange.c:353 lvchange.c:361 #: lvchange.c:395 lvchange.c:472 lvchange.c:479 lvchange.c:526 lvchange.c:534 #: lvconvert.c:96 lvconvert.c:147 lvconvert.c:211 lvconvert.c:222 #: lvconvert.c:273 lvconvert.c:285 lvconvert.c:298 lvconvert.c:332 #: lvconvert.c:354 lvconvert.c:369 lvconvert.c:378 lvconvert.c:397 #: lvconvert.c:404 lvconvert.c:470 lvconvert.c:481 lvconvert.c:544 #: lvconvert.c:585 lvcreate.c:133 lvcreate.c:349 lvcreate.c:373 lvcreate.c:399 #: lvcreate.c:529 lvcreate.c:661 lvcreate.c:698 lvcreate.c:728 lvcreate.c:755 #: lvcreate.c:763 lvcreate.c:769 lvcreate.c:776 lvcreate.c:868 #: lvmcmdline.c:830 lvmcmdline.c:836 lvmcmdline.c:839 lvmcmdline.c:842 #: lvmcmdline.c:846 lvmcmdline.c:853 lvmcmdline.c:885 lvmcmdline.c:896 #: lvmcmdline.c:906 lvmcmdline.c:936 lvmcmdline.c:1022 lvremove.c:99 #: lvrename.c:85 lvrename.c:164 lvrename.c:175 lvrename.c:182 lvrename.c:188 #: lvresize.c:466 lvresize.c:522 lvresize.c:529 lvresize.c:536 lvresize.c:547 #: lvresize.c:554 lvresize.c:560 lvresize.c:579 lvresize.c:593 lvresize.c:618 #: metadata/lv_manip.c:85 metadata/lv_manip.c:91 metadata/lv_manip.c:192 #: metadata/lv_manip.c:227 metadata/lv_manip.c:258 metadata/lv_manip.c:316 #: metadata/lv_manip.c:325 metadata/lv_manip.c:340 metadata/lv_manip.c:349 #: metadata/lv_manip.c:379 metadata/lv_manip.c:580 metadata/lv_manip.c:588 #: metadata/lv_manip.c:623 metadata/lv_manip.c:735 metadata/lv_manip.c:738 #: metadata/lv_manip.c:748 metadata/lv_manip.c:846 metadata/lv_manip.c:874 #: metadata/lv_manip.c:1048 metadata/lv_manip.c:1095 metadata/lv_manip.c:1100 #: metadata/lv_manip.c:1130 metadata/lv_manip.c:1221 metadata/lv_manip.c:1228 #: metadata/lv_manip.c:1265 metadata/lv_manip.c:1277 metadata/lv_manip.c:1306 #: metadata/lv_manip.c:1316 metadata/lv_manip.c:1364 metadata/lv_manip.c:1429 #: metadata/lv_manip.c:1436 metadata/lv_manip.c:1548 metadata/lv_manip.c:1619 #: metadata/merge.c:253 metadata/merge.c:292 metadata/merge.c:297 #: metadata/metadata.c:119 metadata/metadata.c:154 metadata/metadata.c:182 #: metadata/metadata.c:252 metadata/metadata.c:276 metadata/metadata.c:284 #: metadata/metadata.c:322 
metadata/metadata.c:372 metadata/metadata.c:378 #: metadata/metadata.c:384 metadata/metadata.c:395 metadata/metadata.c:401 #: metadata/metadata.c:413 metadata/metadata.c:419 metadata/metadata.c:431 #: metadata/metadata.c:439 metadata/metadata.c:446 metadata/metadata.c:453 #: metadata/metadata.c:460 metadata/metadata.c:473 metadata/metadata.c:481 #: metadata/metadata.c:490 metadata/metadata.c:549 metadata/metadata.c:564 #: metadata/metadata.c:754 metadata/metadata.c:779 metadata/metadata.c:815 #: metadata/metadata.c:846 metadata/metadata.c:874 metadata/metadata.c:880 #: metadata/metadata.c:887 metadata/metadata.c:898 metadata/metadata.c:903 #: metadata/metadata.c:925 metadata/metadata.c:947 metadata/metadata.c:964 #: metadata/metadata.c:1063 metadata/metadata.c:1068 metadata/metadata.c:1079 #: metadata/metadata.c:1137 metadata/metadata.c:1142 metadata/metadata.c:1168 #: metadata/metadata.c:1183 metadata/metadata.c:1191 metadata/metadata.c:1246 #: metadata/metadata.c:1250 metadata/metadata.c:1399 metadata/metadata.c:1433 #: metadata/metadata.c:1490 metadata/metadata.c:1494 metadata/metadata.c:1527 #: metadata/mirror.c:106 metadata/mirror.c:109 metadata/mirror.c:112 #: metadata/mirror.c:205 metadata/mirror.c:484 metadata/mirror.c:526 #: metadata/mirror.c:560 metadata/mirror.c:599 metadata/mirror.c:608 #: metadata/mirror.c:736 metadata/mirror.c:757 metadata/mirror.c:762 #: metadata/mirror.c:836 metadata/pv_manip.c:54 metadata/pv_manip.c:73 #: metadata/pv_manip.c:94 metadata/pv_manip.c:131 metadata/pv_manip.c:156 #: metadata/pv_manip.c:197 metadata/pv_manip.c:332 metadata/pv_map.c:44 #: metadata/pv_map.c:92 metadata/pv_map.c:112 metadata/pv_map.c:122 #: metadata/pv_map.c:149 metadata/pv_map.c:159 metadata/snapshot_manip.c:70 #: metadata/snapshot_manip.c:77 mirror/mirrored.c:144 mirror/mirrored.c:149 #: mirror/mirrored.c:151 mirror/mirrored.c:304 mirror/mirrored.c:328 #: mirror/mirrored.c:331 mirror/mirrored.c:501 mirror/mirrored.c:552 #: misc/lvm-file.c:291 misc/timestamp.c:44 pvchange.c:191 pvmove.c:102 #: pvmove.c:107 pvmove.c:192 pvmove.c:220 pvmove.c:227 pvmove.c:292 #: pvmove.c:299 pvmove.c:308 pvmove.c:337 pvmove.c:349 pvmove.c:356 #: pvmove.c:363 pvmove.c:371 pvmove.c:383 pvmove.c:524 pvresize.c:165 #: pvscan.c:55 report/report.c:187 report/report.c:513 report/report.c:543 #: report/report.c:699 reporter.c:289 snapshot/snapshot.c:74 #: snapshot/snapshot.c:83 snapshot/snapshot.c:84 snapshot/snapshot.c:85 #: snapshot/snapshot.c:169 striped/striped.c:89 striped/striped.c:169 #: striped/striped.c:172 striped/striped.c:216 toollib.c:912 toollib.c:962 #: toollib.c:1020 toollib.c:1060 toollib.c:1085 toollib.c:1194 toollib.c:1332 #: toollib.c:1337 toollib.c:1350 toollib.c:1357 uuid/uuid.c:90 uuid/uuid.c:94 #: vgcfgbackup.c:69 vgcfgbackup.c:78 vgcfgbackup.c:85 vgchange.c:420 #: vgmerge.c:193 vgreduce.c:29 vgreduce.c:96 vgreduce.c:102 vgreduce.c:124 #: vgreduce.c:130 vgreduce.c:148 vgreduce.c:196 vgreduce.c:217 vgreduce.c:241 #: vgreduce.c:307 vgreduce.c:353 zero/zero.c:99 msgid "" msgstr "" #: activate/activate.c:81 msgid "snap_seg module string allocation failed" msgstr "" #: activate/activate.c:245 msgid "Activation enabled. Device-mapper kernel driver will be used." msgstr "" #: activate/activate.c:248 msgid "" "WARNING: Activation disabled. No device-mapper interaction will be attempted." 
msgstr "" #: activate/activate.c:281 msgid "Ignoring invalid string in config file activation/volume_list" msgstr "" #: activate/activate.c:287 msgid "Ignoring empty string in config file activation/volume_list" msgstr "" #: activate/activate.c:296 msgid "Ignoring empty tag in config file activation/volume_list" msgstr "" #: activate/activate.c:326 #, c-format msgid "dm_snprintf error from %s/%s" msgstr "" #: activate/activate.c:350 msgid "Getting driver version" msgstr "" #: activate/activate.c:362 #, c-format msgid "Getting target version for %s" msgstr "" #: activate/activate.c:367 #, c-format msgid "Failed to get %s target version" msgstr "" #: activate/activate.c:411 #, c-format msgid "target_present module name too long: %s" msgstr "" #: activate/activate.c:440 #, c-format msgid "Getting device info for %s" msgstr "" #: activate/activate.c:771 #, c-format msgid "Skipping: Suspending '%s'." msgstr "" #: activate/activate.c:831 #, c-format msgid "Skipping: Resuming '%s'." msgstr "" #: activate/activate.c:877 #, c-format msgid "Skipping: Deactivating '%s'." msgstr "" #: activate/activate.c:888 #, c-format msgid "LV %s/%s in use: not deactivating" msgstr "" #: activate/activate.c:917 activate/activate.c:942 #, c-format msgid "Not activating %s/%s due to config file settings" msgstr "" #: activate/activate.c:948 #, c-format msgid "Skipping: Activating '%s'." msgstr "" #: activate/dev_manager.c:75 #, c-format msgid "_build_dlid: pool allocation failed for %zu %s %s." msgstr "" #: activate/dev_manager.c:136 activate/dev_manager.c:255 #: activate/dev_manager.c:344 msgid "Failed to disable open_count" msgstr "" #: activate/dev_manager.c:163 msgid "Failed to allocate dm_task struct to check dev status" msgstr "" #: activate/dev_manager.c:171 msgid "Failed to get state of mapped device" msgstr "" #: activate/dev_manager.c:229 activate/dev_manager.c:528 #, c-format msgid "dlid build failed for %s" msgstr "" #: activate/dev_manager.c:360 activate/dev_manager.c:384 #, c-format msgid "Number of segments in active LV %s does not match metadata" msgstr "" #: activate/dev_manager.c:394 #, c-format msgid "LV percent: %f" msgstr "" #: activate/dev_manager.c:497 #, c-format msgid "Getting device status percentage for %s" msgstr "" #: activate/dev_manager.c:532 #, c-format msgid "Getting device mirror status percentage for %s" msgstr "" #: activate/dev_manager.c:633 #, c-format msgid "Getting device info for %s [%s]" msgstr "" #: activate/dev_manager.c:635 #, c-format msgid "Failed to get info for %s [%s]." msgstr "" #: activate/dev_manager.c:640 #, c-format msgid "Failed to add device (%u:%u) to dtree" msgstr "" #: activate/dev_manager.c:677 #, c-format msgid "Partial dtree creation failed for %s." msgstr "" #: activate/dev_manager.c:741 #, c-format msgid "Internal error: Unassigned area found in LV %s." msgstr "" #: activate/dev_manager.c:775 #, c-format msgid "Couldn't find snapshot for '%s'." msgstr "" #: activate/dev_manager.c:800 #, c-format msgid "_emit_target: Internal error: Can't handle segment type %s" msgstr "" #: activate/dev_manager.c:828 #, c-format msgid "Checking kernel supports %s segment type for %s%s%s" msgstr "" #: activate/dev_manager.c:834 #, c-format msgid "Can't expand LV %s: %s target support missing from kernel?" msgstr "" #: activate/dev_manager.c:847 msgid "Clustered snapshots are not yet supported" msgstr "" #: activate/dev_manager.c:902 #, c-format msgid "_add_new_lv_to_dtree: pool alloc failed for %s %s." 
msgstr "" #: activate/dev_manager.c:961 #, c-format msgid "_create_lv_symlinks: Couldn't split up old device name %s" msgstr "" #: activate/dev_manager.c:987 #, c-format msgid "_clean_tree: Couldn't split up device name %s." msgstr "" #: activate/dev_manager.c:1013 activate/dev_manager.c:1133 msgid "Lost dependency tree root node" msgstr "" #: activate/dev_manager.c:1055 #, c-format msgid "Failed to create symlinks for %s." msgstr "" #: activate/dev_manager.c:1060 #, c-format msgid "_tree_action: Action %u not supported." msgstr "" #: activate/dev_manager.c:1119 msgid "partial dtree creation failed" msgstr "" #: activate/dev_manager.c:1124 #, c-format msgid "Failed to add device %s (%u:%u) to dtree" msgstr "" #: activate/fs.c:35 activate/fs.c:58 msgid "Couldn't construct name of volume group directory." msgstr "" #: activate/fs.c:43 #, c-format msgid "Creating directory %s" msgstr "" #: activate/fs.c:45 activate/fs.c:80 activate/fs.c:100 activate/fs.c:153 #: activate/fs.c:166 activate/fs.c:173 activate/fs.c:208 #: commands/toolcontext.c:342 commands/toolcontext.c:820 config/config.c:209 #: config/config.c:247 config/config.c:262 config/config.c:328 #: config/config.c:428 config/config.c:452 device/dev-cache.c:208 #: device/dev-cache.c:212 device/dev-cache.c:394 device/dev-cache.c:417 #: device/dev-cache.c:424 device/dev-cache.c:681 device/dev-cache.c:683 #: device/dev-io.c:131 device/dev-io.c:231 device/dev-io.c:249 #: device/dev-io.c:254 device/dev-io.c:256 device/dev-io.c:262 #: device/dev-io.c:396 device/dev-io.c:398 device/dev-io.c:481 #: filters/filter-persistent.c:203 filters/filter-persistent.c:207 #: filters/filter-persistent.c:230 filters/filter-persistent.c:243 #: filters/filter-sysfs.c:42 filters/filter-sysfs.c:58 #: filters/filter-sysfs.c:156 filters/filter-sysfs.c:163 #: filters/filter-sysfs.c:182 filters/filter-sysfs.c:225 filters/filter.c:164 #: filters/filter.c:221 filters/filter.c:232 filters/filter.c:240 #: filters/filter.c:253 format_text/archive.c:214 format_text/archive.c:223 #: format_text/archive.c:253 format_text/archive.c:260 #: format_text/archive.c:265 format_text/format-text.c:873 #: format_text/format-text.c:875 format_text/format-text.c:884 #: format_text/format-text.c:889 format_text/format-text.c:891 #: format_text/format-text.c:896 format_text/format-text.c:921 #: format_text/format-text.c:983 format_text/format-text.c:988 #: format_text/format-text.c:1013 format_text/format-text.c:1040 #: locking/file_locking.c:61 locking/file_locking.c:69 #: locking/file_locking.c:72 locking/file_locking.c:105 #: locking/file_locking.c:167 locking/file_locking.c:171 #: locking/file_locking.c:187 locking/file_locking.c:296 #: locking/file_locking.c:301 locking/locking.c:45 locking/locking.c:50 #: locking/locking.c:66 locking/locking.c:221 log/log.c:69 lvmcmdline.c:1092 #: lvmcmdline.c:1130 misc/lvm-exec.c:42 misc/lvm-file.c:47 misc/lvm-file.c:70 #: misc/lvm-file.c:97 misc/lvm-file.c:107 misc/lvm-file.c:157 #: misc/lvm-file.c:170 misc/lvm-file.c:199 misc/lvm-file.c:208 #: misc/lvm-file.c:236 misc/lvm-file.c:241 misc/lvm-file.c:244 #: misc/lvm-file.c:289 misc/lvm-file.c:297 misc/timestamp.c:47 mm/memlock.c:97 #: mm/memlock.c:105 mm/memlock.c:116 uuid/uuid.c:83 uuid/uuid.c:88 #, c-format msgid "%s: %s failed: %s" msgstr "" #: activate/fs.c:64 #, c-format msgid "Removing directory %s" msgstr "" #: activate/fs.c:91 #, c-format msgid "Couldn't create path for %s" msgstr "" #: activate/fs.c:98 activate/fs.c:151 activate/fs.c:164 #, c-format msgid "Removing %s" msgstr "" #: 
activate/fs.c:114 #, c-format msgid "Couldn't create path for volume group dir %s" msgstr "" #: activate/fs.c:121 #, c-format msgid "Couldn't create source pathname for logical volume link %s" msgstr "" #: activate/fs.c:128 #, c-format msgid "Couldn't create destination pathname for logical volume link for %s" msgstr "" #: activate/fs.c:135 #, c-format msgid "Couldn't create pathname for LVM1 group file for %s" msgstr "" #: activate/fs.c:146 #, c-format msgid "Non-LVM1 character device found at %s" msgstr "" #: activate/fs.c:159 #, c-format msgid "Symbolic link %s not created: file exists" msgstr "" #: activate/fs.c:171 #, c-format msgid "Linking %s -> %s" msgstr "" #: activate/fs.c:195 msgid "Couldn't determine link pathname." msgstr "" #: activate/fs.c:202 #, c-format msgid "%s not symbolic link - not removing" msgstr "" #: activate/fs.c:206 #, c-format msgid "Removing link %s" msgstr "" #: activate/fs.c:282 msgid "No space to stack fs operation" msgstr "" #: archiver.c:40 format_text/archiver.c:53 msgid "Couldn't copy archive directory name." msgstr "" #: archiver.c:102 format_text/archiver.c:116 msgid "Test mode: Skipping archiving of volume group." msgstr "" #: archiver.c:109 #, c-format msgid "Archiving volume group \"%s\" metadata." msgstr "" #: archiver.c:111 format_text/archiver.c:131 #, c-format msgid "Volume group \"%s\" metadata archive failed." msgstr "" #: archiver.c:138 format_text/archiver.c:164 msgid "Couldn't copy backup directory name." msgstr "" #: archiver.c:169 format_text/archiver.c:195 msgid "Failed to generate volume group metadata backup filename." msgstr "" #: archiver.c:180 format_text/archiver.c:206 msgid "WARNING: This metadata update is NOT backed up" msgstr "" #: archiver.c:185 format_text/archiver.c:211 msgid "Test mode: Skipping volume group backup." msgstr "" #: archiver.c:193 format_text/archiver.c:224 #, c-format msgid "Backup of volume group %s metadata failed." msgstr "" #: archiver.c:207 format_text/archiver.c:238 msgid "Failed to generate backup filename (for removal)." msgstr "" #: archiver.c:230 format_text/archiver.c:261 msgid "Couldn't create text format object." msgstr "" #: archiver.c:259 format_text/archiver.c:290 msgid "Failed to allocate format instance" msgstr "" #: archiver.c:267 format_text/archiver.c:298 #, c-format msgid "PV %s missing from cache" msgstr "" #: archiver.c:272 #, c-format msgid "PV %s is a different format (%s)" msgstr "" #: archiver.c:279 format_text/archiver.c:310 #, c-format msgid "Format-specific setup for %s failed" msgstr "" #: archiver.c:316 format_text/archiver.c:347 msgid "Failed to generate backup filename (for restore)." msgstr "" #: archiver.c:333 #, c-format msgid "Creating volume group backup \"%s\"" msgstr "" #: archiver.c:338 format_text/archiver.c:369 msgid "Couldn't create backup object." 
msgstr "" #: cache/lvmcache.c:56 cache/lvmcache.c:235 cache/lvmcache.c:740 msgid "Internal cache initialisation failed" msgstr "" #: cache/lvmcache.c:61 #, c-format msgid "Cache locking failure for %s" msgstr "" #: cache/lvmcache.c:127 msgid "device_list element allocation failed" msgstr "" #: cache/lvmcache.c:245 toollib.c:638 msgid "dev_iter creation failed" msgstr "" #: cache/lvmcache.c:278 msgid "vgids list allocation failed" msgstr "" #: cache/lvmcache.c:285 cache/lvmcache.c:308 cache/lvmcache.c:334 #: toollib.c:271 toollib.c:306 toollib.c:314 toollib.c:326 toollib.c:405 #: toollib.c:547 toollib.c:561 toollib.c:698 msgid "strlist allocation failed" msgstr "" #: cache/lvmcache.c:301 msgid "vgnames list allocation failed" msgstr "" #: cache/lvmcache.c:324 msgid "pvids list allocation failed" msgstr "" #: cache/lvmcache.c:395 #, c-format msgid "vg hash re-insertion failed: %s" msgstr "" #: cache/lvmcache.c:440 #, c-format msgid "_lvmcache_update: pvid insertion failed: %s" msgstr "" #: cache/lvmcache.c:456 #, c-format msgid "lvmcache: %s: clearing VGID" msgstr "" #: cache/lvmcache.c:463 #, c-format msgid "_lvmcache_update: vgid hash insertion failed: %s" msgstr "" #: cache/lvmcache.c:468 #, c-format msgid "lvmcache: %s: setting %s VGID to %s" msgstr "" #: cache/lvmcache.c:502 #, c-format msgid "" "WARNING: Duplicate VG name %s: Existing %s takes precedence over exported %s" msgstr "" #: cache/lvmcache.c:508 #, c-format msgid "WARNING: Duplicate VG name %s: %s takes precedence over exported %s" msgstr "" #: cache/lvmcache.c:516 #, c-format msgid "" "WARNING: Duplicate VG name %s: Existing %s (created here) takes precedence " "over %s" msgstr "" #: cache/lvmcache.c:521 #, c-format msgid "" "WARNING: Duplicate VG name %s: %s (with creation_host) takes precedence over " "%s" msgstr "" #: cache/lvmcache.c:529 #, c-format msgid "" "WARNING: Duplicate VG name %s: %s (created here) takes precedence over %s" msgstr "" #: cache/lvmcache.c:547 #, c-format msgid "cache_update: vg hash insertion failed: %s" msgstr "" #: cache/lvmcache.c:619 msgid "lvmcache_update_vgname: list alloc failed" msgstr "" #: cache/lvmcache.c:625 #, c-format msgid "cache vgname alloc failed for %s" msgstr "" #: cache/lvmcache.c:652 #, c-format msgid "lvmcache: %s: now %s%s%s%s%s" msgstr "" #: cache/lvmcache.c:668 #, c-format msgid "lvmcache: %s: VG %s %s exported" msgstr "" #: cache/lvmcache.c:685 #, c-format msgid "cache creation host alloc failed for %s" msgstr "" #: cache/lvmcache.c:690 #, c-format msgid "lvmcache: %s: VG %s: Set creation host to %s." msgstr "" #: cache/lvmcache.c:754 msgid "lvmcache_info allocation failed" msgstr "" #: cache/lvmcache.c:769 #, c-format msgid "Ignoring duplicate PV %s on %s - using md %s" msgstr "" #: cache/lvmcache.c:776 #, c-format msgid "Ignoring duplicate PV %s on %s - using dm %s" msgstr "" #: cache/lvmcache.c:783 #, c-format msgid "Duplicate PV %s on %s - using md %s" msgstr "" #: cache/lvmcache.c:789 #, c-format msgid "Duplicate PV %s on %s - using dm %s" msgstr "" #: cache/lvmcache.c:798 #, c-format msgid "Found duplicate PV %s: using %s not %s" msgstr "" #: cache/lvmcache.c:872 msgid "Wiping internal VG cache" msgstr "" #: commands/toolcontext.c:70 msgid "LVM_SYSTEM_DIR environment variable is too long." 
msgstr "" #: commands/toolcontext.c:146 #, c-format msgid "Logging initialised at %s" msgstr "" #: commands/toolcontext.c:165 #, c-format msgid "Set umask to %04o" msgstr "" #: commands/toolcontext.c:171 commands/toolcontext.c:182 msgid "Device directory given in config file too long" msgstr "" #: commands/toolcontext.c:187 #, c-format msgid "Warning: proc dir %s not found - some checks will be bypassed" msgstr "" #: commands/toolcontext.c:207 lvmcmdline.c:723 msgid "Invalid units specification" msgstr "" #: commands/toolcontext.c:216 #, c-format msgid "Setting host tag: %s" msgstr "" #: commands/toolcontext.c:219 #, c-format msgid "_set_tag: str_list_add %s failed" msgstr "" #: commands/toolcontext.c:243 #, c-format msgid "Invalid hostname string for tag %s" msgstr "" #: commands/toolcontext.c:254 msgid "host_filter not supported yet" msgstr "" #: commands/toolcontext.c:289 #, c-format msgid "Invalid tag in config file: %s" msgstr "" #: commands/toolcontext.c:322 msgid "LVM_SYSTEM_DIR or tag was too long" msgstr "" #: commands/toolcontext.c:327 msgid "config_tree_list allocation failed" msgstr "" #: commands/toolcontext.c:332 msgid "config_tree allocation failed" msgstr "" #: commands/toolcontext.c:347 #, c-format msgid "Loading config file: %s" msgstr "" #: commands/toolcontext.c:349 #, c-format msgid "Failed to load config file %s" msgstr "" #: commands/toolcontext.c:372 commands/toolcontext.c:410 msgid "Failed to create config tree" msgstr "" #: commands/toolcontext.c:473 msgid "Failed to add /dev to internal device cache" msgstr "" #: commands/toolcontext.c:477 msgid "device/scan not in config file: Defaulting to /dev" msgstr "" #: commands/toolcontext.c:484 msgid "Invalid string in config file: devices/scan" msgstr "" #: commands/toolcontext.c:490 format_text/format-text.c:1980 #, c-format msgid "Failed to add %s to internal device cache" msgstr "" #: commands/toolcontext.c:501 msgid "Invalid string in config file: devices/loopfiles" msgstr "" #: commands/toolcontext.c:507 #, c-format msgid "Failed to add loopfile %s to internal device cache" msgstr "" #: commands/toolcontext.c:546 msgid "devices/filter not found in config file: no regex filter installed" msgstr "" #: commands/toolcontext.c:550 msgid "Failed to create regex device filter" msgstr "" #: commands/toolcontext.c:557 msgid "Failed to create lvm type filter" msgstr "" #: commands/toolcontext.c:602 commands/toolcontext.c:610 msgid "Persistent cache filename too long." 
msgstr "" #: commands/toolcontext.c:615 msgid "Failed to create persistent device filter" msgstr "" #: commands/toolcontext.c:634 #, c-format msgid "Failed to load existing device cache from %s" msgstr "" #: commands/toolcontext.c:679 msgid "Invalid string in config file: global/format_libraries" msgstr "" #: commands/toolcontext.c:690 #, c-format msgid "Shared library %s does not contain format functions" msgstr "" #: commands/toolcontext.c:722 #, c-format msgid "_init_formats: Default format (%s) not found" msgstr "" #: commands/toolcontext.c:775 msgid "Invalid string in config file: global/segment_libraries" msgstr "" #: commands/toolcontext.c:786 #, c-format msgid "Shared library %s does not contain segment type functions" msgstr "" #: commands/toolcontext.c:801 #, c-format msgid "Duplicate segment type %s: unloading shared library %s" msgstr "" #: commands/toolcontext.c:825 msgid "_init_hostname: dm_pool_strdup failed" msgstr "" #: commands/toolcontext.c:830 msgid "_init_hostname: dm_pool_strdup kernel_vsn failed" msgstr "" #: commands/toolcontext.c:844 msgid "WARNING: Metadata changes will NOT be backed up" msgstr "" #: commands/toolcontext.c:864 #, c-format msgid "Couldn't create default archive path '%s/%s'." msgstr "" #: commands/toolcontext.c:873 commands/toolcontext.c:893 msgid "backup_init failed." msgstr "" #: commands/toolcontext.c:885 #, c-format msgid "Couldn't create default backup path '%s/%s'." msgstr "" #: commands/toolcontext.c:911 msgid "setlocale failed" msgstr "" #: commands/toolcontext.c:920 msgid "Failed to allocate command context" msgstr "" #: commands/toolcontext.c:940 msgid "" "Failed to create LVM2 system dir for metadata backups, config files and " "internal cache." msgstr "" #: commands/toolcontext.c:942 msgid "" "Set environment variable LVM_SYSTEM_DIR to alternative location or empty " "string." msgstr "" #: commands/toolcontext.c:948 msgid "Library memory pool creation failed" msgstr "" #: commands/toolcontext.c:979 msgid "Command memory pool creation failed" msgstr "" #: commands/toolcontext.c:1042 msgid "Reloading config files" msgstr "" #: config/config.c:111 msgid "Failed to allocate config pool." msgstr "" #: config/config.c:116 msgid "Failed to allocate config tree." msgstr "" #: config/config.c:165 msgid "Failed to allocate config tree parser." msgstr "" #: config/config.c:228 #, c-format msgid "%s: Checksum error" msgstr "" #: config/config.c:268 #, c-format msgid "%s is not a regular file" msgstr "" #: config/config.c:276 #, c-format msgid "%s is empty" msgstr "" #: config/config.c:324 #, c-format msgid "Config file %s has disappeared!" 
msgstr "" #: config/config.c:329 msgid "Failed to reload configuration files" msgstr "" #: config/config.c:334 #, c-format msgid "Configuration file %s is not a regular file" msgstr "" #: config/config.c:344 #, c-format msgid "Detected config file change to %s" msgstr "" #: config/config.c:368 #, c-format msgid "_write_value: Unknown value type: %d" msgstr "" #: config/config.c:432 #, c-format msgid "Dumping configuration to %s" msgstr "" #: config/config.c:435 config/config.c:441 #, c-format msgid "Failure while writing to %s" msgstr "" #: config/config.c:445 #, c-format msgid "Configuration node %s not found" msgstr "" #: config/config.c:494 config/config.c:497 config/config.c:510 #: config/config.c:512 config/config.c:527 config/config.c:541 #: config/config.c:543 config/config.c:572 config/config.c:578 #: config/config.c:590 #, c-format msgid "Parse error at byte %td (line %d): unexpected token" msgstr "" #: config/config.c:594 #, c-format msgid "Parse error at byte %td (line %d): expected a value" msgstr "" #: config/config.c:810 #, c-format msgid "WARNING: Ignoring duplicate config node: %s (seeking %s)" msgstr "" #: config/config.c:858 #, c-format msgid "Setting %s to %s" msgstr "" #: config/config.c:863 #, c-format msgid "%s not found in config: defaulting to %s" msgstr "" #: config/config.c:881 #, c-format msgid "Setting %s to %ld" msgstr "" #: config/config.c:885 #, c-format msgid "%s not found in config: defaulting to %ld" msgstr "" #: config/config.c:903 #, c-format msgid "Setting %s to %f" msgstr "" #: config/config.c:907 #, c-format msgid "%s not found in config: defaulting to %f" msgstr "" #: device/dev-cache.c:64 device/dev-cache.c:81 device/dev-cache.c:118 msgid "struct device allocation failed" msgstr "" #: device/dev-cache.c:68 device/dev-cache.c:85 msgid "struct str_list allocation failed" msgstr "" #: device/dev-cache.c:73 device/dev-cache.c:90 device/dev-cache.c:95 msgid "filename strdup failed" msgstr "" #: device/dev-cache.c:142 #, c-format msgid "%s: New preferred name" msgstr "" #: device/dev-cache.c:247 #, c-format msgid "%s: Already in device cache" msgstr "" #: device/dev-cache.c:260 #, c-format msgid "%s: Aliased to %s in device cache%s" msgstr "" #: device/dev-cache.c:264 #, c-format msgid "%s: Added to device cache" msgstr "" #: device/dev-cache.c:307 msgid "Couldn't insert device into binary tree." msgstr "" #: device/dev-cache.c:314 msgid "Couldn't add alias to dev cache." msgstr "" #: device/dev-cache.c:319 msgid "Couldn't add name to hash in dev cache." msgstr "" #: device/dev-cache.c:399 #, c-format msgid "%s: Not a regular file" msgstr "" #: device/dev-cache.c:429 #, c-format msgid "%s: Symbolic link to directory" msgstr "" #: device/dev-cache.c:438 #, c-format msgid "%s: Not a block device" msgstr "" #: device/dev-cache.c:496 msgid "" "devices/preferred_names not found in config file: using built-in preferences" msgstr "" #: device/dev-cache.c:503 msgid "preferred_names patterns must be enclosed in quotes" msgstr "" #: device/dev-cache.c:514 msgid "Failed to allocate preferred device name pattern list." msgstr "" #: device/dev-cache.c:521 msgid "Failed to allocate a preferred device name pattern." msgstr "" #: device/dev-cache.c:529 msgid "Preferred device name pattern matcher creation failed." msgstr "" #: device/dev-cache.c:559 msgid "Couldn't create binary tree for dev-cache." msgstr "" #: device/dev-cache.c:579 #, c-format msgid "Device '%s' has been left open." 
msgstr "" #: device/dev-cache.c:617 device/dev-cache.c:643 #, c-format msgid "Ignoring %s: %s" msgstr "" #: device/dev-cache.c:623 #, c-format msgid "Ignoring %s: Not a directory" msgstr "" #: device/dev-cache.c:628 msgid "dir_list allocation failed" msgstr "" #: device/dev-cache.c:649 #, c-format msgid "Ignoring %s: Not a regular file" msgstr "" #: device/dev-cache.c:654 msgid "dir_list allocation failed for file" msgstr "" #: device/dev-cache.c:686 device/dev-cache.c:690 #, c-format msgid "Path %s no longer valid for device(%d,%d)" msgstr "" #: device/dev-cache.c:707 #, c-format msgid "Aborting - please provide new pathname for what used to be %s" msgstr "" #: device/dev-cache.c:747 msgid "dev_iter allocation failed" msgstr "" #: device/dev-io.c:67 #, c-format msgid "Attempt to read an unopened device (%s)." msgstr "" #: device/dev-io.c:79 #, c-format msgid "Read size too large: %lu" msgstr "" #: device/dev-io.c:84 #, c-format msgid "%s: lseek %lu failed: %s" msgstr "" #: device/dev-io.c:98 #, c-format msgid "%s: %s failed after %lu of %lu at %lu: %s" msgstr "" #: device/dev-io.c:134 #, c-format msgid "%s: block size is %u bytes" msgstr "" #: device/dev-io.c:191 msgid "Bounce buffer alloca failed" msgstr "" #: device/dev-io.c:238 device/dev-io.c:264 #, c-format msgid "%s: size is %lu sectors" msgstr "" #: device/dev-io.c:343 #, c-format msgid "WARNING: %s already opened read-only" msgstr "" #: device/dev-io.c:352 #, c-format msgid "WARNING: dev_open(%s) called while suspended" msgstr "" #: device/dev-io.c:364 #, c-format msgid "%s: stat failed: Has device name changed?" msgstr "" #: device/dev-io.c:390 #, c-format msgid "%s: Not using O_DIRECT" msgstr "" #: device/dev-io.c:422 #, c-format msgid "%s: fstat failed: Has device name changed?" msgstr "" #: device/dev-io.c:437 #, c-format msgid "Opened %s %s%s%s" msgstr "" #: device/dev-io.c:486 #, c-format msgid "Closed %s" msgstr "" #: device/dev-io.c:501 #, c-format msgid "Attempt to close device '%s' which is not open." 
msgstr "" #: device/dev-io.c:515 #, c-format msgid "%s: Immediate close attempt while still referenced" msgstr "" #: device/dev-io.c:576 #, c-format msgid "Read from %s failed" msgstr "" #: device/dev-io.c:588 #, c-format msgid "Circular read from %s failed" msgstr "" #: device/dev-io.c:648 #, c-format msgid "Wiping %s at %lu length %zu" msgstr "" #: device/dev-io.c:651 #, c-format msgid "Wiping %s at sector %lu length %zu sectors" msgstr "" #: display/display.c:145 #, c-format msgid "Unrecognised allocation policy %s" msgstr "" #: display/display.c:172 msgid "no memory for size display buffer" msgstr "" #: display/display.c:247 #, c-format msgid "%s:%s:%lu:-1:%u:%u:-1:%u:%u:%u:%u:%s" msgstr "" #: display/display.c:278 #, c-format msgid "--- %sPhysical volume ---" msgstr "" #: display/display.c:279 #, c-format msgid "PV Name %s" msgstr "" #: display/display.c:280 #, c-format msgid "VG Name %s%s" msgstr "" #: display/display.c:290 #, c-format msgid "PV Size %s / not usable %s" msgstr "" #: display/display.c:296 #, c-format msgid "PV Size %s" msgstr "" #: display/display.c:304 #, c-format msgid "Allocatable yes %s" msgstr "" #: display/display.c:307 msgid "Allocatable NO" msgstr "" #: display/display.c:312 #, c-format msgid "PE Size (KByte) %u" msgstr "" #: display/display.c:313 display/display.c:592 #, c-format msgid "Total PE %u" msgstr "" #: display/display.c:314 #, c-format msgid "Free PE %u" msgstr "" #: display/display.c:315 #, c-format msgid "Allocated PE %u" msgstr "" #: display/display.c:316 display/display.c:339 #, c-format msgid "PV UUID %s" msgstr "" #: display/display.c:317 display/display.c:345 display/display.c:476 #: display/display.c:527 display/display.c:610 format_text/archive.c:315 #: lvmcmdline.c:769 mirror/mirrored.c:73 striped/striped.c:49 msgid " " msgstr "" #: display/display.c:337 #, c-format msgid "PV Name %s " msgstr "" #: display/display.c:340 #, c-format msgid "PV Status %sallocatable" msgstr "" #: display/display.c:342 #, c-format msgid "Total PE / Free PE %u / %u" msgstr "" #: display/display.c:355 #, c-format msgid "%s%s/%s:%s:%d:%d:-1:%d:%lu:%d:-1:%d:%d:%d:%d" msgstr "" #: display/display.c:385 msgid "--- Logical volume ---" msgstr "" #: display/display.c:387 #, c-format msgid "LV Name %s%s/%s" msgstr "" #: display/display.c:389 #, c-format msgid "VG Name %s" msgstr "" #: display/display.c:391 #, c-format msgid "LV UUID %s" msgstr "" #: display/display.c:393 #, c-format msgid "LV Write Access %s" msgstr "" #: display/display.c:397 msgid "LV snapshot status source of" msgstr "" #: display/display.c:406 #, c-format msgid " %s%s/%s [%s]" msgstr "" #: display/display.c:419 #, c-format msgid "LV snapshot status %s destination for %s%s/%s" msgstr "" #: display/display.c:426 msgid "LV Status suspended" msgstr "" #: display/display.c:428 #, c-format msgid "LV Status %savailable" msgstr "" #: display/display.c:436 #, c-format msgid "# open %u" msgstr "" #: display/display.c:438 #, c-format msgid "LV Size %s" msgstr "" #: display/display.c:442 #, c-format msgid "Current LE %u" msgstr "" #: display/display.c:446 #, c-format msgid "COW-table size %s" msgstr "" #: display/display.c:448 #, c-format msgid "COW-table LE %u" msgstr "" #: display/display.c:451 #, c-format msgid "Allocated to snapshot %.2f%% " msgstr "" #: display/display.c:453 #, c-format msgid "Snapshot chunk size %s" msgstr "" #: display/display.c:457 #, c-format msgid "Segments %u" msgstr "" #: display/display.c:463 #, c-format msgid "Allocation %s" msgstr "" #: display/display.c:464 #, c-format msgid 
"Read ahead sectors %u" msgstr "" #: display/display.c:468 #, c-format msgid "Persistent major %d" msgstr "" #: display/display.c:469 #, c-format msgid "Persistent minor %d" msgstr "" #: display/display.c:473 #, c-format msgid "Block device %d:%d" msgstr "" #: display/display.c:486 #, c-format msgid "%sPhysical volume\t%s" msgstr "" #: display/display.c:492 #, c-format msgid "%sPhysical extents\t%d to %d" msgstr "" #: display/display.c:497 #, c-format msgid "%sLogical volume\t%s" msgstr "" #: display/display.c:502 #, c-format msgid "%sLogical extents\t%d to %d" msgstr "" #: display/display.c:507 #, c-format msgid "%sUnassigned area" msgstr "" #: display/display.c:515 msgid "--- Segments ---" msgstr "" #: display/display.c:518 #, c-format msgid "Logical extent %u to %u:" msgstr "" #: display/display.c:521 #, c-format msgid " Type\t\t%s" msgstr "" #: display/display.c:547 msgid "--- Volume group ---" msgstr "" #: display/display.c:548 #, c-format msgid "VG Name %s" msgstr "" #: display/display.c:549 #, c-format msgid "System ID %s" msgstr "" #: display/display.c:550 #, c-format msgid "Format %s" msgstr "" #: display/display.c:552 #, c-format msgid "Metadata Areas %d" msgstr "" #: display/display.c:554 #, c-format msgid "Metadata Sequence No %d" msgstr "" #: display/display.c:557 #, c-format msgid "VG Access %s%s%s%s" msgstr "" #: display/display.c:562 #, c-format msgid "VG Status %s%sresizable" msgstr "" #: display/display.c:569 msgid "Clustered yes" msgstr "" #: display/display.c:570 #, c-format msgid "Shared %s" msgstr "" #: display/display.c:573 #, c-format msgid "MAX LV %u" msgstr "" #: display/display.c:574 #, c-format msgid "Cur LV %u" msgstr "" #: display/display.c:575 #, c-format msgid "Open LV %u" msgstr "" #: display/display.c:581 #, c-format msgid "Max PV %u" msgstr "" #: display/display.c:582 #, c-format msgid "Cur PV %u" msgstr "" #: display/display.c:583 #, c-format msgid "Act PV %u" msgstr "" #: display/display.c:585 #, c-format msgid "VG Size %s" msgstr "" #: display/display.c:589 #, c-format msgid "PE Size %s" msgstr "" #: display/display.c:594 #, c-format msgid "Alloc PE / Size %u / %s" msgstr "" #: display/display.c:600 #, c-format msgid "Free PE / Size %u / %s" msgstr "" #: display/display.c:609 #, c-format msgid "VG UUID %s" msgstr "" #: display/display.c:645 #, c-format msgid "%s:%s:%d:-1:%u:%u:%u:-1:%u:%u:%u:%lu:%u:%u:%u:%u:%s" msgstr "" #: display/display.c:669 #, c-format msgid "\"%s\" %-9s [%-9s used / %s free]" msgstr "" #: display/display.c:686 display/display.c:695 pvscan.c:34 #, c-format msgid "%s" msgstr "" #: error/errseg.c:73 msgid "error module string list allocation failed" msgstr "" #: error/errseg.c:109 mirror/mirrored.c:562 snapshot/snapshot.c:179 #: striped/striped.c:227 zero/zero.c:109 #, c-format msgid "Initialised segtype: %s" msgstr "" #: filters/filter-composite.c:31 #, c-format msgid "Using %s" msgstr "" #: filters/filter-composite.c:59 msgid "composite filters allocation failed" msgstr "" #: filters/filter-composite.c:67 msgid "compsoite filters allocation failed" msgstr "" #: filters/filter-md.c:31 #, c-format msgid "%s: Skipping md component device" msgstr "" #: filters/filter-md.c:36 #, c-format msgid "%s: Skipping: error in md component detection" msgstr "" #: filters/filter-md.c:54 msgid "md filter allocation failed" msgstr "" #: filters/filter-persistent.c:57 msgid "Wiping cache of LVM-capable devices" msgstr "" #: filters/filter-persistent.c:73 #, c-format msgid "Couldn't find %s array in '%s'" msgstr "" #: filters/filter-persistent.c:84 
msgid "Devices array contains a value which is not a string ... ignoring" msgstr "" #: filters/filter-persistent.c:90 #, c-format msgid "Couldn't add '%s' to filter ... ignoring" msgstr "" #: filters/filter-persistent.c:108 #, c-format msgid "%s: stat failed: %s" msgstr "" #: filters/filter-persistent.c:132 #, c-format msgid "Loaded persistent filter cache from %s" msgstr "" #: filters/filter-persistent.c:183 #, c-format msgid "Internal persistent device cache empty - not writing to %s" msgstr "" #: filters/filter-persistent.c:188 #, c-format msgid "Device cache incomplete - not writing to %s" msgstr "" #: filters/filter-persistent.c:193 #, c-format msgid "Dumping persistent device cache to %s" msgstr "" #: filters/filter-persistent.c:248 format_text/format-text.c:902 #: format_text/format-text.c:928 format_text/format-text.c:965 #: misc/lvm-file.c:91 #, c-format msgid "%s: rename to %s failed: %s" msgstr "" #: filters/filter-persistent.c:276 #, c-format msgid "%s: Skipping (cached)" msgstr "" #: filters/filter-persistent.c:311 msgid "Couldn't create hash table for persistent filter." msgstr "" #: filters/filter-regex.c:44 msgid "pattern must begin with 'a' or 'r'" msgstr "" #: filters/filter-regex.c:83 msgid "invalid separator at end of regex" msgstr "" #: filters/filter-regex.c:108 msgid "filter patterns must be enclosed in quotes" msgstr "" #: filters/filter-regex.c:133 msgid "invalid filter pattern" msgstr "" #: filters/filter-regex.c:174 #, c-format msgid "%s: Skipping (regex)" msgstr "" #: filters/filter-sysfs.c:31 msgid "No proc filesystem found: skipping sysfs filter" msgstr "" #: filters/filter-sysfs.c:37 msgid "Failed to create /proc/mounts string" msgstr "" #: filters/filter-sysfs.c:137 #, c-format msgid "Empty sysfs device file: %s" msgstr "" #: filters/filter-sysfs.c:142 msgid "sysfs device file not correct format" msgstr "" #: filters/filter-sysfs.c:192 #, c-format msgid "sysfs path name too long: %s in %s" msgstr "" #: filters/filter-sysfs.c:255 #, c-format msgid "%s: Skipping (sysfs)" msgstr "" #: filters/filter-sysfs.c:278 msgid "sysfs pool creation failed" msgstr "" #: filters/filter-sysfs.c:283 msgid "sysfs dev_set creation failed" msgstr "" #: filters/filter.c:90 #, c-format msgid "%s: Skipping: Unrecognised LVM device type %lu" msgstr "" #: filters/filter.c:98 #, c-format msgid "%s: Skipping: Suspended dm device" msgstr "" #: filters/filter.c:104 #, c-format msgid "%s: Skipping: open failed" msgstr "" #: filters/filter.c:110 #, c-format msgid "%s: Skipping: dev_get_size failed" msgstr "" #: filters/filter.c:115 #, c-format msgid "%s: Skipping: Too small to hold a PV" msgstr "" #: filters/filter.c:120 #, c-format msgid "%s: Skipping: Partition table signature found" msgstr "" #: filters/filter.c:147 msgid "No proc filesystem found: using all block device types" msgstr "" #: filters/filter.c:159 msgid "Failed to create /proc/devices string" msgstr "" #: filters/filter.c:218 msgid "Expecting string in devices/types in config file" msgstr "" #: filters/filter.c:228 #, c-format msgid "Max partition count missing for %s in devices/types in config file" msgstr "" #: filters/filter.c:236 #, c-format msgid "Zero partition count invalid for %s in devices/types in config file" msgstr "" #: filters/filter.c:269 msgid "LVM type filter allocation failed" msgstr "" #: format1/disk-rep.c:190 #, c-format msgid "%s does not have a valid LVM1 PV identifier" msgstr "" #: format1/disk-rep.c:196 #, c-format msgid "format1: Unknown metadata version %d found on %s" msgstr "" #: 
format1/disk-rep.c:210 format_pool/disk_rep.c:43 #, c-format msgid "Failed to read PV data from %s" msgstr "" #: format1/disk-rep.c:367 #, c-format msgid "%s is not a member of any format1 VG" msgstr "" #: format1/disk-rep.c:374 #, c-format msgid "Failed to read VG data from PV (%s)" msgstr "" #: format1/disk-rep.c:380 #, c-format msgid "%s is not a member of the VG %s" msgstr "" #: format1/disk-rep.c:390 #, c-format msgid "Failed to read PV uuid list from %s" msgstr "" #: format1/disk-rep.c:395 #, c-format msgid "Failed to read LV's from %s" msgstr "" #: format1/disk-rep.c:400 #, c-format msgid "Failed to read extents from %s" msgstr "" #: format1/disk-rep.c:404 #, c-format msgid "Found %s in %sVG %s" msgstr "" #: format1/disk-rep.c:443 format_pool/disk_rep.c:67 #, c-format msgid "Ignoring duplicate PV %s on %s" msgstr "" #: format1/disk-rep.c:448 format_pool/disk_rep.c:72 #, c-format msgid "Duplicate PV %s - using md %s" msgstr "" #: format1/disk-rep.c:494 msgid "read_pvs_in_vg: dev_iter_create failed" msgstr "" #: format1/disk-rep.c:517 #, c-format msgid "Writing %s VG metadata to %s at %lu len %zu" msgstr "" #: format1/disk-rep.c:537 #, c-format msgid "Too many uuids to fit on %s" msgstr "" #: format1/disk-rep.c:542 #, c-format msgid "Writing %s uuidlist to %s at %lu len %d" msgstr "" #: format1/disk-rep.c:557 #, c-format msgid "Writing %s LV %s metadata to %s at %lu len %zu" msgstr "" #: format1/disk-rep.c:578 #, c-format msgid "Couldn't zero lv area on device '%s'" msgstr "" #: format1/disk-rep.c:586 #, c-format msgid "lv_number %d too large" msgstr "" #: format1/disk-rep.c:603 #, c-format msgid "Writing %s extents metadata to %s at %lu len %zu" msgstr "" #: format1/disk-rep.c:623 msgid "Invalid PV structure size." msgstr "" #: format1/disk-rep.c:632 msgid "Couldn't allocate temporary PV buffer." msgstr "" #: format1/disk-rep.c:639 #, c-format msgid "Writing %s PV metadata to %s at %lu len %zu" msgstr "" #: format1/disk-rep.c:662 #, c-format msgid "Failed to write PV structure onto %s" msgstr "" #: format1/disk-rep.c:681 #, c-format msgid "Failed to write VG data to %s" msgstr "" #: format1/disk-rep.c:686 #, c-format msgid "Failed to write PV uuid list to %s" msgstr "" #: format1/disk-rep.c:691 #, c-format msgid "Failed to write LV's to %s" msgstr "" #: format1/disk-rep.c:696 #, c-format msgid "Failed to write extents to %s" msgstr "" #: format1/disk-rep.c:736 #, c-format msgid "Successfully wrote data to %s" msgstr "" #: format1/format1.c:72 #, c-format msgid "VG data differs between PVs %s and %s" msgstr "" #: format1/format1.c:74 format1/format1.c:89 #, c-format msgid "VG data on %s: %s %s %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u" msgstr "" #: format1/format1.c:115 #, c-format msgid "%d PV(s) found for VG %s: expected %d" msgstr "" #: format1/format1.c:294 format_pool/format_pool.c:228 #, c-format msgid "Reading physical volume data %s from disk" msgstr "" #: format1/format1.c:335 #, c-format msgid "Physical volumes cannot be bigger than %s" msgstr "" #: format1/format1.c:355 msgid "Metadata would overwrite physical extents" msgstr "" #: format1/format1.c:370 #, c-format msgid "logical volumes cannot contain more than %d extents." 
msgstr "" #: format1/format1.c:375 #, c-format msgid "logical volumes cannot be larger than %s" msgstr "" #: format1/format1.c:451 #, c-format msgid "Extent size must be between %s and %s" msgstr "" #: format1/format1.c:459 #, c-format msgid "Extent size must be multiple of %s" msgstr "" #: format1/format1.c:466 format_text/format-text.c:79 msgid "Extent size must be power of 2" msgstr "" #: format1/format1.c:563 msgid "Couldn't create lvm1 label handler." msgstr "" #: format1/format1.c:568 msgid "Couldn't register lvm1 label handler." msgstr "" #: format1/format1.c:572 format_pool/format_pool.c:354 #: format_text/format-text.c:1994 #, c-format msgid "Initialised format: %s" msgstr "" #: format1/import-export.c:75 #, c-format msgid "System ID %s on %s differs from %s for volume group" msgstr "" #: format1/import-export.c:98 format_text/import_vsn1.c:220 #: metadata/metadata.c:569 metadata/metadata.c:1542 pvresize.c:121 #: vgreduce.c:395 vgremove.c:62 #, c-format msgid "%s: Couldn't get size." msgstr "" #: format1/import-export.c:101 format_text/import_vsn1.c:223 #, c-format msgid "Fixing up missing format1 size (%s) for PV %s" msgstr "" #: format1/import-export.c:108 format_text/import_vsn1.c:230 #, c-format msgid "WARNING: Physical Volume %s is too large for underlying device" msgstr "" #: format1/import-export.c:130 msgid "Generated system_id too long" msgstr "" #: format1/import-export.c:174 #, c-format msgid "Volume group name %s too long to export" msgstr "" #: format1/import-export.c:412 #, c-format msgid "Segment type %s in LV %s: unsupported by format1" msgstr "" #: format1/import-export.c:418 #, c-format msgid "Non-PV stripe found in LV %s: unsupported by format1" msgstr "" #: format1/import-export.c:610 msgid "Logical volume number out of bounds." msgstr "" #: format1/import-export.c:617 #, c-format msgid "Couldn't find logical volume '%s'." msgstr "" #: format1/import-export.c:637 #, c-format msgid "Couldn't find origin logical volume for snapshot '%s'." msgstr "" #: format1/import-export.c:650 msgid "Couldn't add snapshot." msgstr "" #: format1/import-extents.c:53 msgid "Unable to create hash table for holding extent maps." msgstr "" #: format1/import-extents.c:92 #, c-format msgid "Physical volume (%s) contains an unknown logical volume (%s)." msgstr "" #: format1/import-extents.c:137 #, c-format msgid "Invalid LV in extent map (PV %s, PE %u, LV %u, LE %u)" msgstr "" #: format1/import-extents.c:149 msgid "logical extent number out of bounds" msgstr "" #: format1/import-extents.c:155 #, c-format msgid "logical extent (%u) already mapped." msgstr "" #: format1/import-extents.c:175 #, c-format msgid "Logical volume (%s) contains an incomplete mapping table." msgstr "" #: format1/import-extents.c:229 msgid "Failed to allocate linear segment." msgstr "" #: format1/import-extents.c:276 #, c-format msgid "" "Number of stripes (%u) incompatible with logical extent count (%u) for %s" msgstr "" #: format1/import-extents.c:303 msgid "Failed to allocate striped segment." msgstr "" #: format1/import-extents.c:359 msgid "Couldn't allocate logical volume maps." msgstr "" #: format1/import-extents.c:364 msgid "Couldn't fill logical volume maps." msgstr "" #: format1/import-extents.c:374 msgid "Couldn't build extent segments." 
msgstr "" #: format1/layout.c:79 #, c-format msgid "MaxLogicalVolumes of %d exceeds format limit of %d for VG '%s'" msgstr "" #: format1/layout.c:86 #, c-format msgid "MaxPhysicalVolumes of %d exceeds format limit of %d for VG '%s'" msgstr "" #: format1/layout.c:105 msgid "Insufficient space for metadata and PE's." msgstr "" #: format1/layout.c:141 #, c-format msgid "Too few extents on %s. Try smaller extent size." msgstr "" #: format1/layout.c:162 #, c-format msgid "Metadata extent limit (%u) exceeded for %s - %u required" msgstr "" #: format1/lvm1-label.c:29 #, c-format msgid "The '%s' operation is not supported for the lvm1 labeller." msgstr "" #: format1/lvm1-label.c:120 format_pool/pool_label.c:99 #: format_text/text_label.c:285 msgid "Couldn't allocate labeller object." msgstr "" #: format_pool/disk_rep.c:94 format_pool/disk_rep.c:98 #, c-format msgid "Calculated uuid %s for %s" msgstr "" #: format_pool/disk_rep.c:274 #, c-format msgid "Unable to allocate %d 32-bit uints" msgstr "" #: format_pool/disk_rep.c:341 #, c-format msgid "No devices for vg %s found in cache" msgstr "" #: format_pool/disk_rep.c:363 msgid "Unable to allocate pool list structure" msgstr "" #: format_pool/format_pool.c:44 #, c-format msgid "Unable to allocate %d subpool structures" msgstr "" #: format_pool/format_pool.c:64 #, c-format msgid "Unable to allocate %d pool_device structures" msgstr "" #: format_pool/format_pool.c:87 #, c-format msgid "Missing subpool %d in pool %s" msgstr "" #: format_pool/format_pool.c:92 #, c-format msgid "Missing device %u for subpool %d in pool %s" msgstr "" #: format_pool/format_pool.c:113 msgid "Unable to allocate volume group structure" msgstr "" #: format_pool/format_pool.c:279 msgid "Unable to allocate format instance structure for pool format" msgstr "" #: format_pool/format_pool.c:289 msgid "Unable to allocate metadata area structure for pool format" msgstr "" #: format_pool/format_pool.c:332 msgid "Unable to allocate format type structure for pool format" msgstr "" #: format_pool/format_pool.c:345 msgid "Couldn't create pool label handler." msgstr "" #: format_pool/format_pool.c:350 msgid "Couldn't register pool label handler." msgstr "" #: format_pool/import_export.c:64 msgid "Unable to allocate lv list structure" msgstr "" #: format_pool/import_export.c:69 msgid "Unable to allocate logical volume structure" msgstr "" #: format_pool/import_export.c:98 #, c-format msgid "Calculated lv uuid for lv %s: %s" msgstr "" #: format_pool/import_export.c:133 msgid "Unable to allocate pv list structure" msgstr "" #: format_pool/import_export.c:137 msgid "Unable to allocate pv structure" msgstr "" #: format_pool/import_export.c:165 msgid "Unable to duplicate vg_name string" msgstr "" #: format_pool/import_export.c:195 #, c-format msgid "Found sptype %X and converted it to %s" msgstr "" #: format_pool/import_export.c:210 msgid "Stripe size must be a power of 2" msgstr "" #: format_pool/import_export.c:226 msgid "Unable to allocate striped lv_segment structure" msgstr "" #: format_pool/import_export.c:267 msgid "Unable to allocate linear lv_segment structure" msgstr "" #: format_pool/pool_label.c:28 #, c-format msgid "The '%s' operation is not supported for the pool labeller." msgstr "" #: format_text/archive.c:146 #, c-format msgid "Couldn't scan the archive directory (%s)." msgstr "" #: format_text/archive.c:173 msgid "Couldn't create new archive file." 
msgstr "" #: format_text/archive.c:221 #, c-format msgid "Expiring archive %s" msgstr "" #: format_text/archive.c:246 msgid "Couldn't create temporary archive name." msgstr "" #: format_text/archive.c:251 msgid "Couldn't create FILE object for archive." msgstr "" #: format_text/archive.c:288 msgid "Archive file name too long." msgstr "" #: format_text/archive.c:299 #, c-format msgid "Archive rename failed for %s" msgstr "" #: format_text/archive.c:316 #, c-format msgid "File:\t\t%s" msgstr "" #: format_text/archive.c:321 msgid "Couldn't create text instance object." msgstr "" #: format_text/archive.c:331 msgid "Unable to read archive file." msgstr "" #: format_text/archive.c:336 #, c-format msgid "VG name: \t%s" msgstr "" #: format_text/archive.c:337 #, c-format msgid "Description:\t%s" msgstr "" #: format_text/archive.c:338 #, c-format msgid "Backup Time:\t%s" msgstr "" #: format_text/archive.c:355 #, c-format msgid "No archives found in %s." msgstr "" #: format_text/archiver.c:43 format_text/archiver.c:155 msgid "archive_params alloc failed" msgstr "" #: format_text/archiver.c:128 #, c-format msgid "Archiving volume group \"%s\" metadata (seqno %u)." msgstr "" #: format_text/archiver.c:303 #, c-format msgid "PV %s is a different format (seqno %s)" msgstr "" #: format_text/archiver.c:364 #, c-format msgid "Creating volume group backup \"%s\" (seqno %u)." msgstr "" #: format_text/archiver.c:402 msgid "Failed to generate backup filename." msgstr "" #: format_text/export.c:80 #, c-format msgid "uname failed: %s" msgstr "" #: format_text/export.c:101 msgid "Internal error tracking indentation" msgstr "" #: format_text/export.c:120 #, c-format msgid "Doubling metadata output buffer to %u" msgstr "" #: format_text/export.c:124 msgid "Buffer reallocation failed." msgstr "" #: format_text/export.c:737 msgid "text_export buffer allocation failed" msgstr "" #: format_text/flags.c:79 msgid "Unknown flag set requested." msgstr "" #: format_text/flags.c:125 msgid "Metadata inconsistency: Not all flags successfully exported." msgstr "" #: format_text/flags.c:147 msgid "Status value is not a string." msgstr "" #: format_text/flags.c:158 #, c-format msgid "Unknown status flag '%s'." msgstr "" #: format_text/format-text.c:152 #, c-format msgid "Found text metadata area, offset=%lu, size=%lu" msgstr "" #: format_text/format-text.c:207 #, c-format msgid "" "Found LVM2 metadata record at offset=%lu, size=%lu, offset2=%lu size2=%lu" msgstr "" #: format_text/format-text.c:259 #, c-format msgid "Random lvid creation failed for %s/%s." 
msgstr "" #: format_text/format-text.c:290 msgid "struct mda_header allocation failed" msgstr "" #: format_text/format-text.c:302 msgid "Incorrect metadata area header checksum" msgstr "" #: format_text/format-text.c:309 msgid "Wrong magic number in metadata area header" msgstr "" #: format_text/format-text.c:314 #, c-format msgid "Incompatible metadata area header version: %d" msgstr "" #: format_text/format-text.c:320 #, c-format msgid "Incorrect start sector in metadata area header: %lu" msgstr "" #: format_text/format-text.c:461 #, c-format msgid "VG %s not found on %s" msgstr "" #: format_text/format-text.c:469 format_text/format-text.c:574 #, c-format msgid "VG %s metadata too large for circular buffer" msgstr "" #: format_text/format-text.c:484 #, c-format msgid "Read %s %smetadata (%u) from %s at %lu size %lu" msgstr "" #: format_text/format-text.c:557 #, c-format msgid "VG %s metadata writing failed" msgstr "" #: format_text/format-text.c:579 #, c-format msgid "Writing %s metadata to %s at %lu len %lu" msgstr "" #: format_text/format-text.c:592 #, c-format msgid "Writing metadata to %s at %lu len %u" msgstr "" #: format_text/format-text.c:681 #, c-format msgid "%sCommitting %s metadata (%u) to %s header at %lu" msgstr "" #: format_text/format-text.c:685 #, c-format msgid "Wiping pre-committed %s metadata from %s header at %lu" msgstr "" #: format_text/format-text.c:691 format_text/format-text.c:777 msgid "Failed to write metadata area header" msgstr "" #: format_text/format-text.c:810 #, c-format msgid "'%s' does not contain volume group '%s'." msgstr "" #: format_text/format-text.c:814 #, c-format msgid "Read volume group %s from %s" msgstr "" #: format_text/format-text.c:863 msgid "Text format failed to determine directory." msgstr "" #: format_text/format-text.c:868 msgid "Couldn't create temporary text file name." msgstr "" #: format_text/format-text.c:879 #, c-format msgid "Writing %s metadata to %s" msgstr "" #: format_text/format-text.c:882 #, c-format msgid "Failed to write metadata to %s." msgstr "" #: format_text/format-text.c:901 format_text/format-text.c:926 #: format_text/format-text.c:960 #, c-format msgid "Renaming %s to %s" msgstr "" #: format_text/format-text.c:917 #, c-format msgid "Test mode: Skipping committing %s metadata (%u)" msgstr "" #: format_text/format-text.c:920 #, c-format msgid "Unlinking %s" msgstr "" #: format_text/format-text.c:925 #, c-format msgid "Committing %s metadata (%u)" msgstr "" #: format_text/format-text.c:962 msgid "Test mode: Skipping rename" msgstr "" #: format_text/format-text.c:1025 format_text/format-text.c:1723 #, c-format msgid "Name too long %s/%s" msgstr "" #: format_text/format-text.c:1089 #, c-format msgid "%s: metadata too large for circular buffer" msgstr "" #: format_text/format-text.c:1118 #, c-format msgid "%s: Found metadata at %lu size %lu for %s (%s)" msgstr "" #: format_text/format-text.c:1186 #, c-format msgid "Physical extents end beyond end of device %s!" msgstr "" #: format_text/format-text.c:1207 #, c-format msgid "Warning: metadata area fills disk leaving no space for data on %s." 
msgstr "" #: format_text/format-text.c:1238 format_text/format-text.c:1283 msgid "Failed to wipe new metadata area" msgstr "" #: format_text/format-text.c:1329 #, c-format msgid "Creating metadata area on %s at sector %lu size %lu sectors" msgstr "" #: format_text/format-text.c:1410 msgid "_add_raw allocation failed" msgstr "" #: format_text/format-text.c:1470 #, c-format msgid "Must be exactly one data area (found %d) on PV %s" msgstr "" #: format_text/format-text.c:1485 format_text/format-text.c:1489 msgid "metadata_area allocation failed" msgstr "" #: format_text/format-text.c:1650 #, c-format msgid "PV %s too large for extent size %s." msgstr "" #: format_text/format-text.c:1693 msgid "Couldn't allocate format instance object." msgstr "" #: format_text/format-text.c:1699 msgid "Couldn't allocate text_fid_context." msgstr "" #: format_text/format-text.c:1807 #, c-format msgid "%s: Volume group filename may not end in .tmp" msgstr "" #: format_text/format-text.c:1841 msgid "Couldn't allocate text format context object." msgstr "" #: format_text/format-text.c:1863 msgid "_add_dir allocation failed" msgstr "" #: format_text/format-text.c:1866 #, c-format msgid "Adding text format metadata dir: %s" msgstr "" #: format_text/format-text.c:1883 msgid "Empty metadata disk_area section of config file" msgstr "" #: format_text/format-text.c:1888 msgid "Missing start_sector in metadata disk_area section of config file" msgstr "" #: format_text/format-text.c:1895 msgid "Missing size in metadata disk_area section of config file" msgstr "" #: format_text/format-text.c:1902 msgid "Missing uuid in metadata disk_area section of config file" msgstr "" #: format_text/format-text.c:1908 #, c-format msgid "Invalid uuid in metadata disk_area section of config file: %s" msgstr "" #: format_text/format-text.c:1917 format_text/import_vsn1.c:155 msgid "Couldn't find device." msgstr "" #: format_text/format-text.c:1919 format_text/import_vsn1.c:157 #, c-format msgid "Couldn't find device with uuid '%s'." msgstr "" #: format_text/format-text.c:1948 msgid "Failed to allocate dir_list" msgstr "" #: format_text/format-text.c:1960 msgid "Couldn't create text label handler." msgstr "" #: format_text/format-text.c:1966 msgid "Couldn't register text label handler." msgstr "" #: format_text/format-text.c:1974 msgid "Invalid string in config file: metadata/dirs" msgstr "" #: format_text/import.c:103 msgid "Couldn't read volume group metadata." msgstr "" #: format_text/import_vsn1.c:46 #, c-format msgid "Can't process text format file - %s." msgstr "" #: format_text/import_vsn1.c:94 msgid "Couldn't find uuid." msgstr "" #: format_text/import_vsn1.c:100 msgid "uuid must be a string." msgstr "" #: format_text/import_vsn1.c:105 msgid "Invalid uuid." msgstr "" #: format_text/import_vsn1.c:139 msgid "Empty pv section." msgstr "" #: format_text/import_vsn1.c:144 msgid "Couldn't read uuid for volume group." msgstr "" #: format_text/import_vsn1.c:174 msgid "Couldn't find status flags for physical volume." msgstr "" #: format_text/import_vsn1.c:179 msgid "Couldn't read status flags for physical volume." msgstr "" #: format_text/import_vsn1.c:187 msgid "Couldn't read extent size for volume group." msgstr "" #: format_text/import_vsn1.c:192 msgid "Couldn't find extent count (pe_count) for physical volume." msgstr "" #: format_text/import_vsn1.c:203 #, c-format msgid "Couldn't read tags for physical volume %s in %s." msgstr "" #: format_text/import_vsn1.c:275 msgid "Empty segment section." 
msgstr "" #: format_text/import_vsn1.c:280 #, c-format msgid "Couldn't read 'start_extent' for segment '%s'." msgstr "" #: format_text/import_vsn1.c:286 #, c-format msgid "Couldn't read 'extent_count' for segment '%s'." msgstr "" #: format_text/import_vsn1.c:296 msgid "Segment type must be a string." msgstr "" #: format_text/import_vsn1.c:316 msgid "Segment allocation failed" msgstr "" #: format_text/import_vsn1.c:329 #, c-format msgid "Couldn't read tags for a segment of %s/%s." msgstr "" #: format_text/import_vsn1.c:358 #, c-format msgid "Zero areas not allowed for segment '%s'" msgstr "" #: format_text/import_vsn1.c:394 #, c-format msgid "Couldn't find volume '%s' for segment '%s'." msgstr "" #: format_text/import_vsn1.c:407 #, c-format msgid "Incorrect number of areas in area array for segment '%s'." msgstr "" #: format_text/import_vsn1.c:437 msgid "Only one segment permitted for snapshot" msgstr "" #: format_text/import_vsn1.c:443 msgid "Couldn't read segment count for logical volume." msgstr "" #: format_text/import_vsn1.c:448 msgid "segment_count and actual number of segments disagree." msgstr "" #: format_text/import_vsn1.c:494 format_text/import_vsn1.c:562 msgid "Empty logical volume section." msgstr "" #: format_text/import_vsn1.c:499 msgid "Couldn't find status flags for logical volume." msgstr "" #: format_text/import_vsn1.c:504 msgid "Couldn't read status flags for logical volume." msgstr "" #: format_text/import_vsn1.c:512 format_text/import_vsn1.c:729 msgid "allocation_policy must be a string." msgstr "" #: format_text/import_vsn1.c:535 #, c-format msgid "Couldn't read tags for logical volume %s/%s." msgstr "" #: format_text/import_vsn1.c:555 #, c-format msgid "Lost logical volume reference %s" msgstr "" #: format_text/import_vsn1.c:568 #, c-format msgid "Couldn't read uuid for logical volume %s." msgstr "" #: format_text/import_vsn1.c:595 #, c-format msgid "Couldn't read minor number for logical volume %s." msgstr "" #: format_text/import_vsn1.c:603 #, c-format msgid "Couldn't read major number for logical volume %s." msgstr "" #: format_text/import_vsn1.c:620 #, c-format msgid "Couldn't find section '%s'." msgstr "" #: format_text/import_vsn1.c:649 format_text/import_vsn1.c:841 msgid "Couldn't find volume group in file." msgstr "" #: format_text/import_vsn1.c:673 msgid "system_id must be a string" msgstr "" #: format_text/import_vsn1.c:680 format_text/import_vsn1.c:851 #, c-format msgid "Couldn't read uuid for volume group %s." msgstr "" #: format_text/import_vsn1.c:685 #, c-format msgid "Couldn't read 'seqno' for volume group %s." msgstr "" #: format_text/import_vsn1.c:691 format_text/import_vsn1.c:856 #, c-format msgid "Couldn't find status flags for volume group %s." msgstr "" #: format_text/import_vsn1.c:697 format_text/import_vsn1.c:862 #, c-format msgid "Couldn't read status flags for volume group %s." msgstr "" #: format_text/import_vsn1.c:703 #, c-format msgid "Couldn't read extent size for volume group %s." msgstr "" #: format_text/import_vsn1.c:714 #, c-format msgid "Couldn't read 'max_lv' for volume group %s." msgstr "" #: format_text/import_vsn1.c:720 #, c-format msgid "Couldn't read 'max_pv' for volume group %s." msgstr "" #: format_text/import_vsn1.c:745 msgid "Couldn't create hash table." msgstr "" #: format_text/import_vsn1.c:752 #, c-format msgid "Couldn't find all physical volumes for volume group %s." msgstr "" #: format_text/import_vsn1.c:763 #, c-format msgid "Couldn't read tags for volume group %s." 
msgstr "" #: format_text/import_vsn1.c:769 #, c-format msgid "Couldn't read all logical volume names for volume group %s." msgstr "" #: format_text/import_vsn1.c:776 #, c-format msgid "Couldn't read all logical volumes for volume group %s." msgstr "" #: format_text/import_vsn1.c:782 #, c-format msgid "Failed to fixup mirror pointers after import for volume group %s." msgstr "" #: format_text/tags.c:62 msgid "Found a tag that is not a string" msgstr "" #: format_text/text_label.c:98 format_text/text_label.c:103 msgid "struct data_area_list allocation failed" msgstr "" #: format_text/text_label.c:138 format_text/text_label.c:149 msgid "struct mda_list allocation failed" msgstr "" #: format_text/text_label.c:143 format_text/text_label.c:154 msgid "struct mda_context allocation failed" msgstr "" #: label/label.c:49 msgid "Couldn't allocate memory for labeller list object." msgstr "" #: label/label.c:123 label/label.c:218 #, c-format msgid "%s: Failed to read label area" msgstr "" #: label/label.c:135 label/label.c:164 #, c-format msgid "Ignoring additional label on %s at sector %lu" msgstr "" #: label/label.c:140 #, c-format msgid "%s: Label for sector %lu found at sector %lu - ignoring" msgstr "" #: label/label.c:150 #, c-format msgid "Label checksum incorrect on %s - ignoring" msgstr "" #: label/label.c:161 #, c-format msgid "%s: %s label detected" msgstr "" #: label/label.c:185 #, c-format msgid "%s: No label detected" msgstr "" #: label/label.c:204 #, c-format msgid "Scanning for labels to wipe from %s" msgstr "" #: label/label.c:244 #, c-format msgid "%s: Wiping label at sector %lu" msgstr "" #: label/label.c:248 #, c-format msgid "Failed to remove label from %s at sector %lu" msgstr "" #: label/label.c:304 msgid "Label handler does not support label writes" msgstr "" #: label/label.c:309 #, c-format msgid "Label sector %lu beyond range (%ld)" msgstr "" #: label/label.c:333 #, c-format msgid "%s: Writing label to sector %lu" msgstr "" #: label/label.c:336 #, c-format msgid "Failed to write label to %s" msgstr "" #: label/label.c:386 msgid "label allocaction failed" msgstr "" #: locking/cluster_locking.c:69 #, c-format msgid "Local socket creation failed: %s" msgstr "" #: locking/cluster_locking.c:82 #, c-format msgid "connect() failed on local socket: %s" msgstr "" #: locking/cluster_locking.c:109 #, c-format msgid "Error writing data to clvmd: %s" msgstr "" #: locking/cluster_locking.c:118 #, c-format msgid "Error reading data from clvmd: %s" msgstr "" #: locking/cluster_locking.c:123 msgid "EOF reading CLVMD" msgstr "" #: locking/cluster_locking.c:156 #, c-format msgid "cluster request failed: %s" msgstr "" #: locking/cluster_locking.c:346 #, c-format msgid "clvmd not running on node %s" msgstr "" #: locking/cluster_locking.c:351 #, c-format msgid "Error locking on node %s: %s" msgstr "" #: locking/cluster_locking.c:402 locking/file_locking.c:266 #: locking/locking.c:265 locking/no_locking.c:71 #, c-format msgid "Unrecognised lock scope: %d" msgstr "" #: locking/cluster_locking.c:408 #, c-format msgid "Locking %s at 0x%x" msgstr "" #: locking/external_locking.c:64 msgid "External locking already initialised" msgstr "" #: locking/external_locking.c:86 #, c-format msgid "Shared library %s does not contain locking functions" msgstr "" #: locking/external_locking.c:93 #, c-format msgid "Loaded external locking library %s" msgstr "" #: locking/file_locking.c:59 #, c-format msgid "Unlocking %s" msgstr "" #: locking/file_locking.c:111 msgid "CTRL-c detected: giving up waiting for lock" msgstr 
"" #: locking/file_locking.c:149 #, c-format msgid "Unrecognised lock type: %d" msgstr "" #: locking/file_locking.c:163 #, c-format msgid "Locking %s %c%c" msgstr "" #: locking/file_locking.c:237 #, c-format msgid "Unlocking LV %s" msgstr "" #: locking/file_locking.c:242 #, c-format msgid "Locking LV %s (NL)" msgstr "" #: locking/file_locking.c:247 #, c-format msgid "Locking LV %s (R)" msgstr "" #: locking/file_locking.c:252 #, c-format msgid "Locking LV %s (W)" msgstr "" #: locking/file_locking.c:257 #, c-format msgid "Locking LV %s (EX)" msgstr "" #: locking/locking.c:133 msgid "" "WARNING: Locking disabled. Be careful! This could corrupt your metadata." msgstr "" #: locking/locking.c:138 msgid "File-based locking selected." msgstr "" #: locking/locking.c:146 msgid "External locking selected." msgstr "" #: locking/locking.c:156 msgid "Falling back to internal clustered locking." msgstr "" #: locking/locking.c:160 msgid "Cluster locking selected." msgstr "" #: locking/locking.c:167 msgid "Unknown locking type requested." msgstr "" #: locking/locking.c:174 msgid "WARNING: Falling back to local file-based locking." msgstr "" #: locking/locking.c:175 msgid "Volume Groups with the clustered attribute will be inaccessible." msgstr "" #: locking/locking.c:185 msgid "Locking disabled - only read operations permitted." msgstr "" #: locking/locking.c:212 #, c-format msgid "LVM1 proc VG pathname too long for %s" msgstr "" #: locking/locking.c:217 #, c-format msgid "%s exists: Is the original LVM driver using this volume group?" msgstr "" #: locking/locking.c:302 lvresize.c:573 #, c-format msgid "Failed to suspend %s" msgstr "" #: locking/locking.c:323 #, c-format msgid "Failed to activate %s" msgstr "" #: log/log.c:145 msgid "Test mode: Metadata will NOT be updated." msgstr "" #: lvchange.c:27 #, c-format msgid "Logical volume \"%s\" is already writable" msgstr "" #: lvchange.c:33 #, c-format msgid "Logical volume \"%s\" is already read only" msgstr "" #: lvchange.c:40 #, c-format msgid "Cannot change permissions of mirror \"%s\" while active." 
msgstr "" #: lvchange.c:47 #, c-format msgid "Setting logical volume \"%s\" read/write" msgstr "" #: lvchange.c:51 #, c-format msgid "Setting logical volume \"%s\" read-only" msgstr "" #: lvchange.c:55 lvchange.c:314 lvchange.c:350 lvchange.c:393 lvchange.c:470 #: lvchange.c:524 lvconvert.c:401 #, c-format msgid "Updating logical volume \"%s\" on disk(s)" msgstr "" #: lvchange.c:64 lvchange.c:402 lvconvert.c:409 metadata/mirror.c:227 #, c-format msgid "Failed to lock %s" msgstr "" #: lvchange.c:74 lvchange.c:412 #, c-format msgid "Updating permissions for \"%s\" in kernel" msgstr "" #: lvchange.c:76 lvchange.c:414 lvconvert.c:422 lvresize.c:585 #: metadata/mirror.c:240 #, c-format msgid "Problem reactivating %s" msgstr "" #: lvchange.c:89 #, c-format msgid "Logical volume, %s, is not active" msgstr "" #: lvchange.c:113 #, c-format msgid "Deactivating logical volume \"%s\" locally" msgstr "" #: lvchange.c:120 #, c-format msgid "Deactivating logical volume \"%s\"" msgstr "" #: lvchange.c:127 #, c-format msgid "Locking failed: ignoring clustered logical volume %s" msgstr "" #: lvchange.c:133 #, c-format msgid "Activating logical volume \"%s\" exclusively" msgstr "" #: lvchange.c:140 #, c-format msgid "Activating logical volume \"%s\" locally" msgstr "" #: lvchange.c:147 #, c-format msgid "Activating logical volume \"%s\"" msgstr "" #: lvchange.c:157 #, c-format msgid "Spawning background pvmove process for %s" msgstr "" #: lvchange.c:168 #, c-format msgid "Refreshing logical volume \"%s\" (if active)" msgstr "" #: lvchange.c:183 #, c-format msgid "Unable to resync %s because it is not mirrored." msgstr "" #: lvchange.c:189 #, c-format msgid "Unable to resync pvmove volume %s" msgstr "" #: lvchange.c:194 #, c-format msgid "Unable to resync locked volume %s" msgstr "" #: lvchange.c:200 #, c-format msgid "Can't resync open logical volume \"%s\"" msgstr "" #: lvchange.c:210 #, c-format msgid "Logical volume \"%s\" not resynced" msgstr "" #: lvchange.c:220 #, c-format msgid "Can't get exclusive access to clustered volume %s" msgstr "" #: lvchange.c:226 #, c-format msgid "Unable to deactivate %s for resync" msgstr "" #: lvchange.c:232 #, c-format msgid "Starting resync of %s%s%s mirror \"%s\"" msgstr "" #: lvchange.c:246 #, c-format msgid "Failed to reactivate %s to resynchronize mirror" msgstr "" #: lvchange.c:262 msgid "Failed to write intermediate VG metadata." msgstr "" #: lvchange.c:276 msgid "Failed to commit intermediate VG metadata." msgstr "" #: lvchange.c:288 #, c-format msgid "Unable to activate %s for mirror log resync" msgstr "" #: lvchange.c:293 #, c-format msgid "Clearing log device %s" msgstr "" #: lvchange.c:295 #, c-format msgid "Unable to reset sync status for %s" msgstr "" #: lvchange.c:297 msgid "Failed to deactivate log LV after wiping failed" msgstr "" #: lvchange.c:303 #, c-format msgid "Unable to deactivate log LV %s after wiping for resync" msgstr "" #: lvchange.c:316 msgid "Failed to update metadata on disk." 
msgstr "" #: lvchange.c:321 #, c-format msgid "Failed to reactivate %s after resync" msgstr "" #: lvchange.c:338 #, c-format msgid "Allocation policy of logical volume \"%s\" is already %s" msgstr "" #: lvchange.c:347 #, c-format msgid "Setting contiguous allocation policy for \"%s\" to %s" msgstr "" #: lvchange.c:383 #, c-format msgid "Read ahead is already %u for \"%s\"" msgstr "" #: lvchange.c:390 #, c-format msgid "Setting read ahead to %u for \"%s\"" msgstr "" #: lvchange.c:429 #, c-format msgid "Minor number is already not persistent for \"%s\"" msgstr "" #: lvchange.c:436 #, c-format msgid "Disabling persistent device number for \"%s\"" msgstr "" #: lvchange.c:440 msgid "Minor number must be specified with -My" msgstr "" #: lvchange.c:444 msgid "Major number must be specified with -My" msgstr "" #: lvchange.c:453 #, c-format msgid "%s device number not changed." msgstr "" #: lvchange.c:457 #, c-format msgid "Ensuring %s is inactive." msgstr "" #: lvchange.c:459 #, c-format msgid "%s: deactivation failed" msgstr "" #: lvchange.c:465 #, c-format msgid "Setting persistent device number to (%d, %d) for \"%s\"" msgstr "" #: lvchange.c:484 #, c-format msgid "Re-activating logical volume \"%s\"" msgstr "" #: lvchange.c:486 #, c-format msgid "%s: reactivation failed" msgstr "" #: lvchange.c:500 lvcreate.c:680 pvchange.c:49 vgchange.c:440 vgcreate.c:107 msgid "Failed to get tag" msgstr "" #: lvchange.c:505 #, c-format msgid "Logical volume %s/%s does not support tags" msgstr "" #: lvchange.c:512 lvcreate.c:746 #, c-format msgid "Failed to add tag %s to %s/%s" msgstr "" #: lvchange.c:518 #, c-format msgid "Failed to remove tag %s from %s/%s" msgstr "" #: lvchange.c:551 #, c-format msgid "Only -a permitted with read-only volume group \"%s\"" msgstr "" #: lvchange.c:560 #, c-format msgid "Can't change logical volume \"%s\" under snapshot" msgstr "" #: lvchange.c:566 #, c-format msgid "Can't change snapshot logical volume \"%s\"" msgstr "" #: lvchange.c:572 #, c-format msgid "Unable to change pvmove LV %s" msgstr "" #: lvchange.c:574 msgid "Use 'pvmove --abort' to abandon a pvmove" msgstr "" #: lvchange.c:579 #, c-format msgid "Unable to change mirror log LV %s directly" msgstr "" #: lvchange.c:584 #, c-format msgid "Unable to change mirror image LV %s directly" msgstr "" #: lvchange.c:590 #, c-format msgid "Unable to change internal LV %s directly" msgstr "" #: lvchange.c:648 #, c-format msgid "Logical volume \"%s\" changed" msgstr "" #: lvchange.c:683 msgid "" "Need 1 or more of -a, -C, -j, -m, -M, -p, -r, --resync, --refresh, --alloc, " "--addtag, --deltag or --monitor" msgstr "" #: lvchange.c:694 msgid "Only -a permitted with --ignorelockingfailure" msgstr "" #: lvchange.c:699 msgid "Please give logical volume path(s)" msgstr "" #: lvchange.c:705 msgid "--major and --minor require -My" msgstr "" #: lvchange.c:710 msgid "Only give one logical volume when specifying minor" msgstr "" #: lvchange.c:715 msgid "Only one of --alloc and --contiguous permitted" msgstr "" #: lvconvert.c:50 lvcreate.c:69 msgid "Please specify a logical volume to act as the snapshot origin." msgstr "" #: lvconvert.c:58 lvcreate.c:77 msgid "The origin name should include the volume group." 
msgstr "" #: lvconvert.c:69 msgid "Please provide logical volume path" msgstr "" #: lvconvert.c:79 lvrename.c:38 #, c-format msgid "Please use a single volume group name (\"%s\" or \"%s\")" msgstr "" #: lvconvert.c:88 lvrename.c:52 msgid "Please provide a valid volume group name" msgstr "" #: lvconvert.c:110 msgid "Exactly one of --mirrors or --snapshot arguments required." msgstr "" #: lvconvert.c:129 msgid "--regionsize is only available with mirrors" msgstr "" #: lvconvert.c:134 lvcreate.c:336 msgid "Negative chunk size is invalid" msgstr "" #: lvconvert.c:140 lvcreate.c:342 msgid "Chunk size must be a power of 2 in the range 4K to 512K" msgstr "" #: lvconvert.c:144 lvcreate.c:346 #, c-format msgid "Setting chunksize to %d sectors." msgstr "" #: lvconvert.c:156 msgid "--chunksize is only available with snapshots" msgstr "" #: lvconvert.c:162 msgid "--zero is only available with snapshots" msgstr "" #: lvconvert.c:174 lvcreate.c:253 msgid "Negative regionsize is invalid" msgstr "" #: lvconvert.c:184 lvcreate.c:262 msgid "Negative regionsize in configuration file is invalid" msgstr "" #: lvconvert.c:192 lvcreate.c:276 #, c-format msgid "Region size (%u) must be a multiple of machine memory page size (%d)" msgstr "" #: lvconvert.c:200 lvcreate.c:270 #, c-format msgid "Region size (%u) must be a power of 2" msgstr "" #: lvconvert.c:206 lvcreate.c:283 msgid "Non-zero region size must be supplied." msgstr "" #: lvconvert.c:216 lvcreate.c:390 metadata/mirror.c:566 #, c-format msgid "%s: Required device-mapper target(s) not detected in your kernel" msgstr "" #: lvconvert.c:249 #, c-format msgid "Logical volume %s only has %u mirrors." msgstr "" #: lvconvert.c:259 msgid "Mirror log region size cannot be changed on an existing mirror." msgstr "" #: lvconvert.c:266 #, c-format msgid "Logical volume %s is already not mirrored." msgstr "" #: lvconvert.c:277 #, c-format msgid "Logical volume %s has multiple mirror segments." msgstr "" #: lvconvert.c:287 lvconvert.c:320 msgid "Unable to determine mirror sync status." msgstr "" #: lvconvert.c:311 lvconvert.c:389 lvcreate.c:721 msgid "Failed to create mirror log." msgstr "" #: lvconvert.c:335 #, c-format msgid "Logical volume %s already has %u mirror(s)." msgstr "" #: lvconvert.c:346 msgid "Adding mirror images is not supported yet." msgstr "" #: lvconvert.c:363 msgid "Mirrors of striped volumes are not yet supported." msgstr "" #: lvconvert.c:419 metadata/mirror.c:237 #, c-format msgid "Updating \"%s\" in kernel" msgstr "" #: lvconvert.c:426 #, c-format msgid "Logical volume %s converted." msgstr "" #: lvconvert.c:438 lvcreate.c:608 #, c-format msgid "Couldn't find origin volume '%s'." msgstr "" #: lvconvert.c:443 #, c-format msgid "Unable to create a snapshot of a %s LV." msgstr "" #: lvconvert.c:450 lvcreate.c:799 #, c-format msgid "WARNING: \"%s\" not zeroed" msgstr "" #: lvconvert.c:452 msgid "Aborting. Failed to wipe snapshot exception store." msgstr "" #: lvconvert.c:458 #, c-format msgid "Couldn't deactivate LV %s." msgstr "" #: lvconvert.c:464 lvcreate.c:812 msgid "Couldn't create snapshot." msgstr "" #: lvconvert.c:475 lvcreate.c:821 #, c-format msgid "Failed to suspend origin %s" msgstr "" #: lvconvert.c:484 lvcreate.c:830 #, c-format msgid "Problem reactivating origin %s" msgstr "" #: lvconvert.c:488 #, c-format msgid "Logical volume %s converted to snapshot." 
msgstr "" #: lvconvert.c:499 #, c-format msgid "Cannot convert locked LV %s" msgstr "" #: lvconvert.c:504 #, c-format msgid "Can't convert logical volume \"%s\" under snapshot" msgstr "" #: lvconvert.c:510 #, c-format msgid "Can't convert snapshot logical volume \"%s\"" msgstr "" #: lvconvert.c:516 #, c-format msgid "Unable to convert pvmove LV %s" msgstr "" #: lvconvert.c:548 lvrename.c:100 vgrename.c:62 #, c-format msgid "Checking for existing volume group \"%s\"" msgstr "" #: lvconvert.c:551 lvcreate.c:863 lvrename.c:103 lvresize.c:613 pvchange.c:59 #: pvmove.c:59 pvresize.c:69 vgcreate.c:140 vgextend.c:53 vgmerge.c:34 #: vgmerge.c:65 vgreduce.c:476 vgrename.c:94 vgrename.c:133 vgsplit.c:240 #: vgsplit.c:277 #, c-format msgid "Can't get lock for %s" msgstr "" #: lvconvert.c:556 lvcreate.c:492 lvrename.c:108 pvmove.c:64 vgdisplay.c:24 #: vgmerge.c:39 vgmerge.c:72 vgreduce.c:482 vgsplit.c:245 #, c-format msgid "Volume group \"%s\" doesn't exist" msgstr "" #: lvconvert.c:562 lvcreate.c:498 lvrename.c:114 lvresize.c:146 pvchange.c:72 #: pvdisplay.c:41 pvmove.c:71 pvresize.c:83 reporter.c:76 reporter.c:124 #: toollib.c:363 toollib.c:383 toollib.c:490 toollib.c:741 vgextend.c:64 #: vgmerge.c:46 vgmerge.c:78 vgreduce.c:489 vgreduce.c:511 vgrename.c:107 #: vgsplit.c:252 #, c-format msgid "Skipping clustered volume group %s" msgstr "" #: lvconvert.c:567 lvcreate.c:503 lvrename.c:119 metadata/metadata.c:1377 #: polldaemon.c:195 pvchange.c:78 pvmove.c:76 pvresize.c:89 toollib.c:163 #: vgchange.c:534 vgck.c:34 vgconvert.c:54 vgextend.c:69 vgmerge.c:52 #: vgmerge.c:83 vgreduce.c:541 vgremove.c:35 vgrename.c:113 vgsplit.c:258 #, c-format msgid "Volume group \"%s\" is exported" msgstr "" #: lvconvert.c:572 lvcreate.c:508 lvremove.c:28 lvrename.c:124 pvchange.c:84 #: pvmove.c:82 pvresize.c:95 vgchange.c:529 vgconvert.c:49 vgexport.c:42 #: vgextend.c:74 vgmerge.c:58 vgmerge.c:88 vgreduce.c:547 vgrename.c:117 #: vgsplit.c:270 #, c-format msgid "Volume group \"%s\" is read-only" msgstr "" #: lvconvert.c:577 #, c-format msgid "Logical volume \"%s\" not found in volume group \"%s\"" msgstr "" #: lvcreate.c:93 lvresize.c:105 msgid "Please provide a volume group name" msgstr "" #: lvcreate.c:100 msgid "Volume group name expected (no slash)" msgstr "" #: lvcreate.c:115 #, c-format msgid "Inconsistent volume group names given: \"%s\" and \"%s\"" msgstr "" #: lvcreate.c:138 #, c-format msgid "Logical volume name \"%s\" is invalid" msgstr "" #: lvcreate.c:151 lvresize.c:65 msgid "Please specify either size or extents (not both)" msgstr "" #: lvcreate.c:157 msgid "Negative number of extents is invalid" msgstr "" #: lvcreate.c:167 msgid "Negative size is invalid" msgstr "" #: lvcreate.c:189 msgid "Negative stripesize is invalid" msgstr "" #: lvcreate.c:194 lvresize.c:192 #, c-format msgid "Stripe size cannot be larger than %s" msgstr "" #: lvcreate.c:202 msgid "Ignoring stripesize argument with single stripe" msgstr "" #: lvcreate.c:210 lvresize.c:330 #, c-format msgid "Using default stripesize %s" msgstr "" #: lvcreate.c:215 #, c-format msgid "Too few physical volumes on command line for %d-way striping" msgstr "" #: lvcreate.c:221 #, c-format msgid "Number of stripes (%d) must be between %d and %d" msgstr "" #: lvcreate.c:229 lvresize.c:407 #, c-format msgid "Invalid stripe size %s" msgstr "" #: lvcreate.c:246 #, c-format msgid "Too few physical volumes on command line for %d-way mirroring" msgstr "" #: lvcreate.c:309 msgid "Redundant stripes argument: default is 1" msgstr "" #: lvcreate.c:323 msgid "Redundant 
mirrors argument: default is 0" msgstr "" #: lvcreate.c:325 lvresize.c:180 msgid "Mirrors argument may not be negative" msgstr "" #: lvcreate.c:332 msgid "-Z is incompatible with snapshots" msgstr "" #: lvcreate.c:354 msgid "-c is only available with snapshots" msgstr "" #: lvcreate.c:361 msgid "mirrors and snapshots are currently incompatible" msgstr "" #: lvcreate.c:367 msgid "mirrors and stripes are currently incompatible" msgstr "" #: lvcreate.c:378 msgid "--corelog is only available with mirrors" msgstr "" #: lvcreate.c:383 msgid "--nosync is only available with mirrors" msgstr "" #: lvcreate.c:419 msgid "Conflicting contiguous and alloc arguments" msgstr "" #: lvcreate.c:448 msgid "Please specify minor number with --minor when using -My" msgstr "" #: lvcreate.c:453 msgid "Please specify major number with --major when using -My" msgstr "" #: lvcreate.c:459 msgid "--major and --minor incompatible with -Mn" msgstr "" #: lvcreate.c:489 pvmove.c:305 toollib.c:481 vgreduce.c:474 #, c-format msgid "Finding volume group \"%s\"" msgstr "" #: lvcreate.c:513 lvrename.c:129 #, c-format msgid "Logical volume \"%s\" already exists in volume group \"%s\"" msgstr "" #: lvcreate.c:519 msgid "Metadata does not support mirroring." msgstr "" #: lvcreate.c:536 #, c-format msgid "Reducing requested stripe size %s to maximum, physical extent size %s" msgstr "" #: lvcreate.c:547 #, c-format msgid "Stripe size may not exceed %s" msgstr "" #: lvcreate.c:559 lvresize.c:237 #, c-format msgid "Rounding up size to full physical extent %s" msgstr "" #: lvcreate.c:564 #, c-format msgid "Volume too large (%s) for extent size %s. Upper limit is %s." msgstr "" #: lvcreate.c:583 #, c-format msgid "Please express size as %%VG or %%FREE." msgstr "" #: lvcreate.c:590 #, c-format msgid "Rounding size (%d extents) up to stripe boundary size (%d extents)" msgstr "" #: lvcreate.c:598 msgid "Can't create snapshot without using device-mapper kernel driver" msgstr "" #: lvcreate.c:604 msgid "Clustered snapshots are not yet supported." msgstr "" #: lvcreate.c:613 msgid "Snapshots of snapshots are not supported yet." msgstr "" #: lvcreate.c:618 msgid "Snapshots of locked devices are not supported yet" msgstr "" #: lvcreate.c:625 msgid "Snapshots and mirrors may not yet be mixed." msgstr "" #: lvcreate.c:634 msgid "Unable to create new logical volume with no extents" msgstr "" #: lvcreate.c:640 #, c-format msgid "Insufficient free extents (%u) in volume group %s: %u required" msgstr "" #: lvcreate.c:646 #, c-format msgid "Number of stripes (%u) must not exceed number of physical volumes (%d)" msgstr "" #: lvcreate.c:653 msgid "Can't create mirror without using device-mapper kernel driver." msgstr "" #: lvcreate.c:672 msgid "Failed to generate LV name." msgstr "" #: lvcreate.c:685 vgchange.c:445 #, c-format msgid "Volume group %s does not support tags" msgstr "" #: lvcreate.c:709 msgid "" "WARNING: New mirror won't be synchronised. Don't read what you didn't write!" msgstr "" #: lvcreate.c:733 msgid "Setting read ahead sectors" msgstr "" #: lvcreate.c:741 #, c-format msgid "Setting device number to (%d, %d)" msgstr "" #: lvcreate.c:782 msgid "" "Aborting. Failed to activate snapshot exception store. Remove new LV and " "retry." msgstr "" #: lvcreate.c:787 msgid "Failed to activate new LV." msgstr "" #: lvcreate.c:794 msgid "" "Aborting. Failed to wipe snapshot exception store. Remove new LV and retry." 
msgstr "" #: lvcreate.c:837 #, c-format msgid "Logical volume \"%s\" created" msgstr "" #: lvdisplay.c:39 lvdisplay.c:48 pvdisplay.c:89 pvdisplay.c:99 vgdisplay.c:67 #: vgdisplay.c:76 msgid "Incompatible options selected" msgstr "" #: lvdisplay.c:53 msgid "Options -v and -c are incompatible" msgstr "" #: lvmchange.c:21 msgid "With LVM2 and the device mapper, this program is obsolete." msgstr "" #: lvmcmdline.c:289 msgid "Minor number outside range 0-255" msgstr "" #: lvmcmdline.c:304 msgid "Major number outside range 0-255" msgstr "" #: lvmcmdline.c:402 msgid "Couldn't allocate memory." msgstr "" #: lvmcmdline.c:451 msgid "Out of memory." msgstr "" #: lvmcmdline.c:504 #, c-format msgid "" "%s: %s\n" "\n" "%s" msgstr "" #: lvmcmdline.c:598 msgid "Unrecognised option." msgstr "" #: lvmcmdline.c:604 #, c-format msgid "Option%s%c%s%s may not be repeated" msgstr "" #: lvmcmdline.c:613 msgid "Option requires argument." msgstr "" #: lvmcmdline.c:620 #, c-format msgid "Invalid argument %s" msgstr "" #: lvmcmdline.c:639 #, c-format msgid "%s and %s are synonyms. Please only supply one." msgstr "" #: lvmcmdline.c:667 #, c-format msgid "LVM version: %s" msgstr "" #: lvmcmdline.c:669 #, c-format msgid "Library version: %s" msgstr "" #: lvmcmdline.c:671 #, c-format msgid "Driver version: %s" msgstr "" #: lvmcmdline.c:706 msgid "Partial mode. Incomplete volume groups will be activated read-only." msgstr "" #: lvmcmdline.c:729 msgid "--trustcache is incompatible with --all" msgstr "" #: lvmcmdline.c:733 msgid "" "WARNING: Cache file of PVs will be trusted. New devices holding PVs may get " "ignored." msgstr "" #: lvmcmdline.c:767 msgid "Available lvm commands:" msgstr "" #: lvmcmdline.c:768 msgid "Use 'lvm help ' for more information" msgstr "" #: lvmcmdline.c:774 #, c-format msgid "%-16.16s%s" msgstr "" #: lvmcmdline.c:794 msgid "Failed to set overridden configuration entries." msgstr "" #: lvmcmdline.c:858 msgid "Couldn't copy command line." msgstr "" #: lvmcmdline.c:871 #, c-format msgid "Parsing: %s" msgstr "" #: lvmcmdline.c:877 msgid "Error during parsing of command line." msgstr "" #: lvmcmdline.c:890 msgid "Updated config file invalid. Aborting." msgstr "" #: lvmcmdline.c:899 #, c-format msgid "Processing: %s" msgstr "" #: lvmcmdline.c:902 msgid "O_DIRECT will be used" msgstr "" #: lvmcmdline.c:915 #, c-format msgid "Locking type %d initialisation failed." msgstr "" #: lvmcmdline.c:927 msgid "Test mode: Wiping internal cache" msgstr "" #: lvmcmdline.c:951 #, c-format msgid "Completed: %s" msgstr "" #: lvmcmdline.c:1073 #, c-format msgid "Line too long (max 255) beginning: %s" msgstr "" #: lvmcmdline.c:1080 #, c-format msgid "Too many arguments: %s" msgstr "" #: lvmcmdline.c:1125 msgid "Failed to create LVM1 tool pathname" msgstr "" #: lvmcmdline.c:1173 msgid "Falling back to LVM1 tools, but no command specified." msgstr "" #: lvmcmdline.c:1189 msgid "Please supply an LVM command." msgstr "" #: lvmcmdline.c:1203 msgid "No such command. Try 'help'." 
msgstr "" #: lvmdiskscan.c:38 lvmdiskscan.c:108 msgid "dev_iter_create failed" msgstr "" #: lvmdiskscan.c:66 #, c-format msgid "%-*s [%15s] %s" msgstr "" #: lvmdiskscan.c:83 lvmdiskscan.c:117 #, c-format msgid "Couldn't get size of \"%s\"" msgstr "" #: lvmdiskscan.c:88 #, c-format msgid "dev_close on \"%s\" failed" msgstr "" #: lvmdiskscan.c:103 msgid "WARNING: only considering LVM devices" msgstr "" #: lvmdiskscan.c:137 #, c-format msgid "%d disk%s" msgstr "" #: lvmdiskscan.c:139 #, c-format msgid "%d partition%s" msgstr "" #: lvmdiskscan.c:142 #, c-format msgid "%d LVM physical volume whole disk%s" msgstr "" #: lvmdiskscan.c:144 #, c-format msgid "%d LVM physical volume%s" msgstr "" #: lvremove.c:33 #, c-format msgid "Can't remove logical volume \"%s\" under snapshot" msgstr "" #: lvremove.c:39 #, c-format msgid "Can't remove logical volume %s used by a mirror" msgstr "" #: lvremove.c:45 #, c-format msgid "Can't remove logical volume %s used as mirror log" msgstr "" #: lvremove.c:51 #, c-format msgid "Can't remove locked LV %s" msgstr "" #: lvremove.c:59 #, c-format msgid "Can't remove open logical volume \"%s\"" msgstr "" #: lvremove.c:68 #, c-format msgid "Logical volume \"%s\" not removed" msgstr "" #: lvremove.c:82 #, c-format msgid "Can't get exclusive access to volume \"%s\"" msgstr "" #: lvremove.c:90 #, c-format msgid "Unable to deactivate logical volume \"%s\"" msgstr "" #: lvremove.c:97 #, c-format msgid "Removing snapshot %s" msgstr "" #: lvremove.c:104 #, c-format msgid "Releasing logical volume \"%s\"" msgstr "" #: lvremove.c:106 #, c-format msgid "Error releasing logical volume \"%s\"" msgstr "" #: lvremove.c:122 #, c-format msgid "Failed to refresh %s without snapshot." msgstr "" #: lvremove.c:124 #, c-format msgid "Failed to resume %s." msgstr "" #: lvremove.c:127 #, c-format msgid "Logical volume \"%s\" successfully removed" msgstr "" #: lvremove.c:134 msgid "Please enter one or more logical volume paths" msgstr "" #: lvrename.c:47 msgid "Old and new logical volume names required" msgstr "" #: lvrename.c:59 #, c-format msgid "Logical volume names must have the same volume group (\"%s\" or \"%s\")" msgstr "" #: lvrename.c:74 #, c-format msgid "New logical volume path exceeds maximum length of %zu!" 
msgstr "" #: lvrename.c:80 msgid "New logical volume name may not be blank" msgstr "" #: lvrename.c:90 #, c-format msgid "New logical volume name \"%s\" is invalid" msgstr "" #: lvrename.c:96 msgid "Old and new logical volume names must differ" msgstr "" #: lvrename.c:135 #, c-format msgid "Existing logical volume \"%s\" not found in volume group \"%s\"" msgstr "" #: lvrename.c:143 #, c-format msgid "Cannot rename locked LV %s" msgstr "" #: lvrename.c:150 lvrename.c:158 #, c-format msgid "Mirrored LV, \"%s\" cannot be renamed: %s" msgstr "" #: lvrename.c:169 msgid "Failed to allocate space for new name" msgstr "" #: lvrename.c:173 vgmerge.c:223 vgrename.c:165 msgid "Writing out updated volume group" msgstr "" #: lvrename.c:197 #, c-format msgid "Renamed \"%s\" to \"%s\" in volume group \"%s\"" msgstr "" #: lvresize.c:83 msgid "Negative argument not permitted - use lvreduce" msgstr "" #: lvresize.c:88 msgid "Positive sign not permitted - use lvextend" msgstr "" #: lvresize.c:96 msgid "Please provide the logical volume name" msgstr "" #: lvresize.c:140 #, c-format msgid "Volume group %s doesn't exist" msgstr "" #: lvresize.c:151 #, c-format msgid "Volume group %s is exported" msgstr "" #: lvresize.c:156 #, c-format msgid "Volume group %s is read-only" msgstr "" #: lvresize.c:162 #, c-format msgid "Logical volume %s not found in volume group %s" msgstr "" #: lvresize.c:171 msgid "Varied striping not supported. Ignoring." msgstr "" #: lvresize.c:178 msgid "Mirrors not supported. Ignoring." msgstr "" #: lvresize.c:187 msgid "Stripesize may not be negative." msgstr "" #: lvresize.c:198 msgid "Varied stripesize not supported. Ignoring." msgstr "" #: lvresize.c:200 #, c-format msgid "Reducing stripe size %s to maximum, physical extent size %s" msgstr "" #: lvresize.c:211 msgid "Mirrors and striping cannot be combined yet." msgstr "" #: lvresize.c:215 msgid "Stripe size must be power of 2" msgstr "" #: lvresize.c:223 #, c-format msgid "Can't resize locked LV %s" msgstr "" #: lvresize.c:263 #, c-format msgid "Unable to reduce %s below 1 extent" msgstr "" #: lvresize.c:272 msgid "New size of 0 not permitted" msgstr "" #: lvresize.c:277 lvresize.c:414 #, c-format msgid "New size (%d extents) matches existing size (%d extents)" msgstr "" #: lvresize.c:291 #, c-format msgid "VolumeType does not match (%s)" msgstr "" #: lvresize.c:308 msgid "Please specify number of stripes (-i) and stripesize (-I)" msgstr "" #: lvresize.c:322 #, c-format msgid "Using stripesize of last segment %s" msgstr "" #: lvresize.c:346 #, c-format msgid "Extending %u mirror images." msgstr "" #: lvresize.c:352 msgid "Cannot vary number of mirrors in LV yet." msgstr "" #: lvresize.c:362 msgid "Ignoring stripes, stripesize and mirrors arguments when reducing" msgstr "" #: lvresize.c:391 msgid "Stripesize for striped segment should not be 0!" msgstr "" #: lvresize.c:400 #, c-format msgid "" "Rounding size (%d extents) down to stripe boundary size for segment (%d " "extents)" msgstr "" #: lvresize.c:421 #, c-format msgid "New size given (%d extents) not larger than existing size (%d extents)" msgstr "" #: lvresize.c:431 #, c-format msgid "New size given (%d extents) not less than existing size (%d extents)" msgstr "" #: lvresize.c:441 msgid "Mirrors cannot be resized while active yet." msgstr "" #: lvresize.c:447 msgid "Snapshot origin volumes cannot be reduced in size yet." 
msgstr "" #: lvresize.c:455 msgid "" "Snapshot origin volumes can be resized only while inactive: try lvchange -an" msgstr "" #: lvresize.c:463 msgid "Ignoring PVs on command line when reducing" msgstr "" #: lvresize.c:474 msgid "lv_info failed: aborting" msgstr "" #: lvresize.c:479 #, c-format msgid "Logical volume %s must be activated before resizing filesystem" msgstr "" #: lvresize.c:485 #, c-format msgid "WARNING: Reducing active%s logical volume to %s" msgstr "" #: lvresize.c:490 msgid "THIS MAY DESTROY YOUR DATA (filesystem etc.)" msgstr "" #: lvresize.c:497 #, c-format msgid "Logical volume %s NOT reduced" msgstr "" #: lvresize.c:508 #, c-format msgid "Couldn't create LV path for %s" msgstr "" #: lvresize.c:516 msgid "Couldn't generate new LV size string" msgstr "" #: lvresize.c:540 #, c-format msgid "%sing logical volume %s to %s" msgstr "" #: lvresize.c:589 #, c-format msgid "Logical volume %s successfully resized" msgstr "" #: lvresize.c:611 #, c-format msgid "Finding volume group %s" msgstr "" #: lvscan.c:64 #, c-format msgid "%s%s '%s%s/%s' [%s] %s" msgstr "" #: lvscan.c:79 msgid "No additional command line arguments allowed" msgstr "" #: metadata/lv_manip.c:96 msgid "alloc_lv_segment: Missing segtype." msgstr "" #: metadata/lv_manip.c:131 msgid "Failed to find snapshot segtype" msgstr "" #: metadata/lv_manip.c:139 msgid "Couldn't allocate new snapshot segment." msgstr "" #: metadata/lv_manip.c:280 #, c-format msgid "Segment extent reduction %unot divisible by #stripes %u" msgstr "" #: metadata/lv_manip.c:445 msgid "Striped mirrors are not supported yet" msgstr "" #: metadata/lv_manip.c:450 msgid "Can't mix striping or mirroring with creation of a mirrored PV yet" msgstr "" #: metadata/lv_manip.c:456 msgid "Can't mix striping or pvmove with a mirror log yet." msgstr "" #: metadata/lv_manip.c:471 msgid "allocation handle allocation failed" msgstr "" #: metadata/lv_manip.c:481 msgid "allocation pool creation failed" msgstr "" #: metadata/lv_manip.c:516 report/report.c:92 report/report.c:152 msgid "dm_pool_begin_object failed" msgstr "" #: metadata/lv_manip.c:523 metadata/lv_manip.c:528 metadata/lv_manip.c:535 #: report/report.c:112 report/report.c:123 report/report.c:129 #: report/report.c:135 report/report.c:159 report/report.c:165 msgid "dm_pool_grow_object failed" msgstr "" #: metadata/lv_manip.c:541 #, c-format msgid "Parallel PVs at LE %u length %u: %s" msgstr "" #: metadata/lv_manip.c:574 msgid "Couldn't allocate new LV segment." msgstr "" #: metadata/lv_manip.c:654 msgid "alloced_area allocation failed" msgstr "" #: metadata/lv_manip.c:705 #, c-format msgid "Failed to find segment for %s extent %u" msgstr "" #: metadata/lv_manip.c:907 #, c-format msgid "Insufficient free space: %u extents needed, but only %u available" msgstr "" #: metadata/lv_manip.c:1081 msgid "_allocate called with no work to do!" msgstr "" #: metadata/lv_manip.c:1105 msgid "Not enough PVs with free space available for parallel allocation." msgstr "" #: metadata/lv_manip.c:1107 msgid "Consider --alloc anywhere if desperate." msgstr "" #: metadata/lv_manip.c:1120 msgid "Couldn't allocate areas array." msgstr "" #: metadata/lv_manip.c:1137 #, c-format msgid "" "Insufficient suitable %sallocatable extents for logical volume %s: %u more " "required" msgstr "" #: metadata/lv_manip.c:1147 #, c-format msgid "Insufficient extents for log allocation for logical volume %s." msgstr "" #: metadata/lv_manip.c:1168 msgid "Couldn't allocate new zero segment." 
msgstr "" #: metadata/lv_manip.c:1201 msgid "allocate_extents does not handle virtual segments" msgstr "" #: metadata/lv_manip.c:1207 #, c-format msgid "Metadata format (%s) does not support required LV segment type (%s)." msgstr "" #: metadata/lv_manip.c:1210 msgid "Consider changing the metadata format by running vgconvert." msgstr "" #: metadata/lv_manip.c:1251 msgid "Missing segtype in lv_add_segment()." msgstr "" #: metadata/lv_manip.c:1256 msgid "lv_add_segment cannot handle virtual segments" msgstr "" #: metadata/lv_manip.c:1270 msgid "Couldn't merge segments after extending logical volume." msgstr "" #: metadata/lv_manip.c:1292 msgid "Log segments can only be added to an empty LV" msgstr "" #: metadata/lv_manip.c:1301 msgid "Couldn't allocate new mirror log segment." msgstr "" #: metadata/lv_manip.c:1339 #, c-format msgid "Log LV %s is empty." msgstr "" #: metadata/lv_manip.c:1349 msgid "Couldn't allocate new mirror segment." msgstr "" #: metadata/lv_manip.c:1384 msgid "Mirrored LV must only have one segment." msgstr "" #: metadata/lv_manip.c:1394 #, c-format msgid "Failed to allocate widened LV segment for %s." msgstr "" #: metadata/lv_manip.c:1446 #, c-format msgid "Aborting. Failed to extend %s." msgstr "" #: metadata/lv_manip.c:1499 #, c-format msgid "Maximum number of logical volumes (%u) reached in volume group %s" msgstr "" #: metadata/lv_manip.c:1506 msgid "Failed to generate unique name for the new logical volume" msgstr "" #: metadata/lv_manip.c:1512 #, c-format msgid "Creating logical volume %s" msgstr "" #: metadata/lv_manip.c:1516 msgid "lv_list allocation failed" msgstr "" #: metadata/lv_manip.c:1526 msgid "lv name strdup failed" msgstr "" #: metadata/lv_manip.c:1574 metadata/metadata.c:986 msgid "pv_list allocation failed" msgstr "" #: metadata/lv_manip.c:1596 msgid "parallel_areas allocation failed" msgstr "" #: metadata/lv_manip.c:1604 msgid "allocation failed" msgstr "" #: metadata/merge.c:72 #, c-format msgid "LV %s invalid: segment %u should begin at LE %u (found %u)." msgstr "" #: metadata/merge.c:82 #, c-format msgid "LV %s: segment %u has inconsistent area_len %u" msgstr "" #: metadata/merge.c:90 #, c-format msgid "LV %s: segment %u has log LV but is not mirrored" msgstr "" #: metadata/merge.c:97 #, c-format msgid "LV %s: segment %u log LV %s is not a mirror log" msgstr "" #: metadata/merge.c:105 #, c-format msgid "LV %s: segment %u log LV does not point back to mirror segment" msgstr "" #: metadata/merge.c:115 #, c-format msgid "LV %s: segment %u mirror image is not mirrored" msgstr "" #: metadata/merge.c:124 #, c-format msgid "LV %s: segment %u has unassigned area %u." msgstr "" #: metadata/merge.c:132 #, c-format msgid "LV %s: segment %u has inconsistent PV area %u" msgstr "" #: metadata/merge.c:141 #, c-format msgid "LV %s: segment %u has inconsistent LV area %u" msgstr "" #: metadata/merge.c:152 #, c-format msgid "LV %s: segment %u mirror image %u missing mirror ptr" msgstr "" #: metadata/merge.c:174 #, c-format msgid "LV %s: inconsistent LE count %u != %u" msgstr "" #: metadata/merge.c:195 #, c-format msgid "Unable to split the %s segment at LE %u in LV %s" msgstr "" #: metadata/merge.c:208 msgid "Couldn't allocate cloned LV segment." 
msgstr "" #: metadata/merge.c:213 msgid "LV segment tags duplication failed" msgstr "" #: metadata/merge.c:240 #, c-format msgid "Split %s:%u[%u] at %u: %s LE %u" msgstr "" #: metadata/merge.c:256 #, c-format msgid "Split %s:%u[%u] at %u: %s PE %u" msgstr "" #: metadata/merge.c:263 metadata/metadata.c:495 #, c-format msgid "Unassigned area %u found in segment" msgstr "" #: metadata/merge.c:282 #, c-format msgid "Segment with extent %u in LV %s not found" msgstr "" #: metadata/metadata.c:43 #, c-format msgid "Adding physical volume '%s' to volume group '%s'" msgstr "" #: metadata/metadata.c:47 metadata/metadata.c:1008 #, c-format msgid "pv_list allocation for '%s' failed" msgstr "" #: metadata/metadata.c:53 #, c-format msgid "%s not identified as an existing physical volume" msgstr "" #: metadata/metadata.c:59 #, c-format msgid "Physical volume '%s' is already in volume group '%s'" msgstr "" #: metadata/metadata.c:65 #, c-format msgid "Physical volume %s is of different format type (%s)" msgstr "" #: metadata/metadata.c:72 #, c-format msgid "Physical volume %s might be constructed from same volume group %s" msgstr "" #: metadata/metadata.c:78 metadata/metadata.c:199 #, c-format msgid "vg->name allocation failed for '%s'" msgstr "" #: metadata/metadata.c:100 #, c-format msgid "Format-specific setup of physical volume '%s' failed." msgstr "" #: metadata/metadata.c:106 #, c-format msgid "Physical volume '%s' listed more than once." msgstr "" #: metadata/metadata.c:112 #, c-format msgid "No space for '%s' - volume group '%s' holds max %d physical volume(s)." msgstr "" #: metadata/metadata.c:127 #, c-format msgid "Unable to add %s to %s: new extent count (%lu) exceeds limit (%u)." msgstr "" #: metadata/metadata.c:148 msgid "PV tags duplication failed" msgstr "" #: metadata/metadata.c:170 #, c-format msgid "get_pv_from_vg_by_id: vg_read failed to read VG %s" msgstr "" #: metadata/metadata.c:176 #, c-format msgid "Warning: Volume group %s is not consistent" msgstr "" #: metadata/metadata.c:205 #, c-format msgid "pv->vg_name allocation failed for '%s'" msgstr "" #: metadata/metadata.c:222 #, c-format msgid "Unable to add physical volume '%s' to volume group '%s'." msgstr "" #: metadata/metadata.c:260 #, c-format msgid "A volume group called '%s' already exists." msgstr "" #: metadata/metadata.c:266 #, c-format msgid "Couldn't create uuid for volume group '%s'." msgstr "" #: metadata/metadata.c:309 metadata/metadata.c:1085 metadata/metadata.c:1151 msgid "Failed to create format instance" msgstr "" #: metadata/metadata.c:315 #, c-format msgid "Format specific setup of volume group '%s' failed." msgstr "" #: metadata/metadata.c:338 #, c-format msgid "New size %lu for %s%s not an exact number of new extents." msgstr "" #: metadata/metadata.c:346 #, c-format msgid "New extent count %lu for %s%s exceeds 32 bits." msgstr "" #: metadata/metadata.c:556 #, c-format msgid "Failed to create random uuid for %s." msgstr "" #: metadata/metadata.c:575 pvresize.c:128 #, c-format msgid "WARNING: %s: Overriding real size. You could lose data." msgstr "" #: metadata/metadata.c:577 #, c-format msgid "%s: Pretending size is %lu sectors." msgstr "" #: metadata/metadata.c:583 pvresize.c:136 #, c-format msgid "%s: Size must exceed minimum of %ld sectors." msgstr "" #: metadata/metadata.c:601 #, c-format msgid "%s: Format-specific setup of physical volume failed." 
msgstr "" #: metadata/metadata.c:699 #, c-format msgid "Physical volume %s not found" msgstr "" #: metadata/metadata.c:704 #, c-format msgid "Physical volume %s not in a volume group" msgstr "" #: metadata/metadata.c:780 #, c-format msgid "Internal error: Duplicate PV id %s detected for %s in %s." msgstr "" #: metadata/metadata.c:789 #, c-format msgid "Internal error: VG name for PV %s is corrupted" msgstr "" #: metadata/metadata.c:796 metadata/metadata.c:1278 #, c-format msgid "Internal error: PV segments corrupted in %s." msgstr "" #: metadata/metadata.c:806 #, c-format msgid "Internal error: Duplicate LV name %s detected in %s." msgstr "" #: metadata/metadata.c:816 #, c-format msgid "Internal error: Duplicate LV id %s detected for %s and %s in %s." msgstr "" #: metadata/metadata.c:827 metadata/metadata.c:1285 #, c-format msgid "Internal error: LV segments corrupted in %s." msgstr "" #: metadata/metadata.c:851 #, c-format msgid "Cannot change metadata for partial volume group %s" msgstr "" #: metadata/metadata.c:857 msgid "Aborting vg_write: No metadata areas to write to!" msgstr "" #: metadata/metadata.c:866 msgid "Format does not support writing volumegroup metadata areas" msgstr "" #: metadata/metadata.c:969 msgid "vg allocation failed" msgstr "" #: metadata/metadata.c:977 msgid "vg name allocation failed" msgstr "" #: metadata/metadata.c:1049 msgid "Internal error: vg_read requires vgname with pre-commit." msgstr "" #: metadata/metadata.c:1113 metadata/metadata.c:1122 #, c-format msgid "Cached VG %s had incorrect PV list" msgstr "" #: metadata/metadata.c:1201 #, c-format msgid "Inconsistent pre-commit metadata copies for volume group %s" msgstr "" #: metadata/metadata.c:1212 #, c-format msgid "Inconsistent metadata copies found for partial volume group %s" msgstr "" #: metadata/metadata.c:1220 #, c-format msgid "Inconsistent metadata UUIDs found for volume group %s" msgstr "" #: metadata/metadata.c:1226 #, c-format msgid "Inconsistent metadata found for VG %s - updating to use version %u" msgstr "" #: metadata/metadata.c:1230 msgid "Automatic metadata correction failed" msgstr "" #: metadata/metadata.c:1235 msgid "Automatic metadata correction commit failed" msgstr "" #: metadata/metadata.c:1247 #, c-format msgid "Removing PV %s (%s) that no longer belongs to VG %s" msgstr "" #: metadata/metadata.c:1257 #, c-format msgid "WARNING: Interrupted pvmove detected in volume group %s" msgstr "" #: metadata/metadata.c:1259 msgid "Please restore the metadata by running vgcfgrestore." 
msgstr "" #: metadata/metadata.c:1316 metadata/metadata.c:1348 #, c-format msgid "Volume group %s metadata is inconsistent" msgstr "" #: metadata/metadata.c:1335 msgid "vg_read_by_vgid: get_vgs failed" msgstr "" #: metadata/metadata.c:1369 #, c-format msgid "Finding volume group for uuid %s" msgstr "" #: metadata/metadata.c:1371 #, c-format msgid "Volume group for uuid not found: %s" msgstr "" #: metadata/metadata.c:1375 #, c-format msgid "Found volume group \"%s\"" msgstr "" #: metadata/metadata.c:1381 #, c-format msgid "Can't find logical volume id %s" msgstr "" #: metadata/metadata.c:1405 #, c-format msgid "No physical volume label read from %s" msgstr "" #: metadata/metadata.c:1415 #, c-format msgid "pv allocation for '%s' failed" msgstr "" #: metadata/metadata.c:1424 #, c-format msgid "Failed to read existing physical volume '%s'" msgstr "" #: metadata/metadata.c:1466 msgid "PV list allocation failed" msgstr "" #: metadata/metadata.c:1474 msgid "get_pvs: get_vgs failed" msgstr "" #: metadata/metadata.c:1498 #, c-format msgid "Warning: Volume Group %s is not consistent" msgstr "" #: metadata/metadata.c:1516 msgid "Format does not support writing physical volumes" msgstr "" #: metadata/metadata.c:1521 #, c-format msgid "Assertion failed: can't _pv_write non-orphan PV (in VG %s)" msgstr "" #: metadata/metadata.c:1547 vgreduce.c:410 #, c-format msgid "" "Failed to clear metadata from physical volume \"%s\" after removal from \"%s" "\"" msgstr "" #: metadata/metadata.c:1570 pvcreate.c:81 #, c-format msgid "Device %s not found (or ignored by filtering)." msgstr "" #: metadata/metadata.c:1579 #, c-format msgid "Could not find LVM label on %s" msgstr "" #: metadata/metadata.c:1584 #, c-format msgid "Found label on %s, sector %lu, type=%s" msgstr "" #: metadata/mirror.c:52 mirror/mirrored.c:322 #, c-format msgid "Using reduced mirror region size of %u sectors" msgstr "" #: metadata/mirror.c:94 msgid "Aborting. Unable to tag." msgstr "" #: metadata/mirror.c:100 msgid "Intermediate VG commit for orphan volume failed." msgstr "" #: metadata/mirror.c:138 #, c-format msgid "Reducing mirror set from %u to %u image(s)%s." msgstr "" #: metadata/mirror.c:183 msgid "No mirror images found using specified PVs." msgstr "" #: metadata/mirror.c:222 msgid "intermediate VG write failed." msgstr "" #: metadata/mirror.c:277 msgid "Bad activation/mirror_log_fault_policy" msgstr "" #: metadata/mirror.c:279 msgid "Bad activation/mirror_device_fault_policy" msgstr "" #: metadata/mirror.c:317 #, c-format msgid "WARNING: Failed to replace mirror device in %s/%s" msgstr "" #: metadata/mirror.c:321 #, c-format msgid "" "WARNING: Use 'lvconvert -m %d %s/%s --corelog' to replace failed devices" msgstr "" #: metadata/mirror.c:324 metadata/mirror.c:341 #, c-format msgid "WARNING: Use 'lvconvert -m %d %s/%s' to replace failed devices" msgstr "" #: metadata/mirror.c:338 #, c-format msgid "WARNING: Failed to replace mirror log device in %s/%s" msgstr "" #: metadata/mirror.c:362 #, c-format msgid "WARNING: Unable to determine mirror sync status of %s/%s." msgstr "" #: metadata/mirror.c:380 #, c-format msgid "WARNING: Bad device removed from mirror volume, %s/%s" msgstr "" #: metadata/mirror.c:393 #, c-format msgid "WARNING: Unable to find substitute device for mirror volume, %s/%s" msgstr "" #: metadata/mirror.c:397 #, c-format msgid "" "WARNING: Mirror volume, %s/%s restored - substitute for failed device found." 
msgstr "" #: metadata/mirror.c:402 #, c-format msgid "" "WARNING: Mirror volume, %s/%s converted to linear due to device failure." msgstr "" #: metadata/mirror.c:405 #, c-format msgid "WARNING: Mirror volume, %s/%s disk log removed due to device failure." msgstr "" #: metadata/mirror.c:428 metadata/mirror.c:434 msgid "img_name allocation failed. Remove new LV and retry." msgstr "" #: metadata/mirror.c:443 msgid "Aborting. Failed to create mirror image LV. Remove new LV and retry." msgstr "" #: metadata/mirror.c:455 #, c-format msgid "" "Aborting. Failed to add mirror image segment to %s. Remove new LV and retry." msgstr "" #: metadata/mirror.c:477 metadata/mirror.c:518 msgid "img_lvs allocation failed. Remove new LV and retry." msgstr "" #: metadata/mirror.c:499 msgid "Aborting. Failed to add mirror segment. Remove new LV and retry." msgstr "" #: metadata/mirror.c:632 #, c-format msgid "Matched PE range %u-%u against %s %u len %u" msgstr "" #: metadata/mirror.c:641 metadata/mirror.c:872 vgreduce.c:139 msgid "lv_list alloc failed" msgstr "" #: metadata/mirror.c:651 #, c-format msgid "Moving %s:%u-%u of %s/%s" msgstr "" #: metadata/mirror.c:664 msgid "Unable to allocate temporary LV for pvmove." msgstr "" #: metadata/mirror.c:679 #, c-format msgid "Moving %u extents of logical volume %s/%s" msgstr "" #: metadata/mirror.c:711 msgid "No segment found with LE" msgstr "" #: metadata/mirror.c:722 msgid "Incompatible segments" msgstr "" #: metadata/mirror.c:747 msgid "Missing error segtype" msgstr "" #: metadata/mirror.c:853 msgid "lvs list alloc failed" msgstr "" #: metadata/pv_manip.c:30 msgid "pv_segment allocation failed" msgstr "" #: metadata/pv_manip.c:121 #, c-format msgid "Segment with extent %u in PV %s not found" msgstr "" #: metadata/pv_manip.c:161 #, c-format msgid "Missing PV segment on %s at %u." msgstr "" #: metadata/pv_manip.c:178 #, c-format msgid "release_pv_segment with unallocated segment: %s PE %u" msgstr "" #: metadata/pv_manip.c:238 #, c-format msgid "%s %u: %6u %6u: %s(%u:%u)" msgstr "" #: metadata/pv_manip.c:244 #, c-format msgid "Gap in pvsegs: %u, %u" msgstr "" #: metadata/pv_manip.c:250 msgid "Wrong lvseg area type" msgstr "" #: metadata/pv_manip.c:254 msgid "Inconsistent pvseg pointers" msgstr "" #: metadata/pv_manip.c:258 #, c-format msgid "Inconsistent length: %u %u" msgstr "" #: metadata/pv_manip.c:269 #, c-format msgid "PV segment pe_count mismatch: %u != %u" msgstr "" #: metadata/pv_manip.c:275 #, c-format msgid "PV segment pe_alloc_count mismatch: %u != %u" msgstr "" #: metadata/pv_manip.c:285 #, c-format msgid "PV segment VG pv_count mismatch: %u != %u" msgstr "" #: metadata/pv_manip.c:291 #, c-format msgid "PV segment VG free_count mismatch: %u != %u" msgstr "" #: metadata/pv_manip.c:297 #, c-format msgid "PV segment VG extent_count mismatch: %u != %u" msgstr "" #: metadata/pv_manip.c:311 #, c-format msgid "%s: cannot resize to %u extents as %u are allocated." msgstr "" #: metadata/pv_manip.c:324 #, c-format msgid "%s: cannot resize to %u extents as later ones are allocated." msgstr "" #: metadata/pv_manip.c:356 #, c-format msgid "%s: cannot resize to %u extents as there is only room for %lu." msgstr "" #: metadata/pv_manip.c:385 #, c-format msgid "No change to size of physical volume %s." msgstr "" #: metadata/pv_manip.c:390 #, c-format msgid "Resizing physical volume %s from %u to %u extents." 
msgstr "" #: metadata/pv_map.c:48 #, c-format msgid "Allowing allocation on %s start PE %u length %u" msgstr "" #: metadata/pv_map.c:176 msgid "create_pv_maps alloc failed" msgstr "" #: metadata/pv_map.c:183 #, c-format msgid "Couldn't create physical volume maps in %s" msgstr "" #: metadata/segtype.c:30 #, c-format msgid "Unrecognised segment type %s" msgstr "" #: metadata/snapshot_manip.c:63 #, c-format msgid "'%s' is already in use as a snapshot." msgstr "" #: metadata/snapshot_manip.c:104 #, c-format msgid "Failed to remove internal snapshot LV %s" msgstr "" #: mirror/mirrored.c:57 #, c-format msgid " Mirrors\t\t%u" msgstr "" #: mirror/mirrored.c:58 #, c-format msgid " Mirror size\t\t%u" msgstr "" #: mirror/mirrored.c:60 #, c-format msgid " Mirror log volume\t%s" msgstr "" #: mirror/mirrored.c:65 #, c-format msgid " Mirror region size\t%s" msgstr "" #: mirror/mirrored.c:68 msgid " Mirror original:" msgstr "" #: mirror/mirrored.c:70 msgid " Mirror destinations:" msgstr "" #: mirror/mirrored.c:79 #, c-format msgid "Couldn't read 'mirror_count' for segment '%s'." msgstr "" #: mirror/mirrored.c:98 #, c-format msgid "Couldn't read 'extents_moved' for segment '%s'." msgstr "" #: mirror/mirrored.c:107 #, c-format msgid "Couldn't read 'region_size' for segment '%s'." msgstr "" #: mirror/mirrored.c:115 msgid "Mirror log type must be a string." msgstr "" #: mirror/mirrored.c:120 #, c-format msgid "Unrecognised mirror log in segment %s." msgstr "" #: mirror/mirrored.c:128 #, c-format msgid "Missing region size for mirror log for segment '%s'." msgstr "" #: mirror/mirrored.c:134 #, c-format msgid "Couldn't find mirrors array for segment '%s'." msgstr "" #: mirror/mirrored.c:163 msgid "struct mirr_state allocation failed" msgstr "" #: mirror/mirrored.c:193 #, c-format msgid "Mirror status: %s" msgstr "" #: mirror/mirrored.c:196 #, c-format msgid "Failure parsing mirror status mirror count: %s" msgstr "" #: mirror/mirrored.c:204 #, c-format msgid "Failure parsing mirror status devices: %s" msgstr "" #: mirror/mirrored.c:213 #, c-format msgid "Failure parsing mirror status fraction: %s" msgstr "" #: mirror/mirrored.c:245 #, c-format msgid "Failed to build uuid for log LV %s." msgstr "" #: mirror/mirrored.c:252 #, c-format msgid "Failed to build uuid for mirror LV %s." msgstr "" #: mirror/mirrored.c:310 msgid "Missing region size for mirror segment." msgstr "" #: mirror/mirrored.c:505 msgid "cluster log string list allocation failed" msgstr "" #: mirror/mirrored.c:510 msgid "mirror string list allocation failed" msgstr "" #: misc/lvm-exec.c:31 #, c-format msgid "Executing: %s %s %s %s" msgstr "" #: misc/lvm-exec.c:34 polldaemon.c:39 #, c-format msgid "fork failed: %s" msgstr "" #: misc/lvm-exec.c:48 #, c-format msgid "wait4 child process %u failed: %s" msgstr "" #: misc/lvm-exec.c:54 #, c-format msgid "Child %u exited abnormally" msgstr "" #: misc/lvm-exec.c:59 #, c-format msgid "%s failed: %u" msgstr "" #: misc/lvm-file.c:55 msgid "Not enough space to build temporary file string." msgstr "" #: misc/lvm-file.c:102 #, c-format msgid "%s: rename to %s failed" msgstr "" #: misc/lvm-file.c:148 #, c-format msgid "Creating directory \"%s\"" msgstr "" #: misc/lvm-file.c:189 #, c-format msgid "Directory \"%s\" not found" msgstr "" #: misc/lvm-file.c:220 msgid "sync_dir failed in strdup" msgstr "" #: misc/lvm-file.c:269 msgid "fcntl_lock_file failed in strdup." 
msgstr "" #: misc/lvm-file.c:283 #, c-format msgid "Locking %s (%s, %hd)" msgstr "" #: misc/lvm-file.c:313 #, c-format msgid "Unlocking fd %d" msgstr "" #: misc/lvm-file.c:316 #, c-format msgid "fcntl unlock failed on fd %d: %s" msgstr "" #: misc/lvm-file.c:320 #, c-format msgid "lock file close failed on fd %d: %s" msgstr "" #: misc/lvm-string.c:107 #, c-format msgid "build_dm_name: Allocation failed for %zu for %s %s %s." msgstr "" #: misc/sharedlib.c:48 #, c-format msgid "Not loading shared %s library %s in static mode." msgstr "" #: misc/sharedlib.c:55 #, c-format msgid "Opening shared %s library %s" msgstr "" #: misc/sharedlib.c:59 misc/sharedlib.c:62 #, c-format msgid "Unable to open external %s library %s: %s" msgstr "" #: mm/memlock.c:99 msgid "Locking memory" msgstr "" #: mm/memlock.c:108 mm/memlock.c:122 #, c-format msgid "setpriority %u failed: %s" msgstr "" #: mm/memlock.c:118 msgid "Unlocking memory" msgstr "" #: mm/memlock.c:130 #, c-format msgid "memlock_count inc to %d" msgstr "" #: mm/memlock.c:137 #, c-format msgid "memlock_count dec to %d" msgstr "" #: polldaemon.c:34 msgid "Forking background process" msgstr "" #: polldaemon.c:49 #, c-format msgid "Background process failed to setsid: %s" msgstr "" #: polldaemon.c:80 msgid "Failed to generate list of copied LVs: can't abort." msgstr "" #: polldaemon.c:90 msgid "ABORTING: Mirror percentage check failed." msgstr "" #: polldaemon.c:96 polldaemon.c:98 #, c-format msgid "%s: Moved: %.1f%%" msgstr "" #: polldaemon.c:107 msgid "ABORTING: Failed to generate list of copied LVs" msgstr "" #: polldaemon.c:119 msgid "ABORTING: Segment progression failed." msgstr "" #: polldaemon.c:149 #, c-format msgid "ABORTING: Can't reread VG for %s" msgstr "" #: polldaemon.c:156 #, c-format msgid "ABORTING: Can't find mirror LV in %s for %s" msgstr "" #: polldaemon.c:184 #, c-format msgid "Couldn't read volume group %s" msgstr "" #: polldaemon.c:189 #, c-format msgid "Volume Group %s inconsistent - skipping" msgstr "" #: polldaemon.c:241 #, c-format msgid "Checking progress every %u seconds" msgstr "" #: pvchange.c:55 #, c-format msgid "Finding volume group of physical volume \"%s\"" msgstr "" #: pvchange.c:65 pvresize.c:75 #, c-format msgid "Unable to find volume group of \"%s\"" msgstr "" #: pvchange.c:90 pvresize.c:101 #, c-format msgid "Unable to find \"%s\" in volume group \"%s\"" msgstr "" #: pvchange.c:97 #, c-format msgid "Volume group containing %s does not support tags" msgstr "" #: pvchange.c:103 #, c-format msgid "Volume group containing %s has active logical volumes" msgstr "" #: pvchange.c:112 #, c-format msgid "Can't change tag on Physical Volume %s not in volume group" msgstr "" #: pvchange.c:117 pvresize.c:48 msgid "Can't get lock for orphans" msgstr "" #: pvchange.c:123 pvresize.c:54 #, c-format msgid "Unable to read PV \"%s\"" msgstr "" #: pvchange.c:132 #, c-format msgid "Allocatability not supported by orphan %s format PV %s" msgstr "" #: pvchange.c:140 #, c-format msgid "Physical volume \"%s\" is already allocatable" msgstr "" #: pvchange.c:150 #, c-format msgid "Physical volume \"%s\" is already unallocatable" msgstr "" #: pvchange.c:160 #, c-format msgid "Setting physical volume \"%s\" allocatable" msgstr "" #: pvchange.c:164 #, c-format msgid "Setting physical volume \"%s\" NOT allocatable" msgstr "" #: pvchange.c:172 #, c-format msgid "Failed to add tag %s to physical volume %s" msgstr "" #: pvchange.c:178 #, c-format msgid "Failed to remove tag %s from physical volume%s" msgstr "" #: pvchange.c:186 #, c-format msgid 
"Failed to generate new random UUID for %s." msgstr "" #: pvchange.c:194 #, c-format msgid "Changing uuid of %s to %s." msgstr "" #: pvchange.c:201 #, c-format msgid "pv_write with new uuid failed for %s." msgstr "" #: pvchange.c:210 pvresize.c:174 #, c-format msgid "Updating physical volume \"%s\"" msgstr "" #: pvchange.c:214 pvresize.c:178 #, c-format msgid "Failed to store physical volume \"%s\" in volume group \"%s\"" msgstr "" #: pvchange.c:223 pvresize.c:187 #, c-format msgid "Failed to store physical volume \"%s\"" msgstr "" #: pvchange.c:230 pvresize.c:194 #, c-format msgid "Physical volume \"%s\" changed" msgstr "" #: pvchange.c:252 msgid "Please give exactly one option of -x, -uuid, --addtag or --deltag" msgstr "" #: pvchange.c:258 msgid "Please give a physical volume path" msgstr "" #: pvchange.c:263 msgid "Option a and PhysicalVolumePath are exclusive" msgstr "" #: pvchange.c:268 toollib.c:683 msgid "Using physical volume(s) on command line" msgstr "" #: pvchange.c:273 #, c-format msgid "Failed to read physical volume %s" msgstr "" #: pvchange.c:281 toollib.c:766 msgid "Scanning for physical volume names" msgstr "" #: pvchange.c:292 #, c-format msgid "%d physical volume%s changed / %d physical volume%s not changed" msgstr "" #: pvck.c:32 #, c-format msgid "Scanning %s" msgstr "" #: pvcreate.c:37 pvremove.c:31 #, c-format msgid "%s: Not LVM partition type: use -f to override" msgstr "" #: pvcreate.c:49 #, c-format msgid "" "Can't initialize physical volume \"%s\" of volume group \"%s\" without -ff" msgstr "" #: pvcreate.c:57 #, c-format msgid "%s: physical volume not initialized" msgstr "" #: pvcreate.c:72 pvcreate.c:168 pvremove.c:81 vgcreate.c:135 vgextend.c:40 #: vgremove.c:96 msgid "Can't get lock for orphan PVs" msgstr "" #: pvcreate.c:86 #, c-format msgid "Can't open %s exclusively. Mounted filesystem?" msgstr "" #: pvcreate.c:98 #, c-format msgid "Wiping software RAID md superblock on %s" msgstr "" #: pvcreate.c:100 #, c-format msgid "Failed to wipe RAID md superblock on %s" msgstr "" #: pvcreate.c:107 #, c-format msgid "WARNING: Forcing physical volume creation on %s%s%s%s" msgstr "" #: pvcreate.c:140 #, c-format msgid "uuid %s already in use on \"%s\"" msgstr "" #: pvcreate.c:152 #, c-format msgid "Unable to read volume group from %s" msgstr "" #: pvcreate.c:158 #, c-format msgid "Can't find uuid %s in backup file %s" msgstr "" #: pvcreate.c:176 pvresize.c:212 msgid "Physical volume size may not be negative" msgstr "" #: pvcreate.c:182 vgconvert.c:66 msgid "Metadata size may not be negative" msgstr "" #: pvcreate.c:199 pvremove.c:89 #, c-format msgid "%s: Couldn't find device. Check your filters?" 
msgstr "" #: pvcreate.c:208 vgconvert.c:127 #, c-format msgid "Failed to setup physical volume \"%s\"" msgstr "" #: pvcreate.c:212 vgconvert.c:138 #, c-format msgid "Set up physical volume for \"%s\" with %lu available sectors" msgstr "" #: pvcreate.c:217 vgconvert.c:143 #, c-format msgid "Failed to wipe existing label on %s" msgstr "" #: pvcreate.c:222 #, c-format msgid "Zeroing start of device %s" msgstr "" #: pvcreate.c:224 #, c-format msgid "%s not opened: device not zeroed" msgstr "" #: pvcreate.c:229 #, c-format msgid "%s not wiped: aborting" msgstr "" #: pvcreate.c:236 vgconvert.c:150 #, c-format msgid "Writing physical volume data to disk \"%s\"" msgstr "" #: pvcreate.c:240 vgconvert.c:155 #, c-format msgid "Failed to write physical volume \"%s\"" msgstr "" #: pvcreate.c:244 vgconvert.c:161 #, c-format msgid "Physical volume \"%s\" successfully created" msgstr "" #: pvcreate.c:261 pvremove.c:123 msgid "Please enter a physical volume path" msgstr "" #: pvcreate.c:266 msgid "--uuid is required with --restorefile" msgstr "" #: pvcreate.c:271 msgid "Can only set uuid on one volume at once" msgstr "" #: pvcreate.c:276 pvremove.c:128 msgid "Option y can only be given with option f" msgstr "" #: pvcreate.c:281 vgconvert.c:205 #, c-format msgid "labelsector must be less than %lu" msgstr "" #: pvcreate.c:289 vgconvert.c:213 msgid "Metadata parameters only apply to text format" msgstr "" #: pvcreate.c:295 vgconvert.c:219 msgid "Metadatacopies may only be 0, 1 or 2" msgstr "" #: pvdisplay.c:30 reporter.c:65 reporter.c:113 toollib.c:347 toollib.c:477 #, c-format msgid "Can't lock %s: skipping" msgstr "" #: pvdisplay.c:35 reporter.c:70 reporter.c:118 #, c-format msgid "Can't read %s: skipping" msgstr "" #: pvdisplay.c:54 #, c-format msgid "Device \"%s\" has a capacity of %s" msgstr "" #: pvdisplay.c:60 #, c-format msgid "Physical volume \"%s\" of volume group \"%s\" is exported" msgstr "" #: pvdisplay.c:64 #, c-format msgid "\"%s\" is a new physical volume of \"%s\"" msgstr "" #: pvdisplay.c:104 msgid "Option -v not allowed with option -c" msgstr "" #: pvmove.c:34 msgid "--name takes a logical volume name" msgstr "" #: pvmove.c:39 msgid "Named LV and old PV must be in the same VG" msgstr "" #: pvmove.c:45 msgid "Incomplete LV name supplied with --name" msgstr "" #: pvmove.c:127 msgid "No extents available for allocation" msgstr "" #: pvmove.c:150 msgid "Creation of temporary pvmove LV failed" msgstr "" #: pvmove.c:157 msgid "lvs_changed list struct allocation failed" msgstr "" #: pvmove.c:170 #, c-format msgid "Skipping snapshot-related LV %s" msgstr "" #: pvmove.c:174 #, c-format msgid "Skipping mirror LV %s" msgstr "" #: pvmove.c:178 #, c-format msgid "Skipping mirror log LV %s" msgstr "" #: pvmove.c:182 #, c-format msgid "Skipping mirror image LV %s" msgstr "" #: pvmove.c:186 #, c-format msgid "Skipping locked LV %s" msgstr "" #: pvmove.c:199 #, c-format msgid "No data to move for %s" msgstr "" #: pvmove.c:210 msgid "Updating volume group metadata" msgstr "" #: pvmove.c:212 pvmove.c:236 msgid "ABORTING: Volume group metadata update failed." msgstr "" #: pvmove.c:249 msgid "ABORTING: Temporary mirror activation failed. Run pvmove --abort." 
msgstr "" #: pvmove.c:257 pvmove.c:438 #, c-format msgid "Unable to reactivate logical volume \"%s\"" msgstr "" #: pvmove.c:265 msgid "Unable to resume logical volumes" msgstr "" #: pvmove.c:313 #, c-format msgid "Detected pvmove in progress for %s" msgstr "" #: pvmove.c:315 msgid "Ignoring remaining command line arguments" msgstr "" #: pvmove.c:318 msgid "ABORTING: Failed to generate list of moving LVs" msgstr "" #: pvmove.c:326 msgid "ABORTING: Temporary mirror activation failed." msgstr "" #: pvmove.c:403 msgid "ABORTING: Removal of temporary mirror failed" msgstr "" #: pvmove.c:409 pvmove.c:428 pvmove.c:462 msgid "ABORTING: Failed to write new data locations to disk." msgstr "" #: pvmove.c:416 msgid "Locking LVs to remove temporary mirror failed" msgstr "" #: pvmove.c:422 msgid "Suspension of temporary mirror LV failed" msgstr "" #: pvmove.c:448 #, c-format msgid "ABORTING: Unable to deactivate temporary logical volume \"%s\"" msgstr "" #: pvmove.c:453 msgid "Removing temporary pvmove LV" msgstr "" #: pvmove.c:455 msgid "ABORTING: Removal of temporary pvmove LV failed" msgstr "" #: pvmove.c:460 msgid "Writing out final volume group after pvmove" msgstr "" #: pvmove.c:480 #, c-format msgid "ABORTING: Can't reread PV %s" msgstr "" #: pvmove.c:516 toollib.c:1074 msgid "Failed to clone PV name" msgstr "" #: pvremove.c:41 vgsplit.c:107 #, c-format msgid "Physical Volume %s not found" msgstr "" #: pvremove.c:52 #, c-format msgid "" "Can't pvremove physical volume \"%s\" of volume group \"%s\" without -ff" msgstr "" #: pvremove.c:60 #, c-format msgid "%s: physical volume label not removed" msgstr "" #: pvremove.c:65 #, c-format msgid "WARNING: Wiping physical volume label from %s%s%s%s" msgstr "" #: pvremove.c:95 #, c-format msgid "Can't open %s exclusively - not removing. Mounted filesystem?" msgstr "" #: pvremove.c:102 #, c-format msgid "Failed to wipe existing label(s) on %s" msgstr "" #: pvremove.c:106 #, c-format msgid "Labels on physical volume \"%s\" successfully wiped" msgstr "" #: pvresize.c:60 #, c-format msgid "%s: too many metadata areas for pvresize" msgstr "" #: pvresize.c:113 #, c-format msgid "Physical volume %s format does not support resizing." msgstr "" #: pvresize.c:130 #, c-format msgid "%s: Pretending size is %lu not %lu sectors." msgstr "" #: pvresize.c:143 #, c-format msgid "%s: Size must exceed physical extent start of %lu sectors." msgstr "" #: pvresize.c:156 #, c-format msgid "" "%s: Size must leave space for at least one physical extent of %u sectors." msgstr "" #: pvresize.c:171 #, c-format msgid "Resizing volume \"%s\" to %lu sectors." 
msgstr "" #: pvresize.c:207 msgid "Please supply physical volume(s)" msgstr "" #: pvresize.c:224 #, c-format msgid "%d physical volume(s) resized / %d physical volume(s) not resized" msgstr "" #: pvscan.c:66 #, c-format msgid "PV %-*s %-*s %s [%s]" msgstr "" #: pvscan.c:76 #, c-format msgid "PV %-*s is in exported VG %s [%s / %s free]" msgstr "" #: pvscan.c:89 #, c-format msgid "PV %-*s VG %-*s %s [%s / %s free]" msgstr "" #: pvscan.c:117 msgid "Options -e and -n are incompatible" msgstr "" #: pvscan.c:122 #, c-format msgid "WARNING: only considering physical volumes %s" msgstr "" #: pvscan.c:129 msgid "Walking through all physical volumes" msgstr "" #: pvscan.c:182 msgid "No matching physical volumes found" msgstr "" #: pvscan.c:186 #, c-format msgid "Total: %d [%s] / in use: %d [%s] / in no VG: %d [%s]" msgstr "" #: report/report.c:118 msgid "Extent number dm_snprintf failed" msgstr "" #: report/report.c:182 msgid "modules str_list allocation failed" msgstr "" #: report/report.c:259 report/report.c:342 report/report.c:368 #: report/report.c:466 report/report.c:523 report/report.c:553 #: report/report.c:694 report/report.c:750 report/report.c:768 #: report/report.c:793 report/report.c:807 msgid "dm_pool_alloc failed" msgstr "" #: report/report.c:471 msgid "lvname snprintf failed" msgstr "" #: report/report.c:476 report/report.c:518 report/report.c:548 msgid "dm_pool_strdup failed" msgstr "" #: report/report.c:773 msgid "snapshot percentage too large" msgstr "" #: report/report.c:812 msgid "copy percentage too large" msgstr "" #: reporter.c:24 reporter.c:146 reporter.c:158 #, c-format msgid "Volume group %s not found" msgstr "" #: reporter.c:254 #, c-format msgid "Invalid options string: %s" msgstr "" #: reporter.c:260 msgid "options string allocation failed" msgstr "" #: reporter.c:297 msgid "Can't report LV and PV fields at the same time" msgstr "" #: snapshot/snapshot.c:40 msgid "Couldn't read chunk size for snapshot." msgstr "" #: snapshot/snapshot.c:48 msgid "Snapshot cow storage not specified." msgstr "" #: snapshot/snapshot.c:54 msgid "Snapshot origin not specified." msgstr "" #: snapshot/snapshot.c:61 msgid "Unknown logical volume specified for snapshot cow store." msgstr "" #: snapshot/snapshot.c:67 msgid "Unknown logical volume specified for snapshot origin." msgstr "" #: snapshot/snapshot.c:135 msgid "snapshot string list allocation failed" msgstr "" #: striped/striped.c:41 #, c-format msgid " Stripes\t\t%u" msgstr "" #: striped/striped.c:42 #, c-format msgid " Stripe size\t\t%u KB" msgstr "" #: striped/striped.c:45 #, c-format msgid " Stripe %d:" msgstr "" #: striped/striped.c:55 #, c-format msgid "Couldn't read 'stripe_count' for segment '%s'." msgstr "" #: striped/striped.c:70 #, c-format msgid "Couldn't read stripe_size for segment '%s'." msgstr "" #: striped/striped.c:76 #, c-format msgid "Couldn't find stripes array for segment '%s'." msgstr "" #: striped/striped.c:163 #, c-format msgid "Internal error: striped add_target_line called with no areas for %s." msgstr "" #: stub.h:24 stub.h:31 msgid "Command not implemented yet." msgstr "" #: stub.h:38 msgid "There's no 'pvdata' command in LVM2." msgstr "" #: stub.h:39 msgid "" "Use lvs, pvs, vgs instead; or use vgcfgbackup and read the text file backup." msgstr "" #: stub.h:40 msgid "" "Metadata in LVM1 format can still be displayed using LVM1's pvdata command." 
msgstr "" #: toollib.c:115 #, c-format msgid "skip_dev_dir: Couldn't split up device name %s" msgstr "" #: toollib.c:124 toollib.c:322 msgid "vg/lv string alloc failed" msgstr "" #: toollib.c:215 msgid "One or more specified logical volume(s) not found." msgstr "" #: toollib.c:251 msgid "Using logical volume(s) on command line" msgstr "" #: toollib.c:264 toollib.c:540 toollib.c:689 toollib.c:1051 #, c-format msgid "Skipping invalid tag %s" msgstr "" #: toollib.c:281 toollib.c:807 toollib.c:818 #, c-format msgid "\"%s\": Invalid path for Logical Volume" msgstr "" #: toollib.c:335 msgid "Finding all logical volumes" msgstr "" #: toollib.c:337 toollib.c:572 msgid "No volume groups found" msgstr "" #: toollib.c:357 toollib.c:483 toollib.c:731 vgcfgbackup.c:59 vgck.c:24 #: vgreduce.c:505 vgscan.c:23 #, c-format msgid "Volume group \"%s\" not found" msgstr "" #: toollib.c:369 vgchange.c:523 vgck.c:29 vgconvert.c:43 vgscan.c:30 #, c-format msgid "Volume group \"%s\" inconsistent" msgstr "" #: toollib.c:534 msgid "Using volume group(s) on command line" msgstr "" #: toollib.c:555 #, c-format msgid "Invalid volume group name: %s" msgstr "" #: toollib.c:570 msgid "Finding all volume groups" msgstr "" #: toollib.c:705 toollib.c:1080 #, c-format msgid "Physical Volume \"%s\" not found in Volume Group \"%s\"" msgstr "" #: toollib.c:716 #, c-format msgid "Failed to read physical volume \"%s\"" msgstr "" #: toollib.c:755 msgid "Using all physical volume(s) in volume group" msgstr "" #: toollib.c:825 msgid "Allocation of vg_name failed" msgstr "" #: toollib.c:835 #, c-format msgid "Path required for Logical Volume \"%s\"" msgstr "" #: toollib.c:858 #, c-format msgid "Environment Volume Group in LVM_VG_NAME invalid: \"%s\"" msgstr "" #: toollib.c:874 #, c-format msgid "Adding PE range: start PE %u length %u on %s" msgstr "" #: toollib.c:882 #, c-format msgid "Overlapping PE ranges specified (%u-%u, %u-%u) on %s" msgstr "" #: toollib.c:892 toollib.c:1039 toollib.c:1103 msgid "Allocation of list failed" msgstr "" #: toollib.c:956 #, c-format msgid "PE range error: start extent %u to end extent %u" msgstr "" #: toollib.c:971 #, c-format msgid "Physical extent parsing error at %s" msgstr "" #: toollib.c:984 #, c-format msgid "Physical volume %s not allocatable" msgstr "" #: toollib.c:990 #, c-format msgid "No free extents on physical volume \"%s\"" msgstr "" #: toollib.c:1002 toollib.c:1110 msgid "Unable to allocate physical volume list." msgstr "" #: toollib.c:1009 msgid "Allocation of pe_ranges list failed" msgstr "" #: toollib.c:1091 msgid "No specified PVs have space available" msgstr "" #: toollib.c:1137 #, c-format msgid "Can't lock %s for metadata recovery: skipping" msgstr "" #: toollib.c:1148 msgid "" "Names starting \"snapshot\" are reserved. Please choose a different LV name." msgstr "" #: toollib.c:1154 msgid "" "Names starting \"pvmove\" are reserved. Please choose a different LV name." msgstr "" #: toollib.c:1160 msgid "" "Names including \"_mlog\" are reserved. Please choose a different LV name." msgstr "" #: toollib.c:1166 msgid "" "Names including \"_mimage\" are reserved. Please choose a different LV name." 
msgstr "" #: toollib.c:1183 #, c-format msgid "%s: already exists in filesystem" msgstr "" #: toollib.c:1227 msgid "Name allocation failed - device not cleared" msgstr "" #: toollib.c:1233 #, c-format msgid "Name too long - device not cleared (%s)" msgstr "" #: toollib.c:1237 #, c-format msgid "Clearing start of logical volume \"%s\"" msgstr "" #: toollib.c:1240 #, c-format msgid "%s: not found: device not cleared" msgstr "" #: toollib.c:1276 #, c-format msgid "Name allocation failed - log header not written (%s)" msgstr "" #: toollib.c:1283 #, c-format msgid "Name too long - log header not written (%s)" msgstr "" #: toollib.c:1287 #, c-format msgid "Writing log header to device, %s" msgstr "" #: toollib.c:1290 #, c-format msgid "%s: not found: log header not written" msgstr "" #: toollib.c:1298 #, c-format msgid "Failed to write log header to %s" msgstr "" #: toollib.c:1324 msgid "log_name allocation failed. Remove new LV and retry." msgstr "" #: toollib.c:1344 msgid "Aborting. Unable to tag mirror log." msgstr "" #: toollib.c:1362 msgid "" "Aborting. Unable to create in-sync mirror log while activation is disabled." msgstr "" #: toollib.c:1368 msgid "Aborting. Failed to activate mirror log. Remove new LVs and retry." msgstr "" #: toollib.c:1375 #, c-format msgid "Failed to remove tag %s from mirror log." msgstr "" #: toollib.c:1380 msgid "Aborting. Failed to wipe mirror log. Remove new LV and retry." msgstr "" #: toollib.c:1386 msgid "Aborting. Failed to write mirror log header. Remove new LV and retry." msgstr "" #: toollib.c:1392 msgid "Aborting. Failed to deactivate mirror log. Remove new LV and retry." msgstr "" #: uuid/uuid.c:132 msgid "UUID contains invalid character" msgstr "" #: uuid/uuid.c:156 msgid "Couldn't write uuid, buffer too small." msgstr "" #: uuid/uuid.c:184 msgid "Too many characters to be uuid." msgstr "" #: uuid/uuid.c:192 msgid "Couldn't read uuid, incorrect number of characters." msgstr "" #: vgcfgbackup.c:27 msgid "Failed to allocate filename." msgstr "" #: vgcfgbackup.c:32 #, c-format msgid "Error processing filename template %s" msgstr "" #: vgcfgbackup.c:39 #, c-format msgid "" "VGs must be backed up into different files. Use %%s in filename for VG name." msgstr "" #: vgcfgbackup.c:64 #, c-format msgid "Warning: Volume group \"%s\" inconsistent" msgstr "" #: vgcfgbackup.c:76 msgid "No backup taken: specify filename with -f to backup an inconsistent VG" msgstr "" #: vgcfgbackup.c:90 #, c-format msgid "Volume group \"%s\" successfully backed up." msgstr "" #: vgcfgrestore.c:23 msgid "Please specify a *single* volume group to restore." msgstr "" #: vgcfgrestore.c:30 vgextend.c:45 vgreduce.c:469 vgsplit.c:228 #, c-format msgid "Volume group name \"%s\" is invalid" msgstr "" #: vgcfgrestore.c:46 msgid "Unable to lock orphans" msgstr "" #: vgcfgrestore.c:51 #, c-format msgid "Unable to lock volume group %s" msgstr "" #: vgcfgrestore.c:62 msgid "Restore failed." 
msgstr "" #: vgcfgrestore.c:66 #, c-format msgid "Restored volume group %s" msgstr "" #: vgchange.c:92 #, c-format msgid "Spawning background process for %s %s" msgstr "" #: vgchange.c:111 #, c-format msgid "%d logical volume(s) in volume group \"%s\" %smonitored" msgstr "" #: vgchange.c:132 #, c-format msgid "Can't deactivate volume group \"%s\" with %d open logical volume(s)" msgstr "" #: vgchange.c:138 #, c-format msgid "Locking inactive: ignoring clustered volume group %s" msgstr "" #: vgchange.c:148 #, c-format msgid "%d logical volume(s) in volume group \"%s\" already active" msgstr "" #: vgchange.c:152 #, c-format msgid "%d existing logical volume(s) in volume group \"%s\" %smonitored" msgstr "" #: vgchange.c:160 #, c-format msgid "Activated logical volumes in volume group \"%s\"" msgstr "" #: vgchange.c:164 #, c-format msgid "Deactivated logical volumes in volume group \"%s\"" msgstr "" #: vgchange.c:167 #, c-format msgid "%d logical volume(s) in volume group \"%s\" now active" msgstr "" #: vgchange.c:179 vgcreate.c:47 msgid "Volume Group allocation policy cannot inherit from anything" msgstr "" #: vgchange.c:185 #, c-format msgid "Volume group allocation policy is already %s" msgstr "" #: vgchange.c:200 vgchange.c:235 vgchange.c:282 vgchange.c:324 vgchange.c:371 #: vgchange.c:429 vgchange.c:471 vgchange.c:504 #, c-format msgid "Volume group \"%s\" successfully changed" msgstr "" #: vgchange.c:211 #, c-format msgid "Volume group \"%s\" is already resizeable" msgstr "" #: vgchange.c:217 #, c-format msgid "Volume group \"%s\" is already not resizeable" msgstr "" #: vgchange.c:247 #, c-format msgid "Volume group \"%s\" is already clustered" msgstr "" #: vgchange.c:253 #, c-format msgid "Volume group \"%s\" is already not clustered" msgstr "" #: vgchange.c:261 #, c-format msgid "Volume group %s contains snapshots that are not yet supported." msgstr "" #: vgchange.c:293 #, c-format msgid "Volume group \"%s\" must be resizeable to change MaxLogicalVolume" msgstr "" #: vgchange.c:302 msgid "MaxLogicalVolume limit is 255" msgstr "" #: vgchange.c:308 #, c-format msgid "MaxLogicalVolume is less than the current number %d of LVs for \"%s\"" msgstr "" #: vgchange.c:335 #, c-format msgid "Volume group \"%s\" must be resizeable to change MaxPhysicalVolumes" msgstr "" #: vgchange.c:341 msgid "MaxPhysicalVolumes may not be negative" msgstr "" #: vgchange.c:349 msgid "MaxPhysicalVolume limit is 255" msgstr "" #: vgchange.c:355 #, c-format msgid "MaxPhysicalVolumes is less than the current number %d of PVs for \"%s\"" msgstr "" #: vgchange.c:381 #, c-format msgid "Volume group \"%s\" must be resizeable to change PE size" msgstr "" #: vgchange.c:387 vgcreate.c:64 msgid "Physical extent size may not be negative" msgstr "" #: vgchange.c:393 vgcreate.c:83 msgid "Physical extent size may not be zero" msgstr "" #: vgchange.c:398 #, c-format msgid "Physical extent size of VG %s is already %s" msgstr "" #: vgchange.c:404 msgid "Physical extent size must be a power of 2." msgstr "" #: vgchange.c:411 msgid "New extent size is not a perfect fit" msgstr "" #: vgchange.c:454 vgcreate.c:117 #, c-format msgid "Failed to add tag %s to volume group %s" msgstr "" #: vgchange.c:460 #, c-format msgid "Failed to remove tag %s from volume group %s" msgstr "" #: vgchange.c:482 msgid "Volume group has active logical volumes" msgstr "" #: vgchange.c:490 #, c-format msgid "Failed to generate new random UUID for VG %s." 
msgstr "" #: vgchange.c:516 vgconvert.c:36 vgexport.c:27 #, c-format msgid "Unable to find volume group \"%s\"" msgstr "" #: vgchange.c:588 msgid "" "One of -a, -c, -l, -p, -s, -x, --uuid, --alloc, --addtag or --deltag required" msgstr "" #: vgchange.c:600 msgid "" "Only one of -a, -c, -l, -p, -s, -x, --uuid, --alloc, --addtag or --deltag " "allowed" msgstr "" #: vgchange.c:607 msgid "--ignorelockingfailure only available with -a" msgstr "" #: vgchange.c:613 msgid "-A option not necessary with -a option" msgstr "" #: vgconvert.c:59 #, c-format msgid "Volume group \"%s\" already uses format %s" msgstr "" #: vgconvert.c:87 #, c-format msgid "Archive of \"%s\" metadata failed." msgstr "" #: vgconvert.c:100 #, c-format msgid "Logical volume %s must be deactivated before conversion." msgstr "" #: vgconvert.c:130 vgconvert.c:145 vgconvert.c:157 vgconvert.c:170 #: vgconvert.c:186 msgid "Use pvcreate and vgcfgrestore to repair from archived metadata." msgstr "" #: vgconvert.c:166 #, c-format msgid "Deleting existing metadata for VG %s" msgstr "" #: vgconvert.c:168 #, c-format msgid "Removal of existing metadata for %s failed." msgstr "" #: vgconvert.c:177 #, c-format msgid "Test mode: Skipping metadata writing for VG %s in format %s" msgstr "" #: vgconvert.c:182 #, c-format msgid "Writing metadata for VG %s using format %s" msgstr "" #: vgconvert.c:185 #, c-format msgid "Conversion failed for volume group %s." msgstr "" #: vgconvert.c:190 #, c-format msgid "Volume group %s successfully converted" msgstr "" #: vgconvert.c:200 msgid "Please enter volume group(s)" msgstr "" #: vgcreate.c:31 msgid "Please provide volume group name and physical volumes" msgstr "" #: vgcreate.c:37 msgid "Please enter physical volume name(s)" msgstr "" #: vgcreate.c:58 msgid "Number of volumes may not exceed 255" msgstr "" #: vgcreate.c:69 msgid "Max Logical Volumes may not be negative" msgstr "" #: vgcreate.c:74 msgid "Max Physical Volumes may not be negative" msgstr "" #: vgcreate.c:88 vgrename.c:52 vgsplit.c:290 #, c-format msgid "New volume group name \"%s\" is invalid" msgstr "" #: vgcreate.c:98 #, c-format msgid "Warning: Setting maxlogicalvolumes to %d (0 means unlimited)" msgstr "" #: vgcreate.c:102 #, c-format msgid "Warning: Setting maxphysicalvolumes to %d (0 means unlimited)" msgstr "" #: vgcreate.c:112 msgid "Volume group format does not support tags" msgstr "" #: vgcreate.c:163 #, c-format msgid "Volume group \"%s\" successfully created" msgstr "" #: vgdisplay.c:29 #, c-format msgid "WARNING: Volume group \"%s\" inconsistent" msgstr "" #: vgdisplay.c:32 #, c-format msgid "WARNING: volume group \"%s\" is exported" msgstr "" #: vgdisplay.c:52 msgid "--- Physical volumes ---" msgstr "" #: vgdisplay.c:81 msgid "Option -c is not allowed with option -s" msgstr "" #: vgdisplay.c:86 msgid "Option -A is not allowed with volume group names" msgstr "" #: vgexport.c:32 #, c-format msgid "Volume group %s inconsistent" msgstr "" #: vgexport.c:37 #, c-format msgid "Volume group \"%s\" is already exported" msgstr "" #: vgexport.c:47 #, c-format msgid "Volume group \"%s\" has active logical volumes" msgstr "" #: vgexport.c:67 #, c-format msgid "Volume group \"%s\" successfully exported" msgstr "" #: vgexport.c:78 vgimport.c:68 msgid "Please supply volume groups or use -a for all." msgstr "" #: vgexport.c:83 vgimport.c:73 msgid "No arguments permitted when using -a for all." 
msgstr "" #: vgextend.c:25 msgid "Please enter volume group name and physical volume(s)" msgstr "" #: vgextend.c:31 msgid "Please enter physical volume(s)" msgstr "" #: vgextend.c:50 vgmerge.c:32 vgmerge.c:63 vgsplit.c:238 vgsplit.c:275 #, c-format msgid "Checking for volume group \"%s\"" msgstr "" #: vgextend.c:58 #, c-format msgid "Volume group \"%s\" not found." msgstr "" #: vgextend.c:79 #, c-format msgid "Volume group \"%s\" is not resizeable." msgstr "" #: vgextend.c:98 #, c-format msgid "Volume group \"%s\" will be extended by %d new physical volumes" msgstr "" #: vgextend.c:110 #, c-format msgid "Volume group \"%s\" successfully extended" msgstr "" #: vgimport.c:27 #, c-format msgid "Unable to find exported volume group \"%s\"" msgstr "" #: vgimport.c:33 #, c-format msgid "Volume group \"%s\" is not exported" msgstr "" #: vgimport.c:38 #, c-format msgid "Volume group \"%s\" is partially missing" msgstr "" #: vgimport.c:57 #, c-format msgid "Volume group \"%s\" successfully imported" msgstr "" #: vgmerge.c:28 vgsplit.c:234 #, c-format msgid "Duplicate volume group name \"%s\"" msgstr "" #: vgmerge.c:93 vgsplit.c:297 #, c-format msgid "Logical volumes in \"%s\" must be inactive" msgstr "" #: vgmerge.c:100 #, c-format msgid "Extent sizes differ: %d (%s) and %d (%s)" msgstr "" #: vgmerge.c:108 #, c-format msgid "Maximum number of physical volumes (%d) exceeded for \"%s\" and \"%s\"" msgstr "" #: vgmerge.c:116 #, c-format msgid "Maximum number of logical volumes (%d) exceeded for \"%s\" and \"%s\"" msgstr "" #: vgmerge.c:130 #, c-format msgid "Duplicate logical volume name \"%s\" in \"%s\" and \"%s\"" msgstr "" #: vgmerge.c:142 vgmerge.c:151 #, c-format msgid "Physical volume %s might be constructed from same volume group %s." msgstr "" #: vgmerge.c:186 #, c-format msgid "Failed to generate new random LVID for %s" msgstr "" #: vgmerge.c:197 #, c-format msgid "Changed LVID for %s to %s" msgstr "" #: vgmerge.c:235 #, c-format msgid "Volume group \"%s\" successfully merged into \"%s\"" msgstr "" #: vgmerge.c:252 msgid "Please enter 2 or more volume groups to merge" msgstr "" #: vgreduce.c:24 msgid "Volume Groups must always contain at least one PV" msgstr "" #: vgreduce.c:33 #, c-format msgid "Removing PV with UUID %s from VG %s" msgstr "" #: vgreduce.c:36 #, c-format msgid "LVs still present on PV with UUID %s: Can't remove from VG %s" msgstr "" #: vgreduce.c:61 #, c-format msgid "%s/%s has missing extents: removing (including dependencies)" msgstr "" #: vgreduce.c:68 #, c-format msgid "Deactivating (if active) logical volume %s (origin of %s)" msgstr "" #: vgreduce.c:72 vgreduce.c:89 vgreduce.c:333 #, c-format msgid "Failed to deactivate LV %s" msgstr "" #: vgreduce.c:99 vgreduce.c:146 vgreduce.c:348 #, c-format msgid "Removing LV %s from VG %s" msgstr "" #: vgreduce.c:191 #, c-format msgid "Non-mirror-image LV %s found: can't remove." msgstr "" #: vgreduce.c:207 msgid "Aborting because --mirrorsonly was specified." msgstr "" #: vgreduce.c:232 vgreduce.c:529 #, c-format msgid "Failed to write out a consistent VG for %s" msgstr "" #: vgreduce.c:250 #, c-format msgid "Failed to commit consistent VG for %s" msgstr "" #: vgreduce.c:258 msgid "Failed to resume LVs using error segments." msgstr "" #: vgreduce.c:290 #, c-format msgid "The log device for %s/%s has failed." msgstr "" #: vgreduce.c:296 #, c-format msgid "Log device for %s/%s has failed." 
msgstr "" #: vgreduce.c:312 #, c-format msgid "Failed to write out updated VG for %s" msgstr "" #: vgreduce.c:318 #, c-format msgid "Failed to commit updated VG for %s" msgstr "" #: vgreduce.c:329 #, c-format msgid "Deactivating (if active) logical volume %s" msgstr "" #: vgreduce.c:371 #, c-format msgid "Physical volume \"%s\" still in use" msgstr "" #: vgreduce.c:376 #, c-format msgid "Can't remove final physical volume \"%s\" from volume group \"%s\"" msgstr "" #: vgreduce.c:386 #, c-format msgid "Removing \"%s\" from volume group \"%s\"" msgstr "" #: vgreduce.c:404 #, c-format msgid "Removal of physical volume \"%s\" from \"%s\" failed" msgstr "" #: vgreduce.c:418 #, c-format msgid "Removed \"%s\" from volume group \"%s\"" msgstr "" #: vgreduce.c:431 msgid "Please give volume group name and physical volume paths" msgstr "" #: vgreduce.c:437 msgid "Please give volume group name" msgstr "" #: vgreduce.c:443 msgid "--mirrorsonly requires --removemissing" msgstr "" #: vgreduce.c:449 msgid "Please enter physical volume paths or option -a" msgstr "" #: vgreduce.c:454 msgid "Option -a and physical volume paths mutually exclusive" msgstr "" #: vgreduce.c:460 msgid "Please only specify the volume group" msgstr "" #: vgreduce.c:496 #, c-format msgid "Volume group \"%s\" is already consistent" msgstr "" #: vgreduce.c:537 #, c-format msgid "Wrote out consistent volume group %s" msgstr "" #: vgreduce.c:553 #, c-format msgid "Volume group \"%s\" is not reducible" msgstr "" #: vgremove.c:27 #, c-format msgid "Volume group \"%s\" not found or inconsistent." msgstr "" #: vgremove.c:29 msgid "Consider vgreduce --removemissing if metadata is inconsistent." msgstr "" #: vgremove.c:40 #, c-format msgid "Volume group \"%s\" still contains %d logical volume(s)" msgstr "" #: vgremove.c:49 #, c-format msgid "vg_remove %s failed" msgstr "" #: vgremove.c:56 #, c-format msgid "Removing physical volume \"%s\" from volume group \"%s\"" msgstr "" #: vgremove.c:69 #, c-format msgid "Failed to remove physical volume \"%s\" from volume group \"%s\"" msgstr "" #: vgremove.c:79 #, c-format msgid "Volume group \"%s\" successfully removed" msgstr "" #: vgremove.c:81 #, c-format msgid "Volume group \"%s\" not properly removed" msgstr "" #: vgremove.c:91 msgid "Please enter one or more volume group paths" msgstr "" #: vgrename.c:34 msgid "Old and new volume group names need specifying" msgstr "" #: vgrename.c:46 #, c-format msgid "New volume group path exceeds maximum length of %d!" msgstr "" #: vgrename.c:58 msgid "Old and new volume group names must differ" msgstr "" #: vgrename.c:66 msgid "No complete volume groups found" msgstr "" #: vgrename.c:76 #, c-format msgid "Found more than one VG called %s. Please supply VG uuid." msgstr "" #: vgrename.c:99 #, c-format msgid "Volume group %s %s%s%snot found." msgstr "" #: vgrename.c:123 #, c-format msgid "Volume group \"%s\" still has active LVs" msgstr "" #: vgrename.c:129 #, c-format msgid "Checking for new volume group \"%s\"" msgstr "" #: vgrename.c:139 #, c-format msgid "New volume group \"%s\" already exists" msgstr "" #: vgrename.c:154 #, c-format msgid "Renaming \"%s\" to \"%s\"" msgstr "" #: vgrename.c:156 msgid "Test mode: Skipping rename." 
msgstr "" #: vgrename.c:158 #, c-format msgid "Renaming \"%s\" to \"%s\" failed: %s" msgstr "" #: vgrename.c:177 #, c-format msgid "Volume group \"%s\" successfully renamed to \"%s\"" msgstr "" #: vgscan.c:36 #, c-format msgid "Found %svolume group \"%s\" using metadata type %s" msgstr "" #: vgscan.c:50 msgid "Too many parameters on command line" msgstr "" #: vgscan.c:57 msgid "Reading all physical volumes. This may take a while..." msgstr "" #: vgsplit.c:25 #, c-format msgid "Physical volume %s not in volume group %s" msgstr "" #: vgsplit.c:90 #, c-format msgid "Can't split Logical Volume %s between two Volume Groups" msgstr "" #: vgsplit.c:152 #, c-format msgid "Snapshot %s split" msgstr "" #: vgsplit.c:193 #, c-format msgid "Mirror %s split" msgstr "" #: vgsplit.c:218 msgid "Existing VG, new VG and physical volumes required." msgstr "" #: vgsplit.c:264 #, c-format msgid "Volume group \"%s\" is not resizeable" msgstr "" #: vgsplit.c:285 #, c-format msgid "Volume group \"%s\" already exists" msgstr "" #: vgsplit.c:339 msgid "Cannot split: Nowhere to store metadata for new Volume Group" msgstr "" #: vgsplit.c:348 msgid "Writing out updated volume groups" msgstr "" #: vgsplit.c:370 #, c-format msgid "Volume group \"%s\" became inconsistent: please fix manually" msgstr "" #: vgsplit.c:385 #, c-format msgid "Volume group \"%s\" successfully split from \"%s\"" msgstr "" #: zero/zero.c:71 msgid "zero module string list allocation failed" msgstr "" LVM2.2.02.176/unit-tests/0000755000000000000120000000000013176752421013443 5ustar rootwheelLVM2.2.02.176/unit-tests/mm/0000755000000000000120000000000013176752421014054 5ustar rootwheelLVM2.2.02.176/unit-tests/mm/Makefile.in0000644000000000000120000000154713176752421016130 0ustar rootwheel# # Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved. # Copyright (C) 2004 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ VPATH = @srcdir@ SOURCES=\ pool_valgrind_t.c TARGETS=\ pool_valgrind_t include $(top_builddir)/make.tmpl DM_LIBS = -ldevmapper $(LIBS) pool_valgrind_t: pool_valgrind_t.o $(CC) $(CFLAGS) -o $@ pool_valgrind_t.o $(LDFLAGS) $(DM_LIBS) LVM2.2.02.176/unit-tests/mm/TESTS0000644000000000000120000000011213176752421014673 0ustar rootwheelvalgrind pool awareness:valgrind ./pool_valgrind_t 2>&1 | ./check_results LVM2.2.02.176/unit-tests/mm/pool_valgrind_t.c0000644000000000000120000001131613176752421017404 0ustar rootwheel#include "libdevmapper.h" #include /* * Checks that valgrind is picking up unallocated pool memory as * uninitialised, even if the chunk has been recycled. * * $ valgrind --track-origins=yes ./pool_valgrind_t * * ==7023== Memcheck, a memory error detector * ==7023== Copyright (C) 2002-2009, and GNU GPL'd, by Julian Seward et al. 
* ==7023== Using Valgrind-3.6.0.SVN-Debian and LibVEX; rerun with -h for copyright info * ==7023== Command: ./pool_valgrind_t * ==7023== * first branch worked (as expected) * ==7023== Conditional jump or move depends on uninitialised value(s) * ==7023== at 0x4009AC: main (in /home/ejt/work/lvm2/unit-tests/mm/pool_valgrind_t) * ==7023== Uninitialised value was created by a client request * ==7023== at 0x4E40CB8: dm_pool_free (in /home/ejt/work/lvm2/libdm/ioctl/libdevmapper.so.1.02) * ==7023== by 0x4009A8: main (in /home/ejt/work/lvm2/unit-tests/mm/pool_valgrind_t) * ==7023== * second branch worked (valgrind should have flagged this as an error) * ==7023== * ==7023== HEAP SUMMARY: * ==7023== in use at exit: 0 bytes in 0 blocks * ==7023== total heap usage: 2 allocs, 2 frees, 2,104 bytes allocated * ==7023== * ==7023== All heap blocks were freed -- no leaks are possible * ==7023== * ==7023== For counts of detected and suppressed errors, rerun with: -v * ==7023== ERROR SUMMARY: 1 errors from 1 contexts (suppressed: 4 from 4) */ #define COUNT 10 static void check_free() { int i; char *blocks[COUNT]; struct dm_pool *p = dm_pool_create("blah", 1024); for (i = 0; i < COUNT; i++) blocks[i] = dm_pool_alloc(p, 37); /* check we can access the last block */ blocks[COUNT - 1][0] = 'E'; if (blocks[COUNT - 1][0] == 'E') printf("first branch worked (as expected)\n"); dm_pool_free(p, blocks[5]); if (blocks[COUNT - 1][0] == 'E') printf("second branch worked (valgrind should have flagged this as an error)\n"); dm_pool_destroy(p); } /* Checks that freed chunks are marked NOACCESS */ static void check_free2() { struct dm_pool *p = dm_pool_create("", 900); /* 900 will get * rounded up to 1024, * 1024 would have got * rounded up to * 2048 */ char *data1, *data2; assert(p); data1 = dm_pool_alloc(p, 123); assert(data1); data1 = dm_pool_alloc(p, 1024); assert(data1); data2 = dm_pool_alloc(p, 123); assert(data2); data2[0] = 'A'; /* should work fine */ dm_pool_free(p, data1); /* * so now the first chunk is active, the second chunk has become * the free one. */ data2[0] = 'B'; /* should prompt an invalid write error */ dm_pool_destroy(p); } static void check_alignment() { /* * Pool always tries to allocate blocks with particular alignment. * So there are potentially small gaps between allocations. This * test checks that valgrind is spotting illegal accesses to these * gaps. */ int i, sum; struct dm_pool *p = dm_pool_create("blah", 1024); char *data1, *data2; char buffer[16]; data1 = dm_pool_alloc_aligned(p, 1, 4); assert(data1); data2 = dm_pool_alloc_aligned(p, 1, 4); assert(data1); snprintf(buffer, sizeof(buffer), "%c", *(data1 + 1)); /* invalid read size 1 */ dm_pool_destroy(p); } /* * Looking at the code I'm not sure allocations that are near the chunk * size are working. So this test is trying to exhibit a specific problem. */ static void check_allocation_near_chunk_size() { int i; char *data; struct dm_pool *p = dm_pool_create("", 900); /* * allocate a lot and then free everything so we know there * is a spare chunk. */ for (i = 0; i < 1000; i++) { data = dm_pool_alloc(p, 37); memset(data, 0, 37); assert(data); } dm_pool_empty(p); /* now we allocate something close to the chunk size ... 
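 * (The pool above was created with a requested chunk size of 900, which
 * gets rounded up to 1024 - see the comment in check_free2() - so the
 * 1020 byte allocation below should just fit inside the single spare
 * chunk left behind by dm_pool_empty().)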
*/ data = dm_pool_alloc(p, 1020); assert(data); memset(data, 0, 1020); dm_pool_destroy(p); } /* FIXME: test the dbg_malloc at exit (this test should be in dbg_malloc) */ static void check_leak_detection() { int i; struct dm_pool *p = dm_pool_create("", 1024); for (i = 0; i < 10; i++) dm_pool_alloc(p, (i + 1) * 37); } /* we shouldn't get any errors from this one */ static void check_object_growth() { int i; struct dm_pool *p = dm_pool_create("", 32); char data[100] = { 0 }; void *obj; dm_pool_begin_object(p, 43); for (i = 1; i < 100; i++) dm_pool_grow_object(p, data, i); obj = dm_pool_end_object(p); dm_pool_destroy(p); } int main(int argc, char **argv) { check_free(); check_free2(); check_alignment(); check_allocation_near_chunk_size(); check_leak_detection(); check_object_growth(); return 0; } LVM2.2.02.176/unit-tests/mm/check_results0000755000000000000120000000104513176752421016640 0ustar rootwheel#!/usr/bin/env ruby1.9 require 'pp' patterns = [ /Invalid read of size 1/, /Invalid write of size 1/, /Invalid read of size 1/, /still reachable: [0-9,]+ bytes in 3 blocks/ ] lines = STDIN.readlines pp lines result = catch(:done) do patterns.each do |pat| loop do throw(:done, false) if lines.size == 0 line = lines.shift if line =~ pat STDERR.puts "matched #{pat}" break; end end end throw(:done, true) end exit(result ? 0 : 1) LVM2.2.02.176/unit-tests/regex/0000755000000000000120000000000013176752421014555 5ustar rootwheelLVM2.2.02.176/unit-tests/regex/devices.list0000644000000000000120000002573213176752421017105 0ustar rootwheel/dev /dev/.devfsd /dev/cpu /dev/cpu/mtrr /dev/netlink /dev/netlink/route /dev/netlink/skip /dev/netlink/USERSOCK /dev/netlink/fwmonitor /dev/netlink/ARPD /dev/netlink/ROUTE6 /dev/netlink/IP6_FW /dev/netlink/tap0 /dev/netlink/tap1 /dev/netlink/tap2 /dev/netlink/tap3 /dev/netlink/tap4 /dev/netlink/tap5 /dev/netlink/tap6 /dev/netlink/tap7 /dev/netlink/tap8 /dev/netlink/tap9 /dev/netlink/tap10 /dev/netlink/tap11 /dev/netlink/tap12 /dev/netlink/tap13 /dev/netlink/tap14 /dev/netlink/tap15 /dev/shm /dev/mem /dev/kmem /dev/null /dev/port /dev/zero /dev/full /dev/random /dev/urandom /dev/tty /dev/console /dev/vc /dev/vc/1 /dev/vc/2 /dev/vc/3 /dev/vc/4 /dev/vc/5 /dev/vc/6 /dev/vc/7 /dev/vc/8 /dev/vc/9 /dev/vc/10 /dev/vc/11 /dev/vc/12 /dev/vc/13 /dev/vc/14 /dev/vc/15 /dev/vc/16 /dev/vc/17 /dev/vc/18 /dev/vc/19 /dev/vc/20 /dev/vc/21 /dev/vc/22 /dev/vc/23 /dev/vc/24 /dev/vc/25 /dev/vc/26 /dev/vc/27 /dev/vc/28 /dev/vc/29 /dev/vc/30 /dev/vc/31 /dev/vc/32 /dev/vc/33 /dev/vc/34 /dev/vc/35 /dev/vc/36 /dev/vc/37 /dev/vc/38 /dev/vc/39 /dev/vc/40 /dev/vc/41 /dev/vc/42 /dev/vc/43 /dev/vc/44 /dev/vc/45 /dev/vc/46 /dev/vc/47 /dev/vc/48 /dev/vc/49 /dev/vc/50 /dev/vc/51 /dev/vc/52 /dev/vc/53 /dev/vc/54 /dev/vc/55 /dev/vc/56 /dev/vc/57 /dev/vc/58 /dev/vc/59 /dev/vc/60 /dev/vc/61 /dev/vc/62 /dev/vc/63 /dev/vc/0 /dev/ptmx /dev/misc /dev/misc/psaux /dev/pty /dev/pty/m0 /dev/pty/m1 /dev/pty/m2 /dev/pty/m3 /dev/pty/m4 /dev/pty/m5 /dev/pty/m6 /dev/pty/m7 /dev/pty/m8 /dev/pty/m9 /dev/pty/m10 /dev/pty/m11 /dev/pty/m12 /dev/pty/m13 /dev/pty/m14 /dev/pty/m15 /dev/pty/m16 /dev/pty/m17 /dev/pty/m18 /dev/pty/m19 /dev/pty/m20 /dev/pty/m21 /dev/pty/m22 /dev/pty/m23 /dev/pty/m24 /dev/pty/m25 /dev/pty/m26 /dev/pty/m27 /dev/pty/m28 /dev/pty/m29 /dev/pty/m30 /dev/pty/m31 /dev/pty/m32 /dev/pty/m33 /dev/pty/m34 /dev/pty/m35 /dev/pty/m36 /dev/pty/m37 /dev/pty/m38 /dev/pty/m39 /dev/pty/m40 /dev/pty/m41 /dev/pty/m42 /dev/pty/m43 /dev/pty/m44 /dev/pty/m45 /dev/pty/m46 /dev/pty/m47 /dev/pty/m48 /dev/pty/m49 /dev/pty/m50 
/dev/pty/m51 /dev/pty/m52 /dev/pty/m53 /dev/pty/m54 /dev/pty/m55 /dev/pty/m56 /dev/pty/m57 /dev/pty/m58 /dev/pty/m59 /dev/pty/m60 /dev/pty/m61 /dev/pty/m62 /dev/pty/m63 /dev/pty/m64 /dev/pty/m65 /dev/pty/m66 /dev/pty/m67 /dev/pty/m68 /dev/pty/m69 /dev/pty/m70 /dev/pty/m71 /dev/pty/m72 /dev/pty/m73 /dev/pty/m74 /dev/pty/m75 /dev/pty/m76 /dev/pty/m77 /dev/pty/m78 /dev/pty/m79 /dev/pty/m80 /dev/pty/m81 /dev/pty/m82 /dev/pty/m83 /dev/pty/m84 /dev/pty/m85 /dev/pty/m86 /dev/pty/m87 /dev/pty/m88 /dev/pty/m89 /dev/pty/m90 /dev/pty/m91 /dev/pty/m92 /dev/pty/m93 /dev/pty/m94 /dev/pty/m95 /dev/pty/m96 /dev/pty/m97 /dev/pty/m98 /dev/pty/m99 /dev/pty/m100 /dev/pty/m101 /dev/pty/m102 /dev/pty/m103 /dev/pty/m104 /dev/pty/m105 /dev/pty/m106 /dev/pty/m107 /dev/pty/m108 /dev/pty/m109 /dev/pty/m110 /dev/pty/m111 /dev/pty/m112 /dev/pty/m113 /dev/pty/m114 /dev/pty/m115 /dev/pty/m116 /dev/pty/m117 /dev/pty/m118 /dev/pty/m119 /dev/pty/m120 /dev/pty/m121 /dev/pty/m122 /dev/pty/m123 /dev/pty/m124 /dev/pty/m125 /dev/pty/m126 /dev/pty/m127 /dev/pty/m128 /dev/pty/m129 /dev/pty/m130 /dev/pty/m131 /dev/pty/m132 /dev/pty/m133 /dev/pty/m134 /dev/pty/m135 /dev/pty/m136 /dev/pty/m137 /dev/pty/m138 /dev/pty/m139 /dev/pty/m140 /dev/pty/m141 /dev/pty/m142 /dev/pty/m143 /dev/pty/m144 /dev/pty/m145 /dev/pty/m146 /dev/pty/m147 /dev/pty/m148 /dev/pty/m149 /dev/pty/m150 /dev/pty/m151 /dev/pty/m152 /dev/pty/m153 /dev/pty/m154 /dev/pty/m155 /dev/pty/m156 /dev/pty/m157 /dev/pty/m158 /dev/pty/m159 /dev/pty/m160 /dev/pty/m161 /dev/pty/m162 /dev/pty/m163 /dev/pty/m164 /dev/pty/m165 /dev/pty/m166 /dev/pty/m167 /dev/pty/m168 /dev/pty/m169 /dev/pty/m170 /dev/pty/m171 /dev/pty/m172 /dev/pty/m173 /dev/pty/m174 /dev/pty/m175 /dev/pty/m176 /dev/pty/m177 /dev/pty/m178 /dev/pty/m179 /dev/pty/m180 /dev/pty/m181 /dev/pty/m182 /dev/pty/m183 /dev/pty/m184 /dev/pty/m185 /dev/pty/m186 /dev/pty/m187 /dev/pty/m188 /dev/pty/m189 /dev/pty/m190 /dev/pty/m191 /dev/pty/m192 /dev/pty/m193 /dev/pty/m194 /dev/pty/m195 /dev/pty/m196 /dev/pty/m197 /dev/pty/m198 /dev/pty/m199 /dev/pty/m200 /dev/pty/m201 /dev/pty/m202 /dev/pty/m203 /dev/pty/m204 /dev/pty/m205 /dev/pty/m206 /dev/pty/m207 /dev/pty/m208 /dev/pty/m209 /dev/pty/m210 /dev/pty/m211 /dev/pty/m212 /dev/pty/m213 /dev/pty/m214 /dev/pty/m215 /dev/pty/m216 /dev/pty/m217 /dev/pty/m218 /dev/pty/m219 /dev/pty/m220 /dev/pty/m221 /dev/pty/m222 /dev/pty/m223 /dev/pty/m224 /dev/pty/m225 /dev/pty/m226 /dev/pty/m227 /dev/pty/m228 /dev/pty/m229 /dev/pty/m230 /dev/pty/m231 /dev/pty/m232 /dev/pty/m233 /dev/pty/m234 /dev/pty/m235 /dev/pty/m236 /dev/pty/m237 /dev/pty/m238 /dev/pty/m239 /dev/pty/m240 /dev/pty/m241 /dev/pty/m242 /dev/pty/m243 /dev/pty/m244 /dev/pty/m245 /dev/pty/m246 /dev/pty/m247 /dev/pty/m248 /dev/pty/m249 /dev/pty/m250 /dev/pty/m251 /dev/pty/m252 /dev/pty/m253 /dev/pty/m254 /dev/pty/m255 /dev/pts /dev/pts/0 /dev/pts/1 /dev/pts/2 /dev/pts/3 /dev/pts/4 /dev/pts/5 /dev/pts/6 /dev/pts/7 /dev/vcc /dev/vcc/0 /dev/vcc/a /dev/vcc/1 /dev/vcc/a1 /dev/vcc/2 /dev/vcc/a2 /dev/vcc/3 /dev/vcc/a3 /dev/vcc/5 /dev/vcc/a5 /dev/vcc/4 /dev/vcc/a4 /dev/vcc/6 /dev/vcc/a6 /dev/vcc/7 /dev/vcc/a7 /dev/tts /dev/tts/0 /dev/cua /dev/cua/0 /dev/ide /dev/ide/host0 /dev/ide/host0/bus0 /dev/ide/host0/bus0/target0 /dev/ide/host0/bus0/target0/lun0 /dev/ide/host0/bus0/target0/lun0/disc /dev/ide/host0/bus0/target0/lun0/part1 /dev/ide/host0/bus0/target0/lun0/part2 /dev/ide/host0/bus0/target0/lun0/part3 /dev/ide/host0/bus0/target0/lun0/part4 /dev/ide/host0/bus0/target0/lun0/part5 /dev/ide/host0/bus0/target0/lun0/part6 
/dev/ide/host0/bus0/target0/lun0/part7 /dev/ide/host0/bus0/target0/lun0/part8 /dev/ide/host0/bus0/target1 /dev/ide/host0/bus0/target1/lun0 /dev/ide/host0/bus0/target1/lun0/disc /dev/ide/host0/bus0/target1/lun0/part1 /dev/ide/host0/bus1 /dev/ide/host0/bus1/target0 /dev/ide/host0/bus1/target0/lun0 /dev/ide/host0/bus1/target0/lun0/disc /dev/ide/host0/bus1/target0/lun0/part1 /dev/ide/host0/bus1/target1 /dev/ide/host0/bus1/target1/lun0 /dev/discs /dev/discs/disc0 /dev/discs/disc1 /dev/discs/disc2 /dev/floppy /dev/floppy/0u1440 /dev/floppy/0u1680 /dev/floppy/0u1722 /dev/floppy/0u1743 /dev/floppy/0u1760 /dev/floppy/0u1920 /dev/floppy/0u1840 /dev/floppy/0u1600 /dev/floppy/0u360 /dev/floppy/0u720 /dev/floppy/0u820 /dev/floppy/0u830 /dev/floppy/0u1040 /dev/floppy/0u1120 /dev/floppy/0u800 /dev/floppy/0 /dev/loop /dev/loop/0 /dev/loop/1 /dev/loop/2 /dev/loop/3 /dev/loop/4 /dev/loop/5 /dev/loop/6 /dev/loop/7 /dev/cdroms /dev/sound /dev/sound/dsp /dev/sound/dsp1 /dev/sound/mixer /dev/sound/midi /dev/usb /dev/root /dev/initctl /dev/xconsole /dev/fd /dev/stdin /dev/stdout /dev/stderr /dev/route /dev/skip /dev/USERSOCK /dev/fwmonitor /dev/ARPD /dev/ROUTE6 /dev/IP6_FW /dev/tap0 /dev/tap1 /dev/tap2 /dev/tap3 /dev/tap4 /dev/tap5 /dev/tap6 /dev/tap7 /dev/tap8 /dev/tap9 /dev/tap10 /dev/tap11 /dev/tap12 /dev/tap13 /dev/tap14 /dev/tap15 /dev/tty1 /dev/tty2 /dev/tty3 /dev/tty4 /dev/tty5 /dev/tty6 /dev/tty7 /dev/tty8 /dev/tty9 /dev/tty10 /dev/tty11 /dev/tty12 /dev/tty13 /dev/tty14 /dev/tty15 /dev/tty16 /dev/tty17 /dev/tty18 /dev/tty19 /dev/tty20 /dev/tty21 /dev/tty22 /dev/tty23 /dev/tty24 /dev/tty25 /dev/tty26 /dev/tty27 /dev/tty28 /dev/tty29 /dev/tty30 /dev/tty31 /dev/tty32 /dev/tty33 /dev/tty34 /dev/tty35 /dev/tty36 /dev/tty37 /dev/tty38 /dev/tty39 /dev/tty40 /dev/tty41 /dev/tty42 /dev/tty43 /dev/tty44 /dev/tty45 /dev/tty46 /dev/tty47 /dev/tty48 /dev/tty49 /dev/tty50 /dev/tty51 /dev/tty52 /dev/tty53 /dev/tty54 /dev/tty55 /dev/tty56 /dev/tty57 /dev/tty58 /dev/tty59 /dev/tty60 /dev/tty61 /dev/tty62 /dev/tty63 /dev/tty0 /dev/psaux /dev/ptyp0 /dev/ptyp1 /dev/ptyp2 /dev/ptyp3 /dev/ptyp4 /dev/ptyp5 /dev/ptyp6 /dev/ptyp7 /dev/ptyp8 /dev/ptyp9 /dev/ptypa /dev/ptypb /dev/ptypc /dev/ptypd /dev/ptype /dev/ptypf /dev/ptyq0 /dev/ptyq1 /dev/ptyq2 /dev/ptyq3 /dev/ptyq4 /dev/ptyq5 /dev/ptyq6 /dev/ptyq7 /dev/ptyq8 /dev/ptyq9 /dev/ptyqa /dev/ptyqb /dev/ptyqc /dev/ptyqd /dev/ptyqe /dev/ptyqf /dev/ptyr0 /dev/ptyr1 /dev/ptyr2 /dev/ptyr3 /dev/ptyr4 /dev/ptyr5 /dev/ptyr6 /dev/ptyr7 /dev/ptyr8 /dev/ptyr9 /dev/ptyra /dev/ptyrb /dev/ptyrc /dev/ptyrd /dev/ptyre /dev/ptyrf /dev/ptys0 /dev/ptys1 /dev/ptys2 /dev/ptys3 /dev/ptys4 /dev/ptys5 /dev/ptys6 /dev/ptys7 /dev/ptys8 /dev/ptys9 /dev/ptysa /dev/ptysb /dev/ptysc /dev/ptysd /dev/ptyse /dev/ptysf /dev/ptyt0 /dev/ptyt1 /dev/ptyt2 /dev/ptyt3 /dev/ptyt4 /dev/ptyt5 /dev/ptyt6 /dev/ptyt7 /dev/ptyt8 /dev/ptyt9 /dev/ptyta /dev/ptytb /dev/ptytc /dev/ptytd /dev/ptyte /dev/ptytf /dev/ptyu0 /dev/ptyu1 /dev/ptyu2 /dev/ptyu3 /dev/ptyu4 /dev/ptyu5 /dev/ptyu6 /dev/ptyu7 /dev/ptyu8 /dev/ptyu9 /dev/ptyua /dev/ptyub /dev/ptyuc /dev/ptyud /dev/ptyue /dev/ptyuf /dev/ptyv0 /dev/ptyv1 /dev/ptyv2 /dev/ptyv3 /dev/ptyv4 /dev/ptyv5 /dev/ptyv6 /dev/ptyv7 /dev/ptyv8 /dev/ptyv9 /dev/ptyva /dev/ptyvb /dev/ptyvc /dev/ptyvd /dev/ptyve /dev/ptyvf /dev/ptyw0 /dev/ptyw1 /dev/ptyw2 /dev/ptyw3 /dev/ptyw4 /dev/ptyw5 /dev/ptyw6 /dev/ptyw7 /dev/ptyw8 /dev/ptyw9 /dev/ptywa /dev/ptywb /dev/ptywc /dev/ptywd /dev/ptywe /dev/ptywf /dev/ptyx0 /dev/ptyx1 /dev/ptyx2 /dev/ptyx3 /dev/ptyx4 /dev/ptyx5 /dev/ptyx6 /dev/ptyx7 /dev/ptyx8 
/dev/ptyx9 /dev/ptyxa /dev/ptyxb /dev/ptyxc /dev/ptyxd /dev/ptyxe /dev/ptyxf /dev/ptyy0 /dev/ptyy1 /dev/ptyy2 /dev/ptyy3 /dev/ptyy4 /dev/ptyy5 /dev/ptyy6 /dev/ptyy7 /dev/ptyy8 /dev/ptyy9 /dev/ptyya /dev/ptyyb /dev/ptyyc /dev/ptyyd /dev/ptyye /dev/ptyyf /dev/ptyz0 /dev/ptyz1 /dev/ptyz2 /dev/ptyz3 /dev/ptyz4 /dev/ptyz5 /dev/ptyz6 /dev/ptyz7 /dev/ptyz8 /dev/ptyz9 /dev/ptyza /dev/ptyzb /dev/ptyzc /dev/ptyzd /dev/ptyze /dev/ptyzf /dev/ptya0 /dev/ptya1 /dev/ptya2 /dev/ptya3 /dev/ptya4 /dev/ptya5 /dev/ptya6 /dev/ptya7 /dev/ptya8 /dev/ptya9 /dev/ptyaa /dev/ptyab /dev/ptyac /dev/ptyad /dev/ptyae /dev/ptyaf /dev/ptyb0 /dev/ptyb1 /dev/ptyb2 /dev/ptyb3 /dev/ptyb4 /dev/ptyb5 /dev/ptyb6 /dev/ptyb7 /dev/ptyb8 /dev/ptyb9 /dev/ptyba /dev/ptybb /dev/ptybc /dev/ptybd /dev/ptybe /dev/ptybf /dev/ptyc0 /dev/ptyc1 /dev/ptyc2 /dev/ptyc3 /dev/ptyc4 /dev/ptyc5 /dev/ptyc6 /dev/ptyc7 /dev/ptyc8 /dev/ptyc9 /dev/ptyca /dev/ptycb /dev/ptycc /dev/ptycd /dev/ptyce /dev/ptycf /dev/ptyd0 /dev/ptyd1 /dev/ptyd2 /dev/ptyd3 /dev/ptyd4 /dev/ptyd5 /dev/ptyd6 /dev/ptyd7 /dev/ptyd8 /dev/ptyd9 /dev/ptyda /dev/ptydb /dev/ptydc /dev/ptydd /dev/ptyde /dev/ptydf /dev/ptye0 /dev/ptye1 /dev/ptye2 /dev/ptye3 /dev/ptye4 /dev/ptye5 /dev/ptye6 /dev/ptye7 /dev/ptye8 /dev/ptye9 /dev/ptyea /dev/ptyeb /dev/ptyec /dev/ptyed /dev/ptyee /dev/ptyef /dev/vcs /dev/vcsa /dev/vcs1 /dev/vcsa1 /dev/ttyS0 /dev/cua0 /dev/hda /dev/hda1 /dev/hda2 /dev/hda3 /dev/hda4 /dev/hda5 /dev/hda6 /dev/hda7 /dev/hda8 /dev/hdb /dev/hdb1 /dev/hdc /dev/hdc1 /dev/fd0u1440 /dev/fd0u1680 /dev/fd0u1722 /dev/fd0u1743 /dev/fd0u1760 /dev/fd0u1920 /dev/fd0u1840 /dev/fd0u1600 /dev/fd0u360 /dev/fd0u720 /dev/fd0u820 /dev/fd0u830 /dev/fd0u1040 /dev/fd0u1120 /dev/fd0u800 /dev/fd0 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 /dev/loop5 /dev/loop6 /dev/loop7 /dev/dsp /dev/dsp1 /dev/mixer /dev/midi /dev/lvm /dev/vg0 /dev/vg0/group /dev/vg0/packages /dev/vg0/photos /dev/vg0/music /dev/log /dev/MAKEDEV /dev/printer /dev/vcs2 /dev/vcsa2 /dev/vcs3 /dev/vcsa3 /dev/vcs5 /dev/vcsa5 /dev/vcs4 /dev/vcsa4 /dev/vcs6 /dev/vcsa6 /dev/nvidia0 /dev/nvidia1 /dev/nvidia2 /dev/nvidia3 /dev/nvidiactl /dev/vcs7 /dev/vcsa7 LVM2.2.02.176/unit-tests/regex/Makefile.in0000644000000000000120000000201013176752421016613 0ustar rootwheel# # Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved. # Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ SOURCES=\ parse_t.c \ matcher_t.c TARGETS=\ parse_t \ matcher_t include $(top_builddir)/make.tmpl INCLUDES += -I$(top_srcdir)/libdm DM_DEPS = $(top_builddir)/libdm/libdevmapper.so DM_LIBS = -ldevmapper $(LIBS) parse_t: parse_t.o $(DM_DEPS) $(CC) $(CFLAGS) $(LDFLAGS) -o $@ parse_t.o $(DM_LIBS) matcher_t: matcher_t.o $(DM_DEPS) $(CC) $(CFLAGS) $(LDFLAGS) -o $@ matcher_t.o $(DM_LIBS) LVM2.2.02.176/unit-tests/regex/TESTS0000644000000000000120000000067213176752421015407 0ustar rootwheeldfa matching:$TEST_TOOL ./matcher_t --fingerprint dev_patterns < devices.list > matcher_t.output && diff -u matcher_t.expected matcher_t.output dfa matching:$TEST_TOOL ./matcher_t --fingerprint random_regexes < /dev/null > matcher_t.output && diff -u matcher_t.expected2 matcher_t.output dfa with non-print regex chars:$TEST_TOOL ./matcher_t nonprint_regexes < nonprint_input > matcher_t.output && diff -u matcher_t.expected3 matcher_t.outputLVM2.2.02.176/unit-tests/regex/random_regexes0000644000000000000120000000562013176752421017505 0ustar rootwheel"(((a?)(([Ub]*)|z))((([qr]|X)+)([Qn]*)))+" "[HZejtuw]*" "((B|s)*)|(((([Fv]l)(N+))(([el]|C)(tJ)))?)" "((([Ma]?)|(t*))*)|((([cm]E)|(M?))|(([BE][EV])|([Qj][Mh])))" "(((([bw]*)|([IO]*))((zK)*))|(((pU)|(i|q))|((z?)|([HL]?))))*" "((([Pt]?)|[Tr])?)((Hq)*)" "[HOXcfgikosvwxz]" "[BCEFGHNPTUWfjlprsy]" "((((aD)*)|([Xo]+))+)(([HKn](([Eq]|[JQ])(I*)))*)" "([LNWYeghv]|e)*" "(((y(L*))*)|((([EP]+)(W+))*))*" "U*" "((((R+)(W|[Qr]))|([py]+))+)([LM]*)" "(([DOjx](D(b?)))|([Ke]*))*" "((([ls](c|[FT]))*)([JS]*))*" "((l?)|(([Gz]+)|(D*)))*" "[ABgjn]" "(((q|[dg])?)|([Uk]*))((([Fl]?)|([Ry]+))|(([IR]|c)|(T?)))" "((([an]|P)|[Jw])((a*)|(m*)))*" "((((R[ht])(h+))?)|(([pz](n?))+))+" "(((([Dc]b)([Sp][Ii]))|((k|F)*))|[Uiovz])*" "[Res]*" "[Zl]|a" "^[ANZdf]$" "[En]|(((Q+)(U+))([pt]*))" "[ADEIMQUWXZhklrsvz]" "(((S(y*))*)|(j*))*" "n*" "[NUau]*" "((((Z*)(D|[Nd]))|(([np]|B)+))|(([Xy][Fi])*))+" "((([EZ]?)|(d[HR]))*)((([Hg]|q)(P+))*)" "q" "((m*)|(p|B))|((((x?)|(t+))(([Sb][PX])(O|[HM])))+)" "((((A*)(z[RS]))*)|(((z+)(Q*))+))*" "(((M*)([Uu]*))+)|[Uk]" "[imv]" "[GLSchtw](([Yw]((F[Dd])|([Tw]+)))?)" "([MOZj]*)(S|[Wknr])" "((G|q)*)[BHKN]" "((((NW)|([Ao]?))|((l|[UV])+))+)|((i|(z*))*)" "((((Z+)|([IR]?))|(L*))|([JKQ]+))+" "([Bdin](S*))+" "[HLNSTp]*" "(((J*)([Bq]|[Yu]))*)|([Kv]*)" "(((([BJ]|[Zy])(wI))*)(y*))+" "(((hF)+)|(H*))*" "((([QU][Pj])([GQ]?))+)|[PWo]" "(((([cq][BX])?)|((f[DI])*))*)(([GM]*)[SVYr])" "(([Zt]*)|((qx)|(([BV]+)(f?))))*" "[ILWYhsx]*" "(([Uy]*)|[sv])|([NSc]*)" "((c*)|([JUfhy]?))+" "(((q*)([So]*))(((g[jq])(j?))+))*" "((b+)|(((T+)([fw]T))?))*" "((([DS]?)|([Th]|u))(Q*))*" "[FKLX]|((([fw](L?))(([gq]*)|(O?)))?)" "((([HZ]+)u)*)|[APWijn]" "(e*)|(((v?)|((J+)(Hb)))?)" "(e|((w+)f))*" "[BEHKPQVdelnqy]" "((((B|N)(s*))|[Rr])(((g?)|([rv]+))+))+" "(((s*)|(K*))([AP]G))*" "[CELTp]" "(([Fq]?)|([Al]+))*" "((((r?)|(y[jx]))|([mp]*))+)|((B(S*))*)" "((([Eq]+)|(Y[ds]))|(x|(i|[Ku])))[IJNrvy]" "((([NO]*)[Ix])+)([Jenq]+)" "(((([HP]*)(j|y))*)[Ylqvy])*" "[PTv]+" "[AINSZhpx]|([EOYZ]*)" "([ABCFQv]*)((([Zx]|h)+)|([ej]*))" "((([pr]*)|(([Dq]|p)|(H?)))?)([NRUXmoq]*)" "(([er]*)|([mx]*))(((nV)([am]?))+)" "[BHPRlpu]" "(((([Ah]|[tx])|(e|[uy]))?)((([fl]+)([Vz]|v))*))*" "[AGdm]" "(((K*)^(O*)$)|(B?))*" "((([Ks]|[Ka])*)|([FSTab]?))?" 
"(([kw]+)[ei])(([Hy]*)(([Mc]*)|(G|f)))" "((((e*)|(Zf))|(R|[nq]))((([Jz]v)([Rj]+))+))*" "(((a?)|(e?))(([Uc]*)(S+)))*" "((((E+)([MZ]?))+)|(((s|[Az])|z)*))?" "((((i[MO])*)|((LH)*))|(((BA)|([AI]+))|[Ug]))*" "[EGHILcho]*" "(((Z[vw])?)((z|g)+))(((H|U)([iv]Q))|([qw]?))" "(([ehmr]|((L[Uw])*))+)((a+)I)" "[EKNSWYagj](((v|[TX])|([Uk]+))*)" "(((R[Mo])|(O*))|([Fm]|([qw]*)))((m*)|((S|[Ki])?))" "((((kP)|c)?)((([do]+)|([Gi]?))*))*" "((^(B|W)$|([Ww]+))([no]*))|((([iv]?)|(M*))|((x|L)?))" "[AEGPRSbcfhsy]" "[Wbcf]|((([MO]?)|([NT]|m))(([Oo]?)([Wg]*)))" "(((YZ)*)[PQVei])*" "[GJKYt][AEGWdegmnt]" "^[CDEGJKNUVYZagkv]$" "([DPWbx]*)|(((q|B)|(P|u))((M[Bq])*))" "[FHIJRTVYZdiorsuvz]*" "([MWoqvz]*)|^(l*)" "(((I|[Rx])*)((X[Mf])([Xa]L)))([Ha]|([HY]*))" "(((l|[Sd])*)((([Ix]+)|([XY]?))(Z*)))+" LVM2.2.02.176/unit-tests/regex/nonprint_regexes0000644000000000000120000000002613176752421020067 0ustar rootwheel"foo€bar" "fooÂb" "€" LVM2.2.02.176/unit-tests/regex/matcher_t.expected20000644000000000000120000000002613176752421020326 0ustar rootwheelfingerprint: eed8ceb8 LVM2.2.02.176/unit-tests/regex/matcher_t.expected0000644000000000000120000000063413176752421020251 0ustar rootwheelfingerprint: 352b6c4f /dev/loop/0 : loop/[0-9]+ /dev/loop/1 : loop/[0-9]+ /dev/loop/2 : loop/[0-9]+ /dev/loop/3 : loop/[0-9]+ /dev/loop/4 : loop/[0-9]+ /dev/loop/5 : loop/[0-9]+ /dev/loop/6 : loop/[0-9]+ /dev/loop/7 : loop/[0-9]+ /dev/hda1 : hd[a-d][0-5]+ /dev/hda2 : hd[a-d][0-5]+ /dev/hda3 : hd[a-d][0-5]+ /dev/hda4 : hd[a-d][0-5]+ /dev/hda5 : hd[a-d][0-5]+ /dev/hdb1 : hd[a-d][0-5]+ /dev/hdc1 : hd[a-d][0-5]+ LVM2.2.02.176/unit-tests/regex/dev_patterns0000644000000000000120000000003613176752421017175 0ustar rootwheel"loop/[0-9]+" "hd[a-d][0-5]+" LVM2.2.02.176/unit-tests/regex/matcher_t.expected30000644000000000000120000000004013176752421020323 0ustar rootwheelfoo€bar : € fooÂb : fooÂb € : € LVM2.2.02.176/unit-tests/regex/nonprint_input0000644000000000000120000000003013176752421017557 0ustar rootwheelfoo.bar foo€bar fooÂb € LVM2.2.02.176/unit-tests/regex/parse_t.c0000644000000000000120000000426313176752421016363 0ustar rootwheel/* * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved. * Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* hack - using unexported internal function */ #define DEBUG #include "regex/parse_rx.c" #include #include static void _pretty_print(struct rx_node *rx, int depth) { int i; for (i = 0; i < depth; i++) printf(" "); /* display info about the node */ switch (rx->type) { case CAT: printf("Cat"); break; case OR: printf("Or"); break; case STAR: printf("Star"); break; case PLUS: printf("Plus"); break; case QUEST: printf("Quest"); break; case CHARSET: printf("Charset : "); for (i = 0; i < 256; i++) { if (dm_bit(rx->charset, i) && isprint(i)) printf("%c", (char) i); } break; default: printf("Unknown type"); } printf("\n"); if (rx->left) _pretty_print(rx->left, depth + 1); if (rx->right) _pretty_print(rx->right, depth + 1); } int main(int argc, char **argv) { struct dm_pool *mem; struct rx_node *rx; int regex_print = 0; int show_nodes = 0; int regex_arg = 1; if (argc == 3 && !strcmp(argv[1], "-r")) { regex_print++; regex_arg++; argc--; } if (argc == 3 && !strcmp(argv[1], "-R")) { regex_print++; show_nodes++; regex_arg++; argc--; } if (argc != 2) { fprintf(stderr, "Usage : %s [-r] \n", argv[0]); exit(0); } dm_log_init_verbose(_LOG_DEBUG); if (!(mem = dm_pool_create("parse_regex", 1024))) { fprintf(stderr, "Couldn't create pool\n"); exit(1); } if (!(rx = rx_parse_str(mem, argv[regex_arg]))) { dm_pool_destroy(mem); fprintf(stderr, "Couldn't parse regex\n"); exit(1); } if (regex_print) _regex_print(rx, 0, show_nodes); else _pretty_print(rx, 0); dm_pool_destroy(mem); return 0; } LVM2.2.02.176/unit-tests/regex/matcher_t.c0000644000000000000120000000626213176752421016675 0ustar rootwheel/* * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved. * Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libdevmapper.h" #include "log.h" #include #include #include #include #include #include #include #include static int _read_spec(const char *file, char ***regex, int *nregex) { char buffer[1024], *start, *ptr; FILE *fp = fopen(file, "r"); int asize = 100; char **rx = dm_malloc(sizeof(*rx) * asize); int nr = 0; if (!fp) return 0; while (fgets(buffer, sizeof(buffer),fp)) { /* trim leading whitespace */ for (ptr = buffer; *ptr && isspace((int) *ptr); ptr++); if (!*ptr || *ptr == '#') continue; if (*ptr == '\"') { ptr++; start = ptr; while (*ptr && *ptr != '\"') { if (*ptr == '\\') ptr++; ptr++; } if (!*ptr) { fprintf(stderr, "Formatting error : " "No terminating quote\n"); return 0; } rx[nr] = dm_malloc((ptr - start) + 1); strncpy(rx[nr], start, ptr - start); rx[nr][ptr - start] = '\0'; nr++; } else { fprintf(stderr, "%s", ptr); fprintf(stderr, "Formatting error : \"\" " "\n"); return 0; } } *regex = rx; *nregex = nr; return 1; } static void _free_regex(char **regex, int nregex) { int i; for (i = 0; i < nregex; i++) dm_free(regex[i]); dm_free(regex); } static void _scan_input(struct dm_regex *m, char **regex) { char buffer[256], *ptr; int r; while (fgets(buffer, sizeof(buffer), stdin)) { if ((ptr = strchr(buffer, '\n'))) *ptr = '\0'; r = dm_regex_match(m, buffer); if (r >= 0) printf("%s : %s\n", buffer, regex[r]); } } int main(int argc, char **argv) { struct dm_pool *mem; struct dm_regex *scanner; char **regex; int nregex; int ret = 0; int want_finger_print = 0, i; const char *pattern_file = NULL; for (i = 1; i < argc; i++) if (!strcmp(argv[i], "--fingerprint")) want_finger_print = 1; else pattern_file = argv[i]; if (!pattern_file) { fprintf(stderr, "Usage : %s [--fingerprint] \n", argv[0]); exit(1); } dm_log_init_verbose(_LOG_DEBUG); if (!(mem = dm_pool_create("match_regex", 10 * 1024))) { fprintf(stderr, "Couldn't create pool\n"); ret = 2; goto err; } if (!_read_spec(pattern_file, ®ex, &nregex)) { fprintf(stderr, "Couldn't read the lex specification\n"); ret = 3; goto err; } if (!(scanner = dm_regex_create(mem, (const char **)regex, nregex))) { fprintf(stderr, "Couldn't build the lexer\n"); ret = 4; goto err; } if (want_finger_print) printf("fingerprint: %x\n", dm_regex_fingerprint(scanner)); _scan_input(scanner, regex); _free_regex(regex, nregex); err: dm_pool_destroy(mem); return ret; } LVM2.2.02.176/unit-tests/datastruct/0000755000000000000120000000000013176752421015621 5ustar rootwheelLVM2.2.02.176/unit-tests/datastruct/Makefile.in0000644000000000000120000000162513176752421017672 0ustar rootwheel# # Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved. # Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ SOURCES=\ bitset_t.c TARGETS=\ bitset_t include $(top_builddir)/make.tmpl INCLUDES += -I$(top_srcdir)/libdm DM_DEPS = $(top_builddir)/libdm/libdevmapper.so DM_LIBS = -ldevmapper $(LIBS) bitset_t: bitset_t.o $(DM_DEPS) $(CC) $(CFLAGS) $(LDFLAGS) -o $@ bitset_t.o $(DM_LIBS) LVM2.2.02.176/unit-tests/datastruct/TESTS0000644000000000000120000000004613176752421016446 0ustar rootwheelbitset iteration:$TEST_TOOL ./bitset_tLVM2.2.02.176/unit-tests/datastruct/bitset_t.c0000644000000000000120000000674713176752421017620 0ustar rootwheel/* * Copyright (C) 2010 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libdevmapper.h" #include enum { NR_BITS = 137 }; static void test_get_next(struct dm_pool *mem) { int i, j, last, first; dm_bitset_t bs = dm_bitset_create(mem, NR_BITS); for (i = 0; i < NR_BITS; i++) assert(!dm_bit(bs, i)); for (i = 0, j = 1; i < NR_BITS; i += j, j++) dm_bit_set(bs, i); first = 1; for (i = 0, j = 1; i < NR_BITS; i += j, j++) { if (first) { last = dm_bit_get_first(bs); first = 0; } else last = dm_bit_get_next(bs, last); assert(last == i); } assert(dm_bit_get_next(bs, last) == -1); } static void bit_flip(dm_bitset_t bs, int bit) { int old = dm_bit(bs, bit); if (old) dm_bit_clear(bs, bit); else dm_bit_set(bs, bit); } static void test_equal(struct dm_pool *mem) { dm_bitset_t bs1 = dm_bitset_create(mem, NR_BITS); dm_bitset_t bs2 = dm_bitset_create(mem, NR_BITS); int i, j; for (i = 0, j = 1; i < NR_BITS; i += j, j++) { dm_bit_set(bs1, i); dm_bit_set(bs2, i); } assert(dm_bitset_equal(bs1, bs2)); assert(dm_bitset_equal(bs2, bs1)); for (i = 0; i < NR_BITS; i++) { bit_flip(bs1, i); assert(!dm_bitset_equal(bs1, bs2)); assert(!dm_bitset_equal(bs2, bs1)); assert(dm_bitset_equal(bs1, bs1)); /* comparing with self */ bit_flip(bs1, i); } } static void test_and(struct dm_pool *mem) { dm_bitset_t bs1 = dm_bitset_create(mem, NR_BITS); dm_bitset_t bs2 = dm_bitset_create(mem, NR_BITS); dm_bitset_t bs3 = dm_bitset_create(mem, NR_BITS); int i, j; for (i = 0, j = 1; i < NR_BITS; i += j, j++) { dm_bit_set(bs1, i); dm_bit_set(bs2, i); } dm_bit_and(bs3, bs1, bs2); assert(dm_bitset_equal(bs1, bs2)); assert(dm_bitset_equal(bs1, bs3)); assert(dm_bitset_equal(bs2, bs3)); dm_bit_clear_all(bs1); dm_bit_clear_all(bs2); for (i = 0; i < NR_BITS; i++) { if (i % 2) dm_bit_set(bs1, i); else dm_bit_set(bs2, i); } dm_bit_and(bs3, bs1, bs2); for (i = 0; i < NR_BITS; i++) assert(!dm_bit(bs3, i)); } int main(int argc, char **argv) { typedef void (*test_fn)(struct dm_pool *); static test_fn tests[] = { test_get_next, test_equal, test_and }; int i; for (i = 0; i < DM_ARRAY_SIZE(tests); ++i) { struct dm_pool *mem = dm_pool_create("bitset test", 1024); assert(mem); tests[i](mem); dm_pool_destroy(mem); } return 0; } LVM2.2.02.176/aclocal.m40000644000000000000120000004613313176752421013173 0ustar rootwheel# generated automatically 
by aclocal 1.15 -*- Autoconf -*- # Copyright (C) 1996-2014 Free Software Foundation, Inc. # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])]) # =========================================================================== # http://www.gnu.org/software/autoconf-archive/ax_python_module.html # =========================================================================== # # SYNOPSIS # # AX_PYTHON_MODULE(modname[, fatal, python]) # # DESCRIPTION # # Checks for Python module. # # If fatal is non-empty then absence of a module will trigger an error. # The third parameter can either be "python" for Python 2 or "python3" for # Python 3; defaults to Python 3. # # LICENSE # # Copyright (c) 2008 Andrew Collier # # Copying and distribution of this file, with or without modification, are # permitted in any medium without royalty provided the copyright notice # and this notice are preserved. This file is offered as-is, without any # warranty. #serial 8 AU_ALIAS([AC_PYTHON_MODULE], [AX_PYTHON_MODULE]) AC_DEFUN([AX_PYTHON_MODULE],[ if test -z $PYTHON; then if test -z "$3"; then PYTHON="python3" else PYTHON="$3" fi fi PYTHON_NAME=`basename $PYTHON` AC_MSG_CHECKING($PYTHON_NAME module: $1) $PYTHON -c "import $1" 2>/dev/null if test $? -eq 0; then AC_MSG_RESULT(yes) eval AS_TR_CPP(HAVE_PYMOD_$1)=yes else AC_MSG_RESULT(no) eval AS_TR_CPP(HAVE_PYMOD_$1)=no # if test -n "$2" then AC_MSG_ERROR(failed to find required module $1) exit 1 fi fi ]) # pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*- # serial 1 (pkg-config-0.24) # # Copyright © 2004 Scott James Remnant . # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. 
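# The macros below are normally invoked from configure.ac along these
# lines (an illustrative sketch only; "foo" and the version numbers are
# placeholders, not modules used by this tree):
#
#   PKG_PROG_PKG_CONFIG([0.24])
#   PKG_CHECK_MODULES([FOO], [foo >= 1.0],
#     [AC_DEFINE([HAVE_FOO], [1], [Define to 1 if foo is available.])],
#     [AC_MSG_WARN([foo not found; building without foo support])])
#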
# PKG_PROG_PKG_CONFIG([MIN-VERSION]) # ---------------------------------- AC_DEFUN([PKG_PROG_PKG_CONFIG], [m4_pattern_forbid([^_?PKG_[A-Z_]+$]) m4_pattern_allow([^PKG_CONFIG(_(PATH|LIBDIR|SYSROOT_DIR|ALLOW_SYSTEM_(CFLAGS|LIBS)))?$]) m4_pattern_allow([^PKG_CONFIG_(DISABLE_UNINSTALLED|TOP_BUILD_DIR|DEBUG_SPEW)$]) AC_ARG_VAR([PKG_CONFIG], [path to pkg-config utility]) AC_ARG_VAR([PKG_CONFIG_PATH], [directories to add to pkg-config's search path]) AC_ARG_VAR([PKG_CONFIG_LIBDIR], [path overriding pkg-config's built-in search path]) if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then AC_PATH_TOOL([PKG_CONFIG], [pkg-config]) fi if test -n "$PKG_CONFIG"; then _pkg_min_version=m4_default([$1], [0.9.0]) AC_MSG_CHECKING([pkg-config is at least version $_pkg_min_version]) if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) PKG_CONFIG="" fi fi[]dnl ])# PKG_PROG_PKG_CONFIG # PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) # # Check to see whether a particular set of modules exists. Similar # to PKG_CHECK_MODULES(), but does not set variables or print errors. # # Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG]) # only at the first occurence in configure.ac, so if the first place # it's called might be skipped (such as if it is within an "if", you # have to call PKG_CHECK_EXISTS manually # -------------------------------------------------------------- AC_DEFUN([PKG_CHECK_EXISTS], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl if test -n "$PKG_CONFIG" && \ AC_RUN_LOG([$PKG_CONFIG --exists --print-errors "$1"]); then m4_default([$2], [:]) m4_ifvaln([$3], [else $3])dnl fi]) # _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES]) # --------------------------------------------- m4_define([_PKG_CONFIG], [if test -n "$$1"; then pkg_cv_[]$1="$$1" elif test -n "$PKG_CONFIG"; then PKG_CHECK_EXISTS([$3], [pkg_cv_[]$1=`$PKG_CONFIG --[]$2 "$3" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes ], [pkg_failed=yes]) else pkg_failed=untried fi[]dnl ])# _PKG_CONFIG # _PKG_SHORT_ERRORS_SUPPORTED # ----------------------------- AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED], [AC_REQUIRE([PKG_PROG_PKG_CONFIG]) if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi[]dnl ])# _PKG_SHORT_ERRORS_SUPPORTED # PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND], # [ACTION-IF-NOT-FOUND]) # # # Note that if there is a possibility the first call to # PKG_CHECK_MODULES might not happen, you should be sure to include an # explicit call to PKG_PROG_PKG_CONFIG in your configure.ac # # # -------------------------------------------------------------- AC_DEFUN([PKG_CHECK_MODULES], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl AC_ARG_VAR([$1][_LIBS], [linker flags for $1, overriding pkg-config])dnl pkg_failed=no AC_MSG_CHECKING([for $1]) _PKG_CONFIG([$1][_CFLAGS], [cflags], [$2]) _PKG_CONFIG([$1][_LIBS], [libs], [$2]) m4_define([_PKG_TEXT], [Alternatively, you may set the environment variables $1[]_CFLAGS and $1[]_LIBS to avoid the need to call pkg-config. 
See the pkg-config man page for more details.]) if test $pkg_failed = yes; then AC_MSG_RESULT([no]) _PKG_SHORT_ERRORS_SUPPORTED if test $_pkg_short_errors_supported = yes; then $1[]_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$2" 2>&1` else $1[]_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$2" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$$1[]_PKG_ERRORS" >&AS_MESSAGE_LOG_FD m4_default([$4], [AC_MSG_ERROR( [Package requirements ($2) were not met: $$1_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. _PKG_TEXT])[]dnl ]) elif test $pkg_failed = untried; then AC_MSG_RESULT([no]) m4_default([$4], [AC_MSG_FAILURE( [The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. _PKG_TEXT To get pkg-config, see .])[]dnl ]) else $1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS $1[]_LIBS=$pkg_cv_[]$1[]_LIBS AC_MSG_RESULT([yes]) $3 fi[]dnl ])# PKG_CHECK_MODULES # PKG_INSTALLDIR(DIRECTORY) # ------------------------- # Substitutes the variable pkgconfigdir as the location where a module # should install pkg-config .pc files. By default the directory is # $libdir/pkgconfig, but the default can be changed by passing # DIRECTORY. The user can override through the --with-pkgconfigdir # parameter. AC_DEFUN([PKG_INSTALLDIR], [m4_pushdef([pkg_default], [m4_default([$1], ['${libdir}/pkgconfig'])]) m4_pushdef([pkg_description], [pkg-config installation directory @<:@]pkg_default[@:>@]) AC_ARG_WITH([pkgconfigdir], [AS_HELP_STRING([--with-pkgconfigdir], pkg_description)],, [with_pkgconfigdir=]pkg_default) AC_SUBST([pkgconfigdir], [$with_pkgconfigdir]) m4_popdef([pkg_default]) m4_popdef([pkg_description]) ]) dnl PKG_INSTALLDIR # PKG_NOARCH_INSTALLDIR(DIRECTORY) # ------------------------- # Substitutes the variable noarch_pkgconfigdir as the location where a # module should install arch-independent pkg-config .pc files. By # default the directory is $datadir/pkgconfig, but the default can be # changed by passing DIRECTORY. The user can override through the # --with-noarch-pkgconfigdir parameter. AC_DEFUN([PKG_NOARCH_INSTALLDIR], [m4_pushdef([pkg_default], [m4_default([$1], ['${datadir}/pkgconfig'])]) m4_pushdef([pkg_description], [pkg-config arch-independent installation directory @<:@]pkg_default[@:>@]) AC_ARG_WITH([noarch-pkgconfigdir], [AS_HELP_STRING([--with-noarch-pkgconfigdir], pkg_description)],, [with_noarch_pkgconfigdir=]pkg_default) AC_SUBST([noarch_pkgconfigdir], [$with_noarch_pkgconfigdir]) m4_popdef([pkg_default]) m4_popdef([pkg_description]) ]) dnl PKG_NOARCH_INSTALLDIR # PKG_CHECK_VAR(VARIABLE, MODULE, CONFIG-VARIABLE, # [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) # ------------------------------------------- # Retrieves the value of the pkg-config variable for the given module. AC_DEFUN([PKG_CHECK_VAR], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl AC_ARG_VAR([$1], [value of $3 for $2, overriding pkg-config])dnl _PKG_CONFIG([$1], [variable="][$3]["], [$2]) AS_VAR_COPY([$1], [pkg_cv_][$1]) AS_VAR_IF([$1], [""], [$5], [$4])dnl ])# PKG_CHECK_VAR # Copyright (C) 1999-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# AM_PATH_PYTHON([MINIMUM-VERSION], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) # --------------------------------------------------------------------------- # Adds support for distributing Python modules and packages. To # install modules, copy them to $(pythondir), using the python_PYTHON # automake variable. To install a package with the same name as the # automake package, install to $(pkgpythondir), or use the # pkgpython_PYTHON automake variable. # # The variables $(pyexecdir) and $(pkgpyexecdir) are provided as # locations to install python extension modules (shared libraries). # Another macro is required to find the appropriate flags to compile # extension modules. # # If your package is configured with a different prefix to python, # users will have to add the install directory to the PYTHONPATH # environment variable, or create a .pth file (see the python # documentation for details). # # If the MINIMUM-VERSION argument is passed, AM_PATH_PYTHON will # cause an error if the version of python installed on the system # doesn't meet the requirement. MINIMUM-VERSION should consist of # numbers and dots only. AC_DEFUN([AM_PATH_PYTHON], [ dnl Find a Python interpreter. Python versions prior to 2.0 are not dnl supported. (2.0 was released on October 16, 2000). m4_define_default([_AM_PYTHON_INTERPRETER_LIST], [python python2 python3 python3.3 python3.2 python3.1 python3.0 python2.7 dnl python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 python2.0]) AC_ARG_VAR([PYTHON], [the Python interpreter]) m4_if([$1],[],[ dnl No version check is needed. # Find any Python interpreter. if test -z "$PYTHON"; then AC_PATH_PROGS([PYTHON], _AM_PYTHON_INTERPRETER_LIST, :) fi am_display_PYTHON=python ], [ dnl A version check is needed. if test -n "$PYTHON"; then # If the user set $PYTHON, use it and don't search something else. AC_MSG_CHECKING([whether $PYTHON version is >= $1]) AM_PYTHON_CHECK_VERSION([$PYTHON], [$1], [AC_MSG_RESULT([yes])], [AC_MSG_RESULT([no]) AC_MSG_ERROR([Python interpreter is too old])]) am_display_PYTHON=$PYTHON else # Otherwise, try each interpreter until we find one that satisfies # VERSION. AC_CACHE_CHECK([for a Python interpreter with version >= $1], [am_cv_pathless_PYTHON],[ for am_cv_pathless_PYTHON in _AM_PYTHON_INTERPRETER_LIST none; do test "$am_cv_pathless_PYTHON" = none && break AM_PYTHON_CHECK_VERSION([$am_cv_pathless_PYTHON], [$1], [break]) done]) # Set $PYTHON to the absolute path of $am_cv_pathless_PYTHON. if test "$am_cv_pathless_PYTHON" = none; then PYTHON=: else AC_PATH_PROG([PYTHON], [$am_cv_pathless_PYTHON]) fi am_display_PYTHON=$am_cv_pathless_PYTHON fi ]) if test "$PYTHON" = :; then dnl Run any user-specified action, or abort. m4_default([$3], [AC_MSG_ERROR([no suitable Python interpreter found])]) else dnl Query Python for its version number. Getting [:3] seems to be dnl the best way to do this; it's what "site.py" does in the standard dnl library. AC_CACHE_CHECK([for $am_display_PYTHON version], [am_cv_python_version], [am_cv_python_version=`$PYTHON -c "import sys; sys.stdout.write(sys.version[[:3]])"`]) AC_SUBST([PYTHON_VERSION], [$am_cv_python_version]) dnl Use the values of $prefix and $exec_prefix for the corresponding dnl values of PYTHON_PREFIX and PYTHON_EXEC_PREFIX. These are made dnl distinct variables so they can be overridden if need be. However, dnl general consensus is that you shouldn't need this ability. 
AC_SUBST([PYTHON_PREFIX], ['${prefix}']) AC_SUBST([PYTHON_EXEC_PREFIX], ['${exec_prefix}']) dnl At times (like when building shared libraries) you may want dnl to know which OS platform Python thinks this is. AC_CACHE_CHECK([for $am_display_PYTHON platform], [am_cv_python_platform], [am_cv_python_platform=`$PYTHON -c "import sys; sys.stdout.write(sys.platform)"`]) AC_SUBST([PYTHON_PLATFORM], [$am_cv_python_platform]) # Just factor out some code duplication. am_python_setup_sysconfig="\ import sys # Prefer sysconfig over distutils.sysconfig, for better compatibility # with python 3.x. See automake bug#10227. try: import sysconfig except ImportError: can_use_sysconfig = 0 else: can_use_sysconfig = 1 # Can't use sysconfig in CPython 2.7, since it's broken in virtualenvs: # try: from platform import python_implementation if python_implementation() == 'CPython' and sys.version[[:3]] == '2.7': can_use_sysconfig = 0 except ImportError: pass" dnl Set up 4 directories: dnl pythondir -- where to install python scripts. This is the dnl site-packages directory, not the python standard library dnl directory like in previous automake betas. This behavior dnl is more consistent with lispdir.m4 for example. dnl Query distutils for this directory. AC_CACHE_CHECK([for $am_display_PYTHON script directory], [am_cv_python_pythondir], [if test "x$prefix" = xNONE then am_py_prefix=$ac_default_prefix else am_py_prefix=$prefix fi am_cv_python_pythondir=`$PYTHON -c " $am_python_setup_sysconfig if can_use_sysconfig: sitedir = sysconfig.get_path('purelib', vars={'base':'$am_py_prefix'}) else: from distutils import sysconfig sitedir = sysconfig.get_python_lib(0, 0, prefix='$am_py_prefix') sys.stdout.write(sitedir)"` case $am_cv_python_pythondir in $am_py_prefix*) am__strip_prefix=`echo "$am_py_prefix" | sed 's|.|.|g'` am_cv_python_pythondir=`echo "$am_cv_python_pythondir" | sed "s,^$am__strip_prefix,$PYTHON_PREFIX,"` ;; *) case $am_py_prefix in /usr|/System*) ;; *) am_cv_python_pythondir=$PYTHON_PREFIX/lib/python$PYTHON_VERSION/site-packages ;; esac ;; esac ]) AC_SUBST([pythondir], [$am_cv_python_pythondir]) dnl pkgpythondir -- $PACKAGE directory under pythondir. Was dnl PYTHON_SITE_PACKAGE in previous betas, but this naming is dnl more consistent with the rest of automake. AC_SUBST([pkgpythondir], [\${pythondir}/$PACKAGE]) dnl pyexecdir -- directory for installing python extension modules dnl (shared libraries) dnl Query distutils for this directory. AC_CACHE_CHECK([for $am_display_PYTHON extension module directory], [am_cv_python_pyexecdir], [if test "x$exec_prefix" = xNONE then am_py_exec_prefix=$am_py_prefix else am_py_exec_prefix=$exec_prefix fi am_cv_python_pyexecdir=`$PYTHON -c " $am_python_setup_sysconfig if can_use_sysconfig: sitedir = sysconfig.get_path('platlib', vars={'platbase':'$am_py_prefix'}) else: from distutils import sysconfig sitedir = sysconfig.get_python_lib(1, 0, prefix='$am_py_prefix') sys.stdout.write(sitedir)"` case $am_cv_python_pyexecdir in $am_py_exec_prefix*) am__strip_prefix=`echo "$am_py_exec_prefix" | sed 's|.|.|g'` am_cv_python_pyexecdir=`echo "$am_cv_python_pyexecdir" | sed "s,^$am__strip_prefix,$PYTHON_EXEC_PREFIX,"` ;; *) case $am_py_exec_prefix in /usr|/System*) ;; *) am_cv_python_pyexecdir=$PYTHON_EXEC_PREFIX/lib/python$PYTHON_VERSION/site-packages ;; esac ;; esac ]) AC_SUBST([pyexecdir], [$am_cv_python_pyexecdir]) dnl pkgpyexecdir -- $(pyexecdir)/$(PACKAGE) AC_SUBST([pkgpyexecdir], [\${pyexecdir}/$PACKAGE]) dnl Run any user-specified action. 
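dnl The caller's ACTION-IF-FOUND argument, if one was supplied, is
dnl expanded next.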
$2 fi ]) # AM_PYTHON_CHECK_VERSION(PROG, VERSION, [ACTION-IF-TRUE], [ACTION-IF-FALSE]) # --------------------------------------------------------------------------- # Run ACTION-IF-TRUE if the Python interpreter PROG has version >= VERSION. # Run ACTION-IF-FALSE otherwise. # This test uses sys.hexversion instead of the string equivalent (first # word of sys.version), in order to cope with versions such as 2.2c1. # This supports Python 2.0 or higher. (2.0 was released on October 16, 2000). AC_DEFUN([AM_PYTHON_CHECK_VERSION], [prog="import sys # split strings by '.' and convert to numeric. Append some zeros # because we need at least 4 digits for the hex conversion. # map returns an iterator in Python 3.0 and a list in 2.x minver = list(map(int, '$2'.split('.'))) + [[0, 0, 0]] minverhex = 0 # xrange is not present in Python 3.0 and range returns an iterator for i in list(range(0, 4)): minverhex = (minverhex << 8) + minver[[i]] sys.exit(sys.hexversion < minverhex)" AS_IF([AM_RUN_LOG([$1 -c "$prog"])], [$3], [$4])]) # Copyright (C) 2001-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_RUN_LOG(COMMAND) # ------------------- # Run COMMAND, save the exit status in ac_status, and log it. # (This has been adapted from Autoconf's _AC_RUN_LOG macro.) AC_DEFUN([AM_RUN_LOG], [{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD (exit $ac_status); }]) m4_include([acinclude.m4]) LVM2.2.02.176/daemons/0000755000000000000120000000000013176752421012752 5ustar rootwheelLVM2.2.02.176/daemons/dmfilemapd/0000755000000000000120000000000013176752421015054 5ustar rootwheelLVM2.2.02.176/daemons/dmfilemapd/Makefile.in0000644000000000000120000000367413176752421017133 0ustar rootwheel# # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This file is part of the device-mapper userspace tools. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU Lesser General Public License v.2.1. 
# # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ SOURCES = dmfilemapd.c TARGETS = dmfilemapd .PHONY: install_dmfilemapd install_dmfilemapd_static INSTALL_DMFILEMAPD_TARGETS = install_dmfilemapd_dynamic CLEAN_TARGETS = dmfilemapd.static CFLOW_LIST = $(SOURCES) CFLOW_LIST_TARGET = $(LIB_NAME).cflow CFLOW_TARGET = dmfilemapd include $(top_builddir)/make.tmpl all: device-mapper device-mapper: $(TARGETS) CFLAGS_dmfilemapd.o += $(EXTRA_EXEC_CFLAGS) LIBS += -ldevmapper dmfilemapd: $(LIB_SHARED) dmfilemapd.o $(CC) $(CFLAGS) $(LDFLAGS) $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS) \ -o $@ dmfilemapd.o $(DL_LIBS) $(LIBS) dmfilemapd.static: $(LIB_STATIC) dmfilemapd.o $(interfacebuilddir)/libdevmapper.a $(CC) $(CFLAGS) $(LDFLAGS) $(ELDFLAGS) -static -L$(interfacebuilddir) \ -o $@ dmfilemapd.o $(DL_LIBS) $(LIBS) $(STATIC_LIBS) ifneq ("$(CFLOW_CMD)", "") CFLOW_SOURCES = $(addprefix $(srcdir)/, $(SOURCES)) -include $(top_builddir)/libdm/libdevmapper.cflow -include $(top_builddir)/lib/liblvm-internal.cflow -include $(top_builddir)/lib/liblvm2cmd.cflow -include $(top_builddir)/daemons/dmfilemapd/$(LIB_NAME).cflow endif install_dmfilemapd_dynamic: dmfilemapd $(INSTALL_PROGRAM) -D $< $(sbindir)/$( #include #include #include #include #include #include #ifdef __linux__ # include "kdev_t.h" #else # define MAJOR(x) major((x)) # define MINOR(x) minor((x)) # define MKDEV(x,y) makedev((x),(y)) #endif /* limit to two updates/sec */ #define FILEMAPD_WAIT_USECS 500000 /* how long to wait for unlinked files */ #define FILEMAPD_NOFILE_WAIT_USECS 100000 #define FILEMAPD_NOFILE_WAIT_TRIES 10 struct filemap_monitor { dm_filemapd_mode_t mode; const char *program_id; uint64_t group_id; char *path; int fd; int inotify_fd; int inotify_watch_fd; /* monitoring heuristics */ int64_t blocks; /* allocated blocks, from stat.st_blocks */ uint64_t nr_regions; int deleted; }; static int _foreground; static int _verbose; const char *const _usage = "dmfilemapd " "[[]]"; /* * Daemon logging. By default, all messages are thrown away: messages * are only written to the terminal if the daemon is run in the foreground. */ __attribute__((format(printf, 5, 0))) static void _dmfilemapd_log_line(int level, const char *file __attribute__((unused)), int line __attribute__((unused)), int dm_errno_or_class, const char *f, va_list ap) { static int _abort_on_internal_errors = -1; FILE *out = log_stderr(level) ? stderr : stdout; level = log_level(level); if (level <= _LOG_WARN || _verbose) { if (level < _LOG_WARN) out = stderr; vfprintf(out, f, ap); fputc('\n', out); } if (_abort_on_internal_errors < 0) /* Set when env DM_ABORT_ON_INTERNAL_ERRORS is not "0" */ _abort_on_internal_errors = strcmp(getenv("DM_ABORT_ON_INTERNAL_ERRORS") ? : "0", "0"); if (_abort_on_internal_errors && !strncmp(f, INTERNAL_ERROR, sizeof(INTERNAL_ERROR) - 1)) abort(); } __attribute__((format(printf, 5, 6))) static void _dmfilemapd_log_with_errno(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) { va_list ap; va_start(ap, f); _dmfilemapd_log_line(level, file, line, dm_errno_or_class, f, ap); va_end(ap); } /* * Only used for reporting errors before daemonise(). */ __attribute__((format(printf, 1, 2))) static void _early_log(const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); vfprintf(stderr, fmt, ap); fputc('\n', stderr); va_end(ap); } static void _setup_logging(void) { dm_log_init_verbose(_verbose - 1); dm_log_with_errno_init(_dmfilemapd_log_with_errno); } #define PROC_FD_DELETED_STR "(deleted)" /* * Scan the /proc//fd directory for pid and check for an fd * symlink whose contents match path. */ static int _is_open_in_pid(pid_t pid, const char *path) { char deleted_path[PATH_MAX + sizeof(PROC_FD_DELETED_STR)]; struct dirent *pid_dp = NULL; char path_buf[PATH_MAX]; char link_buf[PATH_MAX]; DIR *pid_d = NULL; ssize_t len; if (pid == getpid()) return 0; if (dm_snprintf(path_buf, sizeof(path_buf), DEFAULT_PROC_DIR "%d/fd", pid) < 0) { log_error("Could not format pid path."); return 0; } /* * Test for the kernel 'file (deleted)' form when scanning. */ if (dm_snprintf(deleted_path, sizeof(deleted_path), "%s %s", path, PROC_FD_DELETED_STR) < 0) { log_error("Could not format check path."); return 0; } pid_d = opendir(path_buf); if (!pid_d) { log_error("Could not open proc path: %s.", path_buf); return 0; } while ((pid_dp = readdir(pid_d)) != NULL) { if (pid_dp->d_name[0] == '.') continue; if ((len = readlinkat(dirfd(pid_d), pid_dp->d_name, link_buf, sizeof(link_buf))) < 0) { log_error("readlink failed for " DEFAULT_PROC_DIR "/%d/fd/.", pid); goto bad; } link_buf[len] = '\0'; if (!strcmp(deleted_path, link_buf)) { if (closedir(pid_d)) log_sys_error("closedir", path_buf); return 1; } } bad: if (closedir(pid_d)) log_sys_error("closedir", path_buf); return 0; } /* * Attempt to determine whether a file is open by any process by * scanning symbolic links in /proc//fd. * * This is a heuristic since it cannot guarantee to detect brief * access in all cases: a process that opens and then closes the * file rapidly may never be seen by the scan. * * The method will also give false-positives if a process exists * that has a deleted file open that had the same path, but a * different inode number, to the file being monitored. * * For this reason the daemon only uses _is_open() for unlinked * files when the mode is DM_FILEMAPD_FOLLOW_INODE, since these * files can no longer be newly opened by processes. * * In this situation !is_open(path) provides an indication that * the daemon should shut down: the file has been unlinked from * the file system and we appear to hold the final reference. */ static int _is_open(const char *path) { struct dirent *proc_dp = NULL; DIR *proc_d = NULL; pid_t pid; proc_d = opendir(DEFAULT_PROC_DIR); if (!proc_d) return 0; while ((proc_dp = readdir(proc_d)) != NULL) { if (!isdigit(proc_dp->d_name[0])) continue; errno = 0; pid = (pid_t) strtol(proc_dp->d_name, NULL, 10); if (errno || !pid) continue; if (_is_open_in_pid(pid, path)) { if (closedir(proc_d)) log_sys_error("closedir", DEFAULT_PROC_DIR); return 1; } } if (closedir(proc_d)) log_sys_error("closedir", DEFAULT_PROC_DIR); return 0; } static void _filemap_monitor_wait(uint64_t usecs) { if (_verbose) { if (usecs == FILEMAPD_WAIT_USECS) log_very_verbose("Waiting for check interval"); if (usecs == FILEMAPD_NOFILE_WAIT_USECS) log_very_verbose("Waiting for unlinked path"); } usleep((useconds_t) usecs); } static int _parse_args(int argc, char **argv, struct filemap_monitor *fm) { char *endptr; /* we don't care what is in argv[0]. 
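 *
 * The remaining arguments are consumed in order: file descriptor,
 * group_id, absolute path, mode ("inode" or "path"), then the optional
 * foreground and verbose values, e.g.:
 *
 *   dmfilemapd 3 0 /tmp/datafile inode 1 3
 *
 * (the example values above are hypothetical, not taken from this source).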
*/ argc--; argv++; if (argc < 5) { _early_log("Wrong number of arguments."); _early_log("usage: %s", _usage); return 0; } /* * We don't know the true nr_regions at daemon start time, * and it is not worth a dm_stats_list()/group walk to count: * we can assume that there is at least one region or the * daemon would not have been started. * * A correct value will be obtained following the first update * of the group's regions. */ fm->nr_regions = 1; /* parse */ errno = 0; fm->fd = (int) strtol(argv[0], &endptr, 10); if (errno || *endptr) { _early_log("Could not parse file descriptor: %s", argv[0]); return 0; } argc--; argv++; /* parse */ errno = 0; fm->group_id = strtoull(argv[0], &endptr, 10); if (*endptr || errno) { _early_log("Could not parse group identifier: %s", argv[0]); return 0; } argc--; argv++; /* parse */ if (!argv[0] || !strlen(argv[0])) { _early_log("Path argument is required."); return 0; } if (*argv[0] != '/') { _early_log("Path argument must specify an absolute path."); return 0; } fm->path = dm_strdup(argv[0]); if (!fm->path) { _early_log("Could not allocate memory for path argument."); return 0; } argc--; argv++; /* parse */ if (!argv[0] || !strlen(argv[0])) { _early_log("Mode argument is required."); return 0; } fm->mode = dm_filemapd_mode_from_string(argv[0]); if (fm->mode == DM_FILEMAPD_FOLLOW_NONE) return 0; argc--; argv++; /* parse [[]] */ if (argc) { errno = 0; _foreground = (int) strtol(argv[0], &endptr, 10); if (errno || *endptr) { _early_log("Could not parse debug argument: %s.", argv[0]); return 0; } argc--; argv++; if (argc) { errno = 0; _verbose = (int) strtol(argv[0], &endptr, 10); if (errno || *endptr) { _early_log("Could not parse verbose " "argument: %s", argv[0]); return 0; } if (_verbose < 0 || _verbose > 3) { _early_log("Verbose argument out of range: %d.", _verbose); return 0; } } } return 1; } static int _filemap_fd_update_blocks(struct filemap_monitor *fm) { struct stat buf; if (fm->fd < 0) { log_error("Filemap fd is not open."); return 0; } if (fstat(fm->fd, &buf)) { log_error("Failed to fstat filemap file descriptor."); return 0; } fm->blocks = buf.st_blocks; return 1; } static int _filemap_fd_check_changed(struct filemap_monitor *fm) { int64_t old_blocks; old_blocks = fm->blocks; if (!_filemap_fd_update_blocks(fm)) return -1; return (fm->blocks != old_blocks); } static void _filemap_monitor_close_fd(struct filemap_monitor *fm) { if (close(fm->fd)) log_error("Error closing file descriptor."); fm->fd = -1; } static void _filemap_monitor_end_notify(struct filemap_monitor *fm) { inotify_rm_watch(fm->inotify_fd, fm->inotify_watch_fd); } static int _filemap_monitor_set_notify(struct filemap_monitor *fm) { int inotify_fd, watch_fd; /* * Set IN_NONBLOCK since we do not want to block in event read() * calls. Do not set IN_CLOEXEC as dmfilemapd is single-threaded * and does not fork or exec. */ if ((inotify_fd = inotify_init1(IN_NONBLOCK)) < 0) { log_sys_error("inotify_init1", "IN_NONBLOCK"); return 0; } if ((watch_fd = inotify_add_watch(inotify_fd, fm->path, IN_MODIFY | IN_DELETE_SELF)) < 0) { log_sys_error("inotify_add_watch", fm->path); return 0; } fm->inotify_fd = inotify_fd; fm->inotify_watch_fd = watch_fd; return 1; } static int _filemap_monitor_reopen_fd(struct filemap_monitor *fm) { int tries = FILEMAPD_NOFILE_WAIT_TRIES; /* * In DM_FILEMAPD_FOLLOW_PATH mode, inotify watches must be * re-established whenever the file at the watched path is * changed. * * FIXME: stat file and skip if inode is unchanged. 
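 *
 * The loop below retries open(2) on fm->path up to
 * FILEMAPD_NOFILE_WAIT_TRIES times, sleeping FILEMAPD_NOFILE_WAIT_USECS
 * between attempts, and re-arms the inotify watch once the descriptor
 * has been re-opened.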
*/ if (fm->fd > 0) log_error("Filemap file descriptor already open."); while ((fm->fd < 0) && --tries) if (((fm->fd = open(fm->path, O_RDONLY)) < 0) && tries) _filemap_monitor_wait(FILEMAPD_NOFILE_WAIT_USECS); if (!tries && (fm->fd < 0)) { log_error("Could not re-open file descriptor."); return 0; } return _filemap_monitor_set_notify(fm); } static int _filemap_monitor_get_events(struct filemap_monitor *fm) { /* alignment as per man(7) inotify */ char buf[sizeof(struct inotify_event) + NAME_MAX + 1] __attribute__ ((aligned(__alignof__(struct inotify_event)))); struct inotify_event *event; int check = 0; ssize_t len; char *ptr; /* * Close the file descriptor for the file being monitored here * when mode=path: this will allow the inode to be de-allocated, * and an IN_DELETE_SELF event generated in the case that the * daemon is holding the last open reference to the file. */ if (fm->mode == DM_FILEMAPD_FOLLOW_PATH) { _filemap_monitor_end_notify(fm); _filemap_monitor_close_fd(fm); } len = read(fm->inotify_fd, (void *) &buf, sizeof(buf)); /* no events to read? */ if (len < 0 && (errno == EAGAIN)) goto out; /* interrupted by signal? */ if (len < 0 && (errno == EINTR)) goto out; if (len < 0) return -1; if (!len) goto out; for (ptr = buf; ptr < buf + len; ptr += sizeof(*event) + event->len) { event = (struct inotify_event *) ptr; if (event->mask & IN_DELETE_SELF) fm->deleted = 1; if (event->mask & IN_MODIFY) check = 1; /* * Event IN_IGNORED is generated when a file has been deleted * and IN_DELETE_SELF generated, and indicates that the file * watch has been automatically removed. * * This can only happen for the DM_FILEMAPD_FOLLOW_PATH mode, * since inotify IN_DELETE events are generated at the time * the inode is destroyed: DM_FILEMAPD_FOLLOW_INODE will hold * the file descriptor open, meaning that the event will not * be generated until after the daemon closes the file. * * The event is ignored here since inotify monitoring will * be reestablished (or the daemon will terminate) following * deletion of a DM_FILEMAPD_FOLLOW_PATH monitored file. */ if (event->mask & IN_IGNORED) log_very_verbose("Inotify watch removed: IN_IGNORED " "in event->mask"); } out: /* * Re-open file descriptor if required and log disposition. 
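 *
 * Return value: -1 on a read error, otherwise 'check' (non-zero only if
 * an IN_MODIFY event was seen); deletion is reported separately via
 * fm->deleted.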
*/ if (fm->mode == DM_FILEMAPD_FOLLOW_PATH) if (!_filemap_monitor_reopen_fd(fm)) return -1; log_very_verbose("exiting _filemap_monitor_get_events() with " "deleted=%d, check=%d", fm->deleted, check); return check; } static void _filemap_monitor_destroy(struct filemap_monitor *fm) { if (fm->fd > 0) { _filemap_monitor_end_notify(fm); _filemap_monitor_close_fd(fm); } dm_free((void *) fm->program_id); dm_free(fm->path); } static int _filemap_monitor_check_same_file(int fd1, int fd2) { struct stat buf1, buf2; if ((fd1 < 0) || (fd2 < 0)) return 0; if (fstat(fd1, &buf1)) { log_error("Failed to fstat file descriptor %d", fd1); return -1; } if (fstat(fd2, &buf2)) { log_error("Failed to fstat file descriptor %d", fd2); return -1; } return ((buf1.st_dev == buf2.st_dev) && (buf1.st_ino == buf2.st_ino)); } static int _filemap_monitor_check_file_unlinked(struct filemap_monitor *fm) { char path_buf[PATH_MAX]; char link_buf[PATH_MAX]; int same, fd; ssize_t len; fm->deleted = 0; same = 0; if ((fd = open(fm->path, O_RDONLY)) < 0) goto check_unlinked; same = _filemap_monitor_check_same_file(fm->fd, fd); if (close(fd)) log_error("Error closing fd %d", fd); if (same < 0) return 0; if (same) return 1; check_unlinked: /* * The file has been unlinked from its original location: test * whether it is still reachable in the filesystem, or if it is * unlinked and anonymous. */ if (dm_snprintf(path_buf, sizeof(path_buf), DEFAULT_PROC_DIR "/%d/fd/%d", getpid(), fm->fd) < 0) { log_error("Could not format pid path."); return 0; } if ((len = readlink(path_buf, link_buf, sizeof(link_buf) - 1)) < 0) { log_error("readlink failed for " DEFAULT_PROC_DIR "/%d/fd/%d.", getpid(), fm->fd); return 0; } link_buf[len] = '\0'; /* * Try to re-open the file, from the path now reported in /proc/pid/fd. */ if ((fd = open(link_buf, O_RDONLY)) < 0) fm->deleted = 1; else same = _filemap_monitor_check_same_file(fm->fd, fd); if ((fd >= 0) && close(fd)) log_error("Error closing fd %d", fd); if (same < 0) return 0; /* Should not happen with normal /proc. 
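 *
 * A mismatch below would mean the link read back from /proc no longer
 * refers to the inode held open as fm->fd.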
*/ if ((fd > 0) && !same) { log_error("File descriptor mismatch: %d and %s (read from %s) " "are not the same file!", fm->fd, link_buf, path_buf); return 0; } return 1; } static int _daemonise(struct filemap_monitor *fm) { pid_t pid = 0, sid; int fd; if (!(sid = setsid())) { _early_log("setsid failed."); return 0; } if ((pid = fork()) < 0) { _early_log("Failed to fork daemon process."); return 0; } if (pid > 0) { if (_verbose) _early_log("Started dmfilemapd with pid=%d", pid); exit(0); } if (chdir("/")) { _early_log("Failed to change directory."); return 0; } if (!_verbose) { if (close(STDIN_FILENO)) _early_log("Error closing stdin"); if (close(STDOUT_FILENO)) _early_log("Error closing stdout"); if (close(STDERR_FILENO)) _early_log("Error closing stderr"); if ((open("/dev/null", O_RDONLY) < 0) || (open("/dev/null", O_WRONLY) < 0) || (open("/dev/null", O_WRONLY) < 0)) { _early_log("Error opening stdio streams."); return 0; } } /* TODO: Use libdaemon/server/daemon-server.c _daemonise() */ for (fd = (int) sysconf(_SC_OPEN_MAX) - 1; fd > STDERR_FILENO; fd--) { if (fd == fm->fd) continue; (void) close(fd); } return 1; } static int _update_regions(struct dm_stats *dms, struct filemap_monitor *fm) { uint64_t *regions = NULL, *region, nr_regions = 0; regions = dm_stats_update_regions_from_fd(dms, fm->fd, fm->group_id); if (!regions) { log_error("Failed to update filemap regions for group_id=" FMTu64 ".", fm->group_id); return 0; } for (region = regions; *region != DM_STATS_REGIONS_ALL; region++) nr_regions++; if (!nr_regions) log_warn("File contains no extents: exiting."); if (nr_regions && (regions[0] != fm->group_id)) { log_warn("group_id changed from " FMTu64 " to " FMTu64, fm->group_id, regions[0]); fm->group_id = regions[0]; } dm_free(regions); fm->nr_regions = nr_regions; return 1; } static int _dmfilemapd(struct filemap_monitor *fm) { int running = 1, check = 0, open = 0; const char *program_id; struct dm_stats *dms; /* * The correct program_id is retrieved from the group leader * following the call to dm_stats_list(). */ if (!(dms = dm_stats_create(NULL))) goto_bad; if (!dm_stats_bind_from_fd(dms, fm->fd)) { log_error("Could not bind dm_stats handle to file descriptor " "%d", fm->fd); goto bad; } if (!_filemap_monitor_set_notify(fm)) goto bad; if (!_filemap_fd_update_blocks(fm)) goto bad; if (!dm_stats_list(dms, DM_STATS_ALL_PROGRAMS)) { log_error("Failed to list stats handle."); goto bad; } /* * Take the program_id for new regions (created by calls to * dm_stats_update_regions_from_fd()) from the value used by * the group leader. 
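 *
 * A private copy is kept in fm->program_id and the same value is set on
 * the dm_stats handle so that regions created later carry the group
 * leader's identifier.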
*/ program_id = dm_stats_get_region_program_id(dms, fm->group_id); if (program_id) fm->program_id = dm_strdup(program_id); else fm->program_id = NULL; dm_stats_set_program_id(dms, 1, program_id); do { if (!dm_stats_group_present(dms, fm->group_id)) { log_info("Filemap group removed: exiting."); running = 0; continue; } if ((check = _filemap_monitor_get_events(fm)) < 0) goto bad; if (!check) goto wait; if ((check = _filemap_fd_check_changed(fm)) < 0) goto bad; if (check && !_update_regions(dms, fm)) goto bad; running = !!fm->nr_regions; if (!running) continue; wait: _filemap_monitor_wait(FILEMAPD_WAIT_USECS); /* mode=inode termination condions */ if (fm->mode == DM_FILEMAPD_FOLLOW_INODE) { if (!_filemap_monitor_check_file_unlinked(fm)) goto bad; if (fm->deleted && !(open = _is_open(fm->path))) { log_info("File unlinked and closed: exiting."); running = 0; } else if (fm->deleted && open) log_verbose("File unlinked and open: " "continuing."); } if (!dm_stats_list(dms, NULL)) { log_error("Failed to list stats handle."); goto bad; } } while (running); _filemap_monitor_destroy(fm); dm_stats_destroy(dms); return 0; bad: _filemap_monitor_destroy(fm); dm_stats_destroy(dms); log_error("Exiting"); return 1; } static const char * _mode_names[] = { "inode", "path" }; /* * dmfilemapd [[]] */ int main(int argc, char **argv) { struct filemap_monitor fm; memset(&fm, 0, sizeof(fm)); if (!_parse_args(argc, argv, &fm)) { dm_free(fm.path); return 1; } _setup_logging(); log_info("Starting dmfilemapd with fd=%d, group_id=" FMTu64 " " "mode=%s, path=%s", fm.fd, fm.group_id, _mode_names[fm.mode], fm.path); if (!_foreground && !_daemonise(&fm)) return 1; return _dmfilemapd(&fm); } LVM2.2.02.176/daemons/Makefile.in0000644000000000000120000000260313176752421015020 0ustar rootwheel# # Copyright (C) 2004-2015 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ .PHONY: dmeventd clvmd cmirrord lvmetad lvmpolld lvmlockd ifneq ("@CLVMD@", "none") SUBDIRS += clvmd endif ifeq ("@BUILD_CMIRRORD@", "yes") SUBDIRS += cmirrord endif ifeq ("@BUILD_DMEVENTD@", "yes") SUBDIRS += dmeventd ifneq ("$(CFLOW_CMD)", "") daemons.cflow: dmeventd.cflow endif endif ifeq ("@BUILD_LVMETAD@", "yes") SUBDIRS += lvmetad endif ifeq ("@BUILD_LVMPOLLD@", "yes") SUBDIRS += lvmpolld endif ifeq ("@BUILD_LVMLOCKD@", "yes") SUBDIRS += lvmlockd endif ifeq ("@BUILD_LVMDBUSD@", "yes") SUBDIRS += lvmdbusd endif ifeq ("@BUILD_DMFILEMAPD@", "yes") SUBDIRS += dmfilemapd endif ifeq ($(MAKECMDGOALS),distclean) SUBDIRS = clvmd cmirrord dmeventd lvmetad lvmpolld lvmlockd lvmdbusd dmfilemapd endif include $(top_builddir)/make.tmpl ifeq ("@BUILD_DMEVENTD@", "yes") device-mapper: dmeventd.device-mapper endif LVM2.2.02.176/daemons/cmirrord/0000755000000000000120000000000013176752421014573 5ustar rootwheelLVM2.2.02.176/daemons/cmirrord/Makefile.in0000644000000000000120000000224413176752421016642 0ustar rootwheel# # Copyright (C) 2009-2010 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ CPG_LIBS = @CPG_LIBS@ CPG_CFLAGS = @CPG_CFLAGS@ SACKPT_LIBS = @SACKPT_LIBS@ SACKPT_CFLAGS = @SACKPT_CFLAGS@ SOURCES = clogd.c cluster.c compat.c functions.c link_mon.c local.c logging.c TARGETS = cmirrord include $(top_builddir)/make.tmpl LIBS += -ldevmapper LMLIBS += $(CPG_LIBS) $(SACKPT_LIBS) CFLAGS += $(CPG_CFLAGS) $(SACKPT_CFLAGS) $(EXTRA_EXEC_CFLAGS) LDFLAGS += $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS) cmirrord: $(OBJECTS) $(top_builddir)/lib/liblvm-internal.a $(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJECTS) \ $(LVMLIBS) $(LMLIBS) $(LIBS) install: $(TARGETS) $(INSTALL_PROGRAM) -D cmirrord $(usrsbindir)/cmirrord LVM2.2.02.176/daemons/cmirrord/cluster.c0000644000000000000120000014420613176752421016427 0ustar rootwheel/* * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "logging.h" #include "cluster.h" #include "common.h" #include "compat.h" #include "functions.h" #include "link_mon.h" #include "local.h" #include "xlate.h" #include #include #include #include #if CMIRROR_HAS_CHECKPOINT #include #include /* Open AIS error codes */ #define str_ais_error(x) \ ((x) == SA_AIS_OK) ? "SA_AIS_OK" : \ ((x) == SA_AIS_ERR_LIBRARY) ? "SA_AIS_ERR_LIBRARY" : \ ((x) == SA_AIS_ERR_VERSION) ? "SA_AIS_ERR_VERSION" : \ ((x) == SA_AIS_ERR_INIT) ? "SA_AIS_ERR_INIT" : \ ((x) == SA_AIS_ERR_TIMEOUT) ? "SA_AIS_ERR_TIMEOUT" : \ ((x) == SA_AIS_ERR_TRY_AGAIN) ? "SA_AIS_ERR_TRY_AGAIN" : \ ((x) == SA_AIS_ERR_INVALID_PARAM) ? "SA_AIS_ERR_INVALID_PARAM" : \ ((x) == SA_AIS_ERR_NO_MEMORY) ? "SA_AIS_ERR_NO_MEMORY" : \ ((x) == SA_AIS_ERR_BAD_HANDLE) ? "SA_AIS_ERR_BAD_HANDLE" : \ ((x) == SA_AIS_ERR_BUSY) ? "SA_AIS_ERR_BUSY" : \ ((x) == SA_AIS_ERR_ACCESS) ? "SA_AIS_ERR_ACCESS" : \ ((x) == SA_AIS_ERR_NOT_EXIST) ? "SA_AIS_ERR_NOT_EXIST" : \ ((x) == SA_AIS_ERR_NAME_TOO_LONG) ? "SA_AIS_ERR_NAME_TOO_LONG" : \ ((x) == SA_AIS_ERR_EXIST) ? "SA_AIS_ERR_EXIST" : \ ((x) == SA_AIS_ERR_NO_SPACE) ? "SA_AIS_ERR_NO_SPACE" : \ ((x) == SA_AIS_ERR_INTERRUPT) ? "SA_AIS_ERR_INTERRUPT" : \ ((x) == SA_AIS_ERR_NAME_NOT_FOUND) ? "SA_AIS_ERR_NAME_NOT_FOUND" : \ ((x) == SA_AIS_ERR_NO_RESOURCES) ? "SA_AIS_ERR_NO_RESOURCES" : \ ((x) == SA_AIS_ERR_NOT_SUPPORTED) ? "SA_AIS_ERR_NOT_SUPPORTED" : \ ((x) == SA_AIS_ERR_BAD_OPERATION) ? "SA_AIS_ERR_BAD_OPERATION" : \ ((x) == SA_AIS_ERR_FAILED_OPERATION) ? "SA_AIS_ERR_FAILED_OPERATION" : \ ((x) == SA_AIS_ERR_MESSAGE_ERROR) ? "SA_AIS_ERR_MESSAGE_ERROR" : \ ((x) == SA_AIS_ERR_QUEUE_FULL) ? "SA_AIS_ERR_QUEUE_FULL" : \ ((x) == SA_AIS_ERR_QUEUE_NOT_AVAILABLE) ? "SA_AIS_ERR_QUEUE_NOT_AVAILABLE" : \ ((x) == SA_AIS_ERR_BAD_FLAGS) ? "SA_AIS_ERR_BAD_FLAGS" : \ ((x) == SA_AIS_ERR_TOO_BIG) ? 
"SA_AIS_ERR_TOO_BIG" : \ ((x) == SA_AIS_ERR_NO_SECTIONS) ? "SA_AIS_ERR_NO_SECTIONS" : \ "ais_error_unknown" #else #define str_ais_error(x) \ ((x) == CS_OK) ? "CS_OK" : \ ((x) == CS_ERR_LIBRARY) ? "CS_ERR_LIBRARY" : \ ((x) == CS_ERR_VERSION) ? "CS_ERR_VERSION" : \ ((x) == CS_ERR_INIT) ? "CS_ERR_INIT" : \ ((x) == CS_ERR_TIMEOUT) ? "CS_ERR_TIMEOUT" : \ ((x) == CS_ERR_TRY_AGAIN) ? "CS_ERR_TRY_AGAIN" : \ ((x) == CS_ERR_INVALID_PARAM) ? "CS_ERR_INVALID_PARAM" : \ ((x) == CS_ERR_NO_MEMORY) ? "CS_ERR_NO_MEMORY" : \ ((x) == CS_ERR_BAD_HANDLE) ? "CS_ERR_BAD_HANDLE" : \ ((x) == CS_ERR_BUSY) ? "CS_ERR_BUSY" : \ ((x) == CS_ERR_ACCESS) ? "CS_ERR_ACCESS" : \ ((x) == CS_ERR_NOT_EXIST) ? "CS_ERR_NOT_EXIST" : \ ((x) == CS_ERR_NAME_TOO_LONG) ? "CS_ERR_NAME_TOO_LONG" : \ ((x) == CS_ERR_EXIST) ? "CS_ERR_EXIST" : \ ((x) == CS_ERR_NO_SPACE) ? "CS_ERR_NO_SPACE" : \ ((x) == CS_ERR_INTERRUPT) ? "CS_ERR_INTERRUPT" : \ ((x) == CS_ERR_NAME_NOT_FOUND) ? "CS_ERR_NAME_NOT_FOUND" : \ ((x) == CS_ERR_NO_RESOURCES) ? "CS_ERR_NO_RESOURCES" : \ ((x) == CS_ERR_NOT_SUPPORTED) ? "CS_ERR_NOT_SUPPORTED" : \ ((x) == CS_ERR_BAD_OPERATION) ? "CS_ERR_BAD_OPERATION" : \ ((x) == CS_ERR_FAILED_OPERATION) ? "CS_ERR_FAILED_OPERATION" : \ ((x) == CS_ERR_MESSAGE_ERROR) ? "CS_ERR_MESSAGE_ERROR" : \ ((x) == CS_ERR_QUEUE_FULL) ? "CS_ERR_QUEUE_FULL" : \ ((x) == CS_ERR_QUEUE_NOT_AVAILABLE) ? "CS_ERR_QUEUE_NOT_AVAILABLE" : \ ((x) == CS_ERR_BAD_FLAGS) ? "CS_ERR_BAD_FLAGS" : \ ((x) == CS_ERR_TOO_BIG) ? "CS_ERR_TOO_BIG" : \ ((x) == CS_ERR_NO_SECTIONS) ? "CS_ERR_NO_SECTIONS" : \ ((x) == CS_ERR_CONTEXT_NOT_FOUND) ? "CS_ERR_CONTEXT_NOT_FOUND" : \ ((x) == CS_ERR_TOO_MANY_GROUPS) ? "CS_ERR_TOO_MANY_GROUPS" : \ ((x) == CS_ERR_SECURITY) ? "CS_ERR_SECURITY" : \ "cs_error_unknown" #endif #define _RQ_TYPE(x) \ ((x) == DM_ULOG_CHECKPOINT_READY) ? "DM_ULOG_CHECKPOINT_READY": \ ((x) == DM_ULOG_MEMBER_JOIN) ? "DM_ULOG_MEMBER_JOIN": \ RQ_TYPE((x) & ~DM_ULOG_RESPONSE) static uint32_t my_cluster_id = 0xDEAD; #if CMIRROR_HAS_CHECKPOINT static SaCkptHandleT ckpt_handle = 0; static SaCkptCallbacksT callbacks = { 0, 0 }; static SaVersionT version = { 'B', 1, 1 }; #endif #define DEBUGGING_HISTORY 100 #define DEBUGGING_BUFLEN 128 #define LOG_SPRINT(cc, f, arg...) do { \ cc->idx++; \ cc->idx = cc->idx % DEBUGGING_HISTORY; \ snprintf(cc->debugging[cc->idx], DEBUGGING_BUFLEN, f, ## arg); \ } while (0) static int log_resp_rec = 0; #define RECOVERING_REGION_SECTION_SIZE 64 struct checkpoint_data { uint32_t requester; char uuid[CPG_MAX_NAME_LENGTH]; int bitmap_size; /* in bytes */ char *sync_bits; char *clean_bits; char *recovering_region; struct checkpoint_data *next; }; #define INVALID 0 #define VALID 1 #define LEAVING 2 #define MAX_CHECKPOINT_REQUESTERS 10 struct clog_cpg { struct dm_list list; uint32_t lowest_id; cpg_handle_t handle; struct cpg_name name; uint64_t luid; /* Are we the first, or have we received checkpoint? 
*/ int state; int cpg_state; /* FIXME: debugging */ int free_me; int delay; int resend_requests; struct dm_list startup_list; struct dm_list working_list; int checkpoints_needed; uint32_t checkpoint_requesters[MAX_CHECKPOINT_REQUESTERS]; struct checkpoint_data *checkpoint_list; int idx; char debugging[DEBUGGING_HISTORY][DEBUGGING_BUFLEN]; }; static struct dm_list clog_cpg_list; /* * cluster_send * @rq * * Returns: 0 on success, -Exxx on error */ int cluster_send(struct clog_request *rq) { int r; int found = 0; struct iovec iov; struct clog_cpg *entry; dm_list_iterate_items(entry, &clog_cpg_list) if (!strncmp(entry->name.value, rq->u_rq.uuid, CPG_MAX_NAME_LENGTH)) { found = 1; break; } if (!found) { rq->u_rq.error = -ENOENT; return -ENOENT; } /* * Once the request heads for the cluster, the luid loses * all its meaning. */ rq->u_rq.luid = 0; iov.iov_base = rq; iov.iov_len = sizeof(struct clog_request) + rq->u_rq.data_size; rq->u.version[0] = xlate64(CLOG_TFR_VERSION); rq->u.version[1] = CLOG_TFR_VERSION; r = clog_request_to_network(rq); if (r < 0) /* FIXME: Better error code for byteswap failure? */ return -EINVAL; if (entry->cpg_state != VALID) return -EINVAL; #if CMIRROR_HAS_CHECKPOINT do { int count = 0; r = cpg_mcast_joined(entry->handle, CPG_TYPE_AGREED, &iov, 1); if (r != SA_AIS_ERR_TRY_AGAIN) break; count++; if (count < 10) LOG_PRINT("[%s] Retry #%d of cpg_mcast_joined: %s", SHORT_UUID(rq->u_rq.uuid), count, str_ais_error(r)); else if ((count < 100) && !(count % 10)) LOG_ERROR("[%s] Retry #%d of cpg_mcast_joined: %s", SHORT_UUID(rq->u_rq.uuid), count, str_ais_error(r)); else if ((count < 1000) && !(count % 100)) LOG_ERROR("[%s] Retry #%d of cpg_mcast_joined: %s", SHORT_UUID(rq->u_rq.uuid), count, str_ais_error(r)); else if ((count < 10000) && !(count % 1000)) LOG_ERROR("[%s] Retry #%d of cpg_mcast_joined: %s - " "OpenAIS not handling the load?", SHORT_UUID(rq->u_rq.uuid), count, str_ais_error(r)); usleep(1000); } while (1); #else r = cpg_mcast_joined(entry->handle, CPG_TYPE_AGREED, &iov, 1); #endif if (r == CS_OK) return 0; /* error codes found in openais/cpg.h */ LOG_ERROR("cpg_mcast_joined error: %d", r); rq->u_rq.error = -EBADE; return -EBADE; } static struct clog_request *get_matching_rq(struct clog_request *rq, struct dm_list *l) { struct clog_request *match, *n; dm_list_iterate_items_gen_safe(match, n, l, u.list) if (match->u_rq.seq == rq->u_rq.seq) { dm_list_del(&match->u.list); return match; } return NULL; } static char rq_buffer[DM_ULOG_REQUEST_SIZE]; static int handle_cluster_request(struct clog_cpg *entry __attribute__((unused)), struct clog_request *rq, int server) { int r = 0; struct clog_request *tmp = (struct clog_request *)rq_buffer; /* * We need a separate dm_ulog_request struct, one that can carry * a return payload. Otherwise, the memory address after * rq will be altered - leading to problems */ memset(rq_buffer, 0, sizeof(rq_buffer)); memcpy(tmp, rq, sizeof(struct clog_request) + rq->u_rq.data_size); /* * With resumes, we only handle our own. 
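 * (i.e. requests whose originator field matches my_cluster_id).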
* Resume is a special case that requires * local action (to set up CPG), followed by * a cluster action to co-ordinate reading * the disk and checkpointing */ if (tmp->u_rq.request_type == DM_ULOG_RESUME) { if (tmp->originator == my_cluster_id) { r = do_request(tmp, server); r = kernel_send(&tmp->u_rq); if (r < 0) LOG_ERROR("Failed to send resume response to kernel"); } return r; } r = do_request(tmp, server); if (server && (tmp->u_rq.request_type != DM_ULOG_CLEAR_REGION) && (tmp->u_rq.request_type != DM_ULOG_POSTSUSPEND)) { tmp->u_rq.request_type |= DM_ULOG_RESPONSE; /* * Errors from previous functions are in the rq struct. */ r = cluster_send(tmp); if (r < 0) LOG_ERROR("cluster_send failed: %s", strerror(-r)); } return r; } static int handle_cluster_response(struct clog_cpg *entry, struct clog_request *rq) { int r = 0; struct clog_request *orig_rq; /* * If I didn't send it, then I don't care about the response */ if (rq->originator != my_cluster_id) return 0; rq->u_rq.request_type &= ~DM_ULOG_RESPONSE; orig_rq = get_matching_rq(rq, &entry->working_list); if (!orig_rq) { /* Unable to find match for response */ LOG_ERROR("[%s] No match for cluster response: %s:%u", SHORT_UUID(rq->u_rq.uuid), _RQ_TYPE(rq->u_rq.request_type), rq->u_rq.seq); LOG_ERROR("Current local list:"); if (dm_list_empty(&entry->working_list)) LOG_ERROR(" [none]"); dm_list_iterate_items_gen(orig_rq, &entry->working_list, u.list) LOG_ERROR(" [%s] %s:%u", SHORT_UUID(orig_rq->u_rq.uuid), _RQ_TYPE(orig_rq->u_rq.request_type), orig_rq->u_rq.seq); return -EINVAL; } if (log_resp_rec > 0) { LOG_COND(log_resend_requests, "[%s] Response received to %s/#%u", SHORT_UUID(rq->u_rq.uuid), _RQ_TYPE(rq->u_rq.request_type), rq->u_rq.seq); log_resp_rec--; } /* FIXME: Ensure memcpy cannot explode */ memcpy(orig_rq, rq, sizeof(*rq) + rq->u_rq.data_size); r = kernel_send(&orig_rq->u_rq); if (r) LOG_ERROR("Failed to send response to kernel"); free(orig_rq); return r; } static struct clog_cpg *find_clog_cpg(cpg_handle_t handle) { struct clog_cpg *match; dm_list_iterate_items(match, &clog_cpg_list) if (match->handle == handle) return match; return NULL; } /* * prepare_checkpoint * @entry: clog_cpg describing the log * @cp_requester: nodeid requesting the checkpoint * * Creates and fills in a new checkpoint_data struct. * * Returns: checkpoint_data on success, NULL on error */ static struct checkpoint_data *prepare_checkpoint(struct clog_cpg *entry, uint32_t cp_requester) { int r; struct checkpoint_data *new; if (entry->state != VALID) { /* * We can't store bitmaps yet, because the log is not * valid yet. 
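 * Returning NULL refuses the request; the callers report the failure.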
*/ LOG_ERROR("Forced to refuse checkpoint for nodeid %u - log not valid yet", cp_requester); return NULL; } new = malloc(sizeof(*new)); if (!new) { LOG_ERROR("Unable to create checkpoint data for %u", cp_requester); return NULL; } memset(new, 0, sizeof(*new)); new->requester = cp_requester; strncpy(new->uuid, entry->name.value, entry->name.length); new->bitmap_size = push_state(entry->name.value, entry->luid, "clean_bits", &new->clean_bits, cp_requester); if (new->bitmap_size <= 0) { LOG_ERROR("Failed to store clean_bits to checkpoint for node %u", new->requester); free(new); return NULL; } new->bitmap_size = push_state(entry->name.value, entry->luid, "sync_bits", &new->sync_bits, cp_requester); if (new->bitmap_size <= 0) { LOG_ERROR("Failed to store sync_bits to checkpoint for node %u", new->requester); free(new->clean_bits); free(new); return NULL; } r = push_state(entry->name.value, entry->luid, "recovering_region", &new->recovering_region, cp_requester); if (r <= 0) { LOG_ERROR("Failed to store recovering_region to checkpoint for node %u", new->requester); free(new->sync_bits); free(new->clean_bits); free(new); return NULL; } LOG_DBG("[%s] Checkpoint prepared for node %u:", SHORT_UUID(new->uuid), new->requester); LOG_DBG(" bitmap_size = %d", new->bitmap_size); return new; } /* * free_checkpoint * @cp: the checkpoint_data struct to free * */ static void free_checkpoint(struct checkpoint_data *cp) { free(cp->recovering_region); free(cp->sync_bits); free(cp->clean_bits); free(cp); } #if CMIRROR_HAS_CHECKPOINT static int export_checkpoint(struct checkpoint_data *cp) { SaCkptCheckpointCreationAttributesT attr; SaCkptCheckpointHandleT h; SaCkptSectionIdT section_id; SaCkptSectionCreationAttributesT section_attr; SaCkptCheckpointOpenFlagsT flags; SaNameT name; SaAisErrorT rv; struct clog_request *rq; int len, r = 0; char buf[32]; LOG_DBG("Sending checkpointed data to %u", cp->requester); len = snprintf((char *)(name.value), SA_MAX_NAME_LENGTH, "bitmaps_%s_%u", SHORT_UUID(cp->uuid), cp->requester); name.length = (SaUint16T)len; len = (int)strlen(cp->recovering_region) + 1; attr.creationFlags = SA_CKPT_WR_ALL_REPLICAS; attr.checkpointSize = cp->bitmap_size * 2 + len; attr.retentionDuration = SA_TIME_MAX; attr.maxSections = 4; /* don't know why we need +1 */ attr.maxSectionSize = (cp->bitmap_size > len) ? 
cp->bitmap_size : len; attr.maxSectionIdSize = 22; flags = SA_CKPT_CHECKPOINT_READ | SA_CKPT_CHECKPOINT_WRITE | SA_CKPT_CHECKPOINT_CREATE; open_retry: rv = saCkptCheckpointOpen(ckpt_handle, &name, &attr, flags, 0, &h); if (rv == SA_AIS_ERR_TRY_AGAIN) { LOG_ERROR("export_checkpoint: ckpt open retry"); usleep(1000); goto open_retry; } if (rv == SA_AIS_ERR_EXIST) { LOG_DBG("export_checkpoint: checkpoint already exists"); return -EEXIST; } if (rv != SA_AIS_OK) { LOG_ERROR("[%s] Failed to open checkpoint for %u: %s", SHORT_UUID(cp->uuid), cp->requester, str_ais_error(rv)); return -EIO; /* FIXME: better error */ } /* * Add section for sync_bits */ section_id.idLen = (SaUint16T)snprintf(buf, 32, "sync_bits"); section_id.id = (unsigned char *)buf; section_attr.sectionId = §ion_id; section_attr.expirationTime = SA_TIME_END; sync_create_retry: rv = saCkptSectionCreate(h, §ion_attr, cp->sync_bits, cp->bitmap_size); if (rv == SA_AIS_ERR_TRY_AGAIN) { LOG_ERROR("Sync checkpoint section create retry"); usleep(1000); goto sync_create_retry; } if (rv == SA_AIS_ERR_EXIST) { LOG_DBG("Sync checkpoint section already exists"); saCkptCheckpointClose(h); return -EEXIST; } if (rv != SA_AIS_OK) { LOG_ERROR("Sync checkpoint section creation failed: %s", str_ais_error(rv)); saCkptCheckpointClose(h); return -EIO; /* FIXME: better error */ } /* * Add section for clean_bits */ section_id.idLen = snprintf(buf, 32, "clean_bits"); section_id.id = (unsigned char *)buf; section_attr.sectionId = §ion_id; section_attr.expirationTime = SA_TIME_END; clean_create_retry: rv = saCkptSectionCreate(h, §ion_attr, cp->clean_bits, cp->bitmap_size); if (rv == SA_AIS_ERR_TRY_AGAIN) { LOG_ERROR("Clean checkpoint section create retry"); usleep(1000); goto clean_create_retry; } if (rv == SA_AIS_ERR_EXIST) { LOG_DBG("Clean checkpoint section already exists"); saCkptCheckpointClose(h); return -EEXIST; } if (rv != SA_AIS_OK) { LOG_ERROR("Clean checkpoint section creation failed: %s", str_ais_error(rv)); saCkptCheckpointClose(h); return -EIO; /* FIXME: better error */ } /* * Add section for recovering_region */ section_id.idLen = snprintf(buf, 32, "recovering_region"); section_id.id = (unsigned char *)buf; section_attr.sectionId = §ion_id; section_attr.expirationTime = SA_TIME_END; rr_create_retry: rv = saCkptSectionCreate(h, §ion_attr, cp->recovering_region, strlen(cp->recovering_region) + 1); if (rv == SA_AIS_ERR_TRY_AGAIN) { LOG_ERROR("RR checkpoint section create retry"); usleep(1000); goto rr_create_retry; } if (rv == SA_AIS_ERR_EXIST) { LOG_DBG("RR checkpoint section already exists"); saCkptCheckpointClose(h); return -EEXIST; } if (rv != SA_AIS_OK) { LOG_ERROR("RR checkpoint section creation failed: %s", str_ais_error(rv)); saCkptCheckpointClose(h); return -EIO; /* FIXME: better error */ } LOG_DBG("export_checkpoint: closing checkpoint"); saCkptCheckpointClose(h); rq = malloc(DM_ULOG_REQUEST_SIZE); if (!rq) { LOG_ERROR("export_checkpoint: Unable to allocate transfer structs"); return -ENOMEM; } memset(rq, 0, sizeof(*rq)); dm_list_init(&rq->u.list); rq->u_rq.request_type = DM_ULOG_CHECKPOINT_READY; rq->originator = cp->requester; /* FIXME: hack to overload meaning of originator */ strncpy(rq->u_rq.uuid, cp->uuid, CPG_MAX_NAME_LENGTH); rq->u_rq.seq = my_cluster_id; r = cluster_send(rq); if (r) LOG_ERROR("Failed to send checkpoint ready notice: %s", strerror(-r)); free(rq); return 0; } #else static int export_checkpoint(struct checkpoint_data *cp) { int r, rq_size; struct clog_request *rq; rq_size = sizeof(*rq); rq_size += 
RECOVERING_REGION_SECTION_SIZE; rq_size += cp->bitmap_size * 2; /* clean|sync_bits */ rq = malloc(rq_size); if (!rq) { LOG_ERROR("export_checkpoint: " "Unable to allocate transfer structs"); return -ENOMEM; } memset(rq, 0, rq_size); dm_list_init(&rq->u.list); rq->u_rq.request_type = DM_ULOG_CHECKPOINT_READY; rq->originator = cp->requester; strncpy(rq->u_rq.uuid, cp->uuid, CPG_MAX_NAME_LENGTH); rq->u_rq.seq = my_cluster_id; rq->u_rq.data_size = rq_size - sizeof(*rq); /* Sync bits */ memcpy(rq->u_rq.data, cp->sync_bits, cp->bitmap_size); /* Clean bits */ memcpy(rq->u_rq.data + cp->bitmap_size, cp->clean_bits, cp->bitmap_size); /* Recovering region */ memcpy(rq->u_rq.data + (cp->bitmap_size * 2), cp->recovering_region, strlen(cp->recovering_region)); r = cluster_send(rq); if (r) LOG_ERROR("Failed to send checkpoint ready notice: %s", strerror(-r)); free(rq); return 0; } #endif /* CMIRROR_HAS_CHECKPOINT */ #if CMIRROR_HAS_CHECKPOINT static int import_checkpoint(struct clog_cpg *entry, int no_read, struct clog_request *rq __attribute__((unused))) { int rtn = 0; SaCkptCheckpointHandleT h; SaCkptSectionIterationHandleT itr; SaCkptSectionDescriptorT desc; SaCkptIOVectorElementT iov; SaNameT name; SaAisErrorT rv; char *bitmap = NULL; int len; bitmap = malloc(1024*1024); if (!bitmap) return -ENOMEM; len = snprintf((char *)(name.value), SA_MAX_NAME_LENGTH, "bitmaps_%s_%u", SHORT_UUID(entry->name.value), my_cluster_id); name.length = (SaUint16T)len; open_retry: rv = saCkptCheckpointOpen(ckpt_handle, &name, NULL, SA_CKPT_CHECKPOINT_READ, 0, &h); if (rv == SA_AIS_ERR_TRY_AGAIN) { LOG_ERROR("import_checkpoint: ckpt open retry"); usleep(1000); goto open_retry; } if (rv != SA_AIS_OK) { LOG_ERROR("[%s] Failed to open checkpoint: %s", SHORT_UUID(entry->name.value), str_ais_error(rv)); free(bitmap); return -EIO; /* FIXME: better error */ } unlink_retry: rv = saCkptCheckpointUnlink(ckpt_handle, &name); if (rv == SA_AIS_ERR_TRY_AGAIN) { LOG_ERROR("import_checkpoint: ckpt unlink retry"); usleep(1000); goto unlink_retry; } if (no_read) { LOG_DBG("Checkpoint for this log already received"); goto no_read; } init_retry: rv = saCkptSectionIterationInitialize(h, SA_CKPT_SECTIONS_ANY, SA_TIME_END, &itr); if (rv == SA_AIS_ERR_TRY_AGAIN) { LOG_ERROR("import_checkpoint: sync create retry"); usleep(1000); goto init_retry; } if (rv != SA_AIS_OK) { LOG_ERROR("[%s] Sync checkpoint section creation failed: %s", SHORT_UUID(entry->name.value), str_ais_error(rv)); free(bitmap); return -EIO; /* FIXME: better error */ } len = 0; while (1) { rv = saCkptSectionIterationNext(itr, &desc); if (rv == SA_AIS_OK) len++; else if ((rv == SA_AIS_ERR_NO_SECTIONS) && len) break; else if (rv != SA_AIS_ERR_TRY_AGAIN) { LOG_ERROR("saCkptSectionIterationNext failure: %d", rv); break; } } saCkptSectionIterationFinalize(itr); if (len != 3) { LOG_ERROR("import_checkpoint: %d checkpoint sections found", len); usleep(1000); goto init_retry; } saCkptSectionIterationInitialize(h, SA_CKPT_SECTIONS_ANY, SA_TIME_END, &itr); while (1) { rv = saCkptSectionIterationNext(itr, &desc); if (rv == SA_AIS_ERR_NO_SECTIONS) break; if (rv == SA_AIS_ERR_TRY_AGAIN) { LOG_ERROR("import_checkpoint: ckpt iternext retry"); usleep(1000); continue; } if (rv != SA_AIS_OK) { LOG_ERROR("import_checkpoint: clean checkpoint section " "creation failed: %s", str_ais_error(rv)); rtn = -EIO; /* FIXME: better error */ goto fail; } if (!desc.sectionSize) { LOG_ERROR("Checkpoint section empty"); continue; } memset(bitmap, 0, sizeof(*bitmap)); iov.sectionId = desc.sectionId; iov.dataBuffer 
= bitmap; iov.dataSize = desc.sectionSize; iov.dataOffset = 0; read_retry: rv = saCkptCheckpointRead(h, &iov, 1, NULL); if (rv == SA_AIS_ERR_TRY_AGAIN) { LOG_ERROR("ckpt read retry"); usleep(1000); goto read_retry; } if (rv != SA_AIS_OK) { LOG_ERROR("import_checkpoint: ckpt read error: %s", str_ais_error(rv)); rtn = -EIO; /* FIXME: better error */ goto fail; } if (iov.readSize) { if (pull_state(entry->name.value, entry->luid, (char *)desc.sectionId.id, bitmap, iov.readSize)) { LOG_ERROR("Error loading state"); rtn = -EIO; goto fail; } } else { /* Need to request new checkpoint */ rtn = -EAGAIN; goto fail; } } fail: saCkptSectionIterationFinalize(itr); no_read: saCkptCheckpointClose(h); free(bitmap); return rtn; } #else static int import_checkpoint(struct clog_cpg *entry, int no_read, struct clog_request *rq) { int bitmap_size; if (no_read) { LOG_DBG("Checkpoint for this log already received"); return 0; } bitmap_size = (rq->u_rq.data_size - RECOVERING_REGION_SECTION_SIZE) / 2; if (bitmap_size < 0) { LOG_ERROR("Checkpoint has invalid payload size."); return -EINVAL; } if (pull_state(entry->name.value, entry->luid, "sync_bits", rq->u_rq.data, bitmap_size) || pull_state(entry->name.value, entry->luid, "clean_bits", rq->u_rq.data + bitmap_size, bitmap_size) || pull_state(entry->name.value, entry->luid, "recovering_region", rq->u_rq.data + (bitmap_size * 2), RECOVERING_REGION_SECTION_SIZE)) { LOG_ERROR("Error loading bitmap state from checkpoint."); return -EIO; } return 0; } #endif /* CMIRROR_HAS_CHECKPOINT */ static void do_checkpoints(struct clog_cpg *entry, int leaving) { struct checkpoint_data *cp; for (cp = entry->checkpoint_list; cp;) { /* * FIXME: Check return code. Could send failure * notice in rq in export_checkpoint function * by setting rq->error */ switch (export_checkpoint(cp)) { case -EEXIST: LOG_SPRINT(entry, "[%s] Checkpoint for %u already handled%s", SHORT_UUID(entry->name.value), cp->requester, (leaving) ? "(L)": ""); LOG_COND(log_checkpoint, "[%s] Checkpoint for %u already handled%s", SHORT_UUID(entry->name.value), cp->requester, (leaving) ? "(L)": ""); entry->checkpoint_list = cp->next; free_checkpoint(cp); cp = entry->checkpoint_list; break; case 0: LOG_SPRINT(entry, "[%s] Checkpoint data available for node %u%s", SHORT_UUID(entry->name.value), cp->requester, (leaving) ? "(L)": ""); LOG_COND(log_checkpoint, "[%s] Checkpoint data available for node %u%s", SHORT_UUID(entry->name.value), cp->requester, (leaving) ? "(L)": ""); entry->checkpoint_list = cp->next; free_checkpoint(cp); cp = entry->checkpoint_list; break; default: /* FIXME: Skipping will cause list corruption */ LOG_ERROR("[%s] Failed to export checkpoint for %u%s", SHORT_UUID(entry->name.value), cp->requester, (leaving) ? "(L)": ""); } } } static int resend_requests(struct clog_cpg *entry) { int r = 0; struct clog_request *rq, *n; if (!entry->resend_requests || entry->delay) return 0; if (entry->state != VALID) return 0; entry->resend_requests = 0; dm_list_iterate_items_gen_safe(rq, n, &entry->working_list, u.list) { dm_list_del(&rq->u.list); if (strcmp(entry->name.value, rq->u_rq.uuid)) { LOG_ERROR("[%s] Stray request from another log (%s)", SHORT_UUID(entry->name.value), SHORT_UUID(rq->u_rq.uuid)); free(rq); continue; } switch (rq->u_rq.request_type) { case DM_ULOG_SET_REGION_SYNC: /* * Some requests simply do not need to be resent. * If it is a request that just changes log state, * then it doesn't need to be resent (everyone makes * updates). 
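 *
 * Such requests are acknowledged straight back to the kernel below;
 * every other outstanding request is re-multicast with cluster_send(),
 * since the previous server may have gone away before answering it.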
*/ LOG_COND(log_resend_requests, "[%s] Skipping resend of %s/#%u...", SHORT_UUID(entry->name.value), _RQ_TYPE(rq->u_rq.request_type), rq->u_rq.seq); LOG_SPRINT(entry, "### No resend: [%s] %s/%u ###", SHORT_UUID(entry->name.value), _RQ_TYPE(rq->u_rq.request_type), rq->u_rq.seq); rq->u_rq.data_size = 0; if (kernel_send(&rq->u_rq)) LOG_ERROR("Failed to respond to kernel [%s]", RQ_TYPE(rq->u_rq.request_type)); break; default: /* * If an action or a response is required, then * the request must be resent. */ LOG_COND(log_resend_requests, "[%s] Resending %s(#%u) due to new server(%u)", SHORT_UUID(entry->name.value), _RQ_TYPE(rq->u_rq.request_type), rq->u_rq.seq, entry->lowest_id); LOG_SPRINT(entry, "*** Resending: [%s] %s/%u ***", SHORT_UUID(entry->name.value), _RQ_TYPE(rq->u_rq.request_type), rq->u_rq.seq); r = cluster_send(rq); if (r < 0) LOG_ERROR("Failed resend"); } free(rq); } return r; } static int do_cluster_work(void *data __attribute__((unused))) { int r = CS_OK; struct clog_cpg *entry, *tmp; dm_list_iterate_items_safe(entry, tmp, &clog_cpg_list) { r = cpg_dispatch(entry->handle, CS_DISPATCH_ALL); if (r != CS_OK) { if ((r == CS_ERR_BAD_HANDLE) && ((entry->state == INVALID) || (entry->state == LEAVING))) /* It's ok if we've left the cluster */ r = CS_OK; else LOG_ERROR("cpg_dispatch failed: %s", str_ais_error(r)); } if (entry->free_me) { free(entry); continue; } do_checkpoints(entry, 0); resend_requests(entry); } return (r == CS_OK) ? 0 : -1; /* FIXME: good error number? */ } static int flush_startup_list(struct clog_cpg *entry) { int r = 0; int i_was_server; struct clog_request *rq, *n; struct checkpoint_data *new; dm_list_iterate_items_gen_safe(rq, n, &entry->startup_list, u.list) { dm_list_del(&rq->u.list); if (rq->u_rq.request_type == DM_ULOG_MEMBER_JOIN) { new = prepare_checkpoint(entry, rq->originator); if (!new) { /* * FIXME: Need better error handling. Other nodes * will be trying to send the checkpoint too, and we * must continue processing the list; so report error * but continue. */ LOG_ERROR("Failed to prepare checkpoint for %u!!!", rq->originator); free(rq); continue; } LOG_SPRINT(entry, "[%s] Checkpoint prepared for %u", SHORT_UUID(entry->name.value), rq->originator); LOG_COND(log_checkpoint, "[%s] Checkpoint prepared for %u", SHORT_UUID(entry->name.value), rq->originator); new->next = entry->checkpoint_list; entry->checkpoint_list = new; } else { LOG_DBG("[%s] Processing delayed request: %s", SHORT_UUID(rq->u_rq.uuid), _RQ_TYPE(rq->u_rq.request_type)); i_was_server = (rq->pit_server == my_cluster_id) ? 
1 : 0; r = handle_cluster_request(entry, rq, i_was_server); if (r) /* * FIXME: If we error out here, we will never get * another opportunity to retry these requests */ LOG_ERROR("Error while processing delayed CPG message"); } free(rq); } return 0; } static void cpg_message_callback(cpg_handle_t handle, const struct cpg_name *gname __attribute__((unused)), uint32_t nodeid, uint32_t pid __attribute__((unused)), void *msg, size_t msg_len) { int i; int r = 0; int i_am_server; int response = 0; struct clog_request *rq = msg; struct clog_request *tmp_rq, *tmp_rq2; struct clog_cpg *match; match = find_clog_cpg(handle); if (!match) { LOG_ERROR("Unable to find clog_cpg for cluster message"); return; } /* * Perform necessary endian and version compatibility conversions */ if (clog_request_from_network(rq, msg_len) < 0) /* Any error messages come from 'clog_request_from_network' */ return; if ((nodeid == my_cluster_id) && !(rq->u_rq.request_type & DM_ULOG_RESPONSE) && (rq->u_rq.request_type != DM_ULOG_RESUME) && (rq->u_rq.request_type != DM_ULOG_CLEAR_REGION) && (rq->u_rq.request_type != DM_ULOG_CHECKPOINT_READY)) { tmp_rq = malloc(DM_ULOG_REQUEST_SIZE); if (!tmp_rq) { /* * FIXME: It may be possible to continue... but we * would not be able to resend any messages that might * be necessary during membership changes */ LOG_ERROR("[%s] Unable to record request: -ENOMEM", SHORT_UUID(rq->u_rq.uuid)); return; } memcpy(tmp_rq, rq, sizeof(*rq) + rq->u_rq.data_size); dm_list_init(&tmp_rq->u.list); dm_list_add(&match->working_list, &tmp_rq->u.list); } if (rq->u_rq.request_type == DM_ULOG_POSTSUSPEND) { /* * If the server (lowest_id) indicates it is leaving, * then we must resend any outstanding requests. However, * we do not want to resend them if the next server in * line is in the process of leaving. */ if (nodeid == my_cluster_id) { LOG_COND(log_resend_requests, "[%s] I am leaving.1.....", SHORT_UUID(rq->u_rq.uuid)); } else { if (nodeid < my_cluster_id) { if (nodeid == match->lowest_id) { match->resend_requests = 1; LOG_COND(log_resend_requests, "[%s] %u is leaving, resend required%s", SHORT_UUID(rq->u_rq.uuid), nodeid, (dm_list_empty(&match->working_list)) ? " -- working_list empty": ""); dm_list_iterate_items_gen(tmp_rq, &match->working_list, u.list) LOG_COND(log_resend_requests, "[%s] %s/%u", SHORT_UUID(tmp_rq->u_rq.uuid), _RQ_TYPE(tmp_rq->u_rq.request_type), tmp_rq->u_rq.seq); } match->delay++; LOG_COND(log_resend_requests, "[%s] %u is leaving, delay = %d", SHORT_UUID(rq->u_rq.uuid), nodeid, match->delay); } rq->originator = nodeid; /* don't really need this, but nice for debug */ goto out; } } /* * We can receive messages after we do a cpg_leave but before we * get our config callback. However, since we can't respond after * leaving, we simply return. */ if (match->state == LEAVING) return; i_am_server = (my_cluster_id == match->lowest_id) ? 1 : 0; if (rq->u_rq.request_type == DM_ULOG_CHECKPOINT_READY) { if (my_cluster_id == rq->originator) { /* Redundant checkpoints ignored if match->valid */ LOG_SPRINT(match, "[%s] CHECKPOINT_READY notification from %u", SHORT_UUID(rq->u_rq.uuid), nodeid); if (import_checkpoint(match, (match->state != INVALID), rq)) { LOG_SPRINT(match, "[%s] Failed to import checkpoint from %u", SHORT_UUID(rq->u_rq.uuid), nodeid); LOG_ERROR("[%s] Failed to import checkpoint from %u", SHORT_UUID(rq->u_rq.uuid), nodeid); kill(getpid(), SIGUSR1); /* Could we retry? */ goto out; } else if (match->state == INVALID) { LOG_SPRINT(match, "[%s] Checkpoint data received from %u. 
Log is now valid", SHORT_UUID(match->name.value), nodeid); LOG_COND(log_checkpoint, "[%s] Checkpoint data received from %u. Log is now valid", SHORT_UUID(match->name.value), nodeid); match->state = VALID; flush_startup_list(match); } else { LOG_SPRINT(match, "[%s] Redundant checkpoint from %u ignored.", SHORT_UUID(rq->u_rq.uuid), nodeid); } } goto out; } if (rq->u_rq.request_type & DM_ULOG_RESPONSE) { response = 1; r = handle_cluster_response(match, rq); } else { rq->originator = nodeid; if (match->state == LEAVING) { LOG_ERROR("[%s] Ignoring %s from %u. Reason: I'm leaving", SHORT_UUID(rq->u_rq.uuid), _RQ_TYPE(rq->u_rq.request_type), rq->originator); goto out; } if (match->state == INVALID) { LOG_DBG("Log not valid yet, storing request"); if (!(tmp_rq2 = malloc(DM_ULOG_REQUEST_SIZE))) { LOG_ERROR("cpg_message_callback: Unable to" " allocate transfer structs"); r = -ENOMEM; /* FIXME: Better error #? */ goto out; } memcpy(tmp_rq2, rq, sizeof(*rq) + rq->u_rq.data_size); tmp_rq2->pit_server = match->lowest_id; dm_list_init(&tmp_rq2->u.list); dm_list_add(&match->startup_list, &tmp_rq2->u.list); goto out; } r = handle_cluster_request(match, rq, i_am_server); } /* * If the log is now valid, we can queue the checkpoints */ for (i = match->checkpoints_needed; i; ) { struct checkpoint_data *new; if (log_get_state(&rq->u_rq) != LOG_RESUMED) { LOG_DBG("[%s] Withholding checkpoints until log is valid (%s from %u)", SHORT_UUID(rq->u_rq.uuid), _RQ_TYPE(rq->u_rq.request_type), nodeid); break; } i--; new = prepare_checkpoint(match, match->checkpoint_requesters[i]); if (!new) { /* FIXME: Need better error handling */ LOG_ERROR("[%s] Failed to prepare checkpoint for %u!!!", SHORT_UUID(rq->u_rq.uuid), match->checkpoint_requesters[i]); break; } LOG_SPRINT(match, "[%s] Checkpoint prepared for %u* (%s)", SHORT_UUID(rq->u_rq.uuid), match->checkpoint_requesters[i], (log_get_state(&rq->u_rq) != LOG_RESUMED)? "LOG_RESUMED": "LOG_SUSPENDED"); LOG_COND(log_checkpoint, "[%s] Checkpoint prepared for %u*", SHORT_UUID(rq->u_rq.uuid), match->checkpoint_requesters[i]); match->checkpoints_needed--; new->next = match->checkpoint_list; match->checkpoint_list = new; } out: /* nothing happens after this point. It is just for debugging */ if (r) { LOG_ERROR("[%s] Error while processing CPG message, %s: %s", SHORT_UUID(rq->u_rq.uuid), _RQ_TYPE(rq->u_rq.request_type & ~DM_ULOG_RESPONSE), strerror(-r)); LOG_ERROR("[%s] Response : %s", SHORT_UUID(rq->u_rq.uuid), (response) ? "YES" : "NO"); LOG_ERROR("[%s] Originator: %u", SHORT_UUID(rq->u_rq.uuid), rq->originator); if (response) LOG_ERROR("[%s] Responder : %u", SHORT_UUID(rq->u_rq.uuid), nodeid); LOG_ERROR("HISTORY::"); for (i = 0; i < DEBUGGING_HISTORY; i++) { match->idx++; match->idx = match->idx % DEBUGGING_HISTORY; if (match->debugging[match->idx][0] == '\0') continue; LOG_ERROR("%d:%d) %s", i, match->idx, match->debugging[match->idx]); } } else if (!(rq->u_rq.request_type & DM_ULOG_RESPONSE) || (rq->originator == my_cluster_id)) { if (!response) LOG_SPRINT(match, "SEQ#=%u, UUID=%s, TYPE=%s, ORIG=%u, RESP=%s", rq->u_rq.seq, SHORT_UUID(rq->u_rq.uuid), _RQ_TYPE(rq->u_rq.request_type), rq->originator, (response) ? "YES" : "NO"); else LOG_SPRINT(match, "SEQ#=%u, UUID=%s, TYPE=%s, ORIG=%u, RESP=%s, RSPR=%u, error=%d", rq->u_rq.seq, SHORT_UUID(rq->u_rq.uuid), _RQ_TYPE(rq->u_rq.request_type), rq->originator, (response) ? 
"YES" : "NO", nodeid, rq->u_rq.error); } } static void cpg_join_callback(struct clog_cpg *match, const struct cpg_address *joined, const struct cpg_address *member_list, size_t member_list_entries) { unsigned i; uint32_t my_pid = (uint32_t)getpid(); uint32_t lowest = match->lowest_id; struct clog_request *rq; char dbuf[64] = { 0 }; char *dbuf_p = dbuf; size_t dbuf_rem = sizeof dbuf; /* Assign my_cluster_id */ if ((my_cluster_id == 0xDEAD) && (joined->pid == my_pid)) my_cluster_id = joined->nodeid; /* Am I the very first to join? */ if (member_list_entries == 1) { match->lowest_id = joined->nodeid; match->state = VALID; } /* If I am part of the joining list, I do not send checkpoints */ if (joined->nodeid == my_cluster_id) goto out; for (i = 0; i < member_list_entries - 1; i++) { int written = snprintf(dbuf_p, dbuf_rem, "%u-", member_list[i].nodeid); if (written < 0) continue; /* impossible */ if ((unsigned)written >= dbuf_rem) { dbuf_rem = 0; break; } dbuf_rem -= written; dbuf_p += written; } snprintf(dbuf_p, dbuf_rem, "(%u)", joined->nodeid); LOG_COND(log_checkpoint, "[%s] Joining node, %u needs checkpoint [%s]", SHORT_UUID(match->name.value), joined->nodeid, dbuf); /* * FIXME: remove checkpoint_requesters/checkpoints_needed, and use * the startup_list interface exclusively */ if (dm_list_empty(&match->startup_list) && (match->state == VALID) && (match->checkpoints_needed < MAX_CHECKPOINT_REQUESTERS)) { match->checkpoint_requesters[match->checkpoints_needed++] = joined->nodeid; goto out; } rq = malloc(DM_ULOG_REQUEST_SIZE); if (!rq) { LOG_ERROR("cpg_config_callback: " "Unable to allocate transfer structs"); LOG_ERROR("cpg_config_callback: " "Unable to perform checkpoint"); goto out; } rq->u_rq.request_type = DM_ULOG_MEMBER_JOIN; rq->originator = joined->nodeid; dm_list_init(&rq->u.list); dm_list_add(&match->startup_list, &rq->u.list); out: /* Find the lowest_id, i.e. the server */ match->lowest_id = member_list[0].nodeid; for (i = 0; i < member_list_entries; i++) if (match->lowest_id > member_list[i].nodeid) match->lowest_id = member_list[i].nodeid; if (lowest == 0xDEAD) LOG_COND(log_membership_change, "[%s] Server change -> %u (%u %s)", SHORT_UUID(match->name.value), match->lowest_id, joined->nodeid, (member_list_entries == 1) ? "is first to join" : "joined"); else if (lowest != match->lowest_id) LOG_COND(log_membership_change, "[%s] Server change %u -> %u (%u joined)", SHORT_UUID(match->name.value), lowest, match->lowest_id, joined->nodeid); else LOG_COND(log_membership_change, "[%s] Server unchanged at %u (%u joined)", SHORT_UUID(match->name.value), lowest, joined->nodeid); LOG_SPRINT(match, "+++ UUID=%s %u join +++", SHORT_UUID(match->name.value), joined->nodeid); } static void cpg_leave_callback(struct clog_cpg *match, const struct cpg_address *left, const struct cpg_address *member_list, size_t member_list_entries) { unsigned i; int j, fd; uint32_t lowest = match->lowest_id; struct clog_request *rq, *n; struct checkpoint_data *p_cp, *c_cp; LOG_SPRINT(match, "--- UUID=%s %u left ---", SHORT_UUID(match->name.value), left->nodeid); /* Am I leaving? 
*/ if (my_cluster_id == left->nodeid) { LOG_DBG("Finalizing leave..."); dm_list_del(&match->list); cpg_fd_get(match->handle, &fd); links_unregister(fd); cluster_postsuspend(match->name.value, match->luid); dm_list_iterate_items_gen_safe(rq, n, &match->working_list, u.list) { dm_list_del(&rq->u.list); if (rq->u_rq.request_type == DM_ULOG_POSTSUSPEND) if (kernel_send(&rq->u_rq)) LOG_ERROR("Failed to respond to kernel [%s]", RQ_TYPE(rq->u_rq.request_type)); free(rq); } cpg_finalize(match->handle); match->free_me = 1; match->lowest_id = 0xDEAD; match->state = INVALID; } /* Remove any pending checkpoints for the leaving node. */ for (p_cp = NULL, c_cp = match->checkpoint_list; c_cp && (c_cp->requester != left->nodeid); p_cp = c_cp, c_cp = c_cp->next); if (c_cp) { if (p_cp) p_cp->next = c_cp->next; else match->checkpoint_list = c_cp->next; LOG_COND(log_checkpoint, "[%s] Removing pending checkpoint (%u is leaving)", SHORT_UUID(match->name.value), left->nodeid); free_checkpoint(c_cp); } dm_list_iterate_items_gen_safe(rq, n, &match->startup_list, u.list) { if ((rq->u_rq.request_type == DM_ULOG_MEMBER_JOIN) && (rq->originator == left->nodeid)) { LOG_COND(log_checkpoint, "[%s] Removing pending ckpt from startup list (%u is leaving)", SHORT_UUID(match->name.value), left->nodeid); dm_list_del(&rq->u.list); free(rq); } } for (i = 0, j = 0; (int) i < match->checkpoints_needed; i++, j++) { match->checkpoint_requesters[j] = match->checkpoint_requesters[i]; if (match->checkpoint_requesters[i] == left->nodeid) { LOG_ERROR("[%s] Removing pending ckpt from needed list (%u is leaving)", SHORT_UUID(match->name.value), left->nodeid); j--; } } match->checkpoints_needed = j; if (left->nodeid < my_cluster_id) { match->delay = (match->delay > 0) ? match->delay - 1 : 0; if (!match->delay && dm_list_empty(&match->working_list)) match->resend_requests = 0; LOG_COND(log_resend_requests, "[%s] %u has left, delay = %d%s", SHORT_UUID(match->name.value), left->nodeid, match->delay, (dm_list_empty(&match->working_list)) ? " -- working_list empty": ""); } /* Find the lowest_id, i.e. the server */ if (!member_list_entries) { match->lowest_id = 0xDEAD; LOG_COND(log_membership_change, "[%s] Server change %u -> " "(%u is last to leave)", SHORT_UUID(match->name.value), left->nodeid, left->nodeid); return; } match->lowest_id = member_list[0].nodeid; for (i = 0; i < member_list_entries; i++) if (match->lowest_id > member_list[i].nodeid) match->lowest_id = member_list[i].nodeid; if (lowest != match->lowest_id) { LOG_COND(log_membership_change, "[%s] Server change %u -> %u (%u left)", SHORT_UUID(match->name.value), lowest, match->lowest_id, left->nodeid); } else LOG_COND(log_membership_change, "[%s] Server unchanged at %u (%u left)", SHORT_UUID(match->name.value), lowest, left->nodeid); if ((match->state == INVALID) && !match->free_me) { /* * If all CPG members are waiting for checkpoints and they * are all present in my startup_list, then I was the first to * join and I must assume control. * * We do not normally end up here, but if there was a quick * 'resume -> suspend -> resume' across the cluster, we may * have initially thought we were not the first to join because * of the presence of out-going (and unable to respond) members. */ i = 1; /* We do not have a DM_ULOG_MEMBER_JOIN entry of our own */ dm_list_iterate_items_gen(rq, &match->startup_list, u.list) if (rq->u_rq.request_type == DM_ULOG_MEMBER_JOIN) i++; if (i == member_list_entries) { /* * Last node who could have given me a checkpoint just left. 
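 * No remaining member can export one to us, so we take over: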
* Setting log state to VALID and acting as 'first join'. */ match->state = VALID; flush_startup_list(match); } } } static void cpg_config_callback(cpg_handle_t handle, const struct cpg_name *gname __attribute__((unused)), const struct cpg_address *member_list, size_t member_list_entries, const struct cpg_address *left_list, size_t left_list_entries, const struct cpg_address *joined_list, size_t joined_list_entries) { struct clog_cpg *match; int found = 0; dm_list_iterate_items(match, &clog_cpg_list) if (match->handle == handle) { found = 1; break; } if (!found) { LOG_ERROR("Unable to find match for CPG config callback"); return; } if ((joined_list_entries + left_list_entries) > 1) LOG_ERROR("[%s] More than one node joining/leaving", SHORT_UUID(match->name.value)); if (joined_list_entries) cpg_join_callback(match, joined_list, member_list, member_list_entries); else cpg_leave_callback(match, left_list, member_list, member_list_entries); } cpg_callbacks_t cpg_callbacks = { .cpg_deliver_fn = cpg_message_callback, .cpg_confchg_fn = cpg_config_callback, }; /* * remove_checkpoint * @entry * * Returns: 1 if checkpoint removed, 0 if no checkpoints, -EXXX on error */ static int remove_checkpoint(struct clog_cpg *entry) { #if CMIRROR_HAS_CHECKPOINT int len; SaNameT name; SaAisErrorT rv; SaCkptCheckpointHandleT h; len = snprintf((char *)(name.value), SA_MAX_NAME_LENGTH, "bitmaps_%s_%u", SHORT_UUID(entry->name.value), my_cluster_id); name.length = len; open_retry: rv = saCkptCheckpointOpen(ckpt_handle, &name, NULL, SA_CKPT_CHECKPOINT_READ, 0, &h); if (rv == SA_AIS_ERR_TRY_AGAIN) { LOG_ERROR("abort_startup: ckpt open retry"); usleep(1000); goto open_retry; } if (rv != SA_AIS_OK) return 0; LOG_DBG("[%s] Removing checkpoint", SHORT_UUID(entry->name.value)); unlink_retry: rv = saCkptCheckpointUnlink(ckpt_handle, &name); if (rv == SA_AIS_ERR_TRY_AGAIN) { LOG_ERROR("abort_startup: ckpt unlink retry"); usleep(1000); goto unlink_retry; } if (rv != SA_AIS_OK) { LOG_ERROR("[%s] Failed to unlink checkpoint: %s", SHORT_UUID(entry->name.value), str_ais_error(rv)); return -EIO; } saCkptCheckpointClose(h); return 1; #else /* No checkpoint to remove, so 'success' */ return 1; #endif } int create_cluster_cpg(char *uuid, uint64_t luid) { int r; size_t size; struct clog_cpg *new = NULL; struct clog_cpg *tmp; dm_list_iterate_items(tmp, &clog_cpg_list) if (!strncmp(tmp->name.value, uuid, CPG_MAX_NAME_LENGTH)) { LOG_ERROR("Log entry already exists: %s", uuid); return -EEXIST; } new = malloc(sizeof(*new)); if (!new) { LOG_ERROR("Unable to allocate memory for clog_cpg"); return -ENOMEM; } memset(new, 0, sizeof(*new)); dm_list_init(&new->list); new->lowest_id = 0xDEAD; dm_list_init(&new->startup_list); dm_list_init(&new->working_list); size = ((strlen(uuid) + 1) > CPG_MAX_NAME_LENGTH) ? 
CPG_MAX_NAME_LENGTH : (strlen(uuid) + 1); strncpy(new->name.value, uuid, size); new->name.length = (uint32_t)size; new->luid = luid; /* * Ensure there are no stale checkpoints around before we join */ if (remove_checkpoint(new) == 1) LOG_COND(log_checkpoint, "[%s] Removing checkpoints left from previous session", SHORT_UUID(new->name.value)); r = cpg_initialize(&new->handle, &cpg_callbacks); if (r != CS_OK) { LOG_ERROR("cpg_initialize failed: Cannot join cluster"); free(new); return -EPERM; } r = cpg_join(new->handle, &new->name); if (r != CS_OK) { LOG_ERROR("cpg_join failed: Cannot join cluster"); free(new); return -EPERM; } new->cpg_state = VALID; dm_list_add(&clog_cpg_list, &new->list); LOG_DBG("New handle: %llu", (unsigned long long)new->handle); LOG_DBG("New name: %s", new->name.value); /* FIXME: better variable */ cpg_fd_get(new->handle, &r); links_register(r, "cluster", do_cluster_work, NULL); return 0; } static void abort_startup(struct clog_cpg *del) { struct clog_request *rq, *n; LOG_DBG("[%s] CPG teardown before checkpoint received", SHORT_UUID(del->name.value)); dm_list_iterate_items_gen_safe(rq, n, &del->startup_list, u.list) { dm_list_del(&rq->u.list); LOG_DBG("[%s] Ignoring request from %u: %s", SHORT_UUID(del->name.value), rq->originator, _RQ_TYPE(rq->u_rq.request_type)); free(rq); } remove_checkpoint(del); } static int _destroy_cluster_cpg(struct clog_cpg *del) { int r; int state; LOG_COND(log_resend_requests, "[%s] I am leaving.2.....", SHORT_UUID(del->name.value)); /* * We must send any left over checkpoints before * leaving. If we don't, an incoming node could * be stuck with no checkpoint and stall. do_checkpoints(del); --- THIS COULD BE CAUSING OUR PROBLEMS: - Incoming node deletes old checkpoints before joining - A stale checkpoint is issued here by leaving node - (leaving node leaves) - Incoming node joins cluster and finds stale checkpoint. - (leaving node leaves - option 2) */ do_checkpoints(del, 1); state = del->state; del->cpg_state = INVALID; del->state = LEAVING; /* * If the state is VALID, we might be processing the * startup list. If so, we certainly don't want to * clear the startup_list here by calling abort_startup */ if (!dm_list_empty(&del->startup_list) && (state != VALID)) abort_startup(del); r = cpg_leave(del->handle, &del->name); if (r != CS_OK) LOG_ERROR("Error leaving CPG!"); return 0; } int destroy_cluster_cpg(char *uuid) { struct clog_cpg *del, *tmp; dm_list_iterate_items_safe(del, tmp, &clog_cpg_list) if (!strncmp(del->name.value, uuid, CPG_MAX_NAME_LENGTH)) _destroy_cluster_cpg(del); return 0; } int init_cluster(void) { #if CMIRROR_HAS_CHECKPOINT SaAisErrorT rv; rv = saCkptInitialize(&ckpt_handle, &callbacks, &version); if (rv != SA_AIS_OK) return EXIT_CLUSTER_CKPT_INIT; #endif dm_list_init(&clog_cpg_list); return 0; } void cleanup_cluster(void) { #if CMIRROR_HAS_CHECKPOINT SaAisErrorT err; err = saCkptFinalize(ckpt_handle); if (err != SA_AIS_OK) LOG_ERROR("Failed to finalize checkpoint handle"); #endif } void cluster_debug(void) { struct checkpoint_data *cp; struct clog_cpg *entry; struct clog_request *rq; int i; LOG_ERROR(""); LOG_ERROR("CLUSTER COMPONENT DEBUGGING::"); dm_list_iterate_items(entry, &clog_cpg_list) { LOG_ERROR("%s::", SHORT_UUID(entry->name.value)); LOG_ERROR(" lowest_id : %u", entry->lowest_id); LOG_ERROR(" state : %s", (entry->state == INVALID) ? "INVALID" : (entry->state == VALID) ? "VALID" : (entry->state == LEAVING) ? 
"LEAVING" : "UNKNOWN"); LOG_ERROR(" cpg_state : %d", entry->cpg_state); LOG_ERROR(" free_me : %d", entry->free_me); LOG_ERROR(" delay : %d", entry->delay); LOG_ERROR(" resend_requests : %d", entry->resend_requests); LOG_ERROR(" checkpoints_needed: %d", entry->checkpoints_needed); for (i = 0, cp = entry->checkpoint_list; i < MAX_CHECKPOINT_REQUESTERS; i++) if (cp) cp = cp->next; else break; LOG_ERROR(" CKPTs waiting : %d", i); LOG_ERROR(" Working list:"); dm_list_iterate_items_gen(rq, &entry->working_list, u.list) LOG_ERROR(" %s/%u", _RQ_TYPE(rq->u_rq.request_type), rq->u_rq.seq); LOG_ERROR(" Startup list:"); dm_list_iterate_items_gen(rq, &entry->startup_list, u.list) LOG_ERROR(" %s/%u", _RQ_TYPE(rq->u_rq.request_type), rq->u_rq.seq); LOG_ERROR("Command History:"); for (i = 0; i < DEBUGGING_HISTORY; i++) { entry->idx++; entry->idx = entry->idx % DEBUGGING_HISTORY; if (entry->debugging[entry->idx][0] == '\0') continue; LOG_ERROR("%d:%d) %s", i, entry->idx, entry->debugging[entry->idx]); } } } LVM2.2.02.176/daemons/cmirrord/local.h0000644000000000000120000000123513176752421016037 0ustar rootwheel/* * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _LVM_CLOG_LOCAL_H #define _LVM_CLOG_LOCAL_H int init_local(void); void cleanup_local(void); int kernel_send(struct dm_ulog_request *rq); #endif /* _LVM_CLOG_LOCAL_H */ LVM2.2.02.176/daemons/cmirrord/functions.h0000644000000000000120000000215013176752421016752 0ustar rootwheel/* * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _LVM_CLOG_FUNCTIONS_H #define _LVM_CLOG_FUNCTIONS_H #include "dm-log-userspace.h" #include "cluster.h" #define LOG_RESUMED 1 #define LOG_SUSPENDED 2 int local_resume(struct dm_ulog_request *rq); int cluster_postsuspend(char *, uint64_t); int do_request(struct clog_request *rq, int server); int push_state(const char *uuid, uint64_t luid, const char *which, char **buf, uint32_t debug_who); int pull_state(const char *uuid, uint64_t luid, const char *which, char *buf, int size); int log_get_state(struct dm_ulog_request *rq); int log_status(void); void log_debug(void); #endif /* _LVM_CLOG_FUNCTIONS_H */ LVM2.2.02.176/daemons/cmirrord/compat.c0000644000000000000120000001173013176752421016224 0ustar rootwheel/* * Copyright (C) 2010 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. 
*/ #include "logging.h" #include "cluster.h" #include "compat.h" #include "xlate.h" #include /* * Older versions of the log daemon communicate with different * versions of the inter-machine communication structure, which * varies in size and fields. The older versions append the * standard upstream version of the structure to every request. * COMPAT_OFFSET is where the upstream structure starts. */ #define COMPAT_OFFSET 256 static void v5_data_endian_switch(struct clog_request *rq, int to_network __attribute__((unused))) { int i, end; int64_t *pi64; uint64_t *pu64; uint32_t rq_type = rq->u_rq.request_type & ~DM_ULOG_RESPONSE; if (rq->u_rq.request_type & DM_ULOG_RESPONSE) { switch (rq_type) { case DM_ULOG_CTR: case DM_ULOG_DTR: LOG_ERROR("Invalid response type in endian switch"); exit(EXIT_FAILURE); case DM_ULOG_PRESUSPEND: case DM_ULOG_POSTSUSPEND: case DM_ULOG_RESUME: case DM_ULOG_FLUSH: case DM_ULOG_MARK_REGION: case DM_ULOG_CLEAR_REGION: case DM_ULOG_SET_REGION_SYNC: case DM_ULOG_CHECKPOINT_READY: case DM_ULOG_MEMBER_JOIN: case DM_ULOG_STATUS_INFO: case DM_ULOG_STATUS_TABLE: /* No outbound data */ break; case DM_ULOG_GET_REGION_SIZE: case DM_ULOG_GET_SYNC_COUNT: pu64 = (uint64_t *)rq->u_rq.data; *pu64 = xlate64(*pu64); break; case DM_ULOG_IS_CLEAN: case DM_ULOG_IN_SYNC: pi64 = (int64_t *)rq->u_rq.data; *pi64 = xlate64(*pi64); break; case DM_ULOG_GET_RESYNC_WORK: case DM_ULOG_IS_REMOTE_RECOVERING: pi64 = (int64_t *)rq->u_rq.data; pu64 = ((uint64_t *)rq->u_rq.data) + 1; *pi64 = xlate64(*pi64); *pu64 = xlate64(*pu64); break; default: LOG_ERROR("Unknown request type, %u", rq_type); return; } } else { switch (rq_type) { case DM_ULOG_CTR: case DM_ULOG_DTR: LOG_ERROR("Invalid request type in endian switch"); exit(EXIT_FAILURE); case DM_ULOG_PRESUSPEND: case DM_ULOG_POSTSUSPEND: case DM_ULOG_RESUME: case DM_ULOG_GET_REGION_SIZE: case DM_ULOG_FLUSH: case DM_ULOG_GET_RESYNC_WORK: case DM_ULOG_GET_SYNC_COUNT: case DM_ULOG_STATUS_INFO: case DM_ULOG_STATUS_TABLE: case DM_ULOG_CHECKPOINT_READY: case DM_ULOG_MEMBER_JOIN: /* No incoming data */ break; case DM_ULOG_IS_CLEAN: case DM_ULOG_IN_SYNC: case DM_ULOG_IS_REMOTE_RECOVERING: pu64 = (uint64_t *)rq->u_rq.data; *pu64 = xlate64(*pu64); break; case DM_ULOG_MARK_REGION: case DM_ULOG_CLEAR_REGION: end = rq->u_rq.data_size/sizeof(uint64_t); pu64 = (uint64_t *)rq->u_rq.data; for (i = 0; i < end; i++) pu64[i] = xlate64(pu64[i]); break; case DM_ULOG_SET_REGION_SYNC: pu64 = (uint64_t *)rq->u_rq.data; pi64 = ((int64_t *)rq->u_rq.data) + 1; *pu64 = xlate64(*pu64); *pi64 = xlate64(*pi64); break; default: LOG_ERROR("Unknown request type, %u", rq_type); exit(EXIT_FAILURE); } } } static int v5_endian_to_network(struct clog_request *rq) { int size; struct dm_ulog_request *u_rq = &rq->u_rq; size = sizeof(*rq) + u_rq->data_size; u_rq->error = xlate32(u_rq->error); u_rq->seq = xlate32(u_rq->seq); rq->originator = xlate32(rq->originator); v5_data_endian_switch(rq, 1); u_rq->request_type = xlate32(u_rq->request_type); u_rq->data_size = xlate32(u_rq->data_size); return size; } int clog_request_to_network(struct clog_request *rq) { int r; /* FIXME: Remove this safety check */ if (rq->u.version[0] != xlate64(rq->u.version[1])) { LOG_ERROR("Programmer error: version[0] must be LE"); exit(EXIT_FAILURE); } /* * Are we already running in the endian mode we send * over the wire? 
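* version[0] is always little endian while version[1] is the sender's
* native byte order, so equality means no byte swapping is required.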
*/ if (rq->u.version[0] == rq->u.version[1]) return 0; r = v5_endian_to_network(rq); if (r < 0) return r; return 0; } static int v5_endian_from_network(struct clog_request *rq) { int size; struct dm_ulog_request *u_rq = &rq->u_rq; u_rq->error = xlate32(u_rq->error); u_rq->seq = xlate32(u_rq->seq); u_rq->request_type = xlate32(u_rq->request_type); u_rq->data_size = xlate32(u_rq->data_size); rq->originator = xlate32(rq->originator); size = sizeof(*rq) + u_rq->data_size; v5_data_endian_switch(rq, 0); return size; } int clog_request_from_network(void *data, size_t data_len) { uint64_t *vp = data; uint64_t version = xlate64(vp[0]); struct clog_request *rq = data; switch (version) { case 5: /* Upstream */ if (version == vp[0]) return 0; break; case 4: /* RHEL 5.[45] */ case 3: /* RHEL 5.3 */ case 2: /* RHEL 5.2 */ /* FIXME: still need to account for payload */ if (data_len < (COMPAT_OFFSET + sizeof(*rq))) return -ENOSPC; rq = (struct clog_request *)((char *)data + COMPAT_OFFSET); break; default: LOG_ERROR("Unable to process cluster message: " "Incompatible version"); return -EINVAL; } v5_endian_from_network(rq); return 0; } LVM2.2.02.176/daemons/cmirrord/clogd.c0000644000000000000120000001367113176752421016037 0ustar rootwheel/* * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "logging.h" #include "common.h" #include "functions.h" #include "link_mon.h" #include "local.h" #include #include #include #include #include #include #include static volatile sig_atomic_t exit_now = 0; /* FIXME Review signal handling. Should be volatile sig_atomic_t */ static sigset_t signal_mask; static volatile sig_atomic_t signal_received; static void process_signals(void); static void daemonize(void); static void init_all(void); static void cleanup_all(void); static void usage (FILE *dest) { fprintf (dest, "Usage: cmirrord [options]\n" " -f, --foreground stay in the foreground, log to the terminal\n" " -h, --help print this help\n"); } int main(int argc, char *argv[]) { int foreground_mode = 0; struct option longopts[] = { { "foreground", no_argument, NULL, 'f' }, { "help" , no_argument, NULL, 'h' }, { 0, 0, 0, 0 } }; int opt; while ((opt = getopt_long (argc, argv, "fh", longopts, NULL)) != -1) { switch (opt) { case 'f': foreground_mode = 1; break; case 'h': usage (stdout); exit (0); default: usage (stderr); exit (2); } } if (optind < argc) { usage (stderr); exit (2); } if (!foreground_mode) daemonize(); init_all(); /* Parent can now exit, we're ready to handle requests */ if (!foreground_mode) kill(getppid(), SIGTERM); LOG_PRINT("Starting cmirrord:"); LOG_PRINT(" Built: "__DATE__" "__TIME__"\n"); LOG_DBG(" Compiled with debugging."); while (!exit_now) { links_monitor(); links_issue_callbacks(); process_signals(); } exit(EXIT_SUCCESS); } /* * parent_exit_handler: exit the parent * @sig: the signal * */ static void parent_exit_handler(int sig __attribute__((unused))) { exit_now = 1; } static void sig_handler(int sig) { /* FIXME Races - don't touch signal_mask here. 
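* The main loop modifies signal_mask as well (see process_signals()), so
* updates from here can race with it; a self-pipe or signalfd would be
* one way to avoid that.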
*/ sigaddset(&signal_mask, sig); signal_received = 1; } static void process_signal(int sig){ int r = 0; switch(sig) { case SIGINT: case SIGQUIT: case SIGTERM: case SIGHUP: r += log_status(); break; case SIGUSR1: case SIGUSR2: log_debug(); /*local_debug();*/ cluster_debug(); return; default: LOG_PRINT("Unknown signal received... ignoring"); return; } if (!r) { LOG_DBG("No current cluster logs... safe to exit."); cleanup_all(); exit(EXIT_SUCCESS); } LOG_ERROR("Cluster logs exist. Refusing to exit."); } static void process_signals(void) { int x; if (!signal_received) return; signal_received = 0; for (x = 1; x < _NSIG; x++) { if (sigismember(&signal_mask, x)) { sigdelset(&signal_mask, x); process_signal(x); } } } static void remove_lockfile(void) { if (unlink(CMIRRORD_PIDFILE)) LOG_ERROR("Unable to remove \"" CMIRRORD_PIDFILE "\" %s", strerror(errno)); } /* * daemonize * * Performs the steps necessary to become a daemon. */ static void daemonize(void) { int pid; int status; int devnull; if ((devnull = open("/dev/null", O_RDWR)) == -1) { LOG_ERROR("Can't open /dev/null: %s", strerror(errno)); exit(EXIT_FAILURE); } signal(SIGTERM, &parent_exit_handler); pid = fork(); if (pid < 0) { LOG_ERROR("Unable to fork()"); exit(EXIT_FAILURE); } if (pid) { /* Parent waits here for child to get going */ while (!waitpid(pid, &status, WNOHANG) && !exit_now); if (exit_now) exit(EXIT_SUCCESS); switch (WEXITSTATUS(status)) { case EXIT_LOCKFILE: LOG_ERROR("Failed to create lockfile"); LOG_ERROR("Process already running?"); break; case EXIT_KERNEL_SOCKET: LOG_ERROR("Unable to create netlink socket"); break; case EXIT_KERNEL_BIND: LOG_ERROR("Unable to bind to netlink socket"); break; case EXIT_KERNEL_SETSOCKOPT: LOG_ERROR("Unable to setsockopt on netlink socket"); break; case EXIT_CLUSTER_CKPT_INIT: LOG_ERROR("Unable to initialize checkpoint service"); LOG_ERROR("Has the cluster infrastructure been started?"); break; case EXIT_FAILURE: LOG_ERROR("Failed to start: Generic error"); break; default: LOG_ERROR("Failed to start: Unknown error"); break; } exit(EXIT_FAILURE); } setsid(); if (chdir("/")) { LOG_ERROR("Failed to chdir /: %s", strerror(errno)); exit(EXIT_FAILURE); } umask(0); if (close(0) || close(1) || close(2)) { LOG_ERROR("Failed to close terminal FDs"); exit(EXIT_FAILURE); } if ((dup2(devnull, 0) < 0) || /* reopen stdin */ (dup2(devnull, 1) < 0) || /* reopen stdout */ (dup2(devnull, 2) < 0)) /* reopen stderr */ exit(EXIT_FAILURE); if ((devnull > STDERR_FILENO) && close(devnull)) { LOG_ERROR("Failed to close descriptor %d: %s", devnull, strerror(errno)); exit(EXIT_FAILURE); } LOG_OPEN("cmirrord", LOG_PID, LOG_DAEMON); } /* * init_all * * Initialize modules. Exit on failure. */ static void init_all(void) { int r; (void) dm_prepare_selinux_context(CMIRRORD_PIDFILE, S_IFREG); if (dm_create_lockfile(CMIRRORD_PIDFILE) == 0) exit(EXIT_LOCKFILE); (void) dm_prepare_selinux_context(NULL, 0); atexit(remove_lockfile); /* FIXME Replace with sigaction. 
(deprecated) */ signal(SIGINT, &sig_handler); signal(SIGQUIT, &sig_handler); signal(SIGTERM, &sig_handler); signal(SIGHUP, &sig_handler); signal(SIGPIPE, SIG_IGN); signal(SIGUSR1, &sig_handler); signal(SIGUSR2, &sig_handler); sigemptyset(&signal_mask); signal_received = 0; if ((r = init_local()) || (r = init_cluster())) { exit(r); } } /* * cleanup_all * * Clean up before exiting */ static void cleanup_all(void) { cleanup_local(); cleanup_cluster(); } LVM2.2.02.176/daemons/cmirrord/logging.h0000644000000000000120000000420113176752421016367 0ustar rootwheel/* * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _LVM_CLOG_LOGGING_H #define _LVM_CLOG_LOGGING_H #define _GNU_SOURCE #define _FILE_OFFSET_BITS 64 #include "configure.h" #include #include #include /* SHORT_UUID - print last 8 chars of a string */ #define SHORT_UUID(x) (strlen(x) > 8) ? ((x) + (strlen(x) - 8)) : (x) extern const char *__rq_types_off_by_one[]; #define RQ_TYPE(x) __rq_types_off_by_one[(x) - 1] extern int log_tabbing; extern int log_is_open; extern int log_membership_change; extern int log_checkpoint; extern int log_resend_requests; #define LOG_OPEN(ident, option, facility) do { \ openlog(ident, option, facility); \ log_is_open = 1; \ } while (0) #define LOG_CLOSE(void) do { \ log_is_open = 0; \ closelog(); \ } while (0) #define LOG_OUTPUT(level, f, arg...) do { \ int __i; \ char __buffer[16]; \ FILE *fp = (level > LOG_NOTICE) ? stderr : stdout; \ if (log_is_open) { \ for (__i = 0; (__i < log_tabbing) && (__i < 15); __i++) \ __buffer[__i] = '\t'; \ __buffer[__i] = '\0'; \ syslog(level, "%s" f "\n", __buffer, ## arg); \ } else { \ for (__i = 0; __i < log_tabbing; __i++) \ fprintf(fp, "\t"); \ fprintf(fp, f "\n", ## arg); \ } \ } while (0) #ifdef DEBUG #define LOG_DBG(f, arg...) LOG_OUTPUT(LOG_DEBUG, f, ## arg) #else /* DEBUG */ #define LOG_DBG(f, arg...) do {} while (0) #endif /* DEBUG */ #define LOG_COND(__X, f, arg...) do {\ if (__X) { \ LOG_OUTPUT(LOG_NOTICE, f, ## arg); \ } \ } while (0) #define LOG_PRINT(f, arg...) LOG_OUTPUT(LOG_NOTICE, f, ## arg) #define LOG_ERROR(f, arg...) LOG_OUTPUT(LOG_ERR, f, ## arg) #endif /* _LVM_CLOG_LOGGING_H */ LVM2.2.02.176/daemons/cmirrord/cluster.h0000644000000000000120000000425413176752421016432 0ustar rootwheel/* * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _LVM_CLOG_CLUSTER_H #define _LVM_CLOG_CLUSTER_H #include "dm-log-userspace.h" #include "libdevmapper.h" #define DM_ULOG_RESPONSE 0x1000U /* in last byte of 32-bit value */ #define DM_ULOG_CHECKPOINT_READY 21 #define DM_ULOG_MEMBER_JOIN 22 /* * There is other information in addition to what can * be found in the dm_ulog_request structure that we * need for processing. 
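* Examples are the node that originated the request and the server that
* was in place when the request was issued.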
'clog_request' is the wrapping * structure we use to make the additional fields * available. */ struct clog_request { /* * If we don't use a union, the structure size will * vary between 32-bit and 64-bit machines. So, we * pack two 64-bit version numbers in there to force * the size of the structure to be the same. * * The two version numbers also help us with endian * issues. The first is always little endian, while * the second is in native format of the sending * machine. If the two are equal, there is no need * to do endian conversions. */ union { uint64_t version[2]; /* LE version and native version */ struct dm_list list; } u; /* * 'originator' is the machine from which the requests * was made. */ uint32_t originator; /* * 'pit_server' is the "point-in-time" server for the * request. (I.e. The machine that was the server at * the time the request was issued - only important during * startup. */ uint32_t pit_server; /* * The request from the kernel that is being processed */ struct dm_ulog_request u_rq; }; int init_cluster(void); void cleanup_cluster(void); void cluster_debug(void); int create_cluster_cpg(char *uuid, uint64_t luid); int destroy_cluster_cpg(char *uuid); int cluster_send(struct clog_request *rq); #endif /* _LVM_CLOG_CLUSTER_H */ LVM2.2.02.176/daemons/cmirrord/functions.c0000644000000000000120000013322613176752421016756 0ustar rootwheel/* * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "logging.h" #include "functions.h" #include #include #include #include #include #include #include #include #define BYTE_SHIFT 3 /* * Magic for persistent mirrors: "MiRr" * Following on-disk header information is stolen from * drivers/md/dm-log.c */ #define MIRROR_MAGIC 0x4D695272 #define MIRROR_DISK_VERSION 2 #define LOG_OFFSET 2 #define RESYNC_HISTORY 50 #define RESYNC_BUFLEN 128 //static char resync_history[RESYNC_HISTORY][128]; //static int idx = 0; #define LOG_SPRINT(_lc, f, arg...) 
do { \ lc->idx++; \ lc->idx = lc->idx % RESYNC_HISTORY; \ snprintf(lc->resync_history[lc->idx], RESYNC_BUFLEN, f, ## arg); \ } while (0) struct log_header { uint32_t magic; uint32_t version; uint64_t nr_regions; }; struct log_c { struct dm_list list; char uuid[DM_UUID_LEN]; uint64_t luid; time_t delay; /* limits how fast a resume can happen after suspend */ int touched; int in_sync; /* An in-sync that stays set until suspend/resume */ uint32_t region_size; uint32_t region_count; uint64_t sync_count; dm_bitset_t clean_bits; dm_bitset_t sync_bits; uint32_t recoverer; uint64_t recovering_region; /* -1 means not recovering */ uint64_t skip_bit_warning; /* used to warn if region skipped */ int sync_search; int resume_override; uint32_t block_on_error; enum sync { DEFAULTSYNC, /* Synchronize if necessary */ NOSYNC, /* Devices known to be already in sync */ FORCESYNC, /* Force a sync to happen */ } sync; uint32_t state; /* current operational state of the log */ struct dm_list mark_list; uint32_t recovery_halted; struct recovery_request *recovery_request_list; int disk_fd; /* -1 means no disk log */ int log_dev_failed; uint64_t disk_nr_regions; size_t disk_size; /* size of disk_buffer in bytes */ void *disk_buffer; /* aligned memory for O_DIRECT */ int idx; char resync_history[RESYNC_HISTORY][RESYNC_BUFLEN]; }; struct mark_entry { struct dm_list list; uint32_t nodeid; uint64_t region; }; struct recovery_request { uint64_t region; struct recovery_request *next; }; static DM_LIST_INIT(log_list); static DM_LIST_INIT(log_pending_list); static int log_test_bit(dm_bitset_t bs, int bit) { return dm_bit(bs, bit) ? 1 : 0; } static void log_set_bit(struct log_c *lc, dm_bitset_t bs, int bit) { dm_bit_set(bs, bit); lc->touched = 1; } static void log_clear_bit(struct log_c *lc, dm_bitset_t bs, int bit) { dm_bit_clear(bs, bit); lc->touched = 1; } static uint64_t find_next_zero_bit(dm_bitset_t bs, unsigned start) { for (; dm_bit(bs, start); start++) if (start >= *bs) return (uint64_t)-1; return start; } static uint64_t count_bits32(dm_bitset_t bs) { unsigned i, size = bs[0]/(unsigned)DM_BITS_PER_INT + 1; unsigned count = 0; for (i = 1; i <= size; i++) count += hweight32(bs[i]); return (uint64_t)count; } /* * get_log * * Returns: log if found, NULL otherwise */ static struct log_c *get_log(const char *uuid, uint64_t luid) { struct log_c *lc; dm_list_iterate_items(lc, &log_list) if (!strcmp(lc->uuid, uuid) && (!luid || (luid == lc->luid))) return lc; return NULL; } /* * get_pending_log * * Pending logs are logs that have been 'clog_ctr'ed, but * have not joined the CPG (via clog_resume). 
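* They stay on log_pending_list until local_resume() joins the CPG and
* moves them to the official log_list.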
* * Returns: log if found, NULL otherwise */ static struct log_c *get_pending_log(const char *uuid, uint64_t luid) { struct log_c *lc; dm_list_iterate_items(lc, &log_pending_list) if (!strcmp(lc->uuid, uuid) && (!luid || (luid == lc->luid))) return lc; return NULL; } static void header_to_disk(struct log_header *mem, struct log_header *disk) { memcpy(disk, mem, sizeof(struct log_header)); } static void header_from_disk(struct log_header *mem, struct log_header *disk) { memcpy(mem, disk, sizeof(struct log_header)); } static int rw_log(struct log_c *lc, int do_write) { int r; r = (int)lseek(lc->disk_fd, 0, SEEK_SET); if (r < 0) { LOG_ERROR("[%s] rw_log: lseek failure: %s", SHORT_UUID(lc->uuid), strerror(errno)); return -errno; } if (do_write) { /* FIXME Cope with full set of non-error conditions */ r = write(lc->disk_fd, lc->disk_buffer, lc->disk_size); if (r < 0) { LOG_ERROR("[%s] rw_log: write failure: %s", SHORT_UUID(lc->uuid), strerror(errno)); return -EIO; /* Failed disk write */ } return 0; } /* Read */ /* FIXME Cope with full set of non-error conditions */ r = read(lc->disk_fd, lc->disk_buffer, lc->disk_size); if (r < 0) LOG_ERROR("[%s] rw_log: read failure: %s", SHORT_UUID(lc->uuid), strerror(errno)); if (r != lc->disk_size) return -EIO; /* Failed disk read */ return 0; } /* * read_log * @lc * * Valid return codes: * -EINVAL: Invalid header, bits not copied * -EIO: Unable to read disk log * 0: Valid header, disk bit -> lc->clean_bits * * Returns: 0 on success, -EXXX on failure */ static int read_log(struct log_c *lc) { struct log_header lh = { 0 }; size_t bitset_size; if (rw_log(lc, 0)) return -EIO; /* Failed disk read */ header_from_disk(&lh, lc->disk_buffer); if (lh.magic != MIRROR_MAGIC) return -EINVAL; lc->disk_nr_regions = lh.nr_regions; /* Read disk bits into sync_bits */ bitset_size = lc->region_count / 8; bitset_size += (lc->region_count % 8) ? 1 : 0; /* 'lc->clean_bits + 1' becasue dm_bitset_t leads with a uint32_t */ memcpy(lc->clean_bits + 1, (char *)lc->disk_buffer + 1024, bitset_size); return 0; } /* * write_log * @lc * * Returns: 0 on success, -EIO on failure */ static int write_log(struct log_c *lc) { struct log_header lh; size_t bitset_size; lh.magic = MIRROR_MAGIC; lh.version = MIRROR_DISK_VERSION; lh.nr_regions = lc->region_count; header_to_disk(&lh, lc->disk_buffer); /* Write disk bits from clean_bits */ bitset_size = lc->region_count / 8; bitset_size += (lc->region_count % 8) ? 1 : 0; /* 'lc->clean_bits + 1' becasue dm_bitset_t leads with a uint32_t */ memcpy((char *)lc->disk_buffer + 1024, lc->clean_bits + 1, bitset_size); if (rw_log(lc, 1)) { lc->log_dev_failed = 1; return -EIO; /* Failed disk write */ } return 0; } /* FIXME Rewrite this function taking advantage of the udev changes (where in use) to improve its efficiency! */ static int find_disk_path(char *major_minor_str, char *path_rtn, int *unlink_path __attribute__((unused))) { int r; DIR *dp; struct dirent *dep; struct stat statbuf; int major, minor; if (!strstr(major_minor_str, ":")) { r = stat(major_minor_str, &statbuf); if (r) return -errno; if (!S_ISBLK(statbuf.st_mode)) return -EINVAL; sprintf(path_rtn, "%s", major_minor_str); return 0; } r = sscanf(major_minor_str, "%d:%d", &major, &minor); if (r != 2) return -EINVAL; /* FIXME dm_dir() */ LOG_DBG("Checking /dev/mapper for device %d:%d", major, minor); /* Check /dev/mapper dir */ dp = opendir("/dev/mapper"); if (!dp) return -ENOENT; while ((dep = readdir(dp)) != NULL) { /* * FIXME: This is racy. 
By the time the path is used, * it may point to something else. 'fstat' will be * required upon opening to ensure we got what we * wanted. */ sprintf(path_rtn, "/dev/mapper/%s", dep->d_name); if (stat(path_rtn, &statbuf) < 0) { LOG_DBG("Unable to stat %s", path_rtn); continue; } if (S_ISBLK(statbuf.st_mode) && (major(statbuf.st_rdev) == major) && (minor(statbuf.st_rdev) == minor)) { LOG_DBG(" %s: YES", dep->d_name); if (closedir(dp)) LOG_DBG("Unable to closedir /dev/mapper %s", strerror(errno)); return 0; } else { LOG_DBG(" %s: NO", dep->d_name); } } if (closedir(dp)) LOG_DBG("Unable to closedir /dev/mapper %s", strerror(errno)); /* FIXME Find out why this was here and deal with underlying problem. */ LOG_DBG("Path not found for %d/%d", major, minor); return -ENOENT; // LOG_DBG("Creating /dev/mapper/%d-%d", major, minor); // sprintf(path_rtn, "/dev/mapper/%d-%d", major, minor); // r = mknod(path_rtn, S_IFBLK | S_IRUSR | S_IWUSR, MKDEV(major, minor)); /* * If we have to make the path, we unlink it after we open it */ // *unlink_path = 1; // return r ? -errno : 0; } static int _clog_ctr(char *uuid, uint64_t luid, int argc, char **argv, uint64_t device_size) { int i; int r = 0; char *p; uint64_t region_size; uint64_t region_count; struct log_c *lc = NULL; enum sync log_sync = DEFAULTSYNC; uint32_t block_on_error = 0; int disk_log; char disk_path[PATH_MAX]; int unlink_path = 0; long page_size; int pages; /* If core log request, then argv[0] will be region_size */ if (!strtoll(argv[0], &p, 0) || *p) { disk_log = 1; if ((argc < 2) || (argc > 4)) { LOG_ERROR("Too %s arguments to clustered-disk log type", (argc < 3) ? "few" : "many"); r = -EINVAL; goto fail; } r = find_disk_path(argv[0], disk_path, &unlink_path); if (r) { LOG_ERROR("Unable to find path to device %s", argv[0]); goto fail; } LOG_DBG("Clustered log disk is %s", disk_path); } else { disk_log = 0; if ((argc < 1) || (argc > 3)) { LOG_ERROR("Too %s arguments to clustered-core log type", (argc < 2) ? "few" : "many"); r = -EINVAL; goto fail; } } if (!(region_size = strtoll(argv[disk_log], &p, 0)) || *p) { LOG_ERROR("Invalid region_size argument to clustered-%s log type", (disk_log) ? "disk" : "core"); r = -EINVAL; goto fail; } region_count = device_size / region_size; if (device_size % region_size) { /* * I can't remember if device_size must be a multiple * of region_size, so check it anyway. 
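* A trailing partial region still needs a bit of its own, hence the
* round-up below.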
*/ region_count++; } for (i = 0; i < argc; i++) { if (!strcmp(argv[i], "sync")) log_sync = FORCESYNC; else if (!strcmp(argv[i], "nosync")) log_sync = NOSYNC; else if (!strcmp(argv[i], "block_on_error")) block_on_error = 1; } lc = dm_zalloc(sizeof(*lc)); if (!lc) { LOG_ERROR("Unable to allocate cluster log context"); r = -ENOMEM; goto fail; } lc->region_size = region_size; lc->region_count = region_count; lc->sync = log_sync; lc->block_on_error = block_on_error; lc->sync_search = 0; lc->recovering_region = (uint64_t)-1; lc->skip_bit_warning = region_count; lc->disk_fd = -1; lc->log_dev_failed = 0; strncpy(lc->uuid, uuid, DM_UUID_LEN); lc->luid = luid; if (get_log(lc->uuid, lc->luid) || get_pending_log(lc->uuid, lc->luid)) { LOG_ERROR("[%s/%" PRIu64 "u] Log already exists, unable to create.", SHORT_UUID(lc->uuid), lc->luid); dm_free(lc); return -EINVAL; } dm_list_init(&lc->mark_list); lc->clean_bits = dm_bitset_create(NULL, region_count); if (!lc->clean_bits) { LOG_ERROR("Unable to allocate clean bitset"); r = -ENOMEM; goto fail; } lc->sync_bits = dm_bitset_create(NULL, region_count); if (!lc->sync_bits) { LOG_ERROR("Unable to allocate sync bitset"); r = -ENOMEM; goto fail; } if (log_sync == NOSYNC) dm_bit_set_all(lc->sync_bits); lc->sync_count = (log_sync == NOSYNC) ? region_count : 0; if (disk_log) { if ((page_size = sysconf(_SC_PAGESIZE)) < 0) { LOG_ERROR("Unable to read pagesize: %s", strerror(errno)); r = errno; goto fail; } pages = *(lc->clean_bits) / page_size; pages += *(lc->clean_bits) % page_size ? 1 : 0; pages += 1; /* for header */ r = open(disk_path, O_RDWR | O_DIRECT); if (r < 0) { LOG_ERROR("Unable to open log device, %s: %s", disk_path, strerror(errno)); r = errno; goto fail; } if (unlink_path) if (unlink(disk_path) < 0) { LOG_DBG("Warning: Unable to unlink log device, %s: %s", disk_path, strerror(errno)); } lc->disk_fd = r; lc->disk_size = pages * page_size; r = posix_memalign(&(lc->disk_buffer), page_size, lc->disk_size); if (r) { LOG_ERROR("Unable to allocate memory for disk_buffer"); goto fail; } memset(lc->disk_buffer, 0, lc->disk_size); LOG_DBG("Disk log ready"); } dm_list_add(&log_pending_list, &lc->list); return 0; fail: if (lc) { if (lc->disk_fd >= 0 && close(lc->disk_fd)) LOG_ERROR("Close device error, %s: %s", disk_path, strerror(errno)); free(lc->disk_buffer); dm_free(lc->sync_bits); dm_free(lc->clean_bits); dm_free(lc); } return r; } /* * clog_ctr * @rq * * rq->data should contain constructor string as follows: * [disk] [[no]sync] * The kernel is responsible for adding the argument * to the end; otherwise, we cannot compute the region_count. 
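* (clog_ctr() reads that device size into dev_size_str and hands it to
* _clog_ctr(), which computes region_count = device_size / region_size.)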
* * FIXME: Currently relies on caller to fill in rq->error */ static int clog_dtr(struct dm_ulog_request *rq); static int clog_ctr(struct dm_ulog_request *rq) { int argc, i, r = 0; char *p, **argv = NULL; char *dev_size_str; uint64_t device_size; /* Sanity checks */ if (!rq->data_size) { LOG_ERROR("Received constructor request with no data"); return -EINVAL; } if (strlen(rq->data) > rq->data_size) { LOG_ERROR("Received constructor request with bad data"); LOG_ERROR("strlen(rq->data)[%d] != rq->data_size[%llu]", (int)strlen(rq->data), (unsigned long long)rq->data_size); LOG_ERROR("rq->data = '%s' [%d]", rq->data, (int)strlen(rq->data)); return -EINVAL; } /* Split up args */ for (argc = 0, p = rq->data; (p = strstr(p, " ")); p++, argc++) *p = '\0'; if (!argc) { LOG_ERROR("Received constructor request with bad data %s", rq->data); return -EINVAL; } argv = malloc(argc * sizeof(char *)); if (!argv) return -ENOMEM; p = dev_size_str = rq->data; p += strlen(p) + 1; for (i = 0; i < argc; i++, p = p + strlen(p) + 1) argv[i] = p; if (strcmp(argv[0], "clustered-disk") && strcmp(argv[0], "clustered-core")) { LOG_ERROR("Unsupported userspace log type, \"%s\"", argv[0]); free(argv); return -EINVAL; } if (!(device_size = strtoll(dev_size_str, &p, 0)) || *p) { LOG_ERROR("Invalid device size argument: %s", dev_size_str); free(argv); return -EINVAL; } r = _clog_ctr(rq->uuid, rq->luid, argc - 1, argv + 1, device_size); /* We join the CPG when we resume */ /* No returning data */ if ((rq->version > 1) && !strcmp(argv[0], "clustered-disk")) rq->data_size = sprintf(rq->data, "%s", argv[1]) + 1; else rq->data_size = 0; if (r) { LOG_ERROR("Failed to create cluster log (%s)", rq->uuid); for (i = 0; i < argc; i++) LOG_ERROR("argv[%d] = %s", i, argv[i]); } else LOG_DBG("[%s] Cluster log created", SHORT_UUID(rq->uuid)); free(argv); return r; } /* * clog_dtr * @rq * */ static int clog_dtr(struct dm_ulog_request *rq) { struct log_c *lc = get_log(rq->uuid, rq->luid); if (lc) { /* * The log should not be on the official list. There * should have been a suspend first. 
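* If that did not happen, leave the CPG here so the group is not left
* with a stale member.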
*/ LOG_ERROR("[%s] DTR before SUS: leaving CPG", SHORT_UUID(rq->uuid)); destroy_cluster_cpg(rq->uuid); } else if (!(lc = get_pending_log(rq->uuid, rq->luid))) { LOG_ERROR("clog_dtr called on log that is not official or pending"); return -EINVAL; } LOG_DBG("[%s] Cluster log removed", SHORT_UUID(lc->uuid)); dm_list_del(&lc->list); if (lc->disk_fd != -1 && close(lc->disk_fd)) LOG_ERROR("Failed to close disk log: %s", strerror(errno)); if (lc->disk_buffer) free(lc->disk_buffer); dm_free(lc->clean_bits); dm_free(lc->sync_bits); dm_free(lc); return 0; } /* * clog_presuspend * @rq * */ static int clog_presuspend(struct dm_ulog_request *rq) { struct log_c *lc = get_log(rq->uuid, rq->luid); if (!lc) return -EINVAL; if (lc->touched) LOG_DBG("WARNING: log still marked as 'touched' during suspend"); lc->recovery_halted = 1; return 0; } /* * clog_postsuspend * @rq * */ static int clog_postsuspend(struct dm_ulog_request *rq) { struct log_c *lc = get_log(rq->uuid, rq->luid); if (!lc) return -EINVAL; LOG_DBG("[%s] clog_postsuspend: leaving CPG", SHORT_UUID(lc->uuid)); destroy_cluster_cpg(rq->uuid); lc->state = LOG_SUSPENDED; lc->recovering_region = (uint64_t)-1; lc->recoverer = (uint32_t)-1; lc->delay = time(NULL); return 0; } /* * cluster_postsuspend * @rq * */ int cluster_postsuspend(char *uuid, uint64_t luid) { struct log_c *lc = get_log(uuid, luid); if (!lc) return -EINVAL; LOG_DBG("[%s] clog_postsuspend: finalizing", SHORT_UUID(lc->uuid)); lc->resume_override = 0; /* move log to pending list */ dm_list_del(&lc->list); dm_list_add(&log_pending_list, &lc->list); return 0; } /* * clog_resume * @rq * * Does the main work of resuming. */ static int clog_resume(struct dm_ulog_request *rq) { uint32_t i; int commit_log = 0; struct log_c *lc = get_log(rq->uuid, rq->luid); if (!lc) return -EINVAL; lc->in_sync = 0; switch (lc->resume_override) { case 1000: LOG_ERROR("[%s] Additional resume issued before suspend", SHORT_UUID(rq->uuid)); #ifdef DEBUG kill(getpid(), SIGUSR1); #endif return 0; case 0: lc->resume_override = 1000; if (lc->disk_fd == -1) { LOG_DBG("[%s] Master resume.", SHORT_UUID(lc->uuid)); goto no_disk; } LOG_DBG("[%s] Master resume: reading disk log", SHORT_UUID(lc->uuid)); commit_log = 1; break; case 1: LOG_ERROR("Error:: partial bit loading (just sync_bits)"); return -EINVAL; case 2: LOG_ERROR("Error:: partial bit loading (just clean_bits)"); return -EINVAL; case 3: LOG_DBG("[%s] Non-master resume: bits pre-loaded", SHORT_UUID(lc->uuid)); lc->resume_override = 1000; goto out; default: LOG_ERROR("Error:: multiple loading of bits (%d)", lc->resume_override); return -EINVAL; } if (lc->log_dev_failed) { LOG_ERROR("Log device has failed, unable to read bits"); rq->error = 0; /* We can handle this so far */ lc->disk_nr_regions = 0; } else rq->error = read_log(lc); switch (rq->error) { case 0: if (lc->disk_nr_regions < lc->region_count) LOG_DBG("[%s] Mirror has grown, updating log bits", SHORT_UUID(lc->uuid)); else if (lc->disk_nr_regions > lc->region_count) LOG_DBG("[%s] Mirror has shrunk, updating log bits", SHORT_UUID(lc->uuid)); break; case -EINVAL: LOG_DBG("[%s] (Re)initializing mirror log - resync issued.", SHORT_UUID(lc->uuid)); lc->disk_nr_regions = 0; break; default: LOG_ERROR("Failed to read disk log"); lc->disk_nr_regions = 0; break; } no_disk: /* If mirror has grown, set bits appropriately */ if (lc->sync == NOSYNC) for (i = lc->disk_nr_regions; i < lc->region_count; i++) log_set_bit(lc, lc->clean_bits, i); else for (i = lc->disk_nr_regions; i < lc->region_count; i++) log_clear_bit(lc, 
lc->clean_bits, i); /* Clear any old bits if device has shrunk */ for (i = lc->region_count; i % 32; i++) log_clear_bit(lc, lc->clean_bits, i); /* copy clean across to sync */ dm_bit_copy(lc->sync_bits, lc->clean_bits); if (commit_log && (lc->disk_fd >= 0)) { rq->error = write_log(lc); if (rq->error) LOG_ERROR("Failed initial disk log write"); else LOG_DBG("Disk log initialized"); lc->touched = 0; } out: /* * Clear any old bits if device has shrunk - necessary * for non-master resume */ for (i = lc->region_count; i % 32; i++) { log_clear_bit(lc, lc->clean_bits, i); log_clear_bit(lc, lc->sync_bits, i); } lc->sync_count = count_bits32(lc->sync_bits); LOG_SPRINT(lc, "[%s] Initial sync_count = %llu", SHORT_UUID(lc->uuid), (unsigned long long)lc->sync_count); lc->sync_search = 0; lc->state = LOG_RESUMED; lc->recovery_halted = 0; return rq->error; } /* * local_resume * @rq * * If the log is pending, we must first join the cpg and * put the log in the official list. * */ int local_resume(struct dm_ulog_request *rq) { int r; time_t t; struct log_c *lc = get_log(rq->uuid, rq->luid); if (!lc) { /* Is the log in the pending list? */ lc = get_pending_log(rq->uuid, rq->luid); if (!lc) { LOG_ERROR("clog_resume called on log that is not official or pending"); return -EINVAL; } t = time(NULL); t -= lc->delay; /* * This should be considered a temporary fix. It addresses * a problem that exists when nodes suspend/resume in rapid * succession. While the problem is very rare, it has been * seen to happen in real-world-like testing. * * The problem: * - Node A joins cluster * - Node B joins cluster * - Node A prepares checkpoint * - Node A gets ready to write checkpoint * - Node B leaves * - Node B joins * - Node A finishes write of checkpoint * - Node B receives checkpoint meant for previous session * -- Node B can now be non-coherent * * This timer will solve the problem for now, but could be * replaced by a generation number sent with the resume * command from the kernel. The generation number would * be included in the name of the checkpoint to prevent * reading stale data. */ if ((t < 3) && (t >= 0)) sleep(3 - t); /* Join the CPG */ r = create_cluster_cpg(rq->uuid, rq->luid); if (r) { LOG_ERROR("clog_resume: Failed to create cluster CPG"); return r; } /* move log to official list */ dm_list_del(&lc->list); dm_list_add(&log_list, &lc->list); } return 0; } /* * clog_get_region_size * @rq * * Since this value doesn't change, the kernel * should not need to talk to server to get this * The function is here for completness * * Returns: 0 on success, -EXXX on failure */ static int clog_get_region_size(struct dm_ulog_request *rq) { uint64_t *rtn = (uint64_t *)rq->data; struct log_c *lc = get_log(rq->uuid, rq->luid); if (!lc && !(lc = get_pending_log(rq->uuid, rq->luid))) return -EINVAL; *rtn = lc->region_size; rq->data_size = sizeof(*rtn); return 0; } /* * clog_is_clean * @rq * * Returns: 1 if clean, 0 otherwise */ static int clog_is_clean(struct dm_ulog_request *rq) { int64_t *rtn = (int64_t *)rq->data; uint64_t *region = (uint64_t *)rq->data; struct log_c *lc = get_log(rq->uuid, rq->luid); if (!lc) return -EINVAL; *rtn = log_test_bit(lc->clean_bits, *region); rq->data_size = sizeof(*rtn); return 0; } /* * clog_in_sync * @rq * * We ignore any request for non-block. That * should be handled elsewhere. (If the request * has come this far, it has already blocked.) 
* * Returns: 1 if in-sync, 0 otherwise */ static int clog_in_sync(struct dm_ulog_request *rq) { int64_t *rtn = (int64_t *)rq->data; uint64_t *region_p = (uint64_t *)rq->data; uint64_t region = *region_p; struct log_c *lc = get_log(rq->uuid, rq->luid); if (!lc) return -EINVAL; if (region > lc->region_count) return -EINVAL; *rtn = log_test_bit(lc->sync_bits, region); /* * If the mirror was successfully recovered, we want to always * force every machine to write to all devices - otherwise, * corruption will occur. Here's how: * Node1 suffers a failure and marks a region out-of-sync * Node2 attempts a write, gets by is_remote_recovering, * and queries the sync status of the region - finding * it out-of-sync. * Node2 thinks the write should be a nosync write, but it * hasn't suffered the drive failure that Node1 has yet. * It then issues a generic_make_request directly to * the primary image only - which is exactly the device * that has suffered the failure. * Node2 suffers a lost write - which completely bypasses the * mirror layer because it had gone through generic_m_r. * The file system will likely explode at this point due to * I/O errors. If it wasn't the primary that failed, it is * easily possible in this case to issue writes to just one * of the remaining images - also leaving the mirror inconsistent. * * We let in_sync() return 1 in a cluster regardless of what is * in the bitmap once recovery has successfully completed on a * mirror. This ensures the mirroring code will continue to * attempt to write to all mirror images. The worst that can * happen for reads is that additional read attempts may be * taken. * * Futher investigation may be required to determine if there are * similar possible outcomes when the mirror is in the process of * recovering. In that case, lc->in_sync would not have been set * yet. */ if (!*rtn && lc->in_sync) *rtn = 1; if (*rtn) LOG_DBG("[%s] Region is in-sync: %llu", SHORT_UUID(lc->uuid), (unsigned long long)region); else LOG_DBG("[%s] Region is not in-sync: %llu", SHORT_UUID(lc->uuid), (unsigned long long)region); rq->data_size = sizeof(*rtn); return 0; } /* * clog_flush * @rq * */ static int clog_flush(struct dm_ulog_request *rq, int server) { int r = 0; struct log_c *lc = get_log(rq->uuid, rq->luid); if (!lc) return -EINVAL; if (!lc->touched) return 0; /* * Do the actual flushing of the log only * if we are the server. */ if (server && (lc->disk_fd >= 0)) { r = rq->error = write_log(lc); if (r) LOG_ERROR("[%s] Error writing to disk log", SHORT_UUID(lc->uuid)); else LOG_DBG("[%s] Disk log written", SHORT_UUID(lc->uuid)); } lc->touched = 0; return r; } /* * mark_region * @lc * @region * @who * * Put a mark region request in the tree for tracking. * * Returns: 0 on success, -EXXX on error */ static int mark_region(struct log_c *lc, uint64_t region, uint32_t who) { int found = 0; struct mark_entry *m; dm_list_iterate_items(m, &lc->mark_list) if (m->region == region) { found = 1; if (m->nodeid == who) return 0; } if (!found) log_clear_bit(lc, lc->clean_bits, region); /* * Save allocation until here - if there is a failure, * at least we have cleared the bit. */ m = malloc(sizeof(*m)); if (!m) { LOG_ERROR("Unable to allocate space for mark_entry: %llu/%u", (unsigned long long)region, who); return -ENOMEM; } m->nodeid = who; m->region = region; dm_list_add(&lc->mark_list, &m->list); return 0; } /* * clog_mark_region * @rq * * rq may contain more than one mark request. We * can determine the number from the 'data_size' field. 
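* Each entry is a uint64_t region number, so the count is simply
* data_size / sizeof(uint64_t).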
* * Returns: 0 on success, -EXXX on failure */ static int clog_mark_region(struct dm_ulog_request *rq, uint32_t originator) { int r; int count; uint64_t *region; struct log_c *lc = get_log(rq->uuid, rq->luid); if (!lc) return -EINVAL; if (rq->data_size % sizeof(uint64_t)) { LOG_ERROR("Bad data size given for mark_region request"); return -EINVAL; } count = rq->data_size / sizeof(uint64_t); region = (uint64_t *)&rq->data; for (; count > 0; count--, region++) { r = mark_region(lc, *region, originator); if (r) return r; } rq->data_size = 0; return 0; } static int clear_region(struct log_c *lc, uint64_t region, uint32_t who) { int other_matches = 0; struct mark_entry *m, *n; dm_list_iterate_items_safe(m, n, &lc->mark_list) if (m->region == region) { if (m->nodeid == who) { dm_list_del(&m->list); free(m); } else other_matches = 1; } /* * Clear region if: * 1) It is in-sync * 2) There are no other machines that have it marked */ if (!other_matches && log_test_bit(lc->sync_bits, region)) log_set_bit(lc, lc->clean_bits, region); return 0; } /* * clog_clear_region * @rq * * rq may contain more than one clear request. We * can determine the number from the 'data_size' field. * * Returns: 0 on success, -EXXX on failure */ static int clog_clear_region(struct dm_ulog_request *rq, uint32_t originator) { int r; int count; uint64_t *region; struct log_c *lc = get_log(rq->uuid, rq->luid); if (!lc) return -EINVAL; if (rq->data_size % sizeof(uint64_t)) { LOG_ERROR("Bad data size given for clear_region request"); return -EINVAL; } count = rq->data_size / sizeof(uint64_t); region = (uint64_t *)&rq->data; for (; count > 0; count--, region++) { r = clear_region(lc, *region, originator); if (r) return r; } rq->data_size = 0; return 0; } /* * clog_get_resync_work * @rq * */ static int clog_get_resync_work(struct dm_ulog_request *rq, uint32_t originator) { struct { int64_t i; uint64_t r; } *pkg = (void *)rq->data; struct log_c *lc = get_log(rq->uuid, rq->luid); if (!lc) return -EINVAL; rq->data_size = sizeof(*pkg); pkg->i = 0; if (lc->sync_search >= lc->region_count) { /* * FIXME: handle intermittent errors during recovery * by resetting sync_search... but not to many times. 
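* For now, sync_search reaching region_count simply means recovery is
* considered finished.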
*/ LOG_SPRINT(lc, "GET - SEQ#=%u, UUID=%s, nodeid = %u:: " "Recovery finished", rq->seq, SHORT_UUID(lc->uuid), originator); return 0; } if (lc->recovering_region != (uint64_t)-1) { if (lc->recoverer == originator) { LOG_SPRINT(lc, "GET - SEQ#=%u, UUID=%s, nodeid = %u:: " "Re-requesting work (%llu)", rq->seq, SHORT_UUID(lc->uuid), originator, (unsigned long long)lc->recovering_region); pkg->r = lc->recovering_region; pkg->i = 1; LOG_COND(log_resend_requests, "***** RE-REQUEST *****"); } else { LOG_SPRINT(lc, "GET - SEQ#=%u, UUID=%s, nodeid = %u:: " "Someone already recovering (%llu)", rq->seq, SHORT_UUID(lc->uuid), originator, (unsigned long long)lc->recovering_region); } return 0; } while (lc->recovery_request_list) { struct recovery_request *del; del = lc->recovery_request_list; lc->recovery_request_list = del->next; pkg->r = del->region; free(del); if (!log_test_bit(lc->sync_bits, pkg->r)) { LOG_SPRINT(lc, "GET - SEQ#=%u, UUID=%s, nodeid = %u:: " "Assigning priority resync work (%llu)", rq->seq, SHORT_UUID(lc->uuid), originator, (unsigned long long)pkg->r); pkg->i = 1; lc->recovering_region = pkg->r; lc->recoverer = originator; return 0; } } pkg->r = find_next_zero_bit(lc->sync_bits, lc->sync_search); if (pkg->r >= lc->region_count) { LOG_SPRINT(lc, "GET - SEQ#=%u, UUID=%s, nodeid = %u:: " "Resync work complete.", rq->seq, SHORT_UUID(lc->uuid), originator); lc->sync_search = lc->region_count + 1; return 0; } lc->sync_search = pkg->r + 1; LOG_SPRINT(lc, "GET - SEQ#=%u, UUID=%s, nodeid = %u:: " "Assigning resync work (%llu)", rq->seq, SHORT_UUID(lc->uuid), originator, (unsigned long long)pkg->r); pkg->i = 1; lc->recovering_region = pkg->r; lc->recoverer = originator; return 0; } /* * clog_set_region_sync * @rq */ static int clog_set_region_sync(struct dm_ulog_request *rq, uint32_t originator) { struct { uint64_t region; int64_t in_sync; } *pkg = (void *)rq->data; struct log_c *lc = get_log(rq->uuid, rq->luid); if (!lc) return -EINVAL; lc->recovering_region = (uint64_t)-1; if (pkg->in_sync) { if (log_test_bit(lc->sync_bits, pkg->region)) { LOG_SPRINT(lc, "SET - SEQ#=%u, UUID=%s, nodeid = %u:: " "Region already set (%llu)", rq->seq, SHORT_UUID(lc->uuid), originator, (unsigned long long)pkg->region); } else { log_set_bit(lc, lc->sync_bits, pkg->region); lc->sync_count++; /* The rest of this section is all for debugging */ LOG_SPRINT(lc, "SET - SEQ#=%u, UUID=%s, nodeid = %u:: " "Setting region (%llu)", rq->seq, SHORT_UUID(lc->uuid), originator, (unsigned long long)pkg->region); if (pkg->region == lc->skip_bit_warning) lc->skip_bit_warning = lc->region_count; if (pkg->region > (lc->skip_bit_warning + 5)) { LOG_SPRINT(lc, "*** Region #%llu skipped during recovery ***", (unsigned long long)lc->skip_bit_warning); lc->skip_bit_warning = lc->region_count; #ifdef DEBUG kill(getpid(), SIGUSR1); #endif } if (!log_test_bit(lc->sync_bits, (pkg->region) ? pkg->region - 1 : 0)) { LOG_SPRINT(lc, "*** Previous bit not set ***"); lc->skip_bit_warning = (pkg->region) ? 
pkg->region - 1 : 0; } } } else if (log_test_bit(lc->sync_bits, pkg->region)) { lc->sync_count--; log_clear_bit(lc, lc->sync_bits, pkg->region); LOG_SPRINT(lc, "SET - SEQ#=%u, UUID=%s, nodeid = %u:: " "Unsetting region (%llu)", rq->seq, SHORT_UUID(lc->uuid), originator, (unsigned long long)pkg->region); } if (lc->sync_count != count_bits32(lc->sync_bits)) { unsigned long long reset = count_bits32(lc->sync_bits); LOG_SPRINT(lc, "SET - SEQ#=%u, UUID=%s, nodeid = %u:: " "sync_count(%llu) != bitmap count(%llu)", rq->seq, SHORT_UUID(lc->uuid), originator, (unsigned long long)lc->sync_count, reset); #ifdef DEBUG kill(getpid(), SIGUSR1); #endif lc->sync_count = reset; } if (lc->sync_count > lc->region_count) LOG_SPRINT(lc, "SET - SEQ#=%u, UUID=%s, nodeid = %u:: " "(lc->sync_count > lc->region_count) - this is bad", rq->seq, SHORT_UUID(lc->uuid), originator); if (lc->sync_count == lc->region_count) lc->in_sync = 1; rq->data_size = 0; return 0; } /* * clog_get_sync_count * @rq */ static int clog_get_sync_count(struct dm_ulog_request *rq, uint32_t originator) { uint64_t *sync_count = (uint64_t *)rq->data; struct log_c *lc = get_log(rq->uuid, rq->luid); /* * FIXME: Mirror requires us to be able to ask for * the sync count while pending... but I don't like * it because other machines may not be suspended and * the stored value may not be accurate. */ if (!lc) lc = get_pending_log(rq->uuid, rq->luid); if (!lc) return -EINVAL; *sync_count = lc->sync_count; rq->data_size = sizeof(*sync_count); if (lc->sync_count != count_bits32(lc->sync_bits)) { unsigned long long reset = count_bits32(lc->sync_bits); LOG_SPRINT(lc, "get_sync_count - SEQ#=%u, UUID=%s, nodeid = %u:: " "sync_count(%llu) != bitmap count(%llu)", rq->seq, SHORT_UUID(lc->uuid), originator, (unsigned long long)lc->sync_count, reset); #ifdef DEBUG kill(getpid(), SIGUSR1); #endif lc->sync_count = reset; } return 0; } static int core_status_info(struct log_c *lc __attribute__((unused)), struct dm_ulog_request *rq) { int r; char *data = (char *)rq->data; r = sprintf(data, "1 clustered-core"); if (r < 0) return r; rq->data_size = r; return 0; } static int disk_status_info(struct log_c *lc, struct dm_ulog_request *rq) { int r; char *data = (char *)rq->data; struct stat statbuf; if (fstat(lc->disk_fd, &statbuf)) { rq->error = -errno; return -errno; } r = sprintf(data, "3 clustered-disk %d:%d %c", major(statbuf.st_rdev), minor(statbuf.st_rdev), (lc->log_dev_failed) ? 'D' : 'A'); if (r < 0) return r; rq->data_size = r; return 0; } /* * clog_status_info * @rq * */ static int clog_status_info(struct dm_ulog_request *rq) { int r; struct log_c *lc = get_log(rq->uuid, rq->luid); if (!lc) lc = get_pending_log(rq->uuid, rq->luid); if (!lc) return -EINVAL; if (lc->disk_fd == -1) r = core_status_info(lc, rq); else r = disk_status_info(lc, rq); return r; } static int core_status_table(struct log_c *lc, struct dm_ulog_request *rq) { int r; char *data = (char *)rq->data; r = sprintf(data, "clustered-core %u %s%s ", lc->region_size, (lc->sync == DEFAULTSYNC) ? "" : (lc->sync == NOSYNC) ? "nosync " : "sync ", (lc->block_on_error) ? "block_on_error" : ""); if (r < 0) return r; rq->data_size = r; return 0; } static int disk_status_table(struct log_c *lc, struct dm_ulog_request *rq) { int r; char *data = (char *)rq->data; struct stat statbuf; if (fstat(lc->disk_fd, &statbuf)) { rq->error = -errno; return -errno; } r = sprintf(data, "clustered-disk %d:%d %u %s%s ", major(statbuf.st_rdev), minor(statbuf.st_rdev), lc->region_size, (lc->sync == DEFAULTSYNC) ? 
"" : (lc->sync == NOSYNC) ? "nosync " : "sync ", (lc->block_on_error) ? "block_on_error" : ""); if (r < 0) return r; rq->data_size = r; return 0; } /* * clog_status_table * @rq * */ static int clog_status_table(struct dm_ulog_request *rq) { int r; struct log_c *lc = get_log(rq->uuid, rq->luid); if (!lc) lc = get_pending_log(rq->uuid, rq->luid); if (!lc) return -EINVAL; if (lc->disk_fd == -1) r = core_status_table(lc, rq); else r = disk_status_table(lc, rq); return r; } /* * clog_is_remote_recovering * @rq * */ static int clog_is_remote_recovering(struct dm_ulog_request *rq) { uint64_t *region_p = (uint64_t *)rq->data; uint64_t region = *region_p; struct { int64_t is_recovering; uint64_t in_sync_hint; } *pkg = (void *)rq->data; struct log_c *lc = get_log(rq->uuid, rq->luid); if (!lc) return -EINVAL; if (region > lc->region_count) return -EINVAL; if (lc->recovery_halted) { LOG_DBG("[%s] Recovery halted... [not remote recovering]: %llu", SHORT_UUID(lc->uuid), (unsigned long long)region); pkg->is_recovering = 0; pkg->in_sync_hint = lc->region_count; /* none are recovering */ } else { pkg->is_recovering = !log_test_bit(lc->sync_bits, region); /* * Remember, 'lc->sync_search' is 1 plus the region * currently being recovered. So, we must take off 1 * to account for that; but only if 'sync_search > 1'. */ pkg->in_sync_hint = lc->sync_search ? (lc->sync_search - 1) : 0; LOG_DBG("[%s] Region is %s: %llu", SHORT_UUID(lc->uuid), (region == lc->recovering_region) ? "currently remote recovering" : (pkg->is_recovering) ? "pending remote recovery" : "not remote recovering", (unsigned long long)region); } if (pkg->is_recovering && (region != lc->recovering_region)) { struct recovery_request *rr; /* Already in the list? */ for (rr = lc->recovery_request_list; rr; rr = rr->next) if (rr->region == region) goto out; /* Failure to allocated simply means we can't prioritize it */ rr = malloc(sizeof(*rr)); if (!rr) goto out; LOG_DBG("[%s] Adding region to priority list: %llu", SHORT_UUID(lc->uuid), (unsigned long long)region); rr->region = region; rr->next = lc->recovery_request_list; lc->recovery_request_list = rr; } out: rq->data_size = sizeof(*pkg); return 0; } /* * do_request * @rq: the request * @server: is this request performed by the server * * An inability to perform this function will return an error * from this function. However, an inability to successfully * perform the request will fill in the 'rq->error' field. * * 'rq' (or more correctly, rq->u_rq.data) should be of sufficient * size to hold any returning data. Currently, local.c uses 2kiB * to hold 'rq' - leaving ~1.5kiB for return data... more than * enough for all the implemented functions here. 
* * Returns: 0 on success, -EXXX on error */ int do_request(struct clog_request *rq, int server) { int r; if (!rq) return 0; if (rq->u_rq.error) LOG_DBG("Programmer error: rq struct has error set"); switch (rq->u_rq.request_type) { case DM_ULOG_CTR: r = clog_ctr(&rq->u_rq); break; case DM_ULOG_DTR: r = clog_dtr(&rq->u_rq); break; case DM_ULOG_PRESUSPEND: r = clog_presuspend(&rq->u_rq); break; case DM_ULOG_POSTSUSPEND: r = clog_postsuspend(&rq->u_rq); break; case DM_ULOG_RESUME: r = clog_resume(&rq->u_rq); break; case DM_ULOG_GET_REGION_SIZE: r = clog_get_region_size(&rq->u_rq); break; case DM_ULOG_IS_CLEAN: r = clog_is_clean(&rq->u_rq); break; case DM_ULOG_IN_SYNC: r = clog_in_sync(&rq->u_rq); break; case DM_ULOG_FLUSH: r = clog_flush(&rq->u_rq, server); break; case DM_ULOG_MARK_REGION: r = clog_mark_region(&rq->u_rq, rq->originator); break; case DM_ULOG_CLEAR_REGION: r = clog_clear_region(&rq->u_rq, rq->originator); break; case DM_ULOG_GET_RESYNC_WORK: r = clog_get_resync_work(&rq->u_rq, rq->originator); break; case DM_ULOG_SET_REGION_SYNC: r = clog_set_region_sync(&rq->u_rq, rq->originator); break; case DM_ULOG_GET_SYNC_COUNT: r = clog_get_sync_count(&rq->u_rq, rq->originator); break; case DM_ULOG_STATUS_INFO: r = clog_status_info(&rq->u_rq); break; case DM_ULOG_STATUS_TABLE: r = clog_status_table(&rq->u_rq); break; case DM_ULOG_IS_REMOTE_RECOVERING: r = clog_is_remote_recovering(&rq->u_rq); break; default: LOG_ERROR("Unknown request"); r = rq->u_rq.error = -EINVAL; break; } if (r && !rq->u_rq.error) rq->u_rq.error = r; else if (r != rq->u_rq.error) LOG_DBG("Warning: error from function != rq->u_rq.error"); if (rq->u_rq.error && rq->u_rq.data_size) { /* Make sure I'm handling errors correctly above */ LOG_DBG("Programmer error: rq->u_rq.error && rq->u_rq.data_size"); rq->u_rq.data_size = 0; } return 0; } static void print_bits(dm_bitset_t bs, int print) { int i, size; char outbuf[128] = { 0 }; unsigned char *buf = (unsigned char *)(bs + 1); size = (*bs % 8) ? 
1 : 0; size += (*bs / 8); for (i = 0; i < size; i++) { if (!(i % 16)) { if (outbuf[0] != '\0') { if (print) LOG_PRINT("%s", outbuf); else LOG_DBG("%s", outbuf); } memset(outbuf, 0, sizeof(outbuf)); sprintf(outbuf, "[%3d - %3d]", i, i+15); } sprintf(outbuf + strlen(outbuf), " %.2X", (unsigned char)buf[i]); } if (outbuf[0] != '\0') { if (print) LOG_PRINT("%s", outbuf); else LOG_DBG("%s", outbuf); } } /* int store_bits(const char *uuid, const char *which, char **buf)*/ int push_state(const char *uuid, uint64_t luid, const char *which, char **buf, uint32_t debug_who) { int bitset_size; struct log_c *lc; if (*buf) LOG_ERROR("store_bits: *buf != NULL"); lc = get_log(uuid, luid); if (!lc) { LOG_ERROR("store_bits: No log found for %s", uuid); return -EINVAL; } if (!strcmp(which, "recovering_region")) { *buf = malloc(64); /* easily handles the 2 written numbers */ if (!*buf) return -ENOMEM; sprintf(*buf, "%llu %u", (unsigned long long)lc->recovering_region, lc->recoverer); LOG_SPRINT(lc, "CKPT SEND - SEQ#=X, UUID=%s, nodeid = %u:: " "recovering_region=%llu, recoverer=%u, sync_count=%llu", SHORT_UUID(lc->uuid), debug_who, (unsigned long long)lc->recovering_region, lc->recoverer, (unsigned long long)count_bits32(lc->sync_bits)); return 64; } /* Size in 'int's */ bitset_size = (*(lc->clean_bits) / DM_BITS_PER_INT) + 1; /* Size in bytes */ bitset_size *= 4; *buf = malloc(bitset_size); if (!*buf) { LOG_ERROR("store_bits: Unable to allocate memory"); return -ENOMEM; } if (!strncmp(which, "sync_bits", 9)) { memcpy(*buf, lc->sync_bits + 1, bitset_size); LOG_DBG("[%s] storing sync_bits (sync_count = %llu):", SHORT_UUID(uuid), (unsigned long long) count_bits32(lc->sync_bits)); print_bits(lc->sync_bits, 0); } else if (!strncmp(which, "clean_bits", 9)) { memcpy(*buf, lc->clean_bits + 1, bitset_size); LOG_DBG("[%s] storing clean_bits:", SHORT_UUID(lc->uuid)); print_bits(lc->clean_bits, 0); } return bitset_size; } /*int load_bits(const char *uuid, const char *which, char *buf, int size)*/ int pull_state(const char *uuid, uint64_t luid, const char *which, char *buf, int size) { int bitset_size; struct log_c *lc; if (!buf) { LOG_ERROR("pull_state: buf == NULL"); return -EINVAL; } lc = get_log(uuid, luid); if (!lc) { LOG_ERROR("pull_state: No log found for %s", uuid); return -EINVAL; } if (!strncmp(which, "recovering_region", 17)) { if (sscanf(buf, "%llu %u", (unsigned long long *)&lc->recovering_region, &lc->recoverer) != 2) { LOG_ERROR("cannot parse recovering region from: %s", buf); return -EINVAL; } LOG_SPRINT(lc, "CKPT INIT - SEQ#=X, UUID=%s, nodeid = X:: " "recovering_region=%llu, recoverer=%u", SHORT_UUID(lc->uuid), (unsigned long long)lc->recovering_region, lc->recoverer); return 0; } /* Size in 'int's */ bitset_size = (*(lc->clean_bits) /DM_BITS_PER_INT) + 1; /* Size in bytes */ bitset_size *= 4; if (bitset_size != size) { LOG_ERROR("pull_state(%s): bad bitset_size (%d vs %d)", which, size, bitset_size); return -EINVAL; } if (!strncmp(which, "sync_bits", 9)) { lc->resume_override += 1; memcpy(lc->sync_bits + 1, buf, bitset_size); LOG_DBG("[%s] loading sync_bits (sync_count = %llu):", SHORT_UUID(lc->uuid),(unsigned long long) count_bits32(lc->sync_bits)); print_bits(lc->sync_bits, 0); } else if (!strncmp(which, "clean_bits", 9)) { lc->resume_override += 2; memcpy(lc->clean_bits + 1, buf, bitset_size); LOG_DBG("[%s] loading clean_bits:", SHORT_UUID(lc->uuid)); print_bits(lc->clean_bits, 0); } return 0; } int log_get_state(struct dm_ulog_request *rq) { struct log_c *lc; lc = get_log(rq->uuid, rq->luid); if 
(!lc) /* FIXME Callers are ignoring this */ return -EINVAL; return (int)lc->state; } /* * log_status * * Returns: 1 if logs are still present, 0 otherwise */ int log_status(void) { if (!dm_list_empty(&log_list) || !dm_list_empty(&log_pending_list)) return 1; return 0; } void log_debug(void) { struct log_c *lc; uint64_t r; int i; LOG_ERROR(""); LOG_ERROR("LOG COMPONENT DEBUGGING::"); LOG_ERROR("Official log list:"); LOG_ERROR("Pending log list:"); dm_list_iterate_items(lc, &log_pending_list) { LOG_ERROR("%s", lc->uuid); LOG_ERROR("sync_bits:"); print_bits(lc->sync_bits, 1); LOG_ERROR("clean_bits:"); print_bits(lc->clean_bits, 1); } dm_list_iterate_items(lc, &log_list) { LOG_ERROR("%s", lc->uuid); LOG_ERROR(" recoverer : %" PRIu32, lc->recoverer); LOG_ERROR(" recovering_region: %" PRIu64, lc->recovering_region); LOG_ERROR(" recovery_halted : %s", (lc->recovery_halted) ? "YES" : "NO"); LOG_ERROR("sync_bits:"); print_bits(lc->sync_bits, 1); LOG_ERROR("clean_bits:"); print_bits(lc->clean_bits, 1); LOG_ERROR("Validating %s::", SHORT_UUID(lc->uuid)); r = find_next_zero_bit(lc->sync_bits, 0); LOG_ERROR(" lc->region_count = %" PRIu32, lc->region_count); LOG_ERROR(" lc->sync_count = %" PRIu64, lc->sync_count); LOG_ERROR(" next zero bit = %" PRIu64, r); if ((r > lc->region_count) || ((r == lc->region_count) && (lc->sync_count > lc->region_count))) { LOG_ERROR("ADJUSTING SYNC_COUNT"); lc->sync_count = lc->region_count; } LOG_ERROR("Resync request history:"); for (i = 0; i < RESYNC_HISTORY; i++) { lc->idx++; lc->idx = lc->idx % RESYNC_HISTORY; if (lc->resync_history[lc->idx][0] == '\0') continue; LOG_ERROR("%d:%d) %s", i, lc->idx, lc->resync_history[lc->idx]); } } } LVM2.2.02.176/daemons/cmirrord/common.h0000644000000000000120000000227413176752421016241 0ustar rootwheel/* * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _LVM_CLOG_COMMON_H #define _LVM_CLOG_COMMON_H /* * If there are problems when forking off to become a daemon, * the child will exist with one of these codes. This allows * the parent to know the reason for the failure and print it * to the launching terminal. * * #define EXIT_SUCCESS 0 (from stdlib.h) * #define EXIT_FAILURE 1 (from stdlib.h) */ #define EXIT_LOCKFILE 2 #define EXIT_KERNEL_SOCKET 3 /* Failed netlink socket create */ #define EXIT_KERNEL_BIND 4 #define EXIT_KERNEL_SETSOCKOPT 5 #define EXIT_CLUSTER_CKPT_INIT 6 /* Failed to init checkpoint */ #define EXIT_QUEUE_NOMEM 7 #define DM_ULOG_REQUEST_SIZE 1024 #endif /* _LVM_CLOG_COMMON_H */ LVM2.2.02.176/daemons/cmirrord/link_mon.c0000644000000000000120000000575013176752421016554 0ustar rootwheel/* * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. 
* * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "logging.h" #include "link_mon.h" #include #include #include struct link_callback { int fd; const char *name; void *data; int (*callback)(void *data); struct link_callback *next; }; static unsigned used_pfds = 0; static unsigned free_pfds = 0; static struct pollfd *pfds = NULL; static struct link_callback *callbacks = NULL; int links_register(int fd, const char *name, int (*callback)(void *data), void *data) { unsigned i; struct link_callback *lc; for (i = 0; i < used_pfds; i++) { if (fd == pfds[i].fd) { LOG_ERROR("links_register: Duplicate file descriptor"); return -EINVAL; } } lc = malloc(sizeof(*lc)); if (!lc) return -ENOMEM; lc->fd = fd; lc->name = name; lc->data = data; lc->callback = callback; if (!free_pfds) { struct pollfd *tmp; tmp = realloc(pfds, sizeof(struct pollfd) * ((used_pfds*2) + 1)); if (!tmp) { free(lc); return -ENOMEM; } pfds = tmp; free_pfds = used_pfds + 1; } free_pfds--; pfds[used_pfds].fd = fd; pfds[used_pfds].events = POLLIN; pfds[used_pfds].revents = 0; used_pfds++; lc->next = callbacks; callbacks = lc; LOG_DBG("Adding %s/%d", lc->name, lc->fd); LOG_DBG(" used_pfds = %u, free_pfds = %u", used_pfds, free_pfds); return 0; } int links_unregister(int fd) { unsigned i; struct link_callback *p, *c; for (i = 0; i < used_pfds; i++) if (fd == pfds[i].fd) { /* entire struct is copied (overwritten) */ pfds[i] = pfds[used_pfds - 1]; used_pfds--; free_pfds++; } for (p = NULL, c = callbacks; c; p = c, c = c->next) if (fd == c->fd) { LOG_DBG("Freeing up %s/%d", c->name, c->fd); LOG_DBG(" used_pfds = %u, free_pfds = %u", used_pfds, free_pfds); if (p) p->next = c->next; else callbacks = c->next; free(c); break; } return 0; } int links_monitor(void) { unsigned i; int r; for (i = 0; i < used_pfds; i++) { pfds[i].revents = 0; } r = poll(pfds, used_pfds, -1); if (r <= 0) return r; r = 0; /* FIXME: handle POLLHUP */ for (i = 0; i < used_pfds; i++) if (pfds[i].revents & POLLIN) { LOG_DBG("Data ready on %d", pfds[i].fd); /* FIXME: Add this back return 1;*/ r++; } return r; } int links_issue_callbacks(void) { unsigned i; struct link_callback *lc; for (i = 0; i < used_pfds; i++) if (pfds[i].revents & POLLIN) for (lc = callbacks; lc; lc = lc->next) if (pfds[i].fd == lc->fd) { LOG_DBG("Issuing callback on %s/%d", lc->name, lc->fd); lc->callback(lc->data); break; } return 0; } LVM2.2.02.176/daemons/cmirrord/compat.h0000644000000000000120000000125713176752421016234 0ustar rootwheel/* * Copyright (C) 2010 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. */ #ifndef _LVM_CLOG_COMPAT_H #define _LVM_CLOG_COMPAT_H /* * The intermachine communication structure version are: * 0: Unused * 1: Never in the wild * 2: RHEL 5.2 * 3: RHEL 5.3 * 4: RHEL 5.4, RHEL 5.5 * 5: RHEL 6, Current Upstream Format */ #define CLOG_TFR_VERSION 5 int clog_request_to_network(struct clog_request *rq); int clog_request_from_network(void *data, size_t data_len); #endif /* _LVM_CLOG_COMPAT_H */ LVM2.2.02.176/daemons/cmirrord/logging.c0000644000000000000120000000242713176752421016372 0ustar rootwheel/* * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved. 
* * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "logging.h" const char *__rq_types_off_by_one[] = { "DM_ULOG_CTR", "DM_ULOG_DTR", "DM_ULOG_PRESUSPEND", "DM_ULOG_POSTSUSPEND", "DM_ULOG_RESUME", "DM_ULOG_GET_REGION_SIZE", "DM_ULOG_IS_CLEAN", "DM_ULOG_IN_SYNC", "DM_ULOG_FLUSH", "DM_ULOG_MARK_REGION", "DM_ULOG_CLEAR_REGION", "DM_ULOG_GET_RESYNC_WORK", "DM_ULOG_SET_REGION_SYNC", "DM_ULOG_GET_SYNC_COUNT", "DM_ULOG_STATUS_INFO", "DM_ULOG_STATUS_TABLE", "DM_ULOG_IS_REMOTE_RECOVERING", NULL }; int log_tabbing = 0; int log_is_open = 0; /* * Variables for various conditional logging */ #ifdef MEMB int log_membership_change = 1; #else int log_membership_change = 0; #endif #ifdef CKPT int log_checkpoint = 1; #else int log_checkpoint = 0; #endif #ifdef RESEND int log_resend_requests = 1; #else int log_resend_requests = 0; #endif LVM2.2.02.176/daemons/cmirrord/local.c0000644000000000000120000002352213176752421016035 0ustar rootwheel/* * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "logging.h" #include "common.h" #include "functions.h" #include "link_mon.h" #include "local.h" #include #include #include #include #include #ifndef CN_IDX_DM /* Kernel 2.6.31 is required to run this code */ #define CN_IDX_DM 0x7 /* Device Mapper */ #define CN_VAL_DM_USERSPACE_LOG 0x1 #endif static int cn_fd = -1; /* Connector (netlink) socket fd */ static char recv_buf[2048]; static char send_buf[2048]; /* FIXME: merge this function with kernel_send_helper */ static int kernel_ack(uint32_t seq, int error) { int r; struct nlmsghdr *nlh = (struct nlmsghdr *)send_buf; struct cn_msg *msg = NLMSG_DATA(nlh); if (error < 0) { LOG_ERROR("Programmer error: error codes must be positive"); return -EINVAL; } memset(send_buf, 0, sizeof(send_buf)); nlh->nlmsg_seq = 0; nlh->nlmsg_pid = getpid(); nlh->nlmsg_type = NLMSG_DONE; nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct cn_msg)); nlh->nlmsg_flags = 0; msg->len = 0; msg->id.idx = CN_IDX_DM; msg->id.val = CN_VAL_DM_USERSPACE_LOG; msg->seq = seq; msg->ack = error; r = send(cn_fd, nlh, NLMSG_LENGTH(sizeof(struct cn_msg)), 0); /* FIXME: do better error processing */ if (r <= 0) return -EBADE; return 0; } /* * kernel_recv * @rq: the newly allocated request from kernel * * Read requests from the kernel and allocate space for the new request. * If there is no request from the kernel, *rq is NULL. * * This function is not thread safe due to returned stack pointer. In fact, * the returned pointer must not be in-use when this function is called again. 
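 *
 * A minimal usage sketch (illustrative only; the real consumer is
 * do_local_work() below), honoring the rule that the previous request
 * must be fully processed before the next call:
 *
 *	struct clog_request *rq = NULL;
 *
 *	if (!kernel_recv(&rq) && rq)
 *		do_request(rq, 0);	<- be done with rq before the next kernel_recv()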
* * Returns: 0 on success, -EXXX on error */ static int kernel_recv(struct clog_request **rq) { int r = 0; ssize_t len; char *foo; struct cn_msg *msg; struct dm_ulog_request *u_rq; struct nlmsghdr *nlmsg_h; *rq = NULL; memset(recv_buf, 0, sizeof(recv_buf)); len = recv(cn_fd, recv_buf, sizeof(recv_buf), 0); if (len < 0) { LOG_ERROR("Failed to recv message from kernel"); r = -errno; goto fail; } nlmsg_h = (struct nlmsghdr *)recv_buf; switch (nlmsg_h->nlmsg_type) { case NLMSG_ERROR: LOG_ERROR("Unable to recv message from kernel: NLMSG_ERROR"); r = -EBADE; goto fail; case NLMSG_DONE: msg = (struct cn_msg *)NLMSG_DATA((struct nlmsghdr *)recv_buf); len -= (ssize_t)sizeof(struct nlmsghdr); if (len < (ssize_t)sizeof(struct cn_msg)) { LOG_ERROR("Incomplete request from kernel received"); r = -EBADE; goto fail; } if (msg->len > DM_ULOG_REQUEST_SIZE) { LOG_ERROR("Not enough space to receive kernel request (%d/%d)", msg->len, DM_ULOG_REQUEST_SIZE); r = -EBADE; goto fail; } if (!msg->len) LOG_ERROR("Zero length message received"); len -= (ssize_t)sizeof(struct cn_msg); if (len < msg->len) LOG_ERROR("len = %zd, msg->len = %" PRIu16, len, msg->len); msg->data[msg->len] = '\0'; /* Cleaner way to ensure this? */ u_rq = (struct dm_ulog_request *)msg->data; if (!u_rq->request_type) { LOG_DBG("Bad transmission, requesting resend [%u]", msg->seq); r = -EAGAIN; if (kernel_ack(msg->seq, EAGAIN)) { LOG_ERROR("Failed to NACK kernel transmission [%u]", msg->seq); r = -EBADE; } } /* * Now we've got sizeof(struct cn_msg) + sizeof(struct nlmsghdr) * worth of space that precede the request structure from the * kernel. Since that space isn't going to be used again, we * can take it for our purposes; rather than allocating a whole * new structure and doing a memcpy. * * We should really make sure 'clog_request' doesn't grow * beyond what is available to us, but we need only check it * once... perhaps at compile time? */ foo = (char *)u_rq; foo -= (sizeof(struct clog_request) - sizeof(struct dm_ulog_request)); *rq = (struct clog_request *) foo; /* Clear the wrapper container fields */ memset(*rq, 0, (size_t)((char *)u_rq - (char *)(*rq))); break; default: LOG_ERROR("Unknown nlmsg_type"); r = -EBADE; } fail: if (r) *rq = NULL; return (r == -EAGAIN) ? 0 : r; } static int kernel_send_helper(void *data, uint16_t out_size) { int r; struct nlmsghdr *nlh; struct cn_msg *msg; memset(send_buf, 0, sizeof(send_buf)); nlh = (struct nlmsghdr *)send_buf; nlh->nlmsg_seq = 0; /* FIXME: Is this used? */ nlh->nlmsg_pid = getpid(); nlh->nlmsg_type = NLMSG_DONE; nlh->nlmsg_len = NLMSG_LENGTH(out_size + sizeof(struct cn_msg)); nlh->nlmsg_flags = 0; msg = NLMSG_DATA(nlh); memcpy(msg->data, data, out_size); msg->len = out_size; msg->id.idx = CN_IDX_DM; msg->id.val = CN_VAL_DM_USERSPACE_LOG; msg->seq = 0; r = send(cn_fd, nlh, NLMSG_LENGTH(out_size + sizeof(struct cn_msg)), 0); /* FIXME: do better error processing */ if (r <= 0) return -EBADE; return 0; } /* * do_local_work * * Any processing errors are placed in the 'rq' * structure to be reported back to the kernel. * It may be pointless for this function to * return an int. 
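 *
 * Routing summary (this mirrors the switch statement below): requests that
 * can be answered from local state, such as DM_ULOG_GET_REGION_SIZE,
 * DM_ULOG_GET_SYNC_COUNT or DM_ULOG_STATUS_TABLE, are handled immediately
 * with do_request() and kernel_send(); requests that touch shared mirror
 * state, such as DM_ULOG_FLUSH or DM_ULOG_MARK_REGION, are handed to
 * cluster_send() and answered later from the cluster path.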
* * Returns: 0 on success, -EXXX on failure */ static int do_local_work(void *data __attribute__((unused))) { int r; struct clog_request *rq; struct dm_ulog_request *u_rq = NULL; r = kernel_recv(&rq); if (r) return r; if (!rq) return 0; u_rq = &rq->u_rq; LOG_DBG("[%s] Request from kernel received: [%s/%u]", SHORT_UUID(u_rq->uuid), RQ_TYPE(u_rq->request_type), u_rq->seq); switch (u_rq->request_type) { case DM_ULOG_CTR: case DM_ULOG_DTR: case DM_ULOG_GET_REGION_SIZE: case DM_ULOG_IN_SYNC: case DM_ULOG_GET_SYNC_COUNT: case DM_ULOG_STATUS_TABLE: case DM_ULOG_PRESUSPEND: /* We do not specify ourselves as server here */ r = do_request(rq, 0); if (r) LOG_DBG("Returning failed request to kernel [%s]", RQ_TYPE(u_rq->request_type)); r = kernel_send(u_rq); if (r) LOG_ERROR("Failed to respond to kernel [%s]", RQ_TYPE(u_rq->request_type)); break; case DM_ULOG_RESUME: /* * Resume is a special case that requires a local * component to join the CPG, and a cluster component * to handle the request. */ r = local_resume(u_rq); if (r) { LOG_DBG("Returning failed request to kernel [%s]", RQ_TYPE(u_rq->request_type)); r = kernel_send(u_rq); if (r) LOG_ERROR("Failed to respond to kernel [%s]", RQ_TYPE(u_rq->request_type)); break; } /* ELSE, fall through */ case DM_ULOG_IS_CLEAN: case DM_ULOG_FLUSH: case DM_ULOG_MARK_REGION: case DM_ULOG_GET_RESYNC_WORK: case DM_ULOG_SET_REGION_SYNC: case DM_ULOG_STATUS_INFO: case DM_ULOG_IS_REMOTE_RECOVERING: case DM_ULOG_POSTSUSPEND: r = cluster_send(rq); if (r) { u_rq->data_size = 0; u_rq->error = r; if (kernel_send(u_rq)) LOG_ERROR("Failed to respond to kernel [%s]", RQ_TYPE(u_rq->request_type)); } break; case DM_ULOG_CLEAR_REGION: r = kernel_ack(u_rq->seq, 0); r = cluster_send(rq); if (r) { /* * FIXME: store error for delivery on flush * This would allow us to optimize MARK_REGION * too. */ } break; default: LOG_ERROR("Invalid log request received (%u), ignoring.", u_rq->request_type); return 0; } if (r && !u_rq->error) u_rq->error = r; return r; } /* * kernel_send * @u_rq: result to pass back to kernel * * This function returns the u_rq structure * (containing the results) to the kernel. * It then frees the structure. * * WARNING: should the structure be freed if * there is an error? I vote 'yes'. If the * kernel doesn't get the response, it should * resend the request. * * Returns: 0 on success, -EXXX on failure */ int kernel_send(struct dm_ulog_request *u_rq) { int r; uint16_t size; if (!u_rq) return -EINVAL; size = (uint16_t)(sizeof(struct dm_ulog_request) + u_rq->data_size); if (!u_rq->data_size && !u_rq->error) { /* An ACK is all that is needed */ /* FIXME: add ACK code */ } else if (size > DM_ULOG_REQUEST_SIZE) { /* * If we gotten here, we've already overrun * our allotted space somewhere. * * We must do something, because the kernel * is waiting for a response. 
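 *
 * (Falling back to a bare 'struct dm_ulog_request' below is always safe:
 * the header alone is far smaller than DM_ULOG_REQUEST_SIZE, so only the
 * payload is dropped while the kernel still gets its error reply.)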
*/ LOG_ERROR("Not enough space to respond to server"); u_rq->error = -ENOSPC; size = sizeof(struct dm_ulog_request); } r = kernel_send_helper(u_rq, size); if (r) LOG_ERROR("Failed to send msg to kernel."); return r; } /* * init_local * * Initialize kernel communication socket (netlink) * * Returns: 0 on success, values from common.h on failure */ int init_local(void) { int r = 0; unsigned opt; struct sockaddr_nl addr; cn_fd = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR); if (cn_fd < 0) return EXIT_KERNEL_SOCKET; /* memset to fix valgrind complaint */ memset(&addr, 0, sizeof(struct sockaddr_nl)); addr.nl_family = AF_NETLINK; addr.nl_groups = CN_IDX_DM; addr.nl_pid = 0; r = bind(cn_fd, (struct sockaddr *) &addr, sizeof(addr)); if (r < 0) { if (close(cn_fd)) LOG_ERROR("Failed to close socket: %s", strerror(errno)); return EXIT_KERNEL_BIND; } opt = addr.nl_groups; r = setsockopt(cn_fd, 270, NETLINK_ADD_MEMBERSHIP, &opt, sizeof(opt)); if (r) { if (close(cn_fd)) LOG_ERROR("Failed to close socket: %s", strerror(errno)); return EXIT_KERNEL_SETSOCKOPT; } /* r = fcntl(cn_fd, F_SETFL, FNDELAY); */ links_register(cn_fd, "local", do_local_work, NULL); return 0; } /* * cleanup_local * * Clean up before exiting */ void cleanup_local(void) { links_unregister(cn_fd); if (cn_fd >= 0 && close(cn_fd)) LOG_ERROR("Failed to close socket: %s", strerror(errno)); } LVM2.2.02.176/daemons/cmirrord/link_mon.h0000644000000000000120000000136713176752421016561 0ustar rootwheel/* * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _LVM_CLOG_LINK_MON_H #define _LVM_CLOG_LINK_MON_H int links_register(int fd, const char *name, int (*callback)(void *data), void *data); int links_unregister(int fd); int links_monitor(void); int links_issue_callbacks(void); #endif /* _LVM_CLOG_LINK_MON_H */ LVM2.2.02.176/daemons/lvmlockd/0000755000000000000120000000000013176752421014565 5ustar rootwheelLVM2.2.02.176/daemons/lvmlockd/Makefile.in0000644000000000000120000000320013176752421016625 0ustar rootwheel# # Copyright (C) 2014-2015 Red Hat, Inc. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU Lesser General Public License v.2.1. 
# # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ SOURCES = lvmlockd-core.c ifeq ("@BUILD_LOCKDSANLOCK@", "yes") SOURCES += lvmlockd-sanlock.c LOCK_LIBS += -lsanlock_client endif ifeq ("@BUILD_LOCKDDLM@", "yes") SOURCES += lvmlockd-dlm.c LOCK_LIBS += -ldlm_lt endif TARGETS = lvmlockd lvmlockctl .PHONY: install_lvmlockd include $(top_builddir)/make.tmpl CFLAGS += $(EXTRA_EXEC_CFLAGS) INCLUDES += -I$(top_srcdir)/libdaemon/server LDFLAGS += -L$(top_builddir)/libdaemon/server $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS) LIBS += $(RT_LIBS) $(DAEMON_LIBS) -ldevmapper $(PTHREAD_LIBS) lvmlockd: $(OBJECTS) $(top_builddir)/libdaemon/client/libdaemonclient.a \ $(top_builddir)/libdaemon/server/libdaemonserver.a $(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJECTS) $(LOCK_LIBS) -ldaemonserver $(LIBS) lvmlockctl: lvmlockctl.o $(top_builddir)/libdaemon/client/libdaemonclient.a $(CC) $(CFLAGS) $(LDFLAGS) -o $@ lvmlockctl.o $(LIBS) install_lvmlockd: lvmlockd $(INSTALL_PROGRAM) -D $< $(sbindir)/$( #include #include #include #include #include #include #include struct lm_dlm { dlm_lshandle_t *dh; }; struct rd_dlm { struct dlm_lksb lksb; struct val_blk *vb; }; int lm_data_size_dlm(void) { return sizeof(struct rd_dlm); } /* * lock_args format * * vg_lock_args format for dlm is * vg_version_string:undefined:cluster_name * * lv_lock_args are not used for dlm * * version_string is MAJOR.MINOR.PATCH * undefined may contain ":" */ #define VG_LOCK_ARGS_MAJOR 1 #define VG_LOCK_ARGS_MINOR 0 #define VG_LOCK_ARGS_PATCH 0 static int dlm_has_lvb_bug; static int cluster_name_from_args(char *vg_args, char *clustername) { return last_string_from_args(vg_args, clustername); } static int check_args_version(char *vg_args) { unsigned int major = 0; int rv; rv = version_from_args(vg_args, &major, NULL, NULL); if (rv < 0) { log_error("check_args_version %s error %d", vg_args, rv); return rv; } if (major > VG_LOCK_ARGS_MAJOR) { log_error("check_args_version %s major %d %d", vg_args, major, VG_LOCK_ARGS_MAJOR); return -1; } return 0; } /* This will be set after dlm_controld is started. 
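 * Reading it yields the running cluster's name, which
 * lm_prepare_lockspace_dlm() compares against the trailing cluster_name
 * field of vg_lock_args; e.g. a hypothetical vg_lock_args of "1.0.0:alpha"
 * can only be started while this node is a member of cluster "alpha".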
*/ #define DLM_CLUSTER_NAME_PATH "/sys/kernel/config/dlm/cluster/cluster_name" static int read_cluster_name(char *clustername) { static const char close_error_msg[] = "read_cluster_name: close_error %d"; char *n; int fd; int rv; if (daemon_test) { sprintf(clustername, "%s", "test"); return 0; } fd = open(DLM_CLUSTER_NAME_PATH, O_RDONLY); if (fd < 0) { log_debug("read_cluster_name: open error %d, check dlm_controld", fd); return fd; } rv = read(fd, clustername, MAX_ARGS); if (rv < 0) { log_error("read_cluster_name: cluster name read error %d, check dlm_controld", fd); if (close(fd)) log_error(close_error_msg, fd); return rv; } n = strstr(clustername, "\n"); if (n) *n = '\0'; if (close(fd)) log_error(close_error_msg, fd); return 0; } int lm_init_vg_dlm(char *ls_name, char *vg_name, uint32_t flags, char *vg_args) { char clustername[MAX_ARGS+1]; char lock_args_version[MAX_ARGS+1]; int rv; memset(clustername, 0, sizeof(clustername)); memset(lock_args_version, 0, sizeof(lock_args_version)); snprintf(lock_args_version, MAX_ARGS, "%u.%u.%u", VG_LOCK_ARGS_MAJOR, VG_LOCK_ARGS_MINOR, VG_LOCK_ARGS_PATCH); rv = read_cluster_name(clustername); if (rv < 0) return -EMANAGER; if (strlen(clustername) + strlen(lock_args_version) + 2 > MAX_ARGS) { log_error("init_vg_dlm args too long"); return -EARGS; } snprintf(vg_args, MAX_ARGS, "%s:%s", lock_args_version, clustername); rv = 0; log_debug("init_vg_dlm done %s vg_args %s", ls_name, vg_args); return rv; } int lm_prepare_lockspace_dlm(struct lockspace *ls) { char sys_clustername[MAX_ARGS+1]; char arg_clustername[MAX_ARGS+1]; uint32_t major = 0, minor = 0, patch = 0; struct lm_dlm *lmd; int rv; if (daemon_test) goto skip_args; memset(sys_clustername, 0, sizeof(sys_clustername)); memset(arg_clustername, 0, sizeof(arg_clustername)); rv = read_cluster_name(sys_clustername); if (rv < 0) return -EMANAGER; rv = dlm_kernel_version(&major, &minor, &patch); if (rv < 0) { log_error("prepare_lockspace_dlm kernel_version not detected %d", rv); dlm_has_lvb_bug = 1; } if ((major == 6) && (minor == 0) && (patch == 1)) { log_debug("dlm kernel version %u.%u.%u has lvb bug", major, minor, patch); dlm_has_lvb_bug = 1; } if (!ls->vg_args[0]) { /* global lockspace has no vg args */ goto skip_args; } rv = check_args_version(ls->vg_args); if (rv < 0) return -EARGS; rv = cluster_name_from_args(ls->vg_args, arg_clustername); if (rv < 0) { log_error("prepare_lockspace_dlm %s no cluster name from args %s", ls->name, ls->vg_args); return -EARGS; } if (strcmp(sys_clustername, arg_clustername)) { log_error("prepare_lockspace_dlm %s mismatching cluster names sys %s arg %s", ls->name, sys_clustername, arg_clustername); return -EARGS; } skip_args: lmd = malloc(sizeof(struct lm_dlm)); if (!lmd) return -ENOMEM; ls->lm_data = lmd; return 0; } int lm_add_lockspace_dlm(struct lockspace *ls, int adopt) { struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data; if (daemon_test) return 0; if (adopt) lmd->dh = dlm_open_lockspace(ls->name); else lmd->dh = dlm_new_lockspace(ls->name, 0600, DLM_LSFL_NEWEXCL); if (!lmd->dh) { log_error("add_lockspace_dlm %s adopt %d error", ls->name, adopt); free(lmd); ls->lm_data = NULL; return -1; } return 0; } int lm_rem_lockspace_dlm(struct lockspace *ls, int free_vg) { struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data; int rv; if (daemon_test) goto out; /* * If free_vg is set, it means we are doing vgremove, and we may want * to tell any other nodes to leave the lockspace. 
This is not really * necessary since there should be no harm in having an unused * lockspace sitting around. A new "notification lock" would need to * be added with a callback to signal this. */ rv = dlm_release_lockspace(ls->name, lmd->dh, 1); if (rv < 0) { log_error("rem_lockspace_dlm error %d", rv); return rv; } out: free(lmd); ls->lm_data = NULL; return 0; } static int lm_add_resource_dlm(struct lockspace *ls, struct resource *r, int with_lock_nl) { struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data; struct rd_dlm *rdd = (struct rd_dlm *)r->lm_data; uint32_t flags = 0; char *buf; int rv; if (r->type == LD_RT_GL || r->type == LD_RT_VG) { buf = malloc(sizeof(struct val_blk) + DLM_LVB_LEN); if (!buf) return -ENOMEM; memset(buf, 0, sizeof(struct val_blk) + DLM_LVB_LEN); rdd->vb = (struct val_blk *)buf; rdd->lksb.sb_lvbptr = buf + sizeof(struct val_blk); flags |= LKF_VALBLK; } if (!with_lock_nl) goto out; /* because this is a new NL lock request */ flags |= LKF_EXPEDITE; if (daemon_test) goto out; rv = dlm_ls_lock_wait(lmd->dh, LKM_NLMODE, &rdd->lksb, flags, r->name, strlen(r->name), 0, NULL, NULL, NULL); if (rv < 0) { log_error("S %s R %s add_resource_dlm lock error %d", ls->name, r->name, rv); return rv; } out: return 0; } int lm_rem_resource_dlm(struct lockspace *ls, struct resource *r) { struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data; struct rd_dlm *rdd = (struct rd_dlm *)r->lm_data; struct dlm_lksb *lksb; int rv = 0; if (daemon_test) goto out; lksb = &rdd->lksb; if (!lksb->sb_lkid) goto out; rv = dlm_ls_unlock_wait(lmd->dh, lksb->sb_lkid, 0, lksb); if (rv < 0) { log_error("S %s R %s rem_resource_dlm unlock error %d", ls->name, r->name, rv); } out: if (rdd->vb) free(rdd->vb); memset(rdd, 0, sizeof(struct rd_dlm)); r->lm_init = 0; return rv; } static int to_dlm_mode(int ld_mode) { switch (ld_mode) { case LD_LK_EX: return LKM_EXMODE; case LD_LK_SH: return LKM_PRMODE; }; return -1; } static int lm_adopt_dlm(struct lockspace *ls, struct resource *r, int ld_mode, struct val_blk *vb_out) { struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data; struct rd_dlm *rdd = (struct rd_dlm *)r->lm_data; struct dlm_lksb *lksb; uint32_t flags = 0; int mode; int rv; memset(vb_out, 0, sizeof(struct val_blk)); if (!r->lm_init) { rv = lm_add_resource_dlm(ls, r, 0); if (rv < 0) return rv; r->lm_init = 1; } lksb = &rdd->lksb; flags |= LKF_PERSISTENT; flags |= LKF_ORPHAN; if (rdd->vb) flags |= LKF_VALBLK; mode = to_dlm_mode(ld_mode); if (mode < 0) { log_error("adopt_dlm invalid mode %d", ld_mode); rv = -EINVAL; goto fail; } log_debug("S %s R %s adopt_dlm", ls->name, r->name); if (daemon_test) return 0; /* * dlm returns 0 for success, -EAGAIN if an orphan is * found with another mode, and -ENOENT if no orphan. * * cast/bast/param are (void *)1 because the kernel * returns errors if some are null. */ rv = dlm_ls_lockx(lmd->dh, mode, lksb, flags, r->name, strlen(r->name), 0, (void *)1, (void *)1, (void *)1, NULL, NULL); if (rv == -1 && errno == -EAGAIN) { log_debug("S %s R %s adopt_dlm adopt mode %d try other mode", ls->name, r->name, ld_mode); rv = -EUCLEAN; goto fail; } if (rv < 0) { log_debug("S %s R %s adopt_dlm mode %d flags %x error %d errno %d", ls->name, r->name, mode, flags, rv, errno); goto fail; } /* * FIXME: For GL/VG locks we probably want to read the lvb, * especially if adopting an ex lock, because when we * release this adopted ex lock we may want to write new * lvb values based on the current lvb values (at lease * in the GL case where we increment the current values.) 
* * It should be possible to read the lvb by requesting * this lock in the same mode it's already in. */ return rv; fail: lm_rem_resource_dlm(ls, r); return rv; } /* * Use PERSISTENT so that if lvmlockd exits while holding locks, * the locks will remain orphaned in the dlm, still protecting what * they were acquired to protect. */ int lm_lock_dlm(struct lockspace *ls, struct resource *r, int ld_mode, struct val_blk *vb_out, int adopt) { struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data; struct rd_dlm *rdd = (struct rd_dlm *)r->lm_data; struct dlm_lksb *lksb; struct val_blk vb; uint32_t flags = 0; int mode; int rv; if (adopt) { /* When adopting, we don't follow the normal method of acquiring a NL lock then converting it to the desired mode. */ return lm_adopt_dlm(ls, r, ld_mode, vb_out); } if (!r->lm_init) { rv = lm_add_resource_dlm(ls, r, 1); if (rv < 0) return rv; r->lm_init = 1; } lksb = &rdd->lksb; flags |= LKF_CONVERT; flags |= LKF_NOQUEUE; flags |= LKF_PERSISTENT; if (rdd->vb) flags |= LKF_VALBLK; mode = to_dlm_mode(ld_mode); if (mode < 0) { log_error("lock_dlm invalid mode %d", ld_mode); return -EINVAL; } log_debug("S %s R %s lock_dlm", ls->name, r->name); if (daemon_test) { if (rdd->vb) { vb_out->version = le16_to_cpu(rdd->vb->version); vb_out->flags = le16_to_cpu(rdd->vb->flags); vb_out->r_version = le32_to_cpu(rdd->vb->r_version); } return 0; } /* * The dlm lvb bug means that converting NL->EX will not return * the latest lvb, so we have to convert NL->PR->EX to reread it. */ if (dlm_has_lvb_bug && (ld_mode == LD_LK_EX)) { rv = dlm_ls_lock_wait(lmd->dh, LKM_PRMODE, lksb, flags, r->name, strlen(r->name), 0, NULL, NULL, NULL); if (rv == -1) { log_debug("S %s R %s lock_dlm acquire mode PR for %d rv %d", ls->name, r->name, mode, rv); goto lockrv; } /* Fall through to request EX. */ } rv = dlm_ls_lock_wait(lmd->dh, mode, lksb, flags, r->name, strlen(r->name), 0, NULL, NULL, NULL); lockrv: if (rv == -1 && errno == EAGAIN) { log_debug("S %s R %s lock_dlm acquire mode %d rv EAGAIN", ls->name, r->name, mode); return -EAGAIN; } if (rv < 0) { log_error("S %s R %s lock_dlm acquire error %d errno %d", ls->name, r->name, rv, errno); return rv; } if (rdd->vb) { if (lksb->sb_flags & DLM_SBF_VALNOTVALID) { log_debug("S %s R %s lock_dlm VALNOTVALID", ls->name, r->name); memset(rdd->vb, 0, sizeof(struct val_blk)); memset(vb_out, 0, sizeof(struct val_blk)); goto out; } /* * 'vb' contains disk endian values, not host endian. * It is copied directly to rdd->vb which is also kept * in disk endian form. * vb_out is returned to the caller in host endian form. 
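 * (The on-disk form is little-endian, so e.g. a version field of 1 is held
 * in rdd->vb as bytes 01 00 and only converted with le16_to_cpu() when
 * copied into vb_out.)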
*/ memcpy(&vb, lksb->sb_lvbptr, sizeof(struct val_blk)); memcpy(rdd->vb, &vb, sizeof(vb)); vb_out->version = le16_to_cpu(vb.version); vb_out->flags = le16_to_cpu(vb.flags); vb_out->r_version = le32_to_cpu(vb.r_version); } out: return 0; } int lm_convert_dlm(struct lockspace *ls, struct resource *r, int ld_mode, uint32_t r_version) { struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data; struct rd_dlm *rdd = (struct rd_dlm *)r->lm_data; struct dlm_lksb *lksb = &rdd->lksb; uint32_t mode; uint32_t flags = 0; int rv; log_debug("S %s R %s convert_dlm", ls->name, r->name); flags |= LKF_CONVERT; flags |= LKF_NOQUEUE; flags |= LKF_PERSISTENT; if (rdd->vb && r_version && (r->mode == LD_LK_EX)) { if (!rdd->vb->version) { /* first time vb has been written */ rdd->vb->version = cpu_to_le16(VAL_BLK_VERSION); } rdd->vb->r_version = cpu_to_le32(r_version); memcpy(lksb->sb_lvbptr, rdd->vb, sizeof(struct val_blk)); log_debug("S %s R %s convert_dlm set r_version %u", ls->name, r->name, r_version); flags |= LKF_VALBLK; } mode = to_dlm_mode(ld_mode); if (daemon_test) return 0; rv = dlm_ls_lock_wait(lmd->dh, mode, lksb, flags, r->name, strlen(r->name), 0, NULL, NULL, NULL); if (rv == -1 && errno == EAGAIN) { /* FIXME: When does this happen? Should something different be done? */ log_error("S %s R %s convert_dlm mode %d rv EAGAIN", ls->name, r->name, mode); return -EAGAIN; } if (rv < 0) { log_error("S %s R %s convert_dlm error %d", ls->name, r->name, rv); } return rv; } int lm_unlock_dlm(struct lockspace *ls, struct resource *r, uint32_t r_version, uint32_t lmu_flags) { struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data; struct rd_dlm *rdd = (struct rd_dlm *)r->lm_data; struct dlm_lksb *lksb = &rdd->lksb; struct val_blk vb_prev; struct val_blk vb_next; uint32_t flags = 0; int new_vb = 0; int rv; /* * Do not set PERSISTENT, because we don't need an orphan * NL lock to protect anything. */ flags |= LKF_CONVERT; if (rdd->vb && (r->mode == LD_LK_EX)) { /* vb_prev and vb_next are in disk endian form */ memcpy(&vb_prev, rdd->vb, sizeof(struct val_blk)); memcpy(&vb_next, rdd->vb, sizeof(struct val_blk)); if (!vb_prev.version) { vb_next.version = cpu_to_le16(VAL_BLK_VERSION); new_vb = 1; } if ((lmu_flags & LMUF_FREE_VG) && (r->type == LD_RT_VG)) { vb_next.flags = cpu_to_le16(VBF_REMOVED); new_vb = 1; } if (r_version) { vb_next.r_version = cpu_to_le32(r_version); new_vb = 1; } if (new_vb) { memcpy(rdd->vb, &vb_next, sizeof(struct val_blk)); memcpy(lksb->sb_lvbptr, &vb_next, sizeof(struct val_blk)); log_debug("S %s R %s unlock_dlm vb old %x %x %u new %x %x %u", ls->name, r->name, le16_to_cpu(vb_prev.version), le16_to_cpu(vb_prev.flags), le32_to_cpu(vb_prev.r_version), le16_to_cpu(vb_next.version), le16_to_cpu(vb_next.flags), le32_to_cpu(vb_next.r_version)); } else { log_debug("S %s R %s unlock_dlm vb unchanged", ls->name, r->name); } flags |= LKF_VALBLK; } else { log_debug("S %s R %s unlock_dlm", ls->name, r->name); } if (daemon_test) return 0; rv = dlm_ls_lock_wait(lmd->dh, LKM_NLMODE, lksb, flags, r->name, strlen(r->name), 0, NULL, NULL, NULL); if (rv < 0) { log_error("S %s R %s unlock_dlm error %d", ls->name, r->name, rv); } return rv; } /* * This list could be read from dlm_controld via libdlmcontrol, * but it's simpler to get it from sysfs. */ #define DLM_LOCKSPACES_PATH "/sys/kernel/config/dlm/cluster/spaces" /* * FIXME: this should be implemented differently. * It's not nice to use an aspect of the dlm clustering * implementation, which could change. 
It would be * better to do something like use a special lock in the * lockspace that was held PR by all nodes, and then an * EX request on it could check if it's started (and * possibly also notify others to stop it automatically). * Or, possibly an enhancement to libdlm that would give * info about lockspace members. * * (We could let the VG be removed while others still * have the lockspace running, which largely works, but * introduces problems if another VG with the same name is * recreated while others still have the lockspace running * for the previous VG. We'd also want a way to clean up * the stale lockspaces on the others eventually.) */ int lm_hosts_dlm(struct lockspace *ls, int notify) { static const char closedir_err_msg[] = "lm_hosts_dlm: closedir failed"; char ls_nodes_path[PATH_MAX]; struct dirent *de; DIR *ls_dir; int count = 0; if (daemon_test) return 0; memset(ls_nodes_path, 0, sizeof(ls_nodes_path)); snprintf(ls_nodes_path, PATH_MAX-1, "%s/%s/nodes", DLM_LOCKSPACES_PATH, ls->name); if (!(ls_dir = opendir(ls_nodes_path))) return -ECONNREFUSED; while ((de = readdir(ls_dir))) { if (de->d_name[0] == '.') continue; count++; } if (closedir(ls_dir)) log_error(closedir_err_msg); if (!count) { log_error("lm_hosts_dlm found no nodes in %s", ls_nodes_path); return 0; } /* * Assume that a count of one node represents ourself, * and any value over one represents other nodes. */ return count - 1; } int lm_get_lockspaces_dlm(struct list_head *ls_rejoin) { static const char closedir_err_msg[] = "lm_get_lockspace_dlm: closedir failed"; struct lockspace *ls; struct dirent *de; DIR *ls_dir; if (!(ls_dir = opendir(DLM_LOCKSPACES_PATH))) return -ECONNREFUSED; while ((de = readdir(ls_dir))) { if (de->d_name[0] == '.') continue; if (strncmp(de->d_name, LVM_LS_PREFIX, strlen(LVM_LS_PREFIX))) continue; if (!(ls = alloc_lockspace())) { if (closedir(ls_dir)) log_error(closedir_err_msg); return -ENOMEM; } ls->lm_type = LD_LM_DLM; strncpy(ls->name, de->d_name, MAX_NAME); strncpy(ls->vg_name, ls->name + strlen(LVM_LS_PREFIX), MAX_NAME); list_add_tail(&ls->list, ls_rejoin); } if (closedir(ls_dir)) log_error(closedir_err_msg); return 0; } int lm_is_running_dlm(void) { char sys_clustername[MAX_ARGS+1]; int rv; if (daemon_test) return gl_use_dlm; memset(sys_clustername, 0, sizeof(sys_clustername)); rv = read_cluster_name(sys_clustername); if (rv < 0) return 0; return 1; } LVM2.2.02.176/daemons/lvmlockd/lvmlockd-sanlock.c0000644000000000000120000014611013176752421020177 0ustar rootwheel/* * Copyright (C) 2014-2015 Red Hat, Inc. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. */ #define _XOPEN_SOURCE 500 /* pthread */ #define _ISOC99_SOURCE #include "tool.h" #include "daemon-server.h" #include "xlate.h" #include "lvmlockd-internal.h" #include "lvmlockd-client.h" #include "sanlock.h" #include "sanlock_rv.h" #include "sanlock_admin.h" #include "sanlock_resource.h" #include #include #include #include /* ------------------------------------------------------------------------------- For each VG, lvmlockd creates a sanlock lockspace that holds the leases for that VG. There's a lease for the VG lock, and there's a lease for each active LV. sanlock maintains (reads/writes) these leases, which exist on storage. That storage is a hidden LV within the VG: /dev/vg/lvmlock. 
lvmlockd gives the path of this internal LV to sanlock, which then reads/writes the leases on it. # lvs -a cc -o+uuid LV VG Attr LSize LV UUID lv1 cc -wi-a----- 2.00g 7xoDtu-yvNM-iwQx-C94t-BbYs-UzBl-o8hAIa lv2 cc -wi-a----- 100.00g exxNPX-wZdO-uCNy-yiGa-aJGT-JKVl-arfcYT [lvmlock] cc -wi-ao---- 256.00m iLpDel-hR0T-hJ3u-rnVo-PcDh-mcjt-sF9egM # sanlock status s lvm_cc:1:/dev/mapper/cc-lvmlock:0 r lvm_cc:exxNPX-wZdO-uCNy-yiGa-aJGT-JKVl-arfcYT:/dev/mapper/cc-lvmlock:71303168:13 p 26099 r lvm_cc:7xoDtu-yvNM-iwQx-C94t-BbYs-UzBl-o8hAIa:/dev/mapper/cc-lvmlock:70254592:3 p 26099 This shows that sanlock is maintaining leases on /dev/mapper/cc-lvmlock. sanlock acquires a lockspace lease when the lockspace is joined, i.e. when the VG is started by 'vgchange --lock-start cc'. This lockspace lease exists at /dev/mapper/cc-lvmlock offset 0, and sanlock regularly writes to it to maintain ownership of it. Joining the lockspace (by acquiring the lockspace lease in it) then allows standard resource leases to be acquired in the lockspace for whatever the application wants. lvmlockd uses resource leases for the VG lock and LV locks. sanlock acquires a resource lease for each actual lock that lvm commands use. Above, there are two LV locks that are held because the two LVs are active. These are on /dev/mapper/cc-lvmlock at offsets 71303168 and 70254592. sanlock does not write to these resource leases except when acquiring and releasing them (e.g. lvchange -ay/-an). The renewal of the lockspace lease maintains ownership of all the resource leases in the lockspace. If the host loses access to the disk that the sanlock lv lives on, then sanlock can no longer renew its lockspace lease. The lockspace lease will eventually expire, at which point the host will lose ownership of it, and of all resource leases it holds in the lockspace. Eventually, other hosts will be able to acquire those leases. sanlock ensures that another host will not be able to acquire one of the expired leases until the current host has quit using it. It is important that the host "quit using" the leases it is holding if the sanlock storage is lost and they begin expiring. If the host cannot quit using the leases and release them within a limited time, then sanlock will use the local watchdog to forcibly reset the host before any other host can acquire them. This is severe, but preferable to possibly corrupting the data protected by the lease. It ensures that two nodes will not be using the same lease at once. For LV leases, that means that another host will not be able to activate the LV while another host still has it active. sanlock notifies the application that it cannot renew the lockspace lease. The application needs to quit using all leases in the lockspace and release them as quickly as possible. In the initial version, lvmlockd ignored this notification, so sanlock would eventually reach the point where it would use the local watchdog to reset the host. However, it's better to attempt a response. If that response succeeds, the host can avoid being reset. If the response fails, then sanlock will eventually reset the host as the last resort. sanlock gives the application about 40 seconds to complete its response and release its leases before resetting the host. An application can specify the path and args of a program that sanlock should run to notify it if the lockspace lease cannot be renewed. This program should carry out the application's response to the expiring leases: attempt to quit using the leases and then release them. 
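(Mechanically, sanlock lets a client register such a "killpath" program along
with the lockspaces it joins; lvmlockd registers one per VG lockspace, which
is the command described next.)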
lvmlockd gives this command to sanlock for each VG when that VG is started: 'lvmlockctl --kill vg_name' If sanlock loses access to lease storage in that VG, it runs lvmlockctl --kill, which: 1. Uses syslog to explain what is happening. 2. Notifies lvmlockd that the VG is being killed, so lvmlockd can immediatley return an error for this condition if any new lock requests are made. (This step would not be strictly necessary.) 3. Attempts to quit using the VG. This is not yet implemented, but will eventually use blkdeactivate on the VG (or a more forceful equivalent.) 4. If step 3 was successful at terminating all use of the VG, then lvmlockd is told to release all the leases for the VG. If this is all done without about 40 seconds, the host can avoid being reset. Until steps 3 and 4 are fully implemented, manual steps can be substituted. This is primarily for testing since the problem needs to be noticed and responded to in a very short time. The manual alternative to step 3 is to kill any processes using file systems on LV's in the VG, unmount all file systems on the LVs, and deactivate all the LVs. Once this is done, the manual alternative to step 4 is to run 'lvmlockctl --drop vg_name', which tells lvmlockd to release all the leases for the VG. ------------------------------------------------------------------------------- */ /* * Each lockspace thread has its own sanlock daemon connection. * If they shared one, sanlock acquire/release calls would be * serialized. Some aspects of sanlock expect a single connection * from each pid: signals due to a sanlock_request, and * acquire/release/convert/inquire. The later can probably be * addressed with a flag to indicate that the pid field should be * interpretted as 'ci' (which the caller would need to figure * out somehow.) */ struct lm_sanlock { struct sanlk_lockspace ss; int align_size; int sock; /* sanlock daemon connection */ }; struct rd_sanlock { union { struct sanlk_resource rs; char buf[sizeof(struct sanlk_resource) + sizeof(struct sanlk_disk)]; }; struct val_blk *vb; }; struct sanlk_resourced { union { struct sanlk_resource rs; char buf[sizeof(struct sanlk_resource) + sizeof(struct sanlk_disk)]; }; }; int lm_data_size_sanlock(void) { return sizeof(struct rd_sanlock); } /* * lock_args format * * vg_lock_args format for sanlock is * vg_version_string:undefined:lock_lv_name * * lv_lock_args format for sanlock is * lv_version_string:undefined:offset * * version_string is MAJOR.MINOR.PATCH * undefined may contain ":" * * If a new version of the lock_args string cannot be * handled by an old version of lvmlockd, then the * new lock_args string should contain a larger major number. */ #define VG_LOCK_ARGS_MAJOR 1 #define VG_LOCK_ARGS_MINOR 0 #define VG_LOCK_ARGS_PATCH 0 #define LV_LOCK_ARGS_MAJOR 1 #define LV_LOCK_ARGS_MINOR 0 #define LV_LOCK_ARGS_PATCH 0 /* * offset 0 is lockspace * offset align_size * 1 is unused * offset align_size * 2 is unused * ... * offset align_size * 64 is unused * offset align_size * 65 is gl lock * offset align_size * 66 is vg lock * offset align_size * 67 is first lv lock * offset align_size * 68 is second lv lock * ... 
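 *
 * For example, with a 1MiB align_size (the value assumed by the
 * daemon_test code below), the gl lock lease sits at byte offset
 * 65 * 1048576 = 68157440, the vg lock at 66 * 1048576 = 69206016, and the
 * first two lv locks at 70254592 and 71303168, matching the lease offsets
 * shown in the 'sanlock status' example near the top of this file.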
*/ #define LS_BEGIN 0 #define GL_LOCK_BEGIN UINT64_C(65) #define VG_LOCK_BEGIN UINT64_C(66) #define LV_LOCK_BEGIN UINT64_C(67) static uint64_t daemon_test_lv_count; static int lock_lv_name_from_args(char *vg_args, char *lock_lv_name) { return last_string_from_args(vg_args, lock_lv_name); } static int lock_lv_offset_from_args(char *lv_args, uint64_t *lock_lv_offset) { char offset_str[MAX_ARGS+1]; int rv; memset(offset_str, 0, sizeof(offset_str)); rv = last_string_from_args(lv_args, offset_str); if (rv < 0) return rv; errno = 0; *lock_lv_offset = strtoull(offset_str, NULL, 10); if (errno) return -1; return 0; } static int check_args_version(char *args, unsigned int our_major) { unsigned int major = 0; int rv; rv = version_from_args(args, &major, NULL, NULL); if (rv < 0) { log_error("check_args_version %s error %d", args, rv); return rv; } if (major > our_major) { log_error("check_args_version %s major %u %u", args, major, our_major); return -1; } return 0; } #define MAX_LINE 64 static int read_host_id_file(void) { FILE *file; char line[MAX_LINE]; char key_str[MAX_LINE]; char val_str[MAX_LINE]; char *key, *val, *sep; int host_id = 0; file = fopen(daemon_host_id_file, "r"); if (!file) goto out; while (fgets(line, MAX_LINE, file)) { if (line[0] == '#' || line[0] == '\n') continue; key = line; sep = strstr(line, "="); val = sep + 1; if (!sep || !val) continue; *sep = '\0'; memset(key_str, 0, sizeof(key_str)); memset(val_str, 0, sizeof(val_str)); (void) sscanf(key, "%s", key_str); (void) sscanf(val, "%s", val_str); if (!strcmp(key_str, "host_id")) { host_id = atoi(val_str); break; } } if (fclose(file)) log_error("failed to close host id file %s", daemon_host_id_file); out: log_debug("host_id %d from %s", host_id, daemon_host_id_file); return host_id; } /* * vgcreate * * For init_vg, vgcreate passes the internal lv name as vg_args. * This constructs the full/proper vg_args format, containing the * version and lv name, and returns the real lock_args in vg_args. 
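/*
 * Hypothetical example of the rewrite described above: if vgcreate passes in
 * vg_args = "lvmlock" (the name of the internal lock LV), the function below
 * hands back vg_args = "1.0.0:lvmlock", i.e.
 * VG_LOCK_ARGS_MAJOR.MINOR.PATCH ":" lock_lv_name.
 */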
*/ int lm_init_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_args) { struct sanlk_lockspace ss; struct sanlk_resourced rd; struct sanlk_disk disk; char lock_lv_name[MAX_ARGS+1]; char lock_args_version[MAX_ARGS+1]; const char *gl_name = NULL; uint32_t daemon_version; uint32_t daemon_proto; uint64_t offset; int align_size; int i, rv; memset(&ss, 0, sizeof(ss)); memset(&rd, 0, sizeof(rd)); memset(&disk, 0, sizeof(disk)); memset(lock_lv_name, 0, sizeof(lock_lv_name)); memset(lock_args_version, 0, sizeof(lock_args_version)); if (!vg_args || !vg_args[0] || !strcmp(vg_args, "none")) { log_error("S %s init_vg_san vg_args missing", ls_name); return -EARGS; } snprintf(lock_args_version, MAX_ARGS, "%u.%u.%u", VG_LOCK_ARGS_MAJOR, VG_LOCK_ARGS_MINOR, VG_LOCK_ARGS_PATCH); /* see comment above about input vg_args being only lock_lv_name */ snprintf(lock_lv_name, MAX_ARGS, "%s", vg_args); if (strlen(lock_lv_name) + strlen(lock_args_version) + 2 > MAX_ARGS) return -EARGS; snprintf(disk.path, SANLK_PATH_LEN-1, "/dev/mapper/%s-%s", vg_name, lock_lv_name); log_debug("S %s init_vg_san path %s", ls_name, disk.path); if (daemon_test) { if (!gl_lsname_sanlock[0]) strncpy(gl_lsname_sanlock, ls_name, MAX_NAME); snprintf(vg_args, MAX_ARGS, "%s:%s", lock_args_version, lock_lv_name); return 0; } rv = sanlock_version(0, &daemon_version, &daemon_proto); if (rv < 0) { log_error("S %s init_vg_san failed to connect to sanlock daemon", ls_name); return -EMANAGER; } log_debug("sanlock daemon version %08x proto %08x", daemon_version, daemon_proto); rv = sanlock_align(&disk); if (rv <= 0) { if (rv == -EACCES) { log_error("S %s init_vg_san sanlock error -EACCES: no permission to access %s", ls_name, disk.path); return -EDEVOPEN; } else { log_error("S %s init_vg_san sanlock error %d trying to get align size of %s", ls_name, rv, disk.path); return -EARGS; } } else align_size = rv; strncpy(ss.name, ls_name, SANLK_NAME_LEN); memcpy(ss.host_id_disk.path, disk.path, SANLK_PATH_LEN); ss.host_id_disk.offset = LS_BEGIN * align_size; rv = sanlock_write_lockspace(&ss, 0, 0, sanlock_io_timeout); if (rv < 0) { log_error("S %s init_vg_san write_lockspace error %d %s", ls_name, rv, ss.host_id_disk.path); return rv; } /* * We want to create the global lock in the first sanlock vg. * If other sanlock vgs exist, then one of them must contain * the gl. If gl_lsname_sanlock is not set, then perhaps * the sanlock vg with the gl has been removed or has not yet * been seen. (Would vgcreate get this far in that case?) * If dlm vgs exist, then we choose to use the dlm gl and * not a sanlock gl. 
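 *
 * Decision sketch (mirroring the code just below): LD_AF_ENABLE forces a
 * real GL lease to be written here, LD_AF_DISABLE forces the disabled
 * placeholder name, and otherwise a real GL is written only when sanlock
 * is the GL manager in use, no other sanlock lockspace is already known
 * to hold the GL, and no other lockspaces exist yet.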
*/ if (flags & LD_AF_ENABLE) gl_name = R_NAME_GL; else if (flags & LD_AF_DISABLE) gl_name = R_NAME_GL_DISABLED; else if (!gl_use_sanlock || gl_lsname_sanlock[0] || !lockspaces_empty()) gl_name = R_NAME_GL_DISABLED; else gl_name = R_NAME_GL; memcpy(rd.rs.lockspace_name, ss.name, SANLK_NAME_LEN); strncpy(rd.rs.name, gl_name, SANLK_NAME_LEN); memcpy(rd.rs.disks[0].path, disk.path, SANLK_PATH_LEN); rd.rs.disks[0].offset = align_size * GL_LOCK_BEGIN; rd.rs.num_disks = 1; rv = sanlock_write_resource(&rd.rs, 0, 0, 0); if (rv < 0) { log_error("S %s init_vg_san write_resource gl error %d %s", ls_name, rv, rd.rs.disks[0].path); return rv; } memcpy(rd.rs.lockspace_name, ss.name, SANLK_NAME_LEN); strncpy(rd.rs.name, R_NAME_VG, SANLK_NAME_LEN); memcpy(rd.rs.disks[0].path, disk.path, SANLK_PATH_LEN); rd.rs.disks[0].offset = align_size * VG_LOCK_BEGIN; rd.rs.num_disks = 1; rv = sanlock_write_resource(&rd.rs, 0, 0, 0); if (rv < 0) { log_error("S %s init_vg_san write_resource vg error %d %s", ls_name, rv, rd.rs.disks[0].path); return rv; } if (!strcmp(gl_name, R_NAME_GL)) strncpy(gl_lsname_sanlock, ls_name, MAX_NAME); snprintf(vg_args, MAX_ARGS, "%s:%s", lock_args_version, lock_lv_name); log_debug("S %s init_vg_san done vg_args %s", ls_name, vg_args); /* * Go through all lv resource slots and initialize them with the * correct lockspace name but a special resource name that indicates * it is unused. */ memset(&rd, 0, sizeof(rd)); rd.rs.num_disks = 1; memcpy(rd.rs.disks[0].path, disk.path, SANLK_PATH_LEN); strncpy(rd.rs.lockspace_name, ls_name, SANLK_NAME_LEN); strcpy(rd.rs.name, "#unused"); offset = align_size * LV_LOCK_BEGIN; log_debug("S %s init_vg_san clearing lv lease areas", ls_name); for (i = 0; ; i++) { rd.rs.disks[0].offset = offset; rv = sanlock_write_resource(&rd.rs, 0, 0, 0); if (rv == -EMSGSIZE || rv == -ENOSPC) { /* This indicates the end of the device is reached. */ rv = -EMSGSIZE; break; } if (rv) { log_error("clear lv resource area %llu error %d", (unsigned long long)offset, rv); break; } offset += align_size; } return 0; } /* * lvcreate * * The offset at which the lv lease is written is passed * all the way back to the lvcreate command so that it * can be saved in the lv's lock_args in the vg metadata. 
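 *
 * Hedged example (hypothetical values): with LV_LOCK_ARGS_* at 1.0.0, a
 * 1 MiB align_size and the first free slot at LV_LOCK_BEGIN (67), the
 * lv_args written below would be "1.0.0:70254592", i.e.
 * "<lock_args_version>:<byte offset of the lv lease>" with
 * 67 * 1048576 = 70254592.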
*/ int lm_init_lv_sanlock(char *ls_name, char *vg_name, char *lv_name, char *vg_args, char *lv_args, uint64_t free_offset) { struct sanlk_resourced rd; char lock_lv_name[MAX_ARGS+1]; char lock_args_version[MAX_ARGS+1]; uint64_t offset; int align_size; int rv; memset(&rd, 0, sizeof(rd)); memset(lock_lv_name, 0, sizeof(lock_lv_name)); memset(lock_args_version, 0, sizeof(lock_args_version)); rv = lock_lv_name_from_args(vg_args, lock_lv_name); if (rv < 0) { log_error("S %s init_lv_san lock_lv_name_from_args error %d %s", ls_name, rv, vg_args); return rv; } snprintf(lock_args_version, MAX_ARGS, "%u.%u.%u", LV_LOCK_ARGS_MAJOR, LV_LOCK_ARGS_MINOR, LV_LOCK_ARGS_PATCH); if (daemon_test) { align_size = 1048576; snprintf(lv_args, MAX_ARGS, "%s:%llu", lock_args_version, (unsigned long long)((align_size * LV_LOCK_BEGIN) + (align_size * daemon_test_lv_count))); daemon_test_lv_count++; return 0; } strncpy(rd.rs.lockspace_name, ls_name, SANLK_NAME_LEN); rd.rs.num_disks = 1; snprintf(rd.rs.disks[0].path, SANLK_PATH_LEN-1, "/dev/mapper/%s-%s", vg_name, lock_lv_name); align_size = sanlock_align(&rd.rs.disks[0]); if (align_size <= 0) { log_error("S %s init_lv_san align error %d", ls_name, align_size); return -EINVAL; } if (free_offset) offset = free_offset; else offset = align_size * LV_LOCK_BEGIN; rd.rs.disks[0].offset = offset; while (1) { rd.rs.disks[0].offset = offset; memset(rd.rs.name, 0, SANLK_NAME_LEN); rv = sanlock_read_resource(&rd.rs, 0); if (rv == -EMSGSIZE || rv == -ENOSPC) { /* This indicates the end of the device is reached. */ log_debug("S %s init_lv_san read limit offset %llu", ls_name, (unsigned long long)offset); rv = -EMSGSIZE; return rv; } if (rv && rv != SANLK_LEADER_MAGIC) { log_error("S %s init_lv_san read error %d offset %llu", ls_name, rv, (unsigned long long)offset); break; } if (!strncmp(rd.rs.name, lv_name, SANLK_NAME_LEN)) { log_error("S %s init_lv_san resource name %s already exists at %llu", ls_name, lv_name, (unsigned long long)offset); return -EEXIST; } /* * If we read newly extended space, it will not be initialized * with an "#unused" resource, but will return SANLK_LEADER_MAGIC * indicating an uninitialized paxos structure on disk. */ if ((rv == SANLK_LEADER_MAGIC) || !strcmp(rd.rs.name, "#unused")) { log_debug("S %s init_lv_san %s found unused area at %llu", ls_name, lv_name, (unsigned long long)offset); strncpy(rd.rs.name, lv_name, SANLK_NAME_LEN); rv = sanlock_write_resource(&rd.rs, 0, 0, 0); if (!rv) { snprintf(lv_args, MAX_ARGS, "%s:%llu", lock_args_version, (unsigned long long)offset); } else { log_error("S %s init_lv_san write error %d offset %llu", ls_name, rv, (unsigned long long)rv); } break; } offset += align_size; } return rv; } /* * Read the lockspace and each resource, replace the lockspace name, * and write it back. 
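 *
 * Summary of the sequence performed below (a restatement for clarity,
 * not additional behaviour):
 *
 *   1. read the lockspace at LS_BEGIN, set the new name, write it back
 *   2. read the gl resource at GL_LOCK_BEGIN, set the new lockspace
 *      name, write it back
 *   3. same for the vg resource at VG_LOCK_BEGIN
 *   4. walk the lv slots from LV_LOCK_BEGIN, rewriting each with the
 *      new lockspace name until sanlock reports the end of the device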
*/ int lm_rename_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_args) { struct sanlk_lockspace ss; struct sanlk_resourced rd; struct sanlk_disk disk; char lock_lv_name[MAX_ARGS+1]; uint64_t offset; uint32_t io_timeout; int align_size; int i, rv; memset(&disk, 0, sizeof(disk)); memset(lock_lv_name, 0, sizeof(lock_lv_name)); if (!vg_args || !vg_args[0] || !strcmp(vg_args, "none")) { log_error("S %s rename_vg_san vg_args missing", ls_name); return -EINVAL; } rv = lock_lv_name_from_args(vg_args, lock_lv_name); if (rv < 0) { log_error("S %s init_lv_san lock_lv_name_from_args error %d %s", ls_name, rv, vg_args); return rv; } snprintf(disk.path, SANLK_PATH_LEN-1, "/dev/mapper/%s-%s", vg_name, lock_lv_name); log_debug("S %s rename_vg_san path %s", ls_name, disk.path); if (daemon_test) return 0; /* FIXME: device is not always ready for us here */ sleep(1); align_size = sanlock_align(&disk); if (align_size <= 0) { log_error("S %s rename_vg_san bad align size %d %s", ls_name, align_size, disk.path); return -EINVAL; } /* * Lockspace */ memset(&ss, 0, sizeof(ss)); memcpy(ss.host_id_disk.path, disk.path, SANLK_PATH_LEN); ss.host_id_disk.offset = LS_BEGIN * align_size; rv = sanlock_read_lockspace(&ss, 0, &io_timeout); if (rv < 0) { log_error("S %s rename_vg_san read_lockspace error %d %s", ls_name, rv, ss.host_id_disk.path); return rv; } strncpy(ss.name, ls_name, SANLK_NAME_LEN); rv = sanlock_write_lockspace(&ss, 0, 0, sanlock_io_timeout); if (rv < 0) { log_error("S %s rename_vg_san write_lockspace error %d %s", ls_name, rv, ss.host_id_disk.path); return rv; } /* * GL resource */ memset(&rd, 0, sizeof(rd)); memcpy(rd.rs.disks[0].path, disk.path, SANLK_PATH_LEN); rd.rs.disks[0].offset = align_size * GL_LOCK_BEGIN; rd.rs.num_disks = 1; rv = sanlock_read_resource(&rd.rs, 0); if (rv < 0) { log_error("S %s rename_vg_san read_resource gl error %d %s", ls_name, rv, rd.rs.disks[0].path); return rv; } strncpy(rd.rs.lockspace_name, ss.name, SANLK_NAME_LEN); rv = sanlock_write_resource(&rd.rs, 0, 0, 0); if (rv < 0) { log_error("S %s rename_vg_san write_resource gl error %d %s", ls_name, rv, rd.rs.disks[0].path); return rv; } /* * VG resource */ memset(&rd, 0, sizeof(rd)); memcpy(rd.rs.disks[0].path, disk.path, SANLK_PATH_LEN); rd.rs.disks[0].offset = align_size * VG_LOCK_BEGIN; rd.rs.num_disks = 1; rv = sanlock_read_resource(&rd.rs, 0); if (rv < 0) { log_error("S %s rename_vg_san write_resource vg error %d %s", ls_name, rv, rd.rs.disks[0].path); return rv; } strncpy(rd.rs.lockspace_name, ss.name, SANLK_NAME_LEN); rv = sanlock_write_resource(&rd.rs, 0, 0, 0); if (rv < 0) { log_error("S %s rename_vg_san write_resource vg error %d %s", ls_name, rv, rd.rs.disks[0].path); return rv; } /* * LV resources */ offset = align_size * LV_LOCK_BEGIN; for (i = 0; ; i++) { memset(&rd, 0, sizeof(rd)); memcpy(rd.rs.disks[0].path, disk.path, SANLK_PATH_LEN); rd.rs.disks[0].offset = offset; rd.rs.num_disks = 1; rv = sanlock_read_resource(&rd.rs, 0); if (rv == -EMSGSIZE || rv == -ENOSPC) { /* This indicates the end of the device is reached. 
*/ rv = -EMSGSIZE; break; } if (rv < 0) { log_error("S %s rename_vg_san read_resource resource area %llu error %d", ls_name, (unsigned long long)offset, rv); break; } strncpy(rd.rs.lockspace_name, ss.name, SANLK_NAME_LEN); rv = sanlock_write_resource(&rd.rs, 0, 0, 0); if (rv) { log_error("S %s rename_vg_san write_resource resource area %llu error %d", ls_name, (unsigned long long)offset, rv); break; } offset += align_size; } return 0; } /* lvremove */ int lm_free_lv_sanlock(struct lockspace *ls, struct resource *r) { struct rd_sanlock *rds = (struct rd_sanlock *)r->lm_data; struct sanlk_resource *rs = &rds->rs; int rv; log_debug("S %s R %s free_lv_san", ls->name, r->name); if (daemon_test) return 0; strcpy(rs->name, "#unused"); rv = sanlock_write_resource(rs, 0, 0, 0); if (rv < 0) { log_error("S %s R %s free_lv_san write error %d", ls->name, r->name, rv); } return rv; } int lm_ex_disable_gl_sanlock(struct lockspace *ls) { struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data; struct sanlk_resourced rd1; struct sanlk_resourced rd2; struct sanlk_resource *rs1; struct sanlk_resource *rs2; struct sanlk_resource **rs_args; int rv; if (daemon_test) return 0; rs_args = malloc(2 * sizeof(struct sanlk_resource *)); if (!rs_args) return -ENOMEM; rs1 = &rd1.rs; rs2 = &rd2.rs; memset(&rd1, 0, sizeof(rd1)); memset(&rd2, 0, sizeof(rd2)); strncpy(rd1.rs.lockspace_name, ls->name, SANLK_NAME_LEN); strncpy(rd1.rs.name, R_NAME_GL, SANLK_NAME_LEN); strncpy(rd2.rs.lockspace_name, ls->name, SANLK_NAME_LEN); strncpy(rd2.rs.name, R_NAME_GL_DISABLED, SANLK_NAME_LEN); rd1.rs.num_disks = 1; strncpy(rd1.rs.disks[0].path, lms->ss.host_id_disk.path, SANLK_PATH_LEN-1); rd1.rs.disks[0].offset = lms->align_size * GL_LOCK_BEGIN; rv = sanlock_acquire(lms->sock, -1, 0, 1, &rs1, NULL); if (rv < 0) { log_error("S %s ex_disable_gl_san acquire error %d", ls->name, rv); goto out; } rs_args[0] = rs1; rs_args[1] = rs2; rv = sanlock_release(lms->sock, -1, SANLK_REL_RENAME, 2, rs_args); if (rv < 0) { log_error("S %s ex_disable_gl_san release_rename error %d", ls->name, rv); } out: free(rs_args); return rv; } /* * enable/disable exist because each vg contains a global lock, * but we only want to use the gl from one of them. The first * sanlock vg created, has its gl enabled, and subsequent * sanlock vgs have their gl disabled. If the vg containing the * gl is removed, the gl from another sanlock vg needs to be * enabled. Or, if gl in multiple vgs are somehow enabled, we * want to be able to disable one of them. * * Disable works by naming/renaming the gl resource to have a * name that is different from the predefined name. * When a host attempts to acquire the gl with its standard * predefined name, it will fail because the resource's name * on disk doesn't match. 
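 *
 * Illustration (the actual strings come from R_NAME_GL and
 * R_NAME_GL_DISABLED in the internal header; they are not spelled out
 * here):
 *
 *   enabled : the resource at GL_LOCK_BEGIN is named R_NAME_GL, so a
 *             host acquiring the gl by that name succeeds
 *   disabled: the resource is rewritten as R_NAME_GL_DISABLED, so an
 *             acquire using R_NAME_GL fails with a name mismatch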
*/ int lm_able_gl_sanlock(struct lockspace *ls, int enable) { struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data; struct sanlk_resourced rd; const char *gl_name; int rv; if (enable) gl_name = R_NAME_GL; else gl_name = R_NAME_GL_DISABLED; if (daemon_test) goto out; memset(&rd, 0, sizeof(rd)); strncpy(rd.rs.lockspace_name, ls->name, SANLK_NAME_LEN); strncpy(rd.rs.name, gl_name, SANLK_NAME_LEN); rd.rs.num_disks = 1; strncpy(rd.rs.disks[0].path, lms->ss.host_id_disk.path, SANLK_PATH_LEN-1); rd.rs.disks[0].offset = lms->align_size * GL_LOCK_BEGIN; rv = sanlock_write_resource(&rd.rs, 0, 0, 0); if (rv < 0) { log_error("S %s able_gl %d write_resource gl error %d %s", ls->name, enable, rv, rd.rs.disks[0].path); return rv; } out: log_debug("S %s able_gl %s", ls->name, gl_name); ls->sanlock_gl_enabled = enable; if (enable) strncpy(gl_lsname_sanlock, ls->name, MAX_NAME); if (!enable && !strcmp(gl_lsname_sanlock, ls->name)) memset(gl_lsname_sanlock, 0, sizeof(gl_lsname_sanlock)); return 0; } static int gl_is_enabled(struct lockspace *ls, struct lm_sanlock *lms) { char strname[SANLK_NAME_LEN + 1]; struct sanlk_resourced rd; uint64_t offset; int rv; if (daemon_test) return 1; memset(&rd, 0, sizeof(rd)); strncpy(rd.rs.lockspace_name, ls->name, SANLK_NAME_LEN); /* leave rs.name empty, it is what we're checking */ rd.rs.num_disks = 1; strncpy(rd.rs.disks[0].path, lms->ss.host_id_disk.path, SANLK_PATH_LEN-1); offset = lms->align_size * GL_LOCK_BEGIN; rd.rs.disks[0].offset = offset; rv = sanlock_read_resource(&rd.rs, 0); if (rv < 0) { log_error("gl_is_enabled read_resource error %d", rv); return rv; } memset(strname, 0, sizeof(strname)); memcpy(strname, rd.rs.name, SANLK_NAME_LEN); if (!strcmp(strname, R_NAME_GL_DISABLED)) { return 0; } if (!strcmp(strname, R_NAME_GL)) { return 1; } log_error("gl_is_enabled invalid gl name %s", strname); return -1; } int lm_gl_is_enabled(struct lockspace *ls) { int rv; rv = gl_is_enabled(ls, ls->lm_data); ls->sanlock_gl_enabled = rv; return rv; } /* * This is called at the beginning of lvcreate to * ensure there is free space for a new LV lock. * If not, lvcreate will extend the lvmlock lv * before continuing with creating the new LV. * This way, lm_init_lv_san() should find a free * lock (unless the autoextend of lvmlock lv has * been disabled.) */ int lm_find_free_lock_sanlock(struct lockspace *ls, uint64_t *free_offset) { struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data; struct sanlk_resourced rd; uint64_t offset; uint64_t start_offset; int rv; int round = 0; if (daemon_test) { *free_offset = (1048576 * LV_LOCK_BEGIN) + (1048576 * (daemon_test_lv_count + 1)); return 0; } memset(&rd, 0, sizeof(rd)); strncpy(rd.rs.lockspace_name, ls->name, SANLK_NAME_LEN); rd.rs.num_disks = 1; strncpy(rd.rs.disks[0].path, lms->ss.host_id_disk.path, SANLK_PATH_LEN-1); if (ls->free_lock_offset) offset = ls->free_lock_offset; else offset = lms->align_size * LV_LOCK_BEGIN; start_offset = offset; while (1) { if (offset >= start_offset && round) { /* This indicates the all space are allocated. */ log_debug("S %s init_lv_san read back to start offset %llu", ls->name, (unsigned long long)offset); rv = -EMSGSIZE; return rv; } rd.rs.disks[0].offset = offset; memset(rd.rs.name, 0, SANLK_NAME_LEN); rv = sanlock_read_resource(&rd.rs, 0); if (rv == -EMSGSIZE || rv == -ENOSPC) { /* This indicates the end of the device is reached. 
*/ log_debug("S %s find_free_lock_san read limit offset %llu", ls->name, (unsigned long long)offset); /* remember the NO SPACE offset, if no free area left, * search from this offset after extend */ *free_offset = offset; offset = lms->align_size * LV_LOCK_BEGIN; round = 1; continue; } /* * If we read newly extended space, it will not be initialized * with an "#unused" resource, but will return an error about * an invalid paxos structure on disk. */ if (rv == SANLK_LEADER_MAGIC) { log_debug("S %s find_free_lock_san found empty area at %llu", ls->name, (unsigned long long)offset); *free_offset = offset; return 0; } if (rv) { log_error("S %s find_free_lock_san read error %d offset %llu", ls->name, rv, (unsigned long long)offset); break; } if (!strcmp(rd.rs.name, "#unused")) { log_debug("S %s find_free_lock_san found unused area at %llu", ls->name, (unsigned long long)offset); *free_offset = offset; return 0; } offset += lms->align_size; } return rv; } /* * host A: start_vg/add_lockspace * host B: vgremove * * The global lock cannot always be held around start_vg * on host A because the gl is in a vg that may not be * started yet, or may be in the vg we are starting. * * If B removes the vg, destroying the delta leases, * while A is a lockspace member, it will cause A's * sanlock delta lease renewal to fail, and lockspace * recovery. * * I expect this overlap would usually cause a failure * in the add_lockspace() on host A when it sees that * the lockspace structures have been clobbered by B. * Having add_lockspace() fail should be a fine result. * * If add_lockspace was somehow able to finish, the * subsequent renewal would probably fail instead. * This should also not create any major problems. */ int lm_prepare_lockspace_sanlock(struct lockspace *ls) { struct stat st; struct lm_sanlock *lms = NULL; char lock_lv_name[MAX_ARGS+1]; char lsname[SANLK_NAME_LEN + 1]; char disk_path[SANLK_PATH_LEN]; char killpath[SANLK_PATH_LEN]; char killargs[SANLK_PATH_LEN]; int gl_found; int ret, rv; memset(disk_path, 0, sizeof(disk_path)); memset(lock_lv_name, 0, sizeof(lock_lv_name)); /* * Construct the path to lvmlockctl by using the path to the lvm binary * and appending "lockctl" to get /path/to/lvmlockctl. */ memset(killpath, 0, sizeof(killpath)); snprintf(killpath, SANLK_PATH_LEN - 1, "%slockctl", LVM_PATH); memset(killargs, 0, sizeof(killargs)); snprintf(killargs, SANLK_PATH_LEN - 1, "--kill %s", ls->vg_name); rv = check_args_version(ls->vg_args, VG_LOCK_ARGS_MAJOR); if (rv < 0) { ret = -EARGS; goto fail; } rv = lock_lv_name_from_args(ls->vg_args, lock_lv_name); if (rv < 0) { log_error("S %s prepare_lockspace_san lock_lv_name_from_args error %d %s", ls->name, rv, ls->vg_args); ret = -EARGS; goto fail; } snprintf(disk_path, SANLK_PATH_LEN-1, "/dev/mapper/%s-%s", ls->vg_name, lock_lv_name); /* * When a vg is started, the internal sanlock lv should be * activated before lvmlockd is asked to add the lockspace. * (sanlock needs to use the lv.) * * In the future we might be able to ask something on the system * to activate the sanlock lv from here, and with that we might be * able to start sanlock VGs without requiring a * vgchange --lock-start command. 
*/ /* FIXME: device is not always ready for us here */ sleep(1); rv = stat(disk_path, &st); if (rv < 0) { log_error("S %s prepare_lockspace_san stat error %d disk_path %s", ls->name, errno, disk_path); ret = -EARGS; goto fail; } if (!ls->host_id) { if (daemon_host_id) ls->host_id = daemon_host_id; else if (daemon_host_id_file) ls->host_id = read_host_id_file(); } if (!ls->host_id || ls->host_id > 2000) { log_error("S %s prepare_lockspace_san invalid host_id %llu", ls->name, (unsigned long long)ls->host_id); ret = -EHOSTID; goto fail; } lms = malloc(sizeof(struct lm_sanlock)); if (!lms) { ret = -ENOMEM; goto fail; } memset(lsname, 0, sizeof(lsname)); strncpy(lsname, ls->name, SANLK_NAME_LEN); memset(lms, 0, sizeof(struct lm_sanlock)); memcpy(lms->ss.name, lsname, SANLK_NAME_LEN); lms->ss.host_id_disk.offset = 0; lms->ss.host_id = ls->host_id; strncpy(lms->ss.host_id_disk.path, disk_path, SANLK_PATH_LEN-1); if (daemon_test) { if (!gl_lsname_sanlock[0]) { strncpy(gl_lsname_sanlock, lsname, MAX_NAME); log_debug("S %s prepare_lockspace_san use global lock", lsname); } goto out; } lms->sock = sanlock_register(); if (lms->sock < 0) { log_error("S %s prepare_lockspace_san register error %d", lsname, lms->sock); lms->sock = 0; ret = -EMANAGER; goto fail; } log_debug("set killpath to %s %s", killpath, killargs); rv = sanlock_killpath(lms->sock, 0, killpath, killargs); if (rv < 0) { log_error("S %s killpath error %d", lsname, rv); ret = -EMANAGER; goto fail; } rv = sanlock_restrict(lms->sock, SANLK_RESTRICT_SIGKILL); if (rv < 0) { log_error("S %s restrict error %d", lsname, rv); ret = -EMANAGER; goto fail; } lms->align_size = sanlock_align(&lms->ss.host_id_disk); if (lms->align_size <= 0) { log_error("S %s prepare_lockspace_san align error %d", lsname, lms->align_size); ret = -EMANAGER; goto fail; } gl_found = gl_is_enabled(ls, lms); if (gl_found < 0) { log_error("S %s prepare_lockspace_san gl_enabled error %d", lsname, gl_found); ret = -EARGS; goto fail; } ls->sanlock_gl_enabled = gl_found; if (gl_found) { if (gl_use_dlm) { log_error("S %s prepare_lockspace_san gl_use_dlm is set", lsname); } else if (gl_lsname_sanlock[0] && strcmp(gl_lsname_sanlock, lsname)) { log_error("S %s prepare_lockspace_san multiple sanlock global locks current %s", lsname, gl_lsname_sanlock); } else { strncpy(gl_lsname_sanlock, lsname, MAX_NAME); log_debug("S %s prepare_lockspace_san use global lock %s", lsname, gl_lsname_sanlock); } } out: ls->lm_data = lms; log_debug("S %s prepare_lockspace_san done", lsname); return 0; fail: if (lms && lms->sock) close(lms->sock); if (lms) free(lms); return ret; } int lm_add_lockspace_sanlock(struct lockspace *ls, int adopt) { struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data; int rv; if (daemon_test) { sleep(2); goto out; } rv = sanlock_add_lockspace_timeout(&lms->ss, 0, sanlock_io_timeout); if (rv == -EEXIST && adopt) { /* We could alternatively just skip the sanlock call for adopt. */ log_debug("S %s add_lockspace_san adopt found ls", ls->name); goto out; } if (rv < 0) { /* retry for some errors? */ log_error("S %s add_lockspace_san add_lockspace error %d", ls->name, rv); goto fail; } /* * Don't let the lockspace be cleanly released if orphan locks * exist, because the orphan locks are still protecting resources * that are being used on the host, e.g. active lvs. If the * lockspace is cleanly released, another host could acquire the * orphan leases. 
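 *
 * As best understood, the sanlock_set_config() call below with
 * SANLK_CONFIG_USED_BY_ORPHANS is what implements this: it tells
 * sanlock to treat the lockspace as still in use while orphan leases
 * remain, so a plain lockspace removal is refused.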
*/ rv = sanlock_set_config(ls->name, 0, SANLK_CONFIG_USED_BY_ORPHANS, NULL); if (rv < 0) { log_error("S %s add_lockspace_san set_config error %d", ls->name, rv); sanlock_rem_lockspace(&lms->ss, 0); goto fail; } out: log_debug("S %s add_lockspace_san done", ls->name); return 0; fail: if (close(lms->sock)) log_error("failed to close sanlock daemon socket connection"); free(lms); ls->lm_data = NULL; return rv; } int lm_rem_lockspace_sanlock(struct lockspace *ls, int free_vg) { struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data; int rv; if (daemon_test) goto out; rv = sanlock_rem_lockspace(&lms->ss, 0); if (rv < 0) { log_error("S %s rem_lockspace_san error %d", ls->name, rv); return rv; } if (free_vg) { /* * Destroy sanlock lockspace (delta leases). Forces failure for any * other host that is still using or attempts to use this lockspace. * This shouldn't be generally necessary, but there may some races * between nodes starting and removing a vg which this could help. */ strncpy(lms->ss.name, "#unused", SANLK_NAME_LEN); rv = sanlock_write_lockspace(&lms->ss, 0, 0, sanlock_io_timeout); if (rv < 0) { log_error("S %s rem_lockspace free_vg write_lockspace error %d %s", ls->name, rv, lms->ss.host_id_disk.path); } } if (close(lms->sock)) log_error("failed to close sanlock daemon socket connection"); out: free(lms); ls->lm_data = NULL; /* FIXME: should we only clear gl_lsname when doing free_vg? */ if (!strcmp(ls->name, gl_lsname_sanlock)) memset(gl_lsname_sanlock, 0, sizeof(gl_lsname_sanlock)); return 0; } static int lm_add_resource_sanlock(struct lockspace *ls, struct resource *r) { struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data; struct rd_sanlock *rds = (struct rd_sanlock *)r->lm_data; strncpy(rds->rs.lockspace_name, ls->name, SANLK_NAME_LEN); strncpy(rds->rs.name, r->name, SANLK_NAME_LEN); rds->rs.num_disks = 1; memcpy(rds->rs.disks[0].path, lms->ss.host_id_disk.path, SANLK_PATH_LEN); if (r->type == LD_RT_GL) rds->rs.disks[0].offset = GL_LOCK_BEGIN * lms->align_size; else if (r->type == LD_RT_VG) rds->rs.disks[0].offset = VG_LOCK_BEGIN * lms->align_size; /* LD_RT_LV offset is set in each lm_lock call from lv_args. */ if (r->type == LD_RT_GL || r->type == LD_RT_VG) { rds->vb = malloc(sizeof(struct val_blk)); if (!rds->vb) return -ENOMEM; memset(rds->vb, 0, sizeof(struct val_blk)); } return 0; } int lm_rem_resource_sanlock(struct lockspace *ls, struct resource *r) { struct rd_sanlock *rds = (struct rd_sanlock *)r->lm_data; /* FIXME: assert r->mode == UN or unlock if it's not? */ if (rds->vb) free(rds->vb); memset(rds, 0, sizeof(struct rd_sanlock)); r->lm_init = 0; return 0; } int lm_lock_sanlock(struct lockspace *ls, struct resource *r, int ld_mode, struct val_blk *vb_out, int *retry, int adopt) { struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data; struct rd_sanlock *rds = (struct rd_sanlock *)r->lm_data; struct sanlk_resource *rs; struct sanlk_options opt; uint64_t lock_lv_offset; uint32_t flags = 0; struct val_blk vb; int added = 0; int rv; if (!r->lm_init) { rv = lm_add_resource_sanlock(ls, r); if (rv < 0) return rv; r->lm_init = 1; added = 1; } rs = &rds->rs; /* * While there are duplicate global locks, keep checking * to see if any have been disabled. 
*/ if (sanlock_gl_dup && ls->sanlock_gl_enabled && (r->type == LD_RT_GL || r->type == LD_RT_VG)) ls->sanlock_gl_enabled = gl_is_enabled(ls, ls->lm_data); if (r->type == LD_RT_LV) { /* * The lv may have been removed and recreated with a new lease * offset, so we need to get the offset from lv_args each time * instead of reusing the value that we last set in rds->rs. * act->lv_args is copied to r->lv_args before every lm_lock(). */ rv = check_args_version(r->lv_args, LV_LOCK_ARGS_MAJOR); if (rv < 0) { log_error("S %s R %s lock_san wrong lv_args version %s", ls->name, r->name, r->lv_args); return rv; } rv = lock_lv_offset_from_args(r->lv_args, &lock_lv_offset); if (rv < 0) { log_error("S %s R %s lock_san lv_offset_from_args error %d %s", ls->name, r->name, rv, r->lv_args); return rv; } if (!added && (rds->rs.disks[0].offset != lock_lv_offset)) { log_debug("S %s R %s lock_san offset old %llu new %llu", ls->name, r->name, (unsigned long long)rds->rs.disks[0].offset, (unsigned long long)lock_lv_offset); } rds->rs.disks[0].offset = lock_lv_offset; } if (ld_mode == LD_LK_SH) { rs->flags |= SANLK_RES_SHARED; } else if (ld_mode == LD_LK_EX) { rs->flags &= ~SANLK_RES_SHARED; } else { log_error("lock_san invalid mode %d", ld_mode); return -EINVAL; } /* * Use PERSISTENT because if lvmlockd exits while holding * a lock, it's not safe to simply clear/drop the lock while * a command or lv is using it. */ rs->flags |= SANLK_RES_PERSISTENT; log_debug("S %s R %s lock_san %s at %s:%llu", ls->name, r->name, mode_str(ld_mode), rs->disks[0].path, (unsigned long long)rs->disks[0].offset); if (daemon_test) { if (rds->vb) { vb_out->version = le16_to_cpu(rds->vb->version); vb_out->flags = le16_to_cpu(rds->vb->flags); vb_out->r_version = le32_to_cpu(rds->vb->r_version); } return 0; } if (rds->vb) flags |= SANLK_ACQUIRE_LVB; if (adopt) flags |= SANLK_ACQUIRE_ORPHAN_ONLY; /* * Don't block waiting for a failed lease to expire since it causes * sanlock_acquire to block for a long time, which would prevent this * thread from processing other lock requests. */ flags |= SANLK_ACQUIRE_OWNER_NOWAIT; memset(&opt, 0, sizeof(opt)); sprintf(opt.owner_name, "%s", "lvmlockd"); rv = sanlock_acquire(lms->sock, -1, flags, 1, &rs, &opt); if (rv == -EAGAIN) { /* * It appears that sanlock_acquire returns EAGAIN when we request * a shared lock but the lock is held ex by another host. * There's no point in retrying this case, just return an error. */ log_debug("S %s R %s lock_san acquire mode %d rv EAGAIN", ls->name, r->name, ld_mode); *retry = 0; return -EAGAIN; } if ((rv == -EMSGSIZE) && (r->type == LD_RT_LV)) { /* * sanlock tried to read beyond the end of the device, * so the offset of the lv lease is beyond the end of the * device, which means that the lease lv was extended, and * the lease for this lv was allocated in the new space. * The lvm command will see this error, refresh the lvmlock * lv, and try again. */ log_debug("S %s R %s lock_san acquire offset %llu rv EMSGSIZE", ls->name, r->name, (unsigned long long)rs->disks[0].offset); *retry = 0; return -EMSGSIZE; } if (adopt && (rv == -EUCLEAN)) { /* * The orphan lock exists but in a different mode than we asked * for, so the caller should try again with the other mode. */ log_debug("S %s R %s lock_san adopt mode %d try other mode", ls->name, r->name, ld_mode); *retry = 0; return -EUCLEAN; } if (adopt && (rv == -ENOENT)) { /* * No orphan lock exists. 
*/ log_debug("S %s R %s lock_san adopt mode %d no orphan found", ls->name, r->name, ld_mode); *retry = 0; return -ENOENT; } if (rv == SANLK_ACQUIRE_IDLIVE || rv == SANLK_ACQUIRE_OWNED || rv == SANLK_ACQUIRE_OTHER) { /* * The lock is held by another host. These failures can * happen while multiple hosts are concurrently acquiring * shared locks. We want to retry a couple times in this * case because we'll probably get the sh lock. * * I believe these are also the errors when requesting an * ex lock that another host holds ex. We want to report * something like: "lock is held by another host" in this case. * Retry is pointless here. * * We can't distinguish between the two cases above, * so if requesting a sh lock, retry a couple times, * otherwise don't. */ log_debug("S %s R %s lock_san acquire mode %d rv %d", ls->name, r->name, ld_mode, rv); *retry = (ld_mode == LD_LK_SH) ? 1 : 0; return -EAGAIN; } if (rv == SANLK_ACQUIRE_OWNED_RETRY) { /* * The lock is held by a failed host, and will eventually * expire. If we retry we'll eventually acquire the lock * (or find someone else has acquired it). The EAGAIN retry * attempts for SH locks above would not be sufficient for * the length of expiration time. We could add a longer * retry time here to cover the full expiration time and block * the activation command for that long. For now just return * the standard error indicating that another host still owns * the lease. FIXME: return a different error number so the * command can print an different error indicating that the * owner of the lease is in the process of expiring? */ log_debug("S %s R %s lock_san acquire mode %d rv %d", ls->name, r->name, ld_mode, rv); *retry = 0; return -EAGAIN; } if (rv < 0) { log_error("S %s R %s lock_san acquire error %d", ls->name, r->name, rv); /* if the gl has been disabled, remove and free the gl resource */ if ((rv == SANLK_LEADER_RESOURCE) && (r->type == LD_RT_GL)) { if (!lm_gl_is_enabled(ls)) { log_error("S %s R %s lock_san gl has been disabled", ls->name, r->name); if (!strcmp(gl_lsname_sanlock, ls->name)) memset(gl_lsname_sanlock, 0, sizeof(gl_lsname_sanlock)); return -EUNATCH; } } if (added) lm_rem_resource_sanlock(ls, r); /* sanlock gets i/o errors trying to read/write the leases. */ if (rv == -EIO) rv = -ELOCKIO; /* * The sanlock lockspace can disappear if the lease storage fails, * the delta lease renewals fail, the lockspace enters recovery, * lvmlockd holds no leases in the lockspace, so sanlock can * stop and free the lockspace. */ if (rv == -ENOSPC) rv = -ELOCKIO; return rv; } if (rds->vb) { rv = sanlock_get_lvb(0, rs, (char *)&vb, sizeof(vb)); if (rv < 0) { log_error("S %s R %s lock_san get_lvb error %d", ls->name, r->name, rv); memset(rds->vb, 0, sizeof(struct val_blk)); memset(vb_out, 0, sizeof(struct val_blk)); goto out; } /* * 'vb' contains disk endian values, not host endian. * It is copied directly to rrs->vb which is also kept * in disk endian form. * vb_out is returned to the caller in host endian form. 
*/ memcpy(rds->vb, &vb, sizeof(vb)); vb_out->version = le16_to_cpu(vb.version); vb_out->flags = le16_to_cpu(vb.flags); vb_out->r_version = le32_to_cpu(vb.r_version); } out: return rv; } int lm_convert_sanlock(struct lockspace *ls, struct resource *r, int ld_mode, uint32_t r_version) { struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data; struct rd_sanlock *rds = (struct rd_sanlock *)r->lm_data; struct sanlk_resource *rs = &rds->rs; struct val_blk vb; uint32_t flags = 0; int rv; log_debug("S %s R %s convert_san %s to %s", ls->name, r->name, mode_str(r->mode), mode_str(ld_mode)); if (daemon_test) goto rs_flag; if (rds->vb && r_version && (r->mode == LD_LK_EX)) { if (!rds->vb->version) { /* first time vb has been written */ rds->vb->version = cpu_to_le16(VAL_BLK_VERSION); } if (r_version) rds->vb->r_version = cpu_to_le32(r_version); memcpy(&vb, rds->vb, sizeof(vb)); log_debug("S %s R %s convert_san set r_version %u", ls->name, r->name, r_version); rv = sanlock_set_lvb(0, rs, (char *)&vb, sizeof(vb)); if (rv < 0) { log_error("S %s R %s convert_san set_lvb error %d", ls->name, r->name, rv); } } rs_flag: if (ld_mode == LD_LK_SH) rs->flags |= SANLK_RES_SHARED; else rs->flags &= ~SANLK_RES_SHARED; if (daemon_test) return 0; rv = sanlock_convert(lms->sock, -1, flags, rs); if (rv == -EAGAIN) { /* FIXME: When could this happen? Should something different be done? */ log_error("S %s R %s convert_san EAGAIN", ls->name, r->name); return -EAGAIN; } if (rv < 0) { log_error("S %s R %s convert_san convert error %d", ls->name, r->name, rv); } return rv; } static int release_rename(struct lockspace *ls, struct resource *r) { struct rd_sanlock rd1; struct rd_sanlock rd2; struct sanlk_resource *res1; struct sanlk_resource *res2; struct sanlk_resource **res_args; struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data; struct rd_sanlock *rds = (struct rd_sanlock *)r->lm_data; int rv; log_debug("S %s R %s release rename", ls->name, r->name); res_args = malloc(2 * sizeof(struct sanlk_resource *)); if (!res_args) return -ENOMEM; memcpy(&rd1, rds, sizeof(struct rd_sanlock)); memcpy(&rd2, rds, sizeof(struct rd_sanlock)); res1 = (struct sanlk_resource *)&rd1; res2 = (struct sanlk_resource *)&rd2; strcpy(res2->name, "invalid_removed"); res_args[0] = res1; res_args[1] = res2; rv = sanlock_release(lms->sock, -1, SANLK_REL_RENAME, 2, res_args); if (rv < 0) { log_error("S %s R %s unlock_san release rename error %d", ls->name, r->name, rv); } free(res_args); return rv; } /* * rds->vb is stored in le * * r_version is r->version * * for GL locks lvmlockd just increments this value * each time the global lock is released from ex. * * for VG locks it is the seqno from the vg metadata. 
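 *
 * Hedged example of how the value is used (the consumer lives in the
 * lvmlockd core and command code, not in this file): a host that
 * re-acquires the VG lock and finds a larger r_version in the lvb than
 * the metadata seqno it has cached can conclude that another host
 * changed the VG while the lock was free, and that its cached copy
 * needs to be refreshed.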
*/ int lm_unlock_sanlock(struct lockspace *ls, struct resource *r, uint32_t r_version, uint32_t lmu_flags) { struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data; struct rd_sanlock *rds = (struct rd_sanlock *)r->lm_data; struct sanlk_resource *rs = &rds->rs; struct val_blk vb; int rv; log_debug("S %s R %s unlock_san %s r_version %u flags %x", ls->name, r->name, mode_str(r->mode), r_version, lmu_flags); if (daemon_test) { if (rds->vb && r_version && (r->mode == LD_LK_EX)) { if (!rds->vb->version) rds->vb->version = cpu_to_le16(VAL_BLK_VERSION); if (r_version) rds->vb->r_version = cpu_to_le32(r_version); } return 0; } if (rds->vb && r_version && (r->mode == LD_LK_EX)) { if (!rds->vb->version) { /* first time vb has been written */ rds->vb->version = cpu_to_le16(VAL_BLK_VERSION); } if (r_version) rds->vb->r_version = cpu_to_le32(r_version); memcpy(&vb, rds->vb, sizeof(vb)); log_debug("S %s R %s unlock_san set r_version %u", ls->name, r->name, r_version); rv = sanlock_set_lvb(0, rs, (char *)&vb, sizeof(vb)); if (rv < 0) { log_error("S %s R %s unlock_san set_lvb error %d", ls->name, r->name, rv); } } /* * For vgremove (FREE_VG) we unlock-rename the vg and gl locks * so they cannot be reacquired. */ if ((lmu_flags & LMUF_FREE_VG) && (r->type == LD_RT_GL || r->type == LD_RT_VG)) { return release_rename(ls, r); } rv = sanlock_release(lms->sock, -1, 0, 1, &rs); if (rv < 0) log_error("S %s R %s unlock_san release error %d", ls->name, r->name, rv); if (rv == -EIO) rv = -ELOCKIO; return rv; } int lm_hosts_sanlock(struct lockspace *ls, int notify) { struct sanlk_host *hss = NULL; struct sanlk_host *hs; uint32_t state; int hss_count = 0; int found_self = 0; int found_others = 0; int i, rv; if (daemon_test) return 0; rv = sanlock_get_hosts(ls->name, 0, &hss, &hss_count, 0); if (rv < 0) { log_error("S %s hosts_san get_hosts error %d", ls->name, rv); return 0; } if (!hss || !hss_count) { log_error("S %s hosts_san zero hosts", ls->name); return 0; } hs = hss; for (i = 0; i < hss_count; i++) { log_debug("S %s hosts_san host_id %llu gen %llu flags %x", ls->name, (unsigned long long)hs->host_id, (unsigned long long)hs->generation, hs->flags); if (hs->host_id == ls->host_id) { found_self = 1; hs++; continue; } state = hs->flags & SANLK_HOST_MASK; if (state == SANLK_HOST_LIVE) found_others++; hs++; } free(hss); if (found_others && notify) { /* * We could use the sanlock event mechanism to notify lvmlockd * on other hosts to stop this VG. lvmlockd would need to * register for and listen for sanlock events in the main loop. * The events are slow to propagate. We'd need to retry for a * while before all the hosts see the event and stop the VG. * sanlock_set_event(ls->name, &he, SANLK_SETEV_ALL_HOSTS); * * Wait to try this until there appears to be real value/interest * in doing it. 
*/ } if (!found_self) { log_error("S %s hosts_san self not found others %d", ls->name, found_others); return 0; } return found_others; } int lm_get_lockspaces_sanlock(struct list_head *ls_rejoin) { struct sanlk_lockspace *ss_all = NULL; struct sanlk_lockspace *ss; struct lockspace *ls; int ss_count = 0; int i, rv; rv = sanlock_get_lockspaces(&ss_all, &ss_count, 0); if (rv < 0) return rv; if (!ss_all || !ss_count) return 0; ss = ss_all; for (i = 0; i < ss_count; i++) { if (strncmp(ss->name, LVM_LS_PREFIX, strlen(LVM_LS_PREFIX))) continue; if (!(ls = alloc_lockspace())) return -ENOMEM; ls->lm_type = LD_LM_SANLOCK; ls->host_id = ss->host_id; strncpy(ls->name, ss->name, MAX_NAME); strncpy(ls->vg_name, ss->name + strlen(LVM_LS_PREFIX), MAX_NAME); list_add_tail(&ls->list, ls_rejoin); ss++; } free(ss_all); return 0; } int lm_is_running_sanlock(void) { uint32_t daemon_version; uint32_t daemon_proto; int rv; if (daemon_test) return gl_use_sanlock; rv = sanlock_version(0, &daemon_version, &daemon_proto); if (rv < 0) return 0; return 1; } LVM2.2.02.176/daemons/lvmlockd/lvmlockd-client.h0000644000000000000120000000251713176752421020032 0ustar rootwheel/* * Copyright (C) 2014-2015 Red Hat, Inc. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. */ #ifndef _LVM_LVMLOCKD_CLIENT_H #define _LVM_LVMLOCKD_CLIENT_H #include "daemon-client.h" #define LVMLOCKD_SOCKET DEFAULT_RUN_DIR "/lvmlockd.socket" /* Wrappers to open/close connection */ static inline daemon_handle lvmlockd_open(const char *sock) { daemon_info lvmlockd_info = { .path = "lvmlockd", .socket = sock ?: LVMLOCKD_SOCKET, .protocol = "lvmlockd", .protocol_version = 1, .autostart = 0 }; return daemon_open(lvmlockd_info); } static inline void lvmlockd_close(daemon_handle h) { return daemon_close(h); } /* * Errors returned as the lvmlockd result value. */ #define ENOLS 210 /* lockspace not found */ #define ESTARTING 211 /* lockspace is starting */ #define EARGS 212 #define EHOSTID 213 #define EMANAGER 214 #define EPREPARE 215 #define ELOCKD 216 #define EVGKILLED 217 /* sanlock lost access to leases and VG is killed. */ #define ELOCKIO 218 /* sanlock io errors during lock op, may be transient. */ #define EREMOVED 219 #define EDEVOPEN 220 /* sanlock failed to open lvmlock LV */ #endif /* _LVM_LVMLOCKD_CLIENT_H */ LVM2.2.02.176/daemons/lvmlockd/lvmlockctl.c0000644000000000000120000004061213176752421017106 0ustar rootwheel/* * Copyright (C) 2014-2015 Red Hat, Inc. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. */ #include "tool.h" #include "lvmlockd-client.h" #include #include #include #include #include #include #include #include static int quit = 0; static int info = 0; static int dump = 0; static int wait_opt = 0; static int force_opt = 0; static int kill_vg = 0; static int drop_vg = 0; static int gl_enable = 0; static int gl_disable = 0; static int stop_lockspaces = 0; static char *arg_vg_name = NULL; #define DUMP_SOCKET_NAME "lvmlockd-dump.sock" #define DUMP_BUF_SIZE (1024 * 1024) static char dump_buf[DUMP_BUF_SIZE+1]; static int dump_len; static struct sockaddr_un dump_addr; static socklen_t dump_addrlen; daemon_handle _lvmlockd; #define log_error(fmt, args...) 
\ do { \ printf(fmt "\n", ##args); \ } while (0) #define MAX_LINE 512 /* copied from lvmlockd-internal.h */ #define MAX_NAME 64 #define MAX_ARGS 64 /* * lvmlockd dumps the client info before the lockspaces, * so we can look up client info when printing lockspace info. */ #define MAX_CLIENTS 100 struct client_info { uint32_t client_id; int pid; char name[MAX_NAME+1]; }; static struct client_info clients[MAX_CLIENTS]; static int num_clients; static void save_client_info(char *line) { uint32_t pid = 0; int fd = 0; int pi = 0; uint32_t client_id = 0; char name[MAX_NAME+1] = { 0 }; (void) sscanf(line, "info=client pid=%u fd=%d pi=%d id=%u name=%s", &pid, &fd, &pi, &client_id, name); clients[num_clients].client_id = client_id; clients[num_clients].pid = pid; strcpy(clients[num_clients].name, name); num_clients++; } static void find_client_info(uint32_t client_id, uint32_t *pid, char *cl_name) { int i; for (i = 0; i < num_clients; i++) { if (clients[i].client_id == client_id) { *pid = clients[i].pid; strcpy(cl_name, clients[i].name); return; } } } static int first_ls = 1; static void format_info_ls(char *line) { char ls_name[MAX_NAME+1] = { 0 }; char vg_name[MAX_NAME+1] = { 0 }; char vg_uuid[MAX_NAME+1] = { 0 }; char vg_sysid[MAX_NAME+1] = { 0 }; char lock_args[MAX_ARGS+1] = { 0 }; char lock_type[MAX_NAME+1] = { 0 }; (void) sscanf(line, "info=ls ls_name=%s vg_name=%s vg_uuid=%s vg_sysid=%s vg_args=%s lm_type=%s", ls_name, vg_name, vg_uuid, vg_sysid, lock_args, lock_type); if (!first_ls) printf("\n"); first_ls = 0; printf("VG %s lock_type=%s %s\n", vg_name, lock_type, vg_uuid); printf("LS %s %s\n", lock_type, ls_name); } static void format_info_ls_action(char *line) { uint32_t client_id = 0; char flags[MAX_NAME+1] = { 0 }; char version[MAX_NAME+1] = { 0 }; char op[MAX_NAME+1] = { 0 }; uint32_t pid = 0; char cl_name[MAX_NAME+1] = { 0 }; (void) sscanf(line, "info=ls_action client_id=%u %s %s op=%s", &client_id, flags, version, op); find_client_info(client_id, &pid, cl_name); printf("OP %s pid %u (%s)\n", op, pid, cl_name); } static void format_info_r(char *line, char *r_name_out, char *r_type_out) { char r_name[MAX_NAME+1] = { 0 }; char r_type[4] = { 0 }; char mode[4] = { 0 }; char sh_count[MAX_NAME+1] = { 0 }; uint32_t ver = 0; (void) sscanf(line, "info=r name=%s type=%s mode=%s %s version=%u", r_name, r_type, mode, sh_count, &ver); strcpy(r_name_out, r_name); strcpy(r_type_out, r_type); /* when mode is not un, wait and print each lk line */ if (strcmp(mode, "un")) return; /* when mode is un, there will be no lk lines, so print now */ if (!strcmp(r_type, "gl")) { printf("LK GL un ver %u\n", ver); } else if (!strcmp(r_type, "vg")) { printf("LK VG un ver %u\n", ver); } else if (!strcmp(r_type, "lv")) { printf("LK LV un %s\n", r_name); } } static void format_info_lk(char *line, char *r_name, char *r_type) { char mode[4] = { 0 }; uint32_t ver = 0; char flags[MAX_NAME+1] = { 0 }; uint32_t client_id = 0; uint32_t pid = 0; char cl_name[MAX_NAME+1] = { 0 }; if (!r_name[0] || !r_type[0]) { printf("format_info_lk error r_name %s r_type %s\n", r_name, r_type); printf("%s\n", line); return; } (void) sscanf(line, "info=lk mode=%s version=%u %s client_id=%u", mode, &ver, flags, &client_id); find_client_info(client_id, &pid, cl_name); if (!strcmp(r_type, "gl")) { printf("LK GL %s ver %u pid %u (%s)\n", mode, ver, pid, cl_name); } else if (!strcmp(r_type, "vg")) { printf("LK VG %s ver %u pid %u (%s)\n", mode, ver, pid, cl_name); } else if (!strcmp(r_type, "lv")) { printf("LK LV %s %s\n", mode, r_name); } } static 
void format_info_r_action(char *line, char *r_name, char *r_type) { uint32_t client_id = 0; char flags[MAX_NAME+1] = { 0 }; char version[MAX_NAME+1] = { 0 }; char op[MAX_NAME+1] = { 0 }; char rt[4] = { 0 }; char mode[4] = { 0 }; char lm[MAX_NAME+1] = { 0 }; char result[MAX_NAME+1] = { 0 }; char lm_rv[MAX_NAME+1] = { 0 }; uint32_t pid = 0; char cl_name[MAX_NAME+1] = { 0 }; if (!r_name[0] || !r_type[0]) { printf("format_info_r_action error r_name %s r_type %s\n", r_name, r_type); printf("%s\n", line); return; } (void) sscanf(line, "info=r_action client_id=%u %s %s op=%s rt=%s mode=%s %s %s %s", &client_id, flags, version, op, rt, mode, lm, result, lm_rv); find_client_info(client_id, &pid, cl_name); if (strcmp(op, "lock")) { printf("OP %s pid %u (%s)\n", op, pid, cl_name); return; } if (!strcmp(r_type, "gl")) { printf("LW GL %s ver %u pid %u (%s)\n", mode, 0, pid, cl_name); } else if (!strcmp(r_type, "vg")) { printf("LW VG %s ver %u pid %u (%s)\n", mode, 0, pid, cl_name); } else if (!strcmp(r_type, "lv")) { printf("LW LV %s %s\n", mode, r_name); } } static void format_info_line(char *line, char *r_name, char *r_type) { if (!strncmp(line, "info=structs ", strlen("info=structs "))) { /* only print this in the raw info dump */ } else if (!strncmp(line, "info=client ", strlen("info=client "))) { save_client_info(line); } else if (!strncmp(line, "info=ls ", strlen("info=ls "))) { format_info_ls(line); } else if (!strncmp(line, "info=ls_action ", strlen("info=ls_action "))) { format_info_ls_action(line); } else if (!strncmp(line, "info=r ", strlen("info=r "))) { /* * r_name/r_type are reset when a new resource is found. * They are reused for the lock and action lines that * follow a resource line. */ memset(r_name, 0, MAX_NAME+1); memset(r_type, 0, MAX_NAME+1); format_info_r(line, r_name, r_type); } else if (!strncmp(line, "info=lk ", strlen("info=lk "))) { /* will use info from previous r */ format_info_lk(line, r_name, r_type); } else if (!strncmp(line, "info=r_action ", strlen("info=r_action "))) { /* will use info from previous r */ format_info_r_action(line, r_name, r_type); } else { printf("UN %s\n", line); } } static void format_info(void) { char line[MAX_LINE]; char r_name[MAX_NAME+1]; char r_type[MAX_NAME+1]; int i, j; j = 0; memset(line, 0, sizeof(line)); for (i = 0; i < dump_len; i++) { line[j++] = dump_buf[i]; if ((line[j-1] == '\n') || (line[j-1] == '\0')) { format_info_line(line, r_name, r_type); j = 0; memset(line, 0, sizeof(line)); } } } static daemon_reply _lvmlockd_send(const char *req_name, ...) 
{ va_list ap; daemon_reply repl; daemon_request req; req = daemon_request_make(req_name); va_start(ap, req_name); daemon_request_extend_v(req, ap); va_end(ap); repl = daemon_send(_lvmlockd, req); daemon_request_destroy(req); return repl; } /* See the same in lib/locking/lvmlockd.c */ #define NO_LOCKD_RESULT -1000 static int _lvmlockd_result(daemon_reply reply, int *result) { int reply_result; if (reply.error) { log_error("lvmlockd_result reply error %d", reply.error); return 0; } if (strcmp(daemon_reply_str(reply, "response", ""), "OK")) { log_error("lvmlockd_result bad response"); return 0; } reply_result = daemon_reply_int(reply, "op_result", NO_LOCKD_RESULT); if (reply_result == -1000) { log_error("lvmlockd_result no op_result"); return 0; } *result = reply_result; return 1; } static int do_quit(void) { daemon_reply reply; int rv = 0; reply = daemon_send_simple(_lvmlockd, "quit", NULL); if (reply.error) { log_error("reply error %d", reply.error); rv = reply.error; } daemon_reply_destroy(reply); return rv; } static int setup_dump_socket(void) { int s, rv; s = socket(AF_LOCAL, SOCK_DGRAM, 0); if (s < 0) return s; memset(&dump_addr, 0, sizeof(dump_addr)); dump_addr.sun_family = AF_LOCAL; strcpy(&dump_addr.sun_path[1], DUMP_SOCKET_NAME); dump_addrlen = sizeof(sa_family_t) + strlen(dump_addr.sun_path+1) + 1; rv = bind(s, (struct sockaddr *) &dump_addr, dump_addrlen); if (rv < 0) { rv = -errno; if (close(s)) log_error("failed to close dump socket"); return rv; } return s; } static int do_dump(const char *req_name) { daemon_reply reply; int result; int fd, rv = 0; int count = 0; fd = setup_dump_socket(); if (fd < 0) { log_error("socket error %d", fd); return fd; } reply = daemon_send_simple(_lvmlockd, req_name, NULL); if (reply.error) { log_error("reply error %d", reply.error); rv = reply.error; goto out; } result = daemon_reply_int(reply, "result", 0); dump_len = daemon_reply_int(reply, "dump_len", 0); daemon_reply_destroy(reply); if (result < 0) { rv = result; log_error("result %d", result); } if (!dump_len) goto out; memset(dump_buf, 0, sizeof(dump_buf)); retry: rv = recvfrom(fd, dump_buf + count, dump_len - count, MSG_WAITALL, (struct sockaddr *)&dump_addr, &dump_addrlen); if (rv < 0) { log_error("recvfrom error %d %d", rv, errno); rv = -errno; goto out; } count += rv; if (count < dump_len) goto retry; rv = 0; if ((info && dump) || !strcmp(req_name, "dump")) printf("%s\n", dump_buf); else format_info(); out: if (close(fd)) log_error("failed to close dump socket %d", fd); return rv; } static int do_able(const char *req_name) { daemon_reply reply; int result; int rv; reply = _lvmlockd_send(req_name, "cmd = %s", "lvmlockctl", "pid = " FMTd64, (int64_t) getpid(), "vg_name = %s", arg_vg_name, NULL); if (!_lvmlockd_result(reply, &result)) { log_error("lvmlockd result %d", result); rv = result; } else { rv = 0; } daemon_reply_destroy(reply); return rv; } static int do_stop_lockspaces(void) { daemon_reply reply; char opts[32]; int result; int rv; memset(opts, 0, sizeof(opts)); if (wait_opt) strcat(opts, "wait "); if (force_opt) strcat(opts, "force "); reply = _lvmlockd_send("stop_all", "cmd = %s", "lvmlockctl", "pid = " FMTd64, (int64_t) getpid(), "opts = %s", opts[0] ? 
opts : "none", NULL); if (!_lvmlockd_result(reply, &result)) { log_error("lvmlockd result %d", result); rv = result; } else { rv = 0; } daemon_reply_destroy(reply); return rv; } static int do_kill(void) { daemon_reply reply; int result; int rv; syslog(LOG_EMERG, "Lost access to sanlock lease storage in VG %s.", arg_vg_name); /* These two lines explain the manual alternative to the FIXME below. */ syslog(LOG_EMERG, "Immediately deactivate LVs in VG %s.", arg_vg_name); syslog(LOG_EMERG, "Once VG is unused, run lvmlockctl --drop %s.", arg_vg_name); /* * It may not be strictly necessary to notify lvmlockd of the kill, but * lvmlockd can use this information to avoid attempting any new lock * requests in the VG (which would fail anyway), and can return an * error indicating that the VG has been killed. */ reply = _lvmlockd_send("kill_vg", "cmd = %s", "lvmlockctl", "pid = " FMTd64, (int64_t) getpid(), "vg_name = %s", arg_vg_name, NULL); if (!_lvmlockd_result(reply, &result)) { log_error("lvmlockd result %d", result); rv = result; } else { rv = 0; } daemon_reply_destroy(reply); /* * FIXME: here is where we should implement a strong form of * blkdeactivate, and if it completes successfully, automatically call * do_drop() afterward. (The drop step may not always be necessary * if the lvm commands run while shutting things down release all the * leases.) * * run_strong_blkdeactivate(); * do_drop(); */ return rv; } static int do_drop(void) { daemon_reply reply; int result; int rv; syslog(LOG_WARNING, "Dropping locks for VG %s.", arg_vg_name); /* * Check for misuse by looking for any active LVs in the VG * and refusing this operation if found? One possible way * to kill LVs (e.g. if fs cannot be unmounted) is to suspend * them, or replace them with the error target. In that * case the LV will still appear to be active, but it is * safe to release the lock. 
*/ reply = _lvmlockd_send("drop_vg", "cmd = %s", "lvmlockctl", "pid = " FMTd64, (int64_t) getpid(), "vg_name = %s", arg_vg_name, NULL); if (!_lvmlockd_result(reply, &result)) { log_error("lvmlockd result %d", result); rv = result; } else { rv = 0; } daemon_reply_destroy(reply); return rv; } static void print_usage(void) { printf("lvmlockctl options\n"); printf("Options:\n"); printf("--help | -h\n"); printf(" Show this help information.\n"); printf("--quit | -q\n"); printf(" Tell lvmlockd to quit.\n"); printf("--info | -i\n"); printf(" Print lock state information from lvmlockd.\n"); printf("--dump | -d\n"); printf(" Print log buffer from lvmlockd.\n"); printf("--wait | -w 0|1\n"); printf(" Wait option for other commands.\n"); printf("--force | -f 0|1>\n"); printf(" Force option for other commands.\n"); printf("--kill | -k \n"); printf(" Kill access to the VG when sanlock cannot renew lease.\n"); printf("--drop | -r \n"); printf(" Clear locks for the VG when it is unused after kill (-k).\n"); printf("--gl-enable | -E \n"); printf(" Tell lvmlockd to enable the global lock in a sanlock VG.\n"); printf("--gl-disable | -D \n"); printf(" Tell lvmlockd to disable the global lock in a sanlock VG.\n"); printf("--stop-lockspaces | -S\n"); printf(" Stop all lockspaces.\n"); } static int read_options(int argc, char *argv[]) { int option_index = 0; int c; static struct option long_options[] = { {"help", no_argument, 0, 'h' }, {"quit", no_argument, 0, 'q' }, {"info", no_argument, 0, 'i' }, {"dump", no_argument, 0, 'd' }, {"wait", required_argument, 0, 'w' }, {"force", required_argument, 0, 'f' }, {"kill", required_argument, 0, 'k' }, {"drop", required_argument, 0, 'r' }, {"gl-enable", required_argument, 0, 'E' }, {"gl-disable", required_argument, 0, 'D' }, {"stop-lockspaces", no_argument, 0, 'S' }, {0, 0, 0, 0 } }; if (argc == 1) { print_usage(); exit(0); } while (1) { c = getopt_long(argc, argv, "hqidE:D:w:k:r:S", long_options, &option_index); if (c == -1) break; switch (c) { case 'h': /* --help */ print_usage(); exit(0); case 'q': /* --quit */ quit = 1; break; case 'i': /* --info */ info = 1; break; case 'd': /* --dump */ dump = 1; break; case 'w': wait_opt = atoi(optarg); break; case 'k': kill_vg = 1; arg_vg_name = strdup(optarg); break; case 'r': drop_vg = 1; arg_vg_name = strdup(optarg); break; case 'E': gl_enable = 1; arg_vg_name = strdup(optarg); break; case 'D': gl_disable = 1; arg_vg_name = strdup(optarg); break; case 'S': stop_lockspaces = 1; break; default: print_usage(); exit(1); } } return 0; } int main(int argc, char **argv) { int rv = 0; rv = read_options(argc, argv); if (rv < 0) return rv; _lvmlockd = lvmlockd_open(NULL); if (_lvmlockd.socket_fd < 0 || _lvmlockd.error) { log_error("Cannot connect to lvmlockd."); return -1; } if (quit) { rv = do_quit(); goto out; } if (info) { rv = do_dump("info"); goto out; } if (dump) { rv = do_dump("dump"); goto out; } if (kill_vg) { rv = do_kill(); goto out; } if (drop_vg) { rv = do_drop(); goto out; } if (gl_enable) { syslog(LOG_INFO, "Enabling global lock in VG %s.", arg_vg_name); rv = do_able("enable_gl"); goto out; } if (gl_disable) { syslog(LOG_INFO, "Disabling global lock in VG %s.", arg_vg_name); rv = do_able("disable_gl"); goto out; } if (stop_lockspaces) { rv = do_stop_lockspaces(); goto out; } out: lvmlockd_close(_lvmlockd); return rv; } LVM2.2.02.176/daemons/lvmlockd/lvmlockd-core.c0000644000000000000120000045247213176752421017510 0ustar rootwheel/* * Copyright (C) 2014-2015 Red Hat, Inc. * * This file is part of LVM2. 
* * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. */ #define _XOPEN_SOURCE 500 /* pthread */ #define _ISOC99_SOURCE #define _REENTRANT #include "tool.h" #include "daemon-io.h" #include "daemon-server.h" #include "lvm-version.h" #include "lvmetad-client.h" #include "lvmlockd-client.h" #include "dm-ioctl.h" /* for DM_UUID_LEN */ /* #include */ #include #include #include #include #include #include #include #include #include #include #include #include #include #define EXTERN #include "lvmlockd-internal.h" /* * Basic operation of lvmlockd * * lvmlockd main process runs main_loop() which uses poll(). * poll listens for new connections from lvm commands and for * messages from existing connected lvm commands. * * lvm command starts and connects to lvmlockd. * * lvmlockd receives a connection request from command and adds a * 'struct client' to keep track of the connection to the command. * The client's fd is added to the set of fd's in poll(). * * lvm command sends a lock request to lvmlockd. The lock request * can be for the global lock, a vg lock, or an lv lock. * * lvmlockd main_loop/poll sees a message from an existing client. * It sets client.recv = 1, then wakes up client_thread_main. * * client_thread_main iterates through client structs (cl), looking * for any that need processing, finds the one with cl->recv set, * and calls client_recv_action(cl). * * client_recv_action(cl) reads the message/request from the client, * allocates a new 'struct action' (act) to represent the request, * sets the act with what is found in the request, then looks at * the specific operation in act->op (LD_OP_FOO) to decide what to * do with the action: * * . If the action is to start a lockspace, create a new thread * to manage that lockspace: add_lockspace(act). * * . If the action is a lock request, pass the act to the thread * that is managing that lockspace: add_lock_action(act). * * . Other misc actions are are passed to the worker_thread: * add_work_action(act). * * Onec the client_thread has passed the action off to another * thread to process, it goes back to waiting for more client * handling work to do. * * The thread that was given the action by the client_thread * now processes that action according to the operation, act->op. * This is either a lockspace_thread (for lock ops or ops that * add/rem a lockspace), or the worker_thread. See below for * how these ops are processed by these threads. When the * given thread is done processing the action, the result is * set in act->result, and the act struct for the completed action * is passed back to the client_thread (client_results list). * * The client_thread takes completed actions (from client_results * list), and sends the result back to the client that sent the * request represented by the action. The act struct is then freed. * * This completes the cycle of work between lvm commands (clients) * and lvmlockd. 
In summary: * * - main process polls for new client connections and new requests * from lvm commands * - client_thread reads requests from clients * - client_thread creates an action struct for each request * - client_thread passes the act to another thread for processing * - other threads pass completed act structs back to client_thread * - client_thread sends the act result back to the client and frees the act * * * Lockspace threads: * Each lockd VG has its own lockspace that contains locks for that VG. * Each 'struct lockspace' is managed by a separate lockspace_thread. * When the lockspace_thread is first created, the first thing it does * is join the lockspace in the lock manager. This can take a long time. * If the join fails, the thread exits. After the join, the thread * enters a loop waiting for lock actions to perform in the lockspace. * * The request to remove/leave a lockspace causes a flag to be set in * the lockspace struct. When the lockspace_thread sees this flag * set, it leaves the lockspace, and exits. * * When the client_thread passes a new action to a lockspace_thread, * i.e. a new lock request, the lockspace_thread identifies which resource * is being locked (GL, VG, LV), and gets the 'struct resource' (r) for it. * r->type will be LD_RT_GL, LD_RT_VG, or LD_RT_LV. r->name is the * resource name, and is fixed for GL and VG resources, but is based on * the LV name for LV resources. The act is added to the resource's * list of actions: r->actions, i.e. outstanding lock requests on the * resource. * * The lockspace thread then iterates through each resource in the * lockspace, processing any outstanding actions on each: res_process(ls, r). * * res_process() compares the outstanding actions/requests in r->actions * against any existing locks on the resource in r->locks. If the * action is blocked by existing locks, it's left on r->actions. If not, * the action/request is passed to the lock manager. If the result from * the lock manager is success, a new 'struct lock' is created for the * action and saved on r->locks. The result is set in act->result and * the act is passed back to the client_thread to be returned to the client. */ static const char *lvmlockd_protocol = "lvmlockd"; static const int lvmlockd_protocol_version = 1; static int daemon_quit; static int adopt_opt; static daemon_handle lvmetad_handle; static pthread_mutex_t lvmetad_mutex; static int lvmetad_connected; /* * We use a separate socket for dumping daemon info. * This will not interfere with normal operations, and allows * free-form debug data to be dumped instead of the libdaemon * protocol that wants all data in the cft format. * 1MB should fit all the info we need to dump. */ #define DUMP_SOCKET_NAME "lvmlockd-dump.sock" #define DUMP_BUF_SIZE (1024 * 1024) static char dump_buf[DUMP_BUF_SIZE]; static struct sockaddr_un dump_addr; static socklen_t dump_addrlen; /* * Main program polls client connections, adds new clients, * adds work for client thread. * * pollfd_mutex is used for adding vs removing entries, * and for resume vs realloc. */ #define POLL_FD_UNUSED -1 /* slot if free */ #define POLL_FD_IGNORE -2 /* slot is used but ignore in poll */ #define ADD_POLL_SIZE 16 /* increment slots by this amount */ static pthread_mutex_t pollfd_mutex; static struct pollfd *pollfd; static int pollfd_size; static int pollfd_maxi; static int listen_pi; static int listen_fd; static int restart_pi; static int restart_fds[2]; /* * Each lockspace has its own thread to do locking. 
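 *
 * For example, a lockd VG named "vg0" (a hypothetical name) is served by
 * a lockspace named "lvm_vg0" (LVM_LS_PREFIX + the vg name, built by
 * vg_ls_name() below), managed by its own lockspace_thread.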
* The lockspace thread makes synchronous lock requests to dlm/sanlock. * Every vg with a lockd type, i.e. "dlm", "sanlock", should be on this list. */ static pthread_mutex_t lockspaces_mutex; static struct list_head lockspaces; /* * Client thread reads client requests and writes client results. */ static pthread_t client_thread; static pthread_mutex_t client_mutex; static pthread_cond_t client_cond; static struct list_head client_list; /* connected clients */ static struct list_head client_results; /* actions to send back to clients */ static uint32_t client_ids; /* 0 and INTERNAL_CLIENT_ID are skipped */ static int client_stop; /* stop the thread */ static int client_work; /* a client on client_list has work to do */ #define INTERNAL_CLIENT_ID 0xFFFFFFFF /* special client_id for internal actions */ static struct list_head adopt_results; /* special start actions from adopt_locks() */ /* * Worker thread performs misc non-locking actions, e.g. init/free. */ static pthread_t worker_thread; static pthread_mutex_t worker_mutex; static pthread_cond_t worker_cond; static struct list_head worker_list; /* actions for worker_thread */ static int worker_stop; /* stop the thread */ static int worker_wake; /* wake the thread without adding work */ /* * The content of every log_foo() statement is saved in the * circular buffer, which can be dumped to a client and printed. */ #define LOG_LINE_SIZE 256 #define LOG_DUMP_SIZE DUMP_BUF_SIZE #define LOG_SYSLOG_PRIO LOG_WARNING static char log_dump[LOG_DUMP_SIZE]; static unsigned int log_point; static unsigned int log_wrap; static pthread_mutex_t log_mutex; static int syslog_priority = LOG_SYSLOG_PRIO; /* * Structure pools to avoid repeated malloc/free. */ #define MAX_UNUSED_ACTION 64 #define MAX_UNUSED_CLIENT 64 #define MAX_UNUSED_RESOURCE 64 #define MAX_UNUSED_LOCK 64 static pthread_mutex_t unused_struct_mutex; static struct list_head unused_action; static struct list_head unused_client; static struct list_head unused_resource; static struct list_head unused_lock; static int unused_action_count; static int unused_client_count; static int unused_resource_count; static int unused_lock_count; static int resource_lm_data_size; /* max size of lm_data from sanlock|dlm */ static int alloc_new_structs; /* used for initializing in setup_structs */ #define DO_STOP 1 #define NO_STOP 0 #define DO_FREE 1 #define NO_FREE 0 #define DO_FORCE 1 #define NO_FORCE 0 static int add_lock_action(struct action *act); static int str_to_lm(const char *str); static int setup_dump_socket(void); static void send_dump_buf(int fd, int dump_len); static int dump_info(int *dump_len); static int dump_log(int *dump_len); static int _syslog_name_to_num(const char *name) { if (!strcmp(name, "emerg")) return LOG_EMERG; if (!strcmp(name, "alert")) return LOG_ALERT; if (!strcmp(name, "crit")) return LOG_CRIT; if (!strcmp(name, "err") || !strcmp(name, "error")) return LOG_ERR; if (!strcmp(name, "warning") || !strcmp(name, "warn")) return LOG_WARNING; if (!strcmp(name, "notice")) return LOG_NOTICE; if (!strcmp(name, "info")) return LOG_INFO; if (!strcmp(name, "debug")) return LOG_DEBUG; return LOG_WARNING; } static const char *_syslog_num_to_name(int num) { switch (num) { case LOG_EMERG: return "emerg"; case LOG_ALERT: return "alert"; case LOG_CRIT: return "crit"; case LOG_ERR: return "err"; case LOG_WARNING: return "warning"; case LOG_NOTICE: return "notice"; case LOG_INFO: return "info"; case LOG_DEBUG: return "debug"; } return "unknown"; } static uint64_t monotime(void) { struct timespec ts; if 
(clock_gettime(CLOCK_MONOTONIC, &ts)) { log_error("clock_gettime failed to get timestamp %s.", strerror(errno)); ts.tv_sec = 0; } return ts.tv_sec; } static void log_save_line(int len, char *line, char *log_buf, unsigned int *point, unsigned int *wrap) { unsigned int p = *point; unsigned int w = *wrap; int i; if (len < (int) (LOG_DUMP_SIZE - p)) { memcpy(log_buf + p, line, len); p += len; if (p == LOG_DUMP_SIZE) { p = 0; w = 1; } goto out; } for (i = 0; i < len; i++) { log_buf[p++] = line[i]; if (p == LOG_DUMP_SIZE) { p = 0; w = 1; } } out: *point = p; *wrap = w; } void log_level(int level, const char *fmt, ...) { char line[LOG_LINE_SIZE]; va_list ap; int len = LOG_LINE_SIZE - 1; int ret, pos = 0; memset(line, 0, sizeof(line)); ret = snprintf(line, len, "%llu ", (unsigned long long)time(NULL)); pos += ret; va_start(ap, fmt); ret = vsnprintf(line + pos, len - pos, fmt, ap); va_end(ap); if (ret >= len - pos) pos = len - 1; else pos += ret; line[pos++] = '\n'; line[pos++] = '\0'; pthread_mutex_lock(&log_mutex); log_save_line(pos - 1, line, log_dump, &log_point, &log_wrap); pthread_mutex_unlock(&log_mutex); if (level <= syslog_priority) syslog(level, "%s", line); if (daemon_debug) fprintf(stderr, "%s", line); } static int dump_log(int *dump_len) { int tail_len; pthread_mutex_lock(&log_mutex); if (!log_wrap && !log_point) { *dump_len = 0; } else if (log_wrap) { tail_len = LOG_DUMP_SIZE - log_point; memcpy(dump_buf, log_dump+log_point, tail_len); if (log_point) memcpy(dump_buf+tail_len, log_dump, log_point); *dump_len = LOG_DUMP_SIZE; } else { memcpy(dump_buf, log_dump, log_point-1); *dump_len = log_point-1; } pthread_mutex_unlock(&log_mutex); return 0; } struct lockspace *alloc_lockspace(void) { struct lockspace *ls; if (!(ls = malloc(sizeof(struct lockspace)))) { log_error("out of memory for lockspace"); return NULL; } memset(ls, 0, sizeof(struct lockspace)); INIT_LIST_HEAD(&ls->actions); INIT_LIST_HEAD(&ls->resources); pthread_mutex_init(&ls->mutex, NULL); pthread_cond_init(&ls->cond, NULL); return ls; } static struct action *alloc_action(void) { struct action *act; pthread_mutex_lock(&unused_struct_mutex); if (!unused_action_count || alloc_new_structs) { act = malloc(sizeof(struct action)); } else { act = list_first_entry(&unused_action, struct action, list); list_del(&act->list); unused_action_count--; } pthread_mutex_unlock(&unused_struct_mutex); if (act) memset(act, 0, sizeof(struct action)); else log_error("out of memory for action"); return act; } static struct client *alloc_client(void) { struct client *cl; pthread_mutex_lock(&unused_struct_mutex); if (!unused_client_count || alloc_new_structs) { cl = malloc(sizeof(struct client)); } else { cl = list_first_entry(&unused_client, struct client, list); list_del(&cl->list); unused_client_count--; } pthread_mutex_unlock(&unused_struct_mutex); if (cl) memset(cl, 0, sizeof(struct client)); else log_error("out of memory for client"); return cl; } static struct resource *alloc_resource(void) { struct resource *r; pthread_mutex_lock(&unused_struct_mutex); if (!unused_resource_count || alloc_new_structs) { r = malloc(sizeof(struct resource) + resource_lm_data_size); } else { r = list_first_entry(&unused_resource, struct resource, list); list_del(&r->list); unused_resource_count--; } pthread_mutex_unlock(&unused_struct_mutex); if (r) { memset(r, 0, sizeof(struct resource) + resource_lm_data_size); INIT_LIST_HEAD(&r->locks); INIT_LIST_HEAD(&r->actions); } else { log_error("out of memory for resource"); } return r; } static struct lock 
*alloc_lock(void) { struct lock *lk; pthread_mutex_lock(&unused_struct_mutex); if (!unused_lock_count || alloc_new_structs) { lk = malloc(sizeof(struct lock)); } else { lk = list_first_entry(&unused_lock, struct lock, list); list_del(&lk->list); unused_lock_count--; } pthread_mutex_unlock(&unused_struct_mutex); if (lk) memset(lk, 0, sizeof(struct lock)); else log_error("out of memory for lock"); return lk; } static void free_action(struct action *act) { pthread_mutex_lock(&unused_struct_mutex); if (unused_action_count >= MAX_UNUSED_ACTION) { free(act); } else { list_add_tail(&act->list, &unused_action); unused_action_count++; } pthread_mutex_unlock(&unused_struct_mutex); } static void free_client(struct client *cl) { pthread_mutex_lock(&unused_struct_mutex); if (unused_client_count >= MAX_UNUSED_CLIENT) { free(cl); } else { list_add_tail(&cl->list, &unused_client); unused_client_count++; } pthread_mutex_unlock(&unused_struct_mutex); } static void free_resource(struct resource *r) { pthread_mutex_lock(&unused_struct_mutex); if (unused_resource_count >= MAX_UNUSED_RESOURCE) { free(r); } else { list_add_tail(&r->list, &unused_resource); unused_resource_count++; } pthread_mutex_unlock(&unused_struct_mutex); } static void free_lock(struct lock *lk) { pthread_mutex_lock(&unused_struct_mutex); if (unused_lock_count >= MAX_UNUSED_LOCK) { free(lk); } else { list_add_tail(&lk->list, &unused_lock); unused_lock_count++; } pthread_mutex_unlock(&unused_struct_mutex); } static int setup_structs(void) { struct action *act; struct client *cl; struct resource *r; struct lock *lk; int data_san = lm_data_size_sanlock(); int data_dlm = lm_data_size_dlm(); int i; resource_lm_data_size = data_san > data_dlm ? data_san : data_dlm; pthread_mutex_init(&unused_struct_mutex, NULL); INIT_LIST_HEAD(&unused_action); INIT_LIST_HEAD(&unused_client); INIT_LIST_HEAD(&unused_resource); INIT_LIST_HEAD(&unused_lock); /* * For setup, force the alloc_ functions to alloc new structs instead * of taking them unused. This allows alloc_struct/free_struct loop to * populate the unused lists. 
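 *
 * After this warm-up, a typical alloc_action() pops a recycled entry
 * from unused_action (list_first_entry + list_del) instead of calling
 * malloc(), and free_action() parks entries back on the list until
 * MAX_UNUSED_ACTION is reached; the same applies to clients, resources
 * and locks.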
*/ alloc_new_structs = 1; for (i = 0; i < MAX_UNUSED_ACTION/2; i++) { if (!(act = alloc_action())) goto fail; free_action(act); } for (i = 0; i < MAX_UNUSED_CLIENT/2; i++) { if (!(cl = alloc_client())) goto fail; free_client(cl); } for (i = 0; i < MAX_UNUSED_RESOURCE/2; i++) { if (!(r = alloc_resource())) goto fail; free_resource(r); } for (i = 0; i < MAX_UNUSED_LOCK/2; i++) { if (!(lk = alloc_lock())) goto fail; free_lock(lk); } alloc_new_structs = 0; return 0; fail: alloc_new_structs = 0; return -ENOMEM; } static int add_pollfd(int fd) { int i, new_size; struct pollfd *tmp_pollfd; pthread_mutex_lock(&pollfd_mutex); for (i = 0; i < pollfd_size; i++) { if (pollfd[i].fd != POLL_FD_UNUSED) continue; pollfd[i].fd = fd; pollfd[i].events = POLLIN; pollfd[i].revents = 0; if (i > pollfd_maxi) pollfd_maxi = i; pthread_mutex_unlock(&pollfd_mutex); return i; } new_size = pollfd_size + ADD_POLL_SIZE; tmp_pollfd = realloc(pollfd, new_size * sizeof(struct pollfd)); if (!tmp_pollfd) { log_error("can't alloc new size %d for pollfd", new_size); pthread_mutex_unlock(&pollfd_mutex); return -ENOMEM; } pollfd = tmp_pollfd; for (i = pollfd_size; i < new_size; i++) { pollfd[i].fd = POLL_FD_UNUSED; pollfd[i].events = 0; pollfd[i].revents = 0; } i = pollfd_size; pollfd[i].fd = fd; pollfd[i].events = POLLIN; pollfd[i].revents = 0; pollfd_maxi = i; pollfd_size = new_size; pthread_mutex_unlock(&pollfd_mutex); return i; } static void rem_pollfd(int pi) { if (pi < 0) { log_error("rem_pollfd %d", pi); return; } pthread_mutex_lock(&pollfd_mutex); pollfd[pi].fd = POLL_FD_UNUSED; pollfd[pi].events = 0; pollfd[pi].revents = 0; pthread_mutex_unlock(&pollfd_mutex); } static const char *lm_str(int x) { switch (x) { case LD_LM_NONE: return "none"; case LD_LM_DLM: return "dlm"; case LD_LM_SANLOCK: return "sanlock"; default: return "lm_unknown"; } } static const char *rt_str(int x) { switch (x) { case LD_RT_GL: return "gl"; case LD_RT_VG: return "vg"; case LD_RT_LV: return "lv"; default: return "."; }; } static const char *op_str(int x) { switch (x) { case LD_OP_INIT: return "init"; case LD_OP_FREE: return "free"; case LD_OP_START: return "start"; case LD_OP_STOP: return "stop"; case LD_OP_LOCK: return "lock"; case LD_OP_UPDATE: return "update"; case LD_OP_CLOSE: return "close"; case LD_OP_ENABLE: return "enable"; case LD_OP_DISABLE: return "disable"; case LD_OP_START_WAIT: return "start_wait"; case LD_OP_STOP_ALL: return "stop_all"; case LD_OP_RENAME_BEFORE: return "rename_before"; case LD_OP_RENAME_FINAL: return "rename_final"; case LD_OP_RUNNING_LM: return "running_lm"; case LD_OP_FIND_FREE_LOCK: return "find_free_lock"; case LD_OP_KILL_VG: return "kill_vg"; case LD_OP_DROP_VG: return "drop_vg"; case LD_OP_DUMP_LOG: return "dump_log"; case LD_OP_DUMP_INFO: return "dump_info"; case LD_OP_BUSY: return "busy"; default: return "op_unknown"; }; } int last_string_from_args(char *args_in, char *last) { const char *args = args_in; const char *colon, *str = NULL; while (1) { if (!args || (*args == '\0')) break; colon = strstr(args, ":"); if (!colon) break; str = colon; args = colon + 1; } if (str) { snprintf(last, MAX_ARGS, "%s", str + 1); return 0; } return -1; } int version_from_args(char *args, unsigned int *major, unsigned int *minor, unsigned int *patch) { char version[MAX_ARGS+1]; char *major_str, *minor_str, *patch_str; char *n, *d1, *d2; memset(version, 0, sizeof(version)); strncpy(version, args, MAX_ARGS); version[MAX_ARGS] = '\0'; n = strstr(version, ":"); if (n) *n = '\0'; d1 = strstr(version, "."); if (!d1) return -1; d2 = 
strstr(d1 + 1, "."); if (!d2) return -1; major_str = version; minor_str = d1 + 1; patch_str = d2 + 1; *d1 = '\0'; *d2 = '\0'; if (major) *major = atoi(major_str); if (minor) *minor = atoi(minor_str); if (patch) *patch = atoi(patch_str); return 0; } /* * These are few enough that arrays of function pointers can * be avoided. */ static int lm_prepare_lockspace(struct lockspace *ls, struct action *act) { int rv; if (ls->lm_type == LD_LM_DLM) rv = lm_prepare_lockspace_dlm(ls); else if (ls->lm_type == LD_LM_SANLOCK) rv = lm_prepare_lockspace_sanlock(ls); else return -1; if (act) act->lm_rv = rv; return rv; } static int lm_add_lockspace(struct lockspace *ls, struct action *act, int adopt) { int rv; if (ls->lm_type == LD_LM_DLM) rv = lm_add_lockspace_dlm(ls, adopt); else if (ls->lm_type == LD_LM_SANLOCK) rv = lm_add_lockspace_sanlock(ls, adopt); else return -1; if (act) act->lm_rv = rv; return rv; } static int lm_rem_lockspace(struct lockspace *ls, struct action *act, int free_vg) { int rv; if (ls->lm_type == LD_LM_DLM) rv = lm_rem_lockspace_dlm(ls, free_vg); else if (ls->lm_type == LD_LM_SANLOCK) rv = lm_rem_lockspace_sanlock(ls, free_vg); else return -1; if (act) act->lm_rv = rv; return rv; } static int lm_lock(struct lockspace *ls, struct resource *r, int mode, struct action *act, struct val_blk *vb_out, int *retry, int adopt) { int rv; if (ls->lm_type == LD_LM_DLM) rv = lm_lock_dlm(ls, r, mode, vb_out, adopt); else if (ls->lm_type == LD_LM_SANLOCK) rv = lm_lock_sanlock(ls, r, mode, vb_out, retry, adopt); else return -1; if (act) act->lm_rv = rv; return rv; } static int lm_convert(struct lockspace *ls, struct resource *r, int mode, struct action *act, uint32_t r_version) { int rv; if (ls->lm_type == LD_LM_DLM) rv = lm_convert_dlm(ls, r, mode, r_version); else if (ls->lm_type == LD_LM_SANLOCK) rv = lm_convert_sanlock(ls, r, mode, r_version); else return -1; if (act) act->lm_rv = rv; return rv; } static int lm_unlock(struct lockspace *ls, struct resource *r, struct action *act, uint32_t r_version, uint32_t lmu_flags) { int rv; if (ls->lm_type == LD_LM_DLM) rv = lm_unlock_dlm(ls, r, r_version, lmu_flags); else if (ls->lm_type == LD_LM_SANLOCK) rv = lm_unlock_sanlock(ls, r, r_version, lmu_flags); else return -1; if (act) act->lm_rv = rv; return rv; } static int lm_hosts(struct lockspace *ls, int notify) { if (ls->lm_type == LD_LM_DLM) return lm_hosts_dlm(ls, notify); else if (ls->lm_type == LD_LM_SANLOCK) return lm_hosts_sanlock(ls, notify); return -1; } static void lm_rem_resource(struct lockspace *ls, struct resource *r) { if (ls->lm_type == LD_LM_DLM) lm_rem_resource_dlm(ls, r); else if (ls->lm_type == LD_LM_SANLOCK) lm_rem_resource_sanlock(ls, r); } static int lm_find_free_lock(struct lockspace *ls, uint64_t *free_offset) { if (ls->lm_type == LD_LM_DLM) return 0; else if (ls->lm_type == LD_LM_SANLOCK) return lm_find_free_lock_sanlock(ls, free_offset); return -1; } /* * While adopting locks, actions originate from the adopt_locks() * function, not from a client. So, these actions (flagged ADOPT), * should be passed back to the adopt_locks() function through the * adopt_results list, and not be sent back to a client via the * client_list/client_thread. 
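 *
 * In short, add_client_result() below routes a completed action one of
 * three ways:
 *   - LD_AF_NO_CLIENT: log it and free_action() it (internal action)
 *   - LD_AF_ADOPT:     queue it on adopt_results for adopt_locks()
 *   - otherwise:       queue it on client_results and signal client_cond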
*/ static void add_client_result(struct action *act) { if (act->flags & LD_AF_NO_CLIENT) { log_debug("internal action done op %s mode %s result %d vg %s", op_str(act->op), mode_str(act->mode), act->result, act->vg_name); free_action(act); return; } pthread_mutex_lock(&client_mutex); if (act->flags & LD_AF_ADOPT) list_add_tail(&act->list, &adopt_results); else list_add_tail(&act->list, &client_results); pthread_cond_signal(&client_cond); pthread_mutex_unlock(&client_mutex); } static struct lock *find_lock_client(struct resource *r, uint32_t client_id) { struct lock *lk; list_for_each_entry(lk, &r->locks, list) { if (lk->client_id == client_id) return lk; } return NULL; } static struct lock *find_lock_persistent(struct resource *r) { struct lock *lk; list_for_each_entry(lk, &r->locks, list) { if (lk->flags & LD_LF_PERSISTENT) return lk; } return NULL; } static struct action *find_action_client(struct resource *r, uint32_t client_id) { struct action *act; list_for_each_entry(act, &r->actions, list) { if (act->client_id != client_id) continue; return act; } return NULL; } static void add_work_action(struct action *act) { pthread_mutex_lock(&worker_mutex); if (!worker_stop) { list_add_tail(&act->list, &worker_list); pthread_cond_signal(&worker_cond); } pthread_mutex_unlock(&worker_mutex); } static daemon_reply send_lvmetad(const char *id, ...) { daemon_reply reply; va_list ap; int retries = 0; int err; va_start(ap, id); /* * mutex is used because all threads share a single * lvmetad connection/handle. */ pthread_mutex_lock(&lvmetad_mutex); retry: if (!lvmetad_connected) { lvmetad_handle = lvmetad_open(NULL); if (lvmetad_handle.error || lvmetad_handle.socket_fd < 0) { err = lvmetad_handle.error ?: lvmetad_handle.socket_fd; pthread_mutex_unlock(&lvmetad_mutex); log_error("lvmetad_open reconnect error %d", err); memset(&reply, 0, sizeof(reply)); reply.error = err; va_end(ap); return reply; } else { log_debug("lvmetad reconnected"); lvmetad_connected = 1; } } reply = daemon_send_simple_v(lvmetad_handle, id, ap); /* lvmetad may have been restarted */ if ((reply.error == ECONNRESET) && (retries < 2)) { daemon_close(lvmetad_handle); lvmetad_connected = 0; retries++; goto retry; } pthread_mutex_unlock(&lvmetad_mutex); va_end(ap); return reply; } static int res_lock(struct lockspace *ls, struct resource *r, struct action *act, int *retry) { struct lock *lk; struct val_blk vb; uint32_t new_version = 0; int inval_meta; int rv = 0; memset(&vb, 0, sizeof(vb)); r->last_client_id = act->client_id; if (r->type == LD_RT_LV) log_debug("S %s R %s res_lock cl %u mode %s (%s)", ls->name, r->name, act->client_id, mode_str(act->mode), act->lv_name); else log_debug("S %s R %s res_lock cl %u mode %s", ls->name, r->name, act->client_id, mode_str(act->mode)); if (r->mode == LD_LK_SH && act->mode == LD_LK_SH) goto add_lk; if (r->type == LD_RT_LV && act->lv_args[0]) memcpy(r->lv_args, act->lv_args, MAX_ARGS); rv = lm_lock(ls, r, act->mode, act, &vb, retry, act->flags & LD_AF_ADOPT); if (r->use_vb) log_debug("S %s R %s res_lock rv %d read vb %x %x %u", ls->name, r->name, rv, vb.version, vb.flags, vb.r_version); else log_debug("S %s R %s res_lock rv %d", ls->name, r->name, rv); if (rv < 0) return rv; if (sanlock_gl_dup && ls->sanlock_gl_enabled) act->flags |= LD_AF_DUP_GL_LS; /* * Check new lvb values to decide if lvmetad cache should * be invalidated. 
When we need to invalidate the lvmetad * cache, but don't have a usable r_version from the lvb, * send lvmetad new_version 0 which causes it to invalidate * the VG metdata without comparing against the currently * cached VG seqno. */ inval_meta = 0; if (!r->use_vb) { /* LV locks don't use an lvb. */ } else if (vb.version && ((vb.version & 0xFF00) > (VAL_BLK_VERSION & 0xFF00))) { log_error("S %s R %s res_lock invalid val_blk version %x flags %x r_version %u", ls->name, r->name, vb.version, vb.flags, vb.r_version); inval_meta = 1; new_version = 0; rv = -EINVAL; } else if (vb.r_version && (vb.r_version == r->version)) { /* * Common case when the version hasn't changed. * Do nothing. */ } else if (r->version && vb.r_version && (vb.r_version > r->version)) { /* * Common case when the version has changed. Another host * has changed the data protected by the lock since we last * acquired it, and increased r_version so we know that our * cache is invalid. */ log_debug("S %s R %s res_lock got version %u our %u", ls->name, r->name, vb.r_version, r->version); r->version = vb.r_version; new_version = vb.r_version; r->version_zero_valid = 0; inval_meta = 1; } else if (r->version_zero_valid && !vb.r_version) { /* * The lvb is in a persistent zero state, which will end * once someone uses the lock and writes a new lvb value. * Do nothing. */ log_debug("S %s R %s res_lock version_zero_valid still zero", ls->name, r->name); } else if (r->version_zero_valid && vb.r_version) { /* * Someone has written to the lvb after it was in a * persistent zero state. Begin tracking normal * non-zero changes. We may or may not have known * about a previous non-zero version (in r->version). * If we did, it means the lvb content was lost and * has now been reinitialized. * * If the new reinitialized value is less than the * previous non-zero value in r->version, then something * unusual has happened. For a VG lock, it probably * means the VG was removed and recreated. Invalidate * our cache and begin using the new VG version. For * a GL lock, another host may have reinitialized a * lost/zero lvb with a value less than we'd seen * before. Invalidate the cache, and begin using * the lower version (or continue using our old * larger version?) */ if (r->version && (r->version >= vb.r_version)) { log_debug("S %s R %s res_lock version_zero_valid got version %u less than our %u", ls->name, r->name, vb.r_version, r->version); new_version = 0; } else { log_debug("S %s R %s res_lock version_zero_valid got version %u our %u", ls->name, r->name, vb.r_version, r->version); new_version = vb.r_version; } r->version = vb.r_version; r->version_zero_valid = 0; inval_meta = 1; } else if (!r->version && vb.r_version) { /* * The first time we've acquired the lock and seen the lvb. */ log_debug("S %s R %s res_lock initial version %u", ls->name, r->name, vb.r_version); r->version = vb.r_version; inval_meta = 1; new_version = vb.r_version; r->version_zero_valid = 0; } else if (!r->version && !vb.r_version) { /* * The lock may have never been used to change something. * (e.g. a new sanlock GL?) */ log_debug("S %s R %s res_lock all versions zero", ls->name, r->name); if (!r->version_zero_valid) { inval_meta = 1; new_version = 0; } r->version_zero_valid = 1; } else if (r->version && !vb.r_version) { /* * The lvb content has been lost or never been initialized. * It can be lost during dlm recovery when the master node * is removed. 
* * If we're the next to write the lvb, reinitialze it to the * new VG seqno, or a new GL counter larger than was seen by * any hosts before (how to estimate that?) * * If we see non-zero values before we next write to it, use * those values. * * While the lvb values remain zero, the data for the lock * is unchanged and we don't need to invalidate metadata. */ if ((ls->lm_type == LD_LM_DLM) && !vb.version && !vb.flags) log_debug("S %s R %s res_lock all lvb content is blank", ls->name, r->name); log_debug("S %s R %s res_lock our version %u got vb %x %x %u", ls->name, r->name, r->version, vb.version, vb.flags, vb.r_version); r->version_zero_valid = 1; inval_meta = 1; new_version = 0; } else if (r->version && vb.r_version && (vb.r_version < r->version)) { /* * The lvb value has gone backwards, which shouldn't generally happen, * but could when the dlm lvb is lost and reinitialized, or the VG * is removed and recreated. * * If this is a VG lock, it probably means the VG has been removed * and recreated while we had the dlm lockspace running. * FIXME: how does the cache validation and replacement in lvmetad * work in this case? */ log_debug("S %s R %s res_lock got version %u less than our version %u", ls->name, r->name, vb.r_version, r->version); r->version = vb.r_version; inval_meta = 1; new_version = 0; r->version_zero_valid = 0; } else { log_debug("S %s R %s res_lock undefined vb condition vzv %d our version %u vb %x %x %u", ls->name, r->name, r->version_zero_valid, r->version, vb.version, vb.flags, vb.r_version); } if (vb.version && vb.r_version && (vb.flags & VBF_REMOVED)) { /* Should we set ls->thread_stop = 1 ? */ log_debug("S %s R %s res_lock vb flag REMOVED", ls->name, r->name); rv = -EREMOVED; } /* * r is vglk: tell lvmetad to set the vg invalid * flag, and provide the new r_version. If lvmetad finds * that its cached vg has seqno less than the value * we send here, it will set the vg invalid flag. * lvm commands that read the vg from lvmetad, will * see the invalid flag returned, will reread the * vg from disk, update the lvmetad copy, and go on. * * r is global: tell lvmetad to set the global invalid * flag. When commands see this flag returned from lvmetad, * they will reread metadata from disk, update the lvmetad * caches, and tell lvmetad to set global invalid to 0. */ if (inval_meta && (r->type == LD_RT_VG)) { daemon_reply reply; char *uuid; log_debug("S %s R %s res_lock set lvmetad vg version %u", ls->name, r->name, new_version); if (!ls->vg_uuid[0] || !strcmp(ls->vg_uuid, "none")) uuid = (char *)"none"; else uuid = ls->vg_uuid; reply = send_lvmetad("set_vg_info", "token = %s", "skip", "uuid = %s", uuid, "name = %s", ls->vg_name, "version = " FMTd64, (int64_t)new_version, NULL); if (reply.error || strcmp(daemon_reply_str(reply, "response", ""), "OK")) log_error("set_vg_info in lvmetad failed %d", reply.error); daemon_reply_destroy(reply); } if (inval_meta && (r->type == LD_RT_GL)) { daemon_reply reply; log_debug("S %s R %s res_lock set lvmetad global invalid", ls->name, r->name); reply = send_lvmetad("set_global_info", "token = %s", "skip", "global_invalid = " FMTd64, INT64_C(1), NULL); if (reply.error || strcmp(daemon_reply_str(reply, "response", ""), "OK")) log_error("set_global_info in lvmetad failed %d", reply.error); daemon_reply_destroy(reply); } /* * Record the new lock state. 
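 *
 * Recording the state means: set r->mode to the granted mode, count
 * shared holders in r->sh_count, and add a 'struct lock' for this
 * holder to r->locks, e.g.
 *
 *   lk->client_id = act->client_id;
 *   lk->mode = act->mode;
 *   list_add_tail(&lk->list, &r->locks);
 *
 * (a persistent lock instead gets LD_LF_PERSISTENT and client_id 0).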
*/ r->mode = act->mode; add_lk: if (r->mode == LD_LK_SH) r->sh_count++; if (!(lk = alloc_lock())) return -ENOMEM; lk->client_id = act->client_id; lk->mode = act->mode; if (act->flags & LD_AF_PERSISTENT) { lk->flags |= LD_LF_PERSISTENT; lk->client_id = 0; } /* * LV_LOCK means the action acquired the lv lock in the lock manager * (as opposed to finding that the lv lock was already held). If * the client for this LV_LOCK action fails before we send the result, * then we automatically unlock the lv since the lv wasn't activated. * (There will always be an odd chance the lv lock is held while the * lv is not active, but this helps.) The most common case where this * is helpful is when the lv lock operation is slow/delayed and the * command is canceled by the user. * * LV_UNLOCK means the lv unlock action was generated by lvmlockd when * it tried to send the reply for an lv lock action (with LV_LOCK set), * and failed to send the reply to the client/command. The * last_client_id saved on the resource is compared to this LV_UNLOCK * action before the auto unlock is done in case another action locked * the lv between the failed client lock action and the auto unlock. */ if (r->type == LD_RT_LV) act->flags |= LD_AF_LV_LOCK; list_add_tail(&lk->list, &r->locks); return rv; } static int res_convert(struct lockspace *ls, struct resource *r, struct lock *lk, struct action *act) { uint32_t r_version; int rv; r->last_client_id = act->client_id; log_debug("S %s R %s res_convert cl %u mode %s", ls->name, r->name, act->client_id, mode_str(act->mode)); if (act->mode == LD_LK_EX && lk->mode == LD_LK_SH && r->sh_count > 1) return -EAGAIN; /* * lm_convert() writes new version (from ex) * Same as lm_unlock() */ if ((r->type == LD_RT_GL) && (r->mode == LD_LK_EX)) { r->version++; lk->version = r->version; r_version = r->version; r->version_zero_valid = 0; log_debug("S %s R %s res_convert r_version inc %u", ls->name, r->name, r_version); } else if ((r->type == LD_RT_VG) && (r->mode == LD_LK_EX) && (lk->version > r->version)) { r->version = lk->version; r_version = r->version; r->version_zero_valid = 0; log_debug("S %s R %s res_convert r_version new %u", ls->name, r->name, r_version); } else { r_version = 0; } rv = lm_convert(ls, r, act->mode, act, r_version); if (rv < 0) { log_error("S %s R %s res_convert lm error %d", ls->name, r->name, rv); return rv; } log_debug("S %s R %s res_convert lm done", ls->name, r->name); if (lk->mode == LD_LK_EX && act->mode == LD_LK_SH) { r->sh_count = 1; } else if (lk->mode == LD_LK_SH && act->mode == LD_LK_EX) { r->sh_count = 0; } else { /* should not be possible */ log_error("S %s R %s res_convert invalid modes %d %d", ls->name, r->name, lk->mode, act->mode); return -1; } r->mode = act->mode; lk->mode = act->mode; return 0; } static int res_cancel(struct lockspace *ls, struct resource *r, struct action *act) { struct action *cact; /* * a client can cancel its own non-persistent lock requests, * when could this happen? * * a client can cancel other client's persistent lock requests, * when could this happen? 
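 *
 * In practice res_cancel() is reached from res_process(): for an unlock
 * carrying LD_AF_UNLOCK_CANCEL that found nothing to unlock, and for
 * each client on act_close_list during implicit cleanup.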
*/ if (act->flags & LD_AF_PERSISTENT) { list_for_each_entry(cact, &r->actions, list) { if (!(cact->flags & LD_AF_PERSISTENT)) continue; goto do_cancel; } } else { cact = find_action_client(r, act->client_id); if (cact) goto do_cancel; } return -ENOENT; do_cancel: log_debug("S %s R %s res_cancel cl %u", ls->name, r->name, cact->client_id); cact->result = -ECANCELED; list_del(&cact->list); add_client_result(cact); return -ECANCELED; } /* * lm_unlock() writes new a r_version (from ex) * * The r_version of the vg resource is incremented if * an "update" was received for the vg lock. The update * contains the new vg seqno from the vg metadata which is * used as the r_version. * * The r_version of the global resource is automatically * incremented when it is unlocked from ex mode. * * r_version is incremented every time a command releases * the global lock from ex. */ /* * persistent locks will not be unlocked for OP_CLOSE/act_close * because act_close->flags does not have the PERSISTENT flag * set, and a persistent lk->client_id is zero, which will not * match the client in act_close->client_id. */ static int res_unlock(struct lockspace *ls, struct resource *r, struct action *act) { struct lock *lk; uint32_t r_version; int rv; if (act->flags & LD_AF_PERSISTENT) { lk = find_lock_persistent(r); if (lk) goto do_unlock; } else { lk = find_lock_client(r, act->client_id); if (lk) goto do_unlock; } if (act->op != LD_OP_CLOSE) log_debug("S %s R %s res_unlock cl %u no locks", ls->name, r->name, act->client_id); return -ENOENT; do_unlock: if ((act->flags & LD_AF_LV_UNLOCK) && (r->last_client_id != act->client_id)) { log_debug("S %s R %s res_unlock cl %u for failed client ignored, last client %u", ls->name, r->name, act->client_id, r->last_client_id); return -ENOENT; } r->last_client_id = act->client_id; if (act->op == LD_OP_CLOSE) log_debug("S %s R %s res_unlock cl %u from close", ls->name, r->name, act->client_id); else if (r->type == LD_RT_LV) log_debug("S %s R %s res_unlock cl %u (%s)", ls->name, r->name, act->client_id, act->lv_name); else log_debug("S %s R %s res_unlock cl %u", ls->name, r->name, act->client_id); /* send unlock to lm when last sh lock is unlocked */ if (lk->mode == LD_LK_SH) { r->sh_count--; if (r->sh_count > 0) { log_debug("S %s R %s res_unlock sh_count %u", ls->name, r->name, r->sh_count); goto rem_lk; } } if ((r->type == LD_RT_GL) && (r->mode == LD_LK_EX)) { r->version++; lk->version = r->version; r_version = r->version; r->version_zero_valid = 0; log_debug("S %s R %s res_unlock r_version inc %u", ls->name, r->name, r_version); } else if ((r->type == LD_RT_VG) && (r->mode == LD_LK_EX) && (lk->version > r->version)) { r->version = lk->version; r_version = r->version; r->version_zero_valid = 0; log_debug("S %s R %s res_unlock r_version new %u", ls->name, r->name, r_version); } else { r_version = 0; } rv = lm_unlock(ls, r, act, r_version, 0); if (rv < 0) { /* should never happen, retry? 
*/ log_error("S %s R %s res_unlock lm error %d", ls->name, r->name, rv); return rv; } log_debug("S %s R %s res_unlock lm done", ls->name, r->name); rem_lk: list_del(&lk->list); free_lock(lk); if (list_empty(&r->locks)) r->mode = LD_LK_UN; return 0; } static int res_update(struct lockspace *ls, struct resource *r, struct action *act) { struct lock *lk; lk = find_lock_client(r, act->client_id); if (!lk) { log_error("S %s R %s res_update cl %u lock not found", ls->name, r->name, act->client_id); return -ENOENT; } if (r->mode != LD_LK_EX) { log_error("S %s R %s res_update cl %u version on non-ex lock", ls->name, r->name, act->client_id); return -EINVAL; } /* lk version will be written to lm by unlock */ if (act->flags & LD_AF_NEXT_VERSION) lk->version = r->version + 1; else { if (r->version >= act->version) { /* * This update is done from vg_write. If the metadata with * this seqno is not committed by vg_commit, then next * vg_write can use the same seqno, causing us to see no * increase in seqno here as expected. * FIXME: In this case, do something like setting the lvb * version to 0 to instead of the same seqno which will * force an invalidation on other hosts. The next change * will return to using the seqno again. */ log_error("S %s R %s res_update cl %u old version %u new version %u too small", ls->name, r->name, act->client_id, r->version, act->version); } lk->version = act->version; } log_debug("S %s R %s res_update cl %u lk version to %u", ls->name, r->name, act->client_id, lk->version); return 0; } /* * There is nothing to deallocate when freeing a dlm LV, the LV * will simply be unlocked by rem_resource. */ static int free_lv(struct lockspace *ls, struct resource *r) { if (ls->lm_type == LD_LM_SANLOCK) return lm_free_lv_sanlock(ls, r); else if (ls->lm_type == LD_LM_DLM) return 0; else return -EINVAL; } /* * NB. we can't do this if sanlock is holding any locks on * the resource; we'd be rewriting the resource from under * sanlock and would confuse or break it badly. We don't * know what another host is doing, so these must be used * very carefully. */ static int res_able(struct lockspace *ls, struct resource *r, struct action *act) { int rv; if (ls->lm_type != LD_LM_SANLOCK) { log_error("enable/disable only applies to sanlock"); return -EINVAL; } if (r->type != LD_RT_GL) { log_error("enable/disable only applies to global lock"); return -EINVAL; } if (r->mode != LD_LK_UN) { log_error("enable/disable only allowed on unlocked resource"); return -EINVAL; } if (act->op == LD_OP_ENABLE && gl_lsname_sanlock[0]) { log_error("disable global lock in %s before enable in %s", gl_lsname_sanlock, ls->name); return -EINVAL; } if ((act->op == LD_OP_DISABLE) && (act->flags & LD_AF_EX_DISABLE)) { rv = lm_ex_disable_gl_sanlock(ls); goto out; } rv = lm_able_gl_sanlock(ls, act->op == LD_OP_ENABLE); if (!rv && (act->op == LD_OP_ENABLE)) gl_vg_removed = 0; out: return rv; } /* * Go through queued actions, and make lock/unlock calls on the resource * based on the actions and the existing lock state. * * All lock operations sent to the lock manager are non-blocking. * This is because sanlock does not support lock queueing. * Eventually we could enhance this to take advantage of lock * queueing when available (i.e. for the dlm). * * act_close_list: list of CLOSE actions, identifying clients that have * closed/terminated their lvmlockd connection, and whose locks should * be released. Do not remove these actions from act_close_list. 
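 * (The caller, lockspace_thread_main(), applies these close actions to
 * every resource and frees them itself after the res_process() loop.)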
* * retry_out: set to 1 if the lock manager said we should retry, * meaning we should call res_process() again in a short while to retry. */ static void res_process(struct lockspace *ls, struct resource *r, struct list_head *act_close_list, int *retry_out) { struct action *act, *safe, *act_close; struct lock *lk; int lm_retry; int rv; /* * handle version updates for ex locks * (new version will be written by unlock) */ list_for_each_entry_safe(act, safe, &r->actions, list) { if (act->op == LD_OP_UPDATE) { rv = res_update(ls, r, act); act->result = rv; list_del(&act->list); add_client_result(act); } } /* * handle explicit unlock actions */ list_for_each_entry_safe(act, safe, &r->actions, list) { if ((act->op == LD_OP_LOCK) && (act->mode == LD_LK_IV || act->mode == LD_LK_NL)) { act->result = -EINVAL; list_del(&act->list); add_client_result(act); } if (act->op == LD_OP_LOCK && act->mode == LD_LK_UN) { rv = res_unlock(ls, r, act); if (rv == -ENOENT && (act->flags & LD_AF_UNLOCK_CANCEL)) rv = res_cancel(ls, r, act); /* * possible unlock results: * 0: unlock succeeded * -ECANCELED: cancel succeeded * -ENOENT: nothing to unlock or cancel */ act->result = rv; list_del(&act->list); add_client_result(act); } } /* * handle implicit unlocks due to client exit, * also clear any outstanding actions for the client */ list_for_each_entry(act_close, act_close_list, list) { res_unlock(ls, r, act_close); res_cancel(ls, r, act_close); } /* * handle freeing a lock for an lv that has been removed */ list_for_each_entry_safe(act, safe, &r->actions, list) { if (act->op == LD_OP_FREE && act->rt == LD_RT_LV) { log_debug("S %s R %s free_lv", ls->name, r->name); rv = free_lv(ls, r); act->result = rv; list_del(&act->list); add_client_result(act); goto r_free; } } /* * handle enable/disable */ list_for_each_entry_safe(act, safe, &r->actions, list) { if (act->op == LD_OP_ENABLE || act->op == LD_OP_DISABLE) { rv = res_able(ls, r, act); act->result = rv; list_del(&act->list); add_client_result(act); if (!rv && act->op == LD_OP_DISABLE) { log_debug("S %s R %s free disabled", ls->name, r->name); goto r_free; } } } /* * transient requests on existing transient locks */ list_for_each_entry_safe(act, safe, &r->actions, list) { if (act->flags & LD_AF_PERSISTENT) continue; lk = find_lock_client(r, act->client_id); if (!lk) continue; if (lk->mode != act->mode) { /* convert below */ /* act->result = -EEXIST; list_del(&act->list); add_client_result(act); */ continue; } else { /* success */ r->last_client_id = act->client_id; act->result = -EALREADY; list_del(&act->list); add_client_result(act); } } /* * persistent requests on existing persistent locks * * persistent locks are not owned by a client, so any * existing with matching mode satisfies a request. * only one persistent lock is kept on a resource. * a single "unowned" persistent lock satisfies * any/multiple client requests for a persistent lock. */ list_for_each_entry_safe(act, safe, &r->actions, list) { if (!(act->flags & LD_AF_PERSISTENT)) continue; lk = find_lock_persistent(r); if (!lk) continue; if (lk->mode != act->mode) { /* convert below */ /* act->result = -EEXIST; list_del(&act->list); add_client_result(act); */ continue; } else { /* success */ r->last_client_id = act->client_id; act->result = -EALREADY; list_del(&act->list); add_client_result(act); } } /* * transient requests with existing persistent locks * * Just grant the transient request and do not * keep a record of it. Assume that the persistent * lock will not go away while the transient lock * is needed. 
* * This would be used when an ex, persistent lv lock * exists from activation, and then something like * lvextend asks for a transient ex lock to change * the lv. The lv could not be unlocked by deactivation * while the lvextend was running. * * The logic here for mixing T/P locks is not general * support; there are a number of cases where it will * not work: updating version number (lv locks have * none), ex locks from multiple clients will not * conflict, explicit un of the transient lock will fail. */ list_for_each_entry_safe(act, safe, &r->actions, list) { if (act->flags & LD_AF_PERSISTENT) continue; lk = find_lock_persistent(r); if (!lk) continue; if ((lk->mode == LD_LK_EX) || (lk->mode == LD_LK_SH && act->mode == LD_LK_SH)) { r->last_client_id = act->client_id; act->result = 0; list_del(&act->list); add_client_result(act); } else { /* persistent lock is sh, transient request is ex */ /* FIXME: can we remove this case? do a convert here? */ log_debug("res_process %s existing persistent lock new transient", r->name); r->last_client_id = act->client_id; act->result = -EEXIST; list_del(&act->list); add_client_result(act); } } /* * persistent requests with existing transient locks * * If a client requests a P (persistent) lock for a T (transient) * lock it already holds, we can just change T to P. Fail if the * same happens for locks from different clients. Changing * another client's lock from T to P may cause problems * if that client tries to unlock or update version. * * I don't think this P/T combination will be used. * It might be used if a command was able to take a P * vg lock, in which case the T vg lock would already * be held for reading. If the T lock was sh, it would * be converted to P ex. If the T/P modes matched, the * lock could just be changed from T to P. */ list_for_each_entry_safe(act, safe, &r->actions, list) { if (!(act->flags & LD_AF_PERSISTENT)) continue; lk = find_lock_client(r, act->client_id); if (!lk) continue; if (lk->mode != act->mode) { /* FIXME: convert and change to persistent? */ log_debug("res_process %s existing transient lock new persistent", r->name); r->last_client_id = act->client_id; act->result = -EEXIST; list_del(&act->list); add_client_result(act); } else { r->last_client_id = act->client_id; lk->flags |= LD_LF_PERSISTENT; lk->client_id = 0; act->result = 0; list_del(&act->list); add_client_result(act); } } /* * convert mode of existing locks */ list_for_each_entry_safe(act, safe, &r->actions, list) { if (act->flags & LD_AF_PERSISTENT) lk = find_lock_persistent(r); else lk = find_lock_client(r, act->client_id); if (!lk) continue; if (lk->mode == act->mode) { /* should never happen, should be found above */ log_error("convert same mode"); continue; } /* convert fails immediately, no EAGAIN retry */ rv = res_convert(ls, r, lk, act); act->result = rv; list_del(&act->list); add_client_result(act); } /* * Cases above are all requests addressed by existing locks. * Below handles the rest. Transient and persistent are * handled the same, except * - if mode of existing lock is incompat with requested, * leave the act on r->actions * - if r mode is EX, any lock action is blocked, just quit * * Retry a lock request that fails due to a lock conflict (-EAGAIN): * if we have not exceeded max retries and lm sets lm_retry (sanlock * transient conflicts from shared lock implementation), or r type * is gl or vg (transient real conflicts we want to hide from command). * lv lock conflicts won't be transient so don't retry them. 
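 *
 * In effect, the retry test applied below (for both sh and ex requests) is:
 *
 *   rv == -EAGAIN &&
 *   act->retries <= act->max_retries &&
 *   (lm_retry || r->type != LD_RT_LV)
 *
 * in which case the act stays queued, act->retries is incremented, and
 * *retry_out is set so res_process() runs again after a short delay.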
*/ if (r->mode == LD_LK_EX) return; /* * r mode is SH or UN, pass lock-sh actions to lm */ list_for_each_entry_safe(act, safe, &r->actions, list) { /* grant in order, so break here */ if (act->op == LD_OP_LOCK && act->mode == LD_LK_EX) break; if (act->op == LD_OP_LOCK && act->mode == LD_LK_SH) { lm_retry = 0; rv = res_lock(ls, r, act, &lm_retry); if ((rv == -EAGAIN) && (act->retries <= act->max_retries) && (lm_retry || (r->type != LD_RT_LV))) { /* leave act on list */ log_debug("S %s R %s res_lock EAGAIN retry", ls->name, r->name); act->retries++; *retry_out = 1; } else { act->result = rv; list_del(&act->list); add_client_result(act); } if (rv == -EUNATCH) goto r_free; } } /* * r mode is SH, any ex lock action is blocked, just quit */ if (r->mode == LD_LK_SH) return; /* * r mode is UN, pass lock-ex action to lm */ list_for_each_entry_safe(act, safe, &r->actions, list) { if (act->op == LD_OP_LOCK && act->mode == LD_LK_EX) { lm_retry = 0; rv = res_lock(ls, r, act, &lm_retry); if ((rv == -EAGAIN) && (act->retries <= act->max_retries) && (lm_retry || (r->type != LD_RT_LV))) { /* leave act on list */ log_debug("S %s R %s res_lock EAGAIN retry", ls->name, r->name); act->retries++; *retry_out = 1; } else { act->result = rv; list_del(&act->list); add_client_result(act); } if (rv == -EUNATCH) goto r_free; break; } } return; r_free: /* For the EUNATCH case it may be possible there are queued actions? */ list_for_each_entry_safe(act, safe, &r->actions, list) { log_error("S %s R %s res_process r_free cancel %s client %d", ls->name, r->name, op_str(act->op), act->client_id); act->result = -ECANCELED; list_del(&act->list); add_client_result(act); } log_debug("S %s R %s res_process free", ls->name, r->name); lm_rem_resource(ls, r); list_del(&r->list); free_resource(r); } #define LOCKS_EXIST_ANY 1 #define LOCKS_EXIST_GL 2 #define LOCKS_EXIST_VG 3 #define LOCKS_EXIST_LV 4 static int for_each_lock(struct lockspace *ls, int locks_do) { struct resource *r; struct lock *lk; list_for_each_entry(r, &ls->resources, list) { list_for_each_entry(lk, &r->locks, list) { if (locks_do == LOCKS_EXIST_ANY) return 1; if (locks_do == LOCKS_EXIST_GL && r->type == LD_RT_GL) return 1; if (locks_do == LOCKS_EXIST_VG && r->type == LD_RT_VG) return 1; if (locks_do == LOCKS_EXIST_LV && r->type == LD_RT_LV) return 1; } } return 0; } static int clear_locks(struct lockspace *ls, int free_vg, int drop_vg) { struct resource *r, *r_safe; struct lock *lk, *lk_safe; struct action *act, *act_safe; uint32_t lk_version; uint32_t r_version; int lk_count = 0; int rv; list_for_each_entry_safe(r, r_safe, &ls->resources, list) { lk_version = 0; list_for_each_entry_safe(lk, lk_safe, &r->locks, list) { lk_count++; /* * Stopping a lockspace shouldn't happen with LV locks * still held, but it will be stopped with GL and VG * locks held. The drop_vg case may see LV locks. 
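 *
 * The largest lk->version seen here is remembered (lk_version) and, for
 * an ex VG lock, written back through lm_unlock() below so the final
 * r_version is not lost when the lockspace goes away.
 */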
*/ if (lk->flags & LD_LF_PERSISTENT && !drop_vg) log_error("S %s R %s clear lock persistent", ls->name, r->name); else log_debug("S %s R %s clear lock mode %s client %d", ls->name, r->name, mode_str(lk->mode), lk->client_id); if (lk->version > lk_version) lk_version = lk->version; list_del(&lk->list); free_lock(lk); } if (r->mode == LD_LK_UN) goto r_free; if ((r->type == LD_RT_GL) && (r->mode == LD_LK_EX)) { r->version++; r_version = r->version; log_debug("S %s R %s clear_locks r_version inc %u", ls->name, r->name, r_version); } else if ((r->type == LD_RT_VG) && (r->mode == LD_LK_EX) && (lk_version > r->version)) { r->version = lk_version; r_version = r->version; log_debug("S %s R %s clear_locks r_version new %u", ls->name, r->name, r_version); } else { r_version = 0; } rv = lm_unlock(ls, r, NULL, r_version, free_vg ? LMUF_FREE_VG : 0); if (rv < 0) { /* should never happen */ log_error("S %s R %s clear_locks free %d drop %d lm unlock error %d", ls->name, r->name, free_vg, drop_vg, rv); } list_for_each_entry_safe(act, act_safe, &r->actions, list) { log_error("S %s R %s clear_locks cancel %s client %d", ls->name, r->name, op_str(act->op), act->client_id); act->result = -ECANCELED; list_del(&act->list); add_client_result(act); } r_free: log_debug("S %s R %s free", ls->name, r->name); lm_rem_resource(ls, r); list_del(&r->list); free_resource(r); } return lk_count; } /* * find and return the resource that is referenced by the action * - there is a single gl resource per lockspace * - there is a single vg resource per lockspace * - there can be many lv resources per lockspace, compare names */ static struct resource *find_resource_act(struct lockspace *ls, struct action *act, int nocreate) { struct resource *r; list_for_each_entry(r, &ls->resources, list) { if (r->type != act->rt) continue; if (r->type == LD_RT_GL && act->rt == LD_RT_GL) return r; if (r->type == LD_RT_VG && act->rt == LD_RT_VG) return r; if (r->type == LD_RT_LV && act->rt == LD_RT_LV && !strcmp(r->name, act->lv_uuid)) return r; } if (nocreate) return NULL; if (!(r = alloc_resource())) return NULL; r->type = act->rt; r->mode = LD_LK_UN; if (r->type == LD_RT_GL) { strncpy(r->name, R_NAME_GL, MAX_NAME); r->use_vb = 1; } else if (r->type == LD_RT_VG) { strncpy(r->name, R_NAME_VG, MAX_NAME); r->use_vb = 1; } else if (r->type == LD_RT_LV) { strncpy(r->name, act->lv_uuid, MAX_NAME); r->use_vb = 0; } list_add_tail(&r->list, &ls->resources); return r; } static void free_ls_resources(struct lockspace *ls) { struct resource *r, *r_safe; list_for_each_entry_safe(r, r_safe, &ls->resources, list) { lm_rem_resource(ls, r); list_del(&r->list); free_resource(r); } } /* * ls is the vg being removed that holds the global lock. * check if any other vgs will be left without a global lock. */ static int other_sanlock_vgs_exist(struct lockspace *ls_rem) { struct lockspace *ls; list_for_each_entry(ls, &lockspaces, list) { if (ls->lm_type != LD_LM_SANLOCK) continue; if (!strcmp(ls->name, ls_rem->name)) continue; log_debug("other sanlock vg exists %s", ls->name); return 1; } return 0; } /* * LOCK is the main thing we're interested in; the others are unlikely. 
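 *
 * Summarizing the function below: while ls->kill_vg is set, unlock
 * requests (LD_OP_LOCK with mode LD_LK_UN) and housekeeping ops such as
 * stop, drop_vg and free still return 1 (allowed); new locks, enable,
 * disable, update, rename and find_free_lock return 0, and the caller
 * fails them with -EVGKILLED.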
*/ static int process_op_during_kill(struct action *act) { if (act->op == LD_OP_LOCK && act->mode == LD_LK_UN) return 1; switch (act->op) { case LD_OP_LOCK: case LD_OP_ENABLE: case LD_OP_DISABLE: case LD_OP_UPDATE: case LD_OP_RENAME_BEFORE: case LD_OP_RENAME_FINAL: case LD_OP_FIND_FREE_LOCK: return 0; }; return 1; } /* * Process actions queued for this lockspace by * client_recv_action / add_lock_action. * * The lockspace_thread can touch its own ls struct without holding * lockspaces_mutex until it sets ls->thread_done, after which it * cannot touch ls without holding lockspaces_mutex. */ #define LOCK_RETRY_MS 1000 /* milliseconds to delay between retry */ static void *lockspace_thread_main(void *arg_in) { struct lockspace *ls = arg_in; struct resource *r, *r2; struct action *add_act, *act, *safe; struct action *act_op_free = NULL; struct list_head tmp_act; struct list_head act_close; char tmp_name[MAX_NAME+1]; int free_vg = 0; int drop_vg = 0; int error = 0; int adopt_flag = 0; int wait_flag = 0; int retry; int rv; INIT_LIST_HEAD(&act_close); /* first action may be client add */ pthread_mutex_lock(&ls->mutex); act = NULL; add_act = NULL; if (!list_empty(&ls->actions)) { act = list_first_entry(&ls->actions, struct action, list); if (act->op == LD_OP_START) { add_act = act; list_del(&add_act->list); if (add_act->flags & LD_AF_WAIT) wait_flag = 1; if (add_act->flags & LD_AF_ADOPT) adopt_flag = 1; } } pthread_mutex_unlock(&ls->mutex); log_debug("S %s lm_add_lockspace %s wait %d adopt %d", ls->name, lm_str(ls->lm_type), wait_flag, adopt_flag); /* * The prepare step does not wait for anything and is quick; * it tells us if the parameters are valid and the lm is running. */ error = lm_prepare_lockspace(ls, add_act); if (add_act && (!wait_flag || error)) { /* send initial join result back to client */ add_act->result = error; add_client_result(add_act); add_act = NULL; } /* * The actual lockspace join can take a while. */ if (!error) { error = lm_add_lockspace(ls, add_act, adopt_flag); log_debug("S %s lm_add_lockspace done %d", ls->name, error); if (ls->sanlock_gl_enabled && gl_lsname_sanlock[0] && strcmp(ls->name, gl_lsname_sanlock)) sanlock_gl_dup = 1; if (add_act) { /* send final join result back to client */ add_act->result = error; add_client_result(add_act); } } pthread_mutex_lock(&ls->mutex); if (error) { ls->thread_stop = 1; ls->create_fail = 1; } else { ls->create_done = 1; } pthread_mutex_unlock(&ls->mutex); if (error) goto out_act; while (1) { pthread_mutex_lock(&ls->mutex); while (!ls->thread_work) { if (ls->thread_stop) { pthread_mutex_unlock(&ls->mutex); goto out_rem; } pthread_cond_wait(&ls->cond, &ls->mutex); } /* * Process all the actions queued for this lockspace. * The client thread queues actions on ls->actions. * * Here, take all the actions off of ls->actions, and: * * - For lock operations, move the act to r->actions. * These lock actions/operations processed by res_process(). * * - For non-lock operations, e.g. related to managing * the lockspace, process them in this loop. */ while (1) { if (list_empty(&ls->actions)) { ls->thread_work = 0; break; } act = list_first_entry(&ls->actions, struct action, list); if (act->op == LD_OP_KILL_VG && act->rt == LD_RT_VG) { /* Continue processing until DROP_VG arrives. 
*/ log_debug("S %s kill_vg", ls->name); ls->kill_vg = 1; list_del(&act->list); act->result = 0; add_client_result(act); continue; } if (ls->kill_vg && !process_op_during_kill(act)) { log_debug("S %s disallow op %s after kill_vg", ls->name, op_str(act->op)); list_del(&act->list); act->result = -EVGKILLED; add_client_result(act); continue; } if (act->op == LD_OP_DROP_VG && act->rt == LD_RT_VG) { /* * If leases are released after i/o errors begin * but before lvmlockctl --kill, then the VG is not * killed, but drop is still needed to clean up the * VG, so in that case there would be a drop op without * a preceding kill op. */ if (!ls->kill_vg) log_debug("S %s received drop without kill", ls->name); log_debug("S %s drop_vg", ls->name); ls->thread_work = 0; ls->thread_stop = 1; drop_vg = 1; break; } if (act->op == LD_OP_STOP) { /* thread_stop is already set */ ls->thread_work = 0; break; } if (act->op == LD_OP_FREE && act->rt == LD_RT_VG) { /* vgremove */ log_debug("S %s checking for lockspace hosts", ls->name); rv = lm_hosts(ls, 1); if (rv) { /* * Checking for hosts here in addition to after the * main loop allows vgremove to fail and be rerun * after the ls is stopped on other hosts. */ log_error("S %s lockspace hosts %d", ls->name, rv); list_del(&act->list); act->result = -EBUSY; add_client_result(act); continue; } ls->thread_work = 0; ls->thread_stop = 1; free_vg = 1; break; } if (act->op == LD_OP_BUSY && act->rt == LD_RT_VG) { log_debug("S %s checking if lockspace is busy", ls->name); rv = lm_hosts(ls, 0); if (rv) act->result = -EBUSY; else act->result = 0; list_del(&act->list); add_client_result(act); continue; } if (act->op == LD_OP_RENAME_BEFORE && act->rt == LD_RT_VG) { /* vgrename */ log_debug("S %s checking for lockspace hosts", ls->name); rv = lm_hosts(ls, 1); if (rv) { log_error("S %s lockspace hosts %d", ls->name, rv); list_del(&act->list); act->result = -EBUSY; add_client_result(act); continue; } ls->thread_work = 0; ls->thread_stop = 1; /* Do we want to check hosts again below like vgremove? */ break; } if (act->op == LD_OP_FIND_FREE_LOCK && act->rt == LD_RT_VG) { uint64_t free_offset = 0; log_debug("S %s find free lock", ls->name); rv = lm_find_free_lock(ls, &free_offset); log_debug("S %s find free lock %d offset %llu", ls->name, rv, (unsigned long long)free_offset); ls->free_lock_offset = free_offset; list_del(&act->list); act->result = rv; add_client_result(act); continue; } list_del(&act->list); /* applies to all resources */ if (act->op == LD_OP_CLOSE) { list_add(&act->list, &act_close); continue; } /* * All the other op's are for locking. * Find the specific resource that the lock op is for, * and add the act to the resource's list of lock ops. * * (This creates a new resource if the one named in * the act is not found.) */ r = find_resource_act(ls, act, (act->op == LD_OP_FREE) ? 1 : 0); if (!r) { act->result = (act->op == LD_OP_FREE) ? -ENOENT : -ENOMEM; add_client_result(act); continue; } list_add_tail(&act->list, &r->actions); log_debug("S %s R %s action %s %s", ls->name, r->name, op_str(act->op), mode_str(act->mode)); } pthread_mutex_unlock(&ls->mutex); /* * Process the lock operations that have been queued for each * resource. 
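 *
 * If any resource reports a retryable conflict, res_process() sets
 * 'retry'; the loop below then re-arms ls->thread_work and sleeps
 * LOCK_RETRY_MS (1000 ms) before processing the resources again.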
*/ retry = 0; list_for_each_entry_safe(r, r2, &ls->resources, list) res_process(ls, r, &act_close, &retry); list_for_each_entry_safe(act, safe, &act_close, list) { list_del(&act->list); free_action(act); } if (retry) { ls->thread_work = 1; usleep(LOCK_RETRY_MS * 1000); } } out_rem: log_debug("S %s stopping", ls->name); /* * For sanlock, we need to unlock any existing locks * before removing the lockspace, otherwise the sanlock * daemon will kill us when the lockspace goes away. * For dlm, we leave with force, so all locks will * automatically be dropped when we leave the lockspace, * so unlocking all before leaving could be skipped. * * Blindly dropping all existing locks must only be * allowed in emergency/force situations, otherwise it's * obviously dangerous, since the lock holders are still * operating under the assumption that they hold the lock. * drop_vg drops all existing locks, but should only * happen when the VG access has been forcibly and * successfully terminated. * * For vgremove of a sanlock vg, the vg lock will be held, * and possibly the gl lock if this vg holds the gl. * sanlock vgremove wants to unlock-rename these locks. */ log_debug("S %s clearing locks", ls->name); rv = clear_locks(ls, free_vg, drop_vg); /* * Tell any other hosts in the lockspace to leave it * before we remove it (for vgremove). We do this * before leaving the lockspace ourselves because we * need to be in the lockspace to see others. */ if (free_vg) { log_debug("S %s checking for lockspace hosts", ls->name); rv = lm_hosts(ls, 1); if (rv) log_error("S %s other lockspace hosts %d", ls->name, rv); } /* * Leave the lockspace. */ rv = lm_rem_lockspace(ls, NULL, free_vg); log_debug("S %s rem_lockspace done %d", ls->name, rv); out_act: /* * Move remaining actions to results; this will usually (always?) * be only the stop action. */ INIT_LIST_HEAD(&tmp_act); pthread_mutex_lock(&ls->mutex); list_for_each_entry_safe(act, safe, &ls->actions, list) { if (act->op == LD_OP_FREE) { act_op_free = act; act->result = 0; } else if (act->op == LD_OP_STOP) act->result = 0; else if (act->op == LD_OP_DROP_VG) act->result = 0; else if (act->op == LD_OP_RENAME_BEFORE) act->result = 0; else act->result = -ENOLS; list_del(&act->list); list_add_tail(&act->list, &tmp_act); } pthread_mutex_unlock(&ls->mutex); /* * If this freed a sanlock vg that had gl enabled, and other sanlock * vgs exist, return a flag so the command can warn that the gl has * been removed and may need to be enabled in another sanlock vg. */ if (free_vg && ls->sanlock_gl_enabled && act_op_free) { pthread_mutex_lock(&lockspaces_mutex); if (other_sanlock_vgs_exist(ls)) { act_op_free->flags |= LD_AF_WARN_GL_REMOVED; gl_vg_removed = 1; } pthread_mutex_unlock(&lockspaces_mutex); } pthread_mutex_lock(&client_mutex); list_for_each_entry_safe(act, safe, &tmp_act, list) { list_del(&act->list); list_add_tail(&act->list, &client_results); } pthread_cond_signal(&client_cond); pthread_mutex_unlock(&client_mutex); pthread_mutex_lock(&lockspaces_mutex); ls->thread_done = 1; ls->free_vg = free_vg; ls->drop_vg = drop_vg; if (ls->lm_type == LD_LM_DLM && !strcmp(ls->name, gl_lsname_dlm)) global_dlm_lockspace_exists = 0; /* * Avoid a name collision if the same lockspace is added again before * this thread is cleaned up. We just set ls->name to a "junk" value * for the short period until the struct is freed. We could make it * blank or fill it with garbage, but instead set it to REM: * to make it easier to follow the progress of freeing via log_debug.
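* For example (hypothetical name), a lockspace "lvm_vg0" is logged
* and listed as "REM:lvm_vg0" from here until worker_thread joins
* this thread and frees the struct, so a new add of "lvm_vg0" in the
* meantime cannot match this dying entry.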
*/ dm_strncpy(tmp_name, ls->name, sizeof(tmp_name)); snprintf(ls->name, sizeof(ls->name), "REM:%s", tmp_name); pthread_mutex_unlock(&lockspaces_mutex); /* worker_thread will join this thread, and free the ls */ pthread_mutex_lock(&worker_mutex); worker_wake = 1; pthread_cond_signal(&worker_cond); pthread_mutex_unlock(&worker_mutex); return NULL; } int lockspaces_empty(void) { int rv; pthread_mutex_lock(&lockspaces_mutex); rv = list_empty(&lockspaces); pthread_mutex_unlock(&lockspaces_mutex); return rv; } /* * lockspaces_mutex is locked * * When duplicate sanlock global locks have been seen, * this function has a secondary job of counting the * number of lockspaces that exist with the gl enabled, * with the side effect of setting sanlock_gl_dup back to * zero when the duplicates have been removed/disabled. */ static struct lockspace *find_lockspace_name(char *ls_name) { struct lockspace *ls_found = NULL; struct lockspace *ls; int gl_count = 0; list_for_each_entry(ls, &lockspaces, list) { if (!strcmp(ls->name, ls_name)) ls_found = ls; if (!sanlock_gl_dup && ls_found) return ls_found; if (sanlock_gl_dup && ls->sanlock_gl_enabled) gl_count++; } /* this is the side effect we want from this function */ if (sanlock_gl_dup && gl_count < 2) sanlock_gl_dup = 0; return ls_found; } /* * If lvm_ is longer than max lockspace name (64) we just ignore the * extra characters. For sanlock vgs, the name is shortened further to 48 in * the sanlock code. */ static int vg_ls_name(const char *vg_name, char *ls_name) { if (strlen(vg_name) + 4 > MAX_NAME) { log_error("vg name too long %s", vg_name); return -1; } snprintf(ls_name, MAX_NAME, "%s%s", LVM_LS_PREFIX, vg_name); return 0; } /* FIXME: add mutex for gl_lsname_ ? */ static void gl_ls_name(char *ls_name) { if (gl_use_dlm) memcpy(ls_name, gl_lsname_dlm, MAX_NAME); else if (gl_use_sanlock) memcpy(ls_name, gl_lsname_sanlock, MAX_NAME); else memset(ls_name, 0, MAX_NAME); } /* * When this function returns an error, the caller needs to deal * with act (in the cases where act exists). */ static int add_lockspace_thread(const char *ls_name, const char *vg_name, const char *vg_uuid, int lm_type, const char *vg_args, struct action *act) { struct lockspace *ls, *ls2; struct resource *r; int rv; log_debug("add_lockspace_thread %s %s version %u", lm_str(lm_type), ls_name, act ? act->version : 0); if (!(ls = alloc_lockspace())) return -ENOMEM; strncpy(ls->name, ls_name, MAX_NAME); ls->lm_type = lm_type; if (act) ls->start_client_id = act->client_id; if (vg_uuid) strncpy(ls->vg_uuid, vg_uuid, 64); if (vg_name) strncpy(ls->vg_name, vg_name, MAX_NAME); if (vg_args) strncpy(ls->vg_args, vg_args, MAX_ARGS); if (act) ls->host_id = act->host_id; if (!(r = alloc_resource())) { free(ls); return -ENOMEM; } r->type = LD_RT_VG; r->mode = LD_LK_UN; r->use_vb = 1; strncpy(r->name, R_NAME_VG, MAX_NAME); list_add_tail(&r->list, &ls->resources); pthread_mutex_lock(&lockspaces_mutex); ls2 = find_lockspace_name(ls->name); if (ls2) { if (ls2->thread_stop) { log_debug("add_lockspace_thread %s exists and stopping", ls->name); rv = -EAGAIN; } else { log_debug("add_lockspace_thread %s exists", ls->name); rv = -EEXIST; } pthread_mutex_unlock(&lockspaces_mutex); free_resource(r); free(ls); return rv; } /* * act will be null when this lockspace is added automatically/internally * and not by an explicit client action that wants a result. 
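* For example, the dlm global lockspace is added this way below:
* add_dlm_global_lockspace(NULL) passes a null act, so no client
* result is queued and any failure is only logged.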
*/ if (act) list_add(&act->list, &ls->actions); if (ls->lm_type == LD_LM_DLM && !strcmp(ls->name, gl_lsname_dlm)) global_dlm_lockspace_exists = 1; list_add_tail(&ls->list, &lockspaces); pthread_mutex_unlock(&lockspaces_mutex); rv = pthread_create(&ls->thread, NULL, lockspace_thread_main, ls); if (rv < 0) { log_error("add_lockspace_thread %s pthread error %d %d", ls->name, rv, errno); pthread_mutex_lock(&lockspaces_mutex); list_del(&ls->list); pthread_mutex_unlock(&lockspaces_mutex); free_resource(r); free(ls); return rv; } return 0; } /* * There is no add_sanlock_global_lockspace or * rem_sanlock_global_lockspace because with sanlock, * the global lockspace is one of the vg lockspaces. */ static int add_dlm_global_lockspace(struct action *act) { int rv; if (global_dlm_lockspace_exists) return 0; /* * FIXME: if the dlm global lockspace is started without a global * lock request, insert an internal gl sh lock request? */ rv = add_lockspace_thread(gl_lsname_dlm, NULL, NULL, LD_LM_DLM, NULL, act); if (rv < 0) log_debug("add_dlm_global_lockspace add_lockspace_thread %d", rv); /* * EAGAIN may be returned for a short period because * global_dlm_lockspace_exists is set to 0 before the * ls is removed from the lockspaces list by the * worker_thread. */ return rv; } /* * If dlm gl lockspace is the only one left, then stop it. * This is not used for an explicit rem_lockspace action from * the client, only for auto remove. */ static int rem_dlm_global_lockspace(void) { struct lockspace *ls, *ls_gl = NULL; int others = 0; int rv = 0; pthread_mutex_lock(&lockspaces_mutex); list_for_each_entry(ls, &lockspaces, list) { if (!strcmp(ls->name, gl_lsname_dlm)) { ls_gl = ls; continue; } if (ls->thread_stop) continue; others++; break; } if (others) { rv = -EAGAIN; goto out; } if (!ls_gl) { rv = -ENOENT; goto out; } ls = ls_gl; pthread_mutex_lock(&ls->mutex); ls->thread_stop = 1; ls->thread_work = 1; pthread_cond_signal(&ls->cond); pthread_mutex_unlock(&ls->mutex); rv = 0; out: pthread_mutex_unlock(&lockspaces_mutex); return rv; } /* * When the first dlm lockspace is added for a vg, automatically add a separate * dlm lockspace for the global lock. * * For sanlock, a separate lockspace is not used for the global lock, but the * gl lock lives in a vg lockspace, (although it's recommended to create a * special vg dedicated to holding the gl). */ static int add_lockspace(struct action *act) { char ls_name[MAX_NAME+1]; int rv; memset(ls_name, 0, sizeof(ls_name)); /* * FIXME: I don't think this is used any more. * Remove it, or add the ability to start the global * dlm lockspace using lvmlockctl? */ if (act->rt == LD_RT_GL) { if (gl_use_dlm) { rv = add_dlm_global_lockspace(act); return rv; } else { return -EINVAL; } } if (act->rt == LD_RT_VG) { if (gl_use_dlm) add_dlm_global_lockspace(NULL); vg_ls_name(act->vg_name, ls_name); rv = add_lockspace_thread(ls_name, act->vg_name, act->vg_uuid, act->lm_type, act->vg_args, act); if (rv) log_debug("add_lockspace %s add_lockspace_thread %d", ls_name, rv); return rv; } log_error("add_lockspace bad type %d", act->rt); return -1; } /* * vgchange --lock-stop vgname will lock the vg ex, then send a stop, * so we exect to find the ex vg lock held here, and will automatically * unlock it when stopping. * * Should we attempt to stop the lockspace containing the gl last? 
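* As implemented below, rem_lockspace() refuses to stop a lockspace
* that still holds LV locks unless LD_AF_FORCE is set (-EBUSY),
* returns -ENOLS if the lockspace is unknown and -ESTALE if it is
* already stopping, and removing the last dlm VG lockspace also
* triggers auto-removal of the dlm global lockspace.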
*/ static int rem_lockspace(struct action *act) { struct lockspace *ls; char ls_name[MAX_NAME+1]; int force = act->flags & LD_AF_FORCE; int rt = act->rt; if (act->rt == LD_RT_GL && act->lm_type != LD_LM_DLM) return -EINVAL; memset(ls_name, 0, sizeof(ls_name)); if (act->rt == LD_RT_GL) gl_ls_name(ls_name); else vg_ls_name(act->vg_name, ls_name); pthread_mutex_lock(&lockspaces_mutex); ls = find_lockspace_name(ls_name); if (!ls) { pthread_mutex_unlock(&lockspaces_mutex); return -ENOLS; } pthread_mutex_lock(&ls->mutex); if (ls->thread_stop) { pthread_mutex_unlock(&ls->mutex); pthread_mutex_unlock(&lockspaces_mutex); return -ESTALE; } if (!force && for_each_lock(ls, LOCKS_EXIST_LV)) { pthread_mutex_unlock(&ls->mutex); pthread_mutex_unlock(&lockspaces_mutex); return -EBUSY; } ls->thread_work = 1; ls->thread_stop = 1; list_add_tail(&act->list, &ls->actions); pthread_cond_signal(&ls->cond); pthread_mutex_unlock(&ls->mutex); pthread_mutex_unlock(&lockspaces_mutex); /* * The dlm global lockspace was automatically added when * the first dlm vg lockspace was added, now reverse that * by automatically removing the dlm global lockspace when * the last dlm vg lockspace is removed. */ if (rt == LD_RT_VG && gl_use_dlm) rem_dlm_global_lockspace(); return 0; } /* * count how many lockspaces started by this client are still starting; * the client will use this to wait for all its start operations to finish * (START_WAIT). */ static int count_lockspace_starting(uint32_t client_id) { struct lockspace *ls; int count = 0; int done = 0; int fail = 0; pthread_mutex_lock(&lockspaces_mutex); list_for_each_entry(ls, &lockspaces, list) { if (ls->start_client_id != client_id) continue; if (!ls->create_done && !ls->create_fail) { count++; continue; } if (ls->create_done) done++; if (ls->create_fail) fail++; } pthread_mutex_unlock(&lockspaces_mutex); log_debug("count_lockspace_starting client %u count %d done %d fail %d", client_id, count, done, fail); return count; } /* * Loop through all lockspaces, and: * - if do_stop is set, stop any that are not stopped * - if do_free is set, join any that are done stopping (and free ls) * * do_stop will not stop an ls with lv locks unless force is set. * * This function does not block or wait for anything. * * do_stop (no do_free): * returns count of lockspaces that need stop (have locks and no force) * * do_free (no do_stop): * returns count of lockspaces that are stopped and need freeing * * do_stop and do_free: * returns sum of the previous two */ static int for_each_lockspace(int do_stop, int do_free, int do_force) { struct lockspace *ls, *safe; int need_stop = 0; int need_free = 0; int stop_count = 0; int free_count = 0; int done; int stop; int perrno; pthread_mutex_lock(&lockspaces_mutex); if (do_stop) { list_for_each_entry(ls, &lockspaces, list) { pthread_mutex_lock(&ls->mutex); if (ls->thread_stop) { pthread_mutex_unlock(&ls->mutex); continue; } if (!do_force && for_each_lock(ls, LOCKS_EXIST_ANY)) { need_stop++; } else { ls->thread_work = 1; ls->thread_stop = 1; pthread_cond_signal(&ls->cond); stop_count++; } pthread_mutex_unlock(&ls->mutex); } } if (do_free) { list_for_each_entry_safe(ls, safe, &lockspaces, list) { pthread_mutex_lock(&ls->mutex); done = ls->thread_done; stop = ls->thread_stop; pthread_mutex_unlock(&ls->mutex); /* This ls has locks and force is not set. */ if (!stop) continue; /* * Once thread_done is set, we know that the lockspace_thread * will not be using/touching the ls struct. Any other * thread touches the ls struct under lockspaces_mutex. 
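* Once that is the case, the free path below joins the thread,
* unlinks the ls from the lockspaces list and frees it; when the
* list becomes empty, gl_use_dlm and gl_use_sanlock are cleared
* again unless the gl type was configured statically.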
*/ if (done) { if ((perrno = pthread_join(ls->thread, NULL))) log_error("pthread_join error %d", perrno); list_del(&ls->list); /* FIXME: will free_vg ever not be set? */ log_debug("free ls %s", ls->name); if (ls->free_vg) { /* In future we may need to free ls->actions here */ free_ls_resources(ls); free(ls); free_count++; } } else { need_free++; } } } if (list_empty(&lockspaces)) { if (!gl_type_static) { gl_use_dlm = 0; gl_use_sanlock = 0; } } pthread_mutex_unlock(&lockspaces_mutex); if (stop_count || free_count || need_stop || need_free) { log_debug("for_each_lockspace do_stop %d do_free %d " "stop_count %d free_count %d need_stop %d need_free %d", do_stop, do_free, stop_count, free_count, need_stop, need_free); } return need_stop + need_free; } /* * This is only called when the daemon is exiting so the sleep/retry * loop doesn't have any adverse impact. */ static void for_each_lockspace_retry(int do_stop, int do_free, int do_force) { int count; while (1) { count = for_each_lockspace(do_stop, do_free, do_force); if (!count) break; log_debug("for_each_lockspace_retry remaining %d", count); sleep(1); } } static int work_init_vg(struct action *act) { struct lockspace *ls; char ls_name[MAX_NAME+1]; int rv = 0; memset(ls_name, 0, sizeof(ls_name)); vg_ls_name(act->vg_name, ls_name); /* * The max dlm ls name is 64 and the max sanlock ls name is 48. So, * after the "lvm_" prefix, only the first 60/44 characters of the VG * name are used for the lockspace name. This will cause a collision * in the lock manager if two different VG names have the first 60/44 * chars in common. At the time of vgcreate (here), check if any other * VG's are known that would collide. If the collision is not detected * at vgcreate time, it will be detected at start time and add_lockspace * will fail for the second of the two matching ls names. 
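* For example (hypothetical names), two sanlock VGs whose names share
* their first 44 characters would shorten to the same lockspace name,
* so the second vgcreate is rejected here with -EEXIST instead of
* colliding later in the lock manager.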
*/ pthread_mutex_lock(&lockspaces_mutex); list_for_each_entry(ls, &lockspaces, list) { if ((ls->lm_type == LD_LM_SANLOCK) && !strncmp(ls->name, ls_name, 48)) { rv = -EEXIST; break; } if ((ls->lm_type == LD_LM_DLM) && !strcmp(ls->name, ls_name)) { rv = -EEXIST; break; } } pthread_mutex_unlock(&lockspaces_mutex); if (rv == -EEXIST) { log_error("Existing lockspace name %s matches new %s VG names %s %s", ls->name, ls_name, ls->vg_name, act->vg_name); return rv; } if (act->lm_type == LD_LM_SANLOCK) rv = lm_init_vg_sanlock(ls_name, act->vg_name, act->flags, act->vg_args); else if (act->lm_type == LD_LM_DLM) rv = lm_init_vg_dlm(ls_name, act->vg_name, act->flags, act->vg_args); else rv = -EINVAL; return rv; } static int work_rename_vg(struct action *act) { char ls_name[MAX_NAME+1]; int rv = 0; memset(ls_name, 0, sizeof(ls_name)); vg_ls_name(act->vg_name, ls_name); if (act->lm_type == LD_LM_SANLOCK) rv = lm_rename_vg_sanlock(ls_name, act->vg_name, act->flags, act->vg_args); else if (act->lm_type == LD_LM_DLM) return 0; else rv = -EINVAL; return rv; } static void work_test_gl(void) { struct lockspace *ls; int is_enabled = 0; pthread_mutex_lock(&lockspaces_mutex); list_for_each_entry(ls, &lockspaces, list) { if (ls->lm_type != LD_LM_SANLOCK) continue; pthread_mutex_lock(&ls->mutex); if (ls->create_done && !ls->thread_stop) { is_enabled = lm_gl_is_enabled(ls); if (is_enabled) { log_debug("S %s worker found gl_is_enabled", ls->name); strncpy(gl_lsname_sanlock, ls->name, MAX_NAME); } } pthread_mutex_unlock(&ls->mutex); if (is_enabled) break; } if (!is_enabled) log_debug("worker found no gl_is_enabled"); pthread_mutex_unlock(&lockspaces_mutex); } static int work_init_lv(struct action *act) { struct lockspace *ls; char ls_name[MAX_NAME+1]; char vg_args[MAX_ARGS+1]; char lv_args[MAX_ARGS+1]; uint64_t free_offset = 0; int lm_type = 0; int rv = 0; memset(ls_name, 0, sizeof(ls_name)); memset(vg_args, 0, sizeof(vg_args)); memset(lv_args, 0, sizeof(lv_args)); vg_ls_name(act->vg_name, ls_name); pthread_mutex_lock(&lockspaces_mutex); ls = find_lockspace_name(ls_name); if (ls) { lm_type = ls->lm_type; memcpy(vg_args, ls->vg_args, MAX_ARGS); free_offset = ls->free_lock_offset; } pthread_mutex_unlock(&lockspaces_mutex); if (!ls) { lm_type = act->lm_type; memcpy(vg_args, act->vg_args, MAX_ARGS); } if (act->lm_type != lm_type) { log_error("init_lv ls_name %s wrong lm_type %d %d", ls_name, act->lm_type, lm_type); return -EINVAL; } if (lm_type == LD_LM_SANLOCK) { rv = lm_init_lv_sanlock(ls_name, act->vg_name, act->lv_uuid, vg_args, lv_args, free_offset); memcpy(act->lv_args, lv_args, MAX_ARGS); return rv; } else if (act->lm_type == LD_LM_DLM) { return 0; } else { log_error("init_lv ls_name %s bad lm_type %d", ls_name, act->lm_type); return -EINVAL; } } /* * When an action is queued for the worker_thread, it is processed right away. * After processing, some actions need to be retried again in a short while. * These actions are put on the delayed_list, and the worker_thread will * process these delayed actions again in SHORT_DELAY_PERIOD. 
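* In this version only LD_OP_START_WAIT and LD_OP_STOP_ALL are
* re-queued on delayed_list; while that list is non-empty the
* condition wait timeout drops from LONG_DELAY_PERIOD (60 seconds)
* to one second.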
*/ #define SHORT_DELAY_PERIOD 2 #define LONG_DELAY_PERIOD 60 static void *worker_thread_main(void *arg_in) { struct list_head delayed_list; struct timespec ts; struct action *act, *safe; uint64_t last_delayed_time = 0; int delay_sec = LONG_DELAY_PERIOD; int rv; INIT_LIST_HEAD(&delayed_list); while (1) { pthread_mutex_lock(&worker_mutex); if (clock_gettime(CLOCK_REALTIME, &ts)) { log_error("clock_gettime failed."); ts.tv_sec = ts.tv_nsec = 0; } ts.tv_sec += delay_sec; rv = 0; act = NULL; while (list_empty(&worker_list) && !worker_stop && !worker_wake && !rv) { rv = pthread_cond_timedwait(&worker_cond, &worker_mutex, &ts); } worker_wake = 0; if (worker_stop) { pthread_mutex_unlock(&worker_mutex); goto out; } if (!list_empty(&worker_list)) { act = list_first_entry(&worker_list, struct action, list); list_del(&act->list); } pthread_mutex_unlock(&worker_mutex); /* * Do new work actions before processing delayed work actions. */ if (!act) goto delayed_work; if (act->op == LD_OP_RUNNING_LM) { int run_sanlock = lm_is_running_sanlock(); int run_dlm = lm_is_running_dlm(); if (daemon_test) { run_sanlock = gl_use_sanlock; run_dlm = gl_use_dlm; } if (run_sanlock && run_dlm) act->result = -EXFULL; else if (!run_sanlock && !run_dlm) act->result = -ENOLCK; else if (run_sanlock) act->result = LD_LM_SANLOCK; else if (run_dlm) act->result = LD_LM_DLM; add_client_result(act); } else if ((act->op == LD_OP_LOCK) && (act->flags & LD_AF_SEARCH_LS)) { /* * worker_thread used as a helper to search existing * sanlock vgs for an enabled gl. */ log_debug("work search for gl"); work_test_gl(); /* try again to find a gl lockspace for this act */ rv = add_lock_action(act); if (rv < 0) { act->result = rv; add_client_result(act); } } else if ((act->op == LD_OP_INIT) && (act->rt == LD_RT_VG)) { log_debug("work init_vg %s", act->vg_name); act->result = work_init_vg(act); add_client_result(act); } else if ((act->op == LD_OP_INIT) && (act->rt == LD_RT_LV)) { log_debug("work init_lv %s/%s uuid %s", act->vg_name, act->lv_name, act->lv_uuid); act->result = work_init_lv(act); add_client_result(act); } else if ((act->op == LD_OP_RENAME_FINAL) && (act->rt == LD_RT_VG)) { log_debug("work rename_vg %s", act->vg_name); act->result = work_rename_vg(act); add_client_result(act); } else if (act->op == LD_OP_START_WAIT) { act->result = count_lockspace_starting(act->client_id); if (!act->result) add_client_result(act); else list_add(&act->list, &delayed_list); } else if (act->op == LD_OP_STOP_ALL) { act->result = for_each_lockspace(DO_STOP, DO_FREE, (act->flags & LD_AF_FORCE) ? DO_FORCE : NO_FORCE); if (!act->result || !(act->flags & LD_AF_WAIT)) add_client_result(act); else list_add(&act->list, &delayed_list); } else { log_error("work unknown op %d", act->op); act->result = -EINVAL; add_client_result(act); } delayed_work: /* * We may want to track retry times per action so that * we can delay different actions by different amounts. */ if (monotime() - last_delayed_time < SHORT_DELAY_PERIOD) { delay_sec = 1; continue; } last_delayed_time = monotime(); list_for_each_entry_safe(act, safe, &delayed_list, list) { if (act->op == LD_OP_START_WAIT) { log_debug("work delayed start_wait for client %u", act->client_id); act->result = count_lockspace_starting(act->client_id); if (!act->result) { list_del(&act->list); add_client_result(act); } } else if (act->op == LD_OP_STOP_ALL) { log_debug("work delayed stop_all"); act->result = for_each_lockspace(DO_STOP, DO_FREE, (act->flags & LD_AF_FORCE) ? 
DO_FORCE : NO_FORCE); if (!act->result) { list_del(&act->list); act->result = 0; add_client_result(act); } } } /* * This is not explicitly queued work, and not delayed work, * but lockspace thread cleanup that's needed when a * lockspace has been stopped/removed or failed to start. */ for_each_lockspace(NO_STOP, DO_FREE, NO_FORCE); if (list_empty(&delayed_list)) delay_sec = LONG_DELAY_PERIOD; else delay_sec = 1; } out: list_for_each_entry_safe(act, safe, &delayed_list, list) { list_del(&act->list); free_action(act); } pthread_mutex_lock(&worker_mutex); list_for_each_entry_safe(act, safe, &worker_list, list) { list_del(&act->list); free_action(act); } pthread_mutex_unlock(&worker_mutex); return NULL; } static int setup_worker_thread(void) { int rv; INIT_LIST_HEAD(&worker_list); pthread_mutex_init(&worker_mutex, NULL); pthread_cond_init(&worker_cond, NULL); rv = pthread_create(&worker_thread, NULL, worker_thread_main, NULL); if (rv) return -1; return 0; } static void close_worker_thread(void) { int perrno; pthread_mutex_lock(&worker_mutex); worker_stop = 1; pthread_cond_signal(&worker_cond); pthread_mutex_unlock(&worker_mutex); if ((perrno = pthread_join(worker_thread, NULL))) log_error("pthread_join worker_thread error %d", perrno); } /* client_mutex is locked */ static struct client *find_client_work(void) { struct client *cl; list_for_each_entry(cl, &client_list, list) { if (cl->recv || cl->dead) return cl; } return NULL; } /* client_mutex is locked */ static struct client *find_client_id(uint32_t id) { struct client *cl; list_for_each_entry(cl, &client_list, list) { if (cl->id == id) return cl; } return NULL; } /* client_mutex is locked */ static struct client *find_client_pi(int pi) { struct client *cl; list_for_each_entry(cl, &client_list, list) { if (cl->pi == pi) return cl; } return NULL; } /* * wake up poll() because we have added an fd * back into pollfd and poll() needs to be restarted * to recognize it. */ static void restart_poll(void) { int rv; rv = write(restart_fds[1], "w", 1); if (!rv || rv < 0) log_debug("restart_poll write %d", errno); } /* poll will take requests from client again, cl->mutex must be held */ static void client_resume(struct client *cl) { if (cl->dead) return; if (!cl->poll_ignore || cl->fd == -1 || cl->pi == -1) { /* shouldn't happen */ log_error("client_resume %u bad state ig %d fd %d pi %d", cl->id, cl->poll_ignore, cl->fd, cl->pi); return; } pthread_mutex_lock(&pollfd_mutex); if (pollfd[cl->pi].fd != POLL_FD_IGNORE) { log_error("client_resume %u pi %d fd %d not IGNORE", cl->id, cl->pi, cl->fd); } pollfd[cl->pi].fd = cl->fd; pollfd[cl->pi].events = POLLIN; pthread_mutex_unlock(&pollfd_mutex); restart_poll(); } /* called from client_thread, cl->mutex is held */ static int client_send_result(struct client *cl, struct action *act) { response res; char result_flags[128]; int dump_len = 0; int dump_fd = -1; int rv = 0; if (cl->dead) { log_debug("send cl %u skip dead", cl->id); return -1; } memset(result_flags, 0, sizeof(result_flags)); buffer_init(&res.buffer); /* * EUNATCH is returned when the global lock existed, * but had been disabled when we tried to lock it, * so we removed it, and no longer have a gl to lock. */ if (act->result == -EUNATCH) act->result = -ENOLS; /* * init_vg with dlm|sanlock returns vg_args * init_lv with sanlock returns lv_args */ if (act->result == -ENOLS) { /* * The lockspace could not be found, in which case * the caller may want to know if any lockspaces exist * or if lockspaces exist, but not one with the global lock. 
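* (This is reported to the caller through result_flags below, e.g.
* "NO_LOCKSPACES" when no lockspaces exist at all, "NO_GL_LS" when
* lockspaces exist but none is known to hold the global lock, and
* "NO_LM" when no lock manager appears to be running.)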
* Given this detail, it may be able to procede without * the lock. */ pthread_mutex_lock(&lockspaces_mutex); if (list_empty(&lockspaces)) strcat(result_flags, "NO_LOCKSPACES,"); pthread_mutex_unlock(&lockspaces_mutex); if (gl_use_sanlock) { if (!gl_lsname_sanlock[0]) strcat(result_flags, "NO_GL_LS,"); } else if (gl_use_dlm) { if (!gl_lsname_dlm[0]) strcat(result_flags, "NO_GL_LS,"); } else { int found_lm = 0; if (lm_support_dlm() && lm_is_running_dlm()) found_lm++; if (lm_support_sanlock() && lm_is_running_sanlock()) found_lm++; if (!found_lm) strcat(result_flags, "NO_GL_LS,NO_LM"); else strcat(result_flags, "NO_GL_LS"); } } if (act->flags & LD_AF_DUP_GL_LS) strcat(result_flags, "DUP_GL_LS,"); if ((act->flags & LD_AF_WARN_GL_REMOVED) || gl_vg_removed) strcat(result_flags, "WARN_GL_REMOVED,"); if (act->op == LD_OP_INIT) { /* * init is a special case where lock args need * to be passed back to the client. */ const char *vg_args = "none"; const char *lv_args = "none"; if (act->vg_args[0]) vg_args = act->vg_args; if (act->lv_args[0]) lv_args = act->lv_args; log_debug("send %s[%d] cl %u %s %s rv %d vg_args %s lv_args %s", cl->name[0] ? cl->name : "client", cl->pid, cl->id, op_str(act->op), rt_str(act->rt), act->result, vg_args ? vg_args : "", lv_args ? lv_args : ""); res = daemon_reply_simple("OK", "op = " FMTd64, (int64_t)act->op, "op_result = " FMTd64, (int64_t) act->result, "lm_result = " FMTd64, (int64_t) act->lm_rv, "vg_lock_args = %s", vg_args, "lv_lock_args = %s", lv_args, "result_flags = %s", result_flags[0] ? result_flags : "none", NULL); } else if (act->op == LD_OP_DUMP_LOG || act->op == LD_OP_DUMP_INFO) { /* * lvmlockctl creates the unix socket then asks us to write to it. * FIXME: move processing this to a new dedicated query thread to * avoid having a large data dump interfere with normal operation * of the client thread? */ dump_fd = setup_dump_socket(); if (dump_fd < 0) act->result = dump_fd; else if (act->op == LD_OP_DUMP_LOG) act->result = dump_log(&dump_len); else if (act->op == LD_OP_DUMP_INFO) act->result = dump_info(&dump_len); else act->result = -EINVAL; log_debug("send %s[%d] cl %u dump result %d dump_len %d", cl->name[0] ? cl->name : "client", cl->pid, cl->id, act->result, dump_len); res = daemon_reply_simple("OK", "result = " FMTd64, (int64_t) act->result, "dump_len = " FMTd64, (int64_t) dump_len, NULL); } else { /* * A normal reply. */ log_debug("send %s[%d] cl %u %s %s rv %d %s %s", cl->name[0] ? cl->name : "client", cl->pid, cl->id, op_str(act->op), rt_str(act->rt), act->result, (act->result == -ENOLS) ? "ENOLS" : "", result_flags); res = daemon_reply_simple("OK", "op = " FMTd64, (int64_t) act->op, "lock_type = %s", lm_str(act->lm_type), "op_result = " FMTd64, (int64_t) act->result, "lm_result = " FMTd64, (int64_t) act->lm_rv, "result_flags = %s", result_flags[0] ? result_flags : "none", NULL); } if (!buffer_write(cl->fd, &res.buffer)) { rv = -errno; if (rv >= 0) rv = -1; log_debug("send cl %u fd %d error %d", cl->id, cl->fd, rv); } buffer_destroy(&res.buffer); client_resume(cl); if (dump_fd >= 0) { /* To avoid deadlock, send data here after the reply. */ send_dump_buf(dump_fd, dump_len); if (close(dump_fd)) log_error("failed to close dump socket %d", dump_fd); } return rv; } /* called from client_thread */ static void client_purge(struct client *cl) { struct lockspace *ls; struct action *act; /* * If the client made no lock requests, there can be * no locks to release for it. 
*/ if (!cl->lock_ops) return; pthread_mutex_lock(&lockspaces_mutex); list_for_each_entry(ls, &lockspaces, list) { if (!(act = alloc_action())) continue; act->op = LD_OP_CLOSE; act->client_id = cl->id; pthread_mutex_lock(&ls->mutex); if (!ls->thread_stop) { list_add_tail(&act->list, &ls->actions); ls->thread_work = 1; pthread_cond_signal(&ls->cond); } else { free_action(act); } pthread_mutex_unlock(&ls->mutex); } pthread_mutex_unlock(&lockspaces_mutex); } static int add_lock_action(struct action *act) { struct lockspace *ls = NULL; char ls_name[MAX_NAME+1]; memset(ls_name, 0, sizeof(ls_name)); /* * Determine which lockspace this action is for, and set ls_name. */ if (act->rt == LD_RT_GL) { /* Global lock is requested */ if (gl_use_sanlock && (act->op == LD_OP_ENABLE || act->op == LD_OP_DISABLE)) { vg_ls_name(act->vg_name, ls_name); } else { if (!gl_use_dlm && !gl_use_sanlock) { if (lm_is_running_dlm()) gl_use_dlm = 1; else if (lm_is_running_sanlock()) gl_use_sanlock = 1; } gl_ls_name(ls_name); } } else { /* VG lock is requested */ vg_ls_name(act->vg_name, ls_name); } retry: pthread_mutex_lock(&lockspaces_mutex); if (ls_name[0]) ls = find_lockspace_name(ls_name); if (!ls) { pthread_mutex_unlock(&lockspaces_mutex); if (act->op == LD_OP_UPDATE && act->rt == LD_RT_VG) { log_debug("lockspace \"%s\" not found ignored for vg update", ls_name); return -ENOLS; } else if (act->flags & LD_AF_SEARCH_LS) { /* * Fail if we've already tried searching for the lockspace. */ log_debug("lockspace \"%s\" not found after search", ls_name); return -ENOLS; } else if (act->op == LD_OP_LOCK && act->rt == LD_RT_GL && gl_use_sanlock) { /* * The sanlock global lock may have been enabled in an existing VG, * so search existing VGs for an enabled global lock. */ log_debug("lockspace \"%s\" not found for sanlock gl, searching...", ls_name); act->flags |= LD_AF_SEARCH_LS; add_work_action(act); return 0; } else if (act->op == LD_OP_LOCK && act->rt == LD_RT_GL && act->mode != LD_LK_UN && gl_use_dlm) { /* * Automatically start the dlm global lockspace when * a command tries to acquire the global lock. 
*/ log_debug("lockspace \"%s\" not found for dlm gl, adding...", ls_name); act->flags |= LD_AF_SEARCH_LS; act->flags |= LD_AF_WAIT_STARTING; add_dlm_global_lockspace(NULL); goto retry; } else if (act->op == LD_OP_LOCK && act->mode == LD_LK_UN) { log_debug("lockspace \"%s\" not found for unlock ignored", ls_name); return -ENOLS; } else { log_debug("lockspace \"%s\" not found", ls_name); return -ENOLS; } } if (act->lm_type == LD_LM_NONE) { /* return to the command the type we are using */ act->lm_type = ls->lm_type; } else if (act->lm_type != ls->lm_type) { /* should not happen */ log_error("S %s add_lock_action bad lm_type %d ls %d", ls_name, act->lm_type, ls->lm_type); pthread_mutex_unlock(&lockspaces_mutex); return -EINVAL; } pthread_mutex_lock(&ls->mutex); if (ls->thread_stop) { pthread_mutex_unlock(&ls->mutex); pthread_mutex_unlock(&lockspaces_mutex); log_error("lockspace is stopping %s", ls_name); return -ESTALE; } if (!ls->create_fail && !ls->create_done && !(act->flags & LD_AF_WAIT_STARTING)) { pthread_mutex_unlock(&ls->mutex); pthread_mutex_unlock(&lockspaces_mutex); log_debug("lockspace is starting %s", ls_name); return -ESTARTING; } list_add_tail(&act->list, &ls->actions); ls->thread_work = 1; pthread_cond_signal(&ls->cond); pthread_mutex_unlock(&ls->mutex); pthread_mutex_unlock(&lockspaces_mutex); /* lockspace_thread_main / res_process take it from here */ return 0; } static int str_to_op_rt(const char *req_name, int *op, int *rt) { if (!req_name) goto out; if (!strcmp(req_name, "hello")) { *op = LD_OP_HELLO; *rt = 0; return 0; } if (!strcmp(req_name, "quit")) { *op = LD_OP_QUIT; *rt = 0; return 0; } if (!strcmp(req_name, "info")) { *op = LD_OP_DUMP_INFO; *rt = 0; return 0; } if (!strcmp(req_name, "dump")) { *op = LD_OP_DUMP_LOG; *rt = 0; return 0; } if (!strcmp(req_name, "init_vg")) { *op = LD_OP_INIT; *rt = LD_RT_VG; return 0; } if (!strcmp(req_name, "init_lv")) { *op = LD_OP_INIT; *rt = LD_RT_LV; return 0; } if (!strcmp(req_name, "free_vg")) { *op = LD_OP_FREE; *rt = LD_RT_VG; return 0; } if (!strcmp(req_name, "busy_vg")) { *op = LD_OP_BUSY; *rt = LD_RT_VG; return 0; } if (!strcmp(req_name, "free_lv")) { *op = LD_OP_FREE; *rt = LD_RT_LV; return 0; } if (!strcmp(req_name, "start_vg")) { *op = LD_OP_START; *rt = LD_RT_VG; return 0; } if (!strcmp(req_name, "stop_vg")) { *op = LD_OP_STOP; *rt = LD_RT_VG; return 0; } if (!strcmp(req_name, "start_wait")) { *op = LD_OP_START_WAIT; *rt = 0; return 0; } if (!strcmp(req_name, "stop_all")) { *op = LD_OP_STOP_ALL; *rt = 0; return 0; } if (!strcmp(req_name, "lock_gl")) { *op = LD_OP_LOCK; *rt = LD_RT_GL; return 0; } if (!strcmp(req_name, "lock_vg")) { *op = LD_OP_LOCK; *rt = LD_RT_VG; return 0; } if (!strcmp(req_name, "lock_lv")) { *op = LD_OP_LOCK; *rt = LD_RT_LV; return 0; } if (!strcmp(req_name, "vg_update")) { *op = LD_OP_UPDATE; *rt = LD_RT_VG; return 0; } if (!strcmp(req_name, "enable_gl")) { *op = LD_OP_ENABLE; *rt = LD_RT_GL; return 0; } if (!strcmp(req_name, "disable_gl")) { *op = LD_OP_DISABLE; *rt = LD_RT_GL; return 0; } if (!strcmp(req_name, "rename_vg_before")) { *op = LD_OP_RENAME_BEFORE; *rt = LD_RT_VG; return 0; } if (!strcmp(req_name, "rename_vg_final")) { *op = LD_OP_RENAME_FINAL; *rt = LD_RT_VG; return 0; } if (!strcmp(req_name, "running_lm")) { *op = LD_OP_RUNNING_LM; *rt = 0; return 0; } if (!strcmp(req_name, "find_free_lock")) { *op = LD_OP_FIND_FREE_LOCK; *rt = LD_RT_VG; return 0; } if (!strcmp(req_name, "kill_vg")) { *op = LD_OP_KILL_VG; *rt = LD_RT_VG; return 0; } if (!strcmp(req_name, "drop_vg")) { *op = 
LD_OP_DROP_VG; *rt = LD_RT_VG; return 0; } out: return -1; } static int str_to_mode(const char *str) { if (!str) goto out; if (!strcmp(str, "un")) return LD_LK_UN; if (!strcmp(str, "nl")) return LD_LK_NL; if (!strcmp(str, "sh")) return LD_LK_SH; if (!strcmp(str, "ex")) return LD_LK_EX; out: return LD_LK_IV; } static int str_to_lm(const char *str) { if (!str || !strcmp(str, "none")) return LD_LM_NONE; if (!strcmp(str, "sanlock")) return LD_LM_SANLOCK; if (!strcmp(str, "dlm")) return LD_LM_DLM; return -2; } static uint32_t str_to_opts(const char *str) { uint32_t flags = 0; if (!str) goto out; if (strstr(str, "persistent")) flags |= LD_AF_PERSISTENT; if (strstr(str, "unlock_cancel")) flags |= LD_AF_UNLOCK_CANCEL; if (strstr(str, "next_version")) flags |= LD_AF_NEXT_VERSION; if (strstr(str, "wait")) flags |= LD_AF_WAIT; if (strstr(str, "force")) flags |= LD_AF_FORCE; if (strstr(str, "ex_disable")) flags |= LD_AF_EX_DISABLE; if (strstr(str, "enable")) flags |= LD_AF_ENABLE; if (strstr(str, "disable")) flags |= LD_AF_DISABLE; out: return flags; } /* * dump info * client_list: each client struct * lockspaces: each lockspace struct * lockspace actions: each action struct * lockspace resources: each resource struct * lockspace resource actions: each action struct * lockspace resource locks: each lock struct */ static int setup_dump_socket(void) { int s; s = socket(AF_LOCAL, SOCK_DGRAM, 0); if (s < 0) return s; memset(&dump_addr, 0, sizeof(dump_addr)); dump_addr.sun_family = AF_LOCAL; strcpy(&dump_addr.sun_path[1], DUMP_SOCKET_NAME); dump_addrlen = sizeof(sa_family_t) + strlen(dump_addr.sun_path+1) + 1; return s; } #define MAX_SEND_LEN 65536 #define RESEND_DELAY_US 1000 #define RESEND_DELAY_US_MAX 500000 static void send_dump_buf(int fd, int dump_len) { int pos = 0; int ret; int send_len; int delay = 0; if (!dump_len) return; repeat: if (dump_len - pos < MAX_SEND_LEN) send_len = dump_len - pos; else send_len = MAX_SEND_LEN; ret = sendto(fd, dump_buf + pos, send_len, MSG_NOSIGNAL | MSG_DONTWAIT, (struct sockaddr *)&dump_addr, dump_addrlen); if (ret < 0) { if ((errno == EAGAIN || errno == EINTR) && (delay < RESEND_DELAY_US_MAX)) { usleep(RESEND_DELAY_US); delay += RESEND_DELAY_US; goto repeat; } log_error("send_dump_buf delay %d errno %d", delay, errno); return; } pos += ret; if (pos < dump_len) goto repeat; log_debug("send_dump_buf delay %d total %d", delay, pos); } static int print_structs(const char *prefix, int pos, int len) { return snprintf(dump_buf + pos, len - pos, "info=%s " "unused_action_count=%d " "unused_client_count=%d " "unused_resource_count=%d " "unused_lock_count=%d\n", prefix, unused_action_count, unused_client_count, unused_resource_count, unused_lock_count); } static int print_client(struct client *cl, const char *prefix, int pos, int len) { return snprintf(dump_buf + pos, len - pos, "info=%s " "pid=%d " "fd=%d " "pi=%d " "id=%u " "name=%s\n", prefix, cl->pid, cl->fd, cl->pi, cl->id, cl->name[0] ? cl->name : "."); } static int print_lockspace(struct lockspace *ls, const char *prefix, int pos, int len) { return snprintf(dump_buf + pos, len - pos, "info=%s " "ls_name=%s " "vg_name=%s " "vg_uuid=%s " "vg_sysid=%s " "vg_args=%s " "lm_type=%s " "host_id=%llu " "create_fail=%d " "create_done=%d " "thread_work=%d " "thread_stop=%d " "thread_done=%d " "kill_vg=%d " "drop_vg=%d " "sanlock_gl_enabled=%d\n", prefix, ls->name, ls->vg_name, ls->vg_uuid, ls->vg_sysid[0] ? ls->vg_sysid : ".", ls->vg_args, lm_str(ls->lm_type), (unsigned long long)ls->host_id, ls->create_fail ? 
1 : 0, ls->create_done ? 1 : 0, ls->thread_work ? 1 : 0, ls->thread_stop ? 1 : 0, ls->thread_done ? 1 : 0, ls->kill_vg, ls->drop_vg, ls->sanlock_gl_enabled ? 1 : 0); } static int print_action(struct action *act, const char *prefix, int pos, int len) { return snprintf(dump_buf + pos, len - pos, "info=%s " "client_id=%u " "flags=0x%x " "version=%u " "op=%s " "rt=%s " "mode=%s " "lm_type=%s " "result=%d " "lm_rv=%d\n", prefix, act->client_id, act->flags, act->version, op_str(act->op), rt_str(act->rt), mode_str(act->mode), lm_str(act->lm_type), act->result, act->lm_rv); } static int print_resource(struct resource *r, const char *prefix, int pos, int len) { return snprintf(dump_buf + pos, len - pos, "info=%s " "name=%s " "type=%s " "mode=%s " "sh_count=%d " "version=%u\n", prefix, r->name, rt_str(r->type), mode_str(r->mode), r->sh_count, r->version); } static int print_lock(struct lock *lk, const char *prefix, int pos, int len) { return snprintf(dump_buf + pos, len - pos, "info=%s " "mode=%s " "version=%u " "flags=0x%x " "client_id=%u\n", prefix, mode_str(lk->mode), lk->version, lk->flags, lk->client_id); } static int dump_info(int *dump_len) { struct client *cl; struct lockspace *ls; struct resource *r; struct lock *lk; struct action *act; int len, pos, ret; int rv = 0; memset(dump_buf, 0, sizeof(dump_buf)); len = sizeof(dump_buf); pos = 0; /* * memory */ pthread_mutex_lock(&unused_struct_mutex); ret = print_structs("structs", pos, len); if (ret >= len - pos) { pthread_mutex_unlock(&unused_struct_mutex); return -ENOSPC; } pos += ret; pthread_mutex_unlock(&unused_struct_mutex); /* * clients */ pthread_mutex_lock(&client_mutex); list_for_each_entry(cl, &client_list, list) { ret = print_client(cl, "client", pos, len); if (ret >= len - pos) { rv = -ENOSPC; break; } pos += ret; } pthread_mutex_unlock(&client_mutex); if (rv < 0) return rv; /* * lockspaces with their action/resource/lock info */ pthread_mutex_lock(&lockspaces_mutex); list_for_each_entry(ls, &lockspaces, list) { ret = print_lockspace(ls, "ls", pos, len); if (ret >= len - pos) { rv = -ENOSPC; goto out; } pos += ret; list_for_each_entry(act, &ls->actions, list) { ret = print_action(act, "ls_action", pos, len); if (ret >= len - pos) { rv = -ENOSPC; goto out; } pos += ret; } list_for_each_entry(r, &ls->resources, list) { ret = print_resource(r, "r", pos, len); if (ret >= len - pos) { rv = -ENOSPC; goto out; } pos += ret; list_for_each_entry(lk, &r->locks, list) { ret = print_lock(lk, "lk", pos, len); if (ret >= len - pos) { rv = -ENOSPC; goto out; } pos += ret; } list_for_each_entry(act, &r->actions, list) { ret = print_action(act, "r_action", pos, len); if (ret >= len - pos) { rv = -ENOSPC; goto out; } pos += ret; } } } out: pthread_mutex_unlock(&lockspaces_mutex); *dump_len = pos; return rv; } /* called from client_thread, cl->mutex is held */ static void client_recv_action(struct client *cl) { request req; response res; struct action *act; const char *cl_name; const char *vg_name; const char *vg_uuid; const char *vg_sysid; const char *str; int64_t val; uint32_t opts = 0; int result = 0; int cl_pid; int op, rt, lm, mode; int rv; buffer_init(&req.buffer); rv = buffer_read(cl->fd, &req.buffer); if (!rv) { if (errno == ECONNRESET) { log_debug("client recv %u ECONNRESET", cl->id); cl->dead = 1; } else { log_error("client recv %u buffer_read error %d", cl->id, errno); } buffer_destroy(&req.buffer); client_resume(cl); return; } req.cft = config_tree_from_string_without_dup_node_check(req.buffer.mem); if (!req.cft) { log_error("client recv %u 
config_from_string error", cl->id); buffer_destroy(&req.buffer); client_resume(cl); return; } str = daemon_request_str(req, "request", NULL); rv = str_to_op_rt(str, &op, &rt); if (rv < 0) { log_error("client recv %u bad request name \"%s\"", cl->id, str ? str : ""); dm_config_destroy(req.cft); buffer_destroy(&req.buffer); client_resume(cl); return; } if (op == LD_OP_HELLO || op == LD_OP_QUIT) { /* * FIXME: add the client command name to the hello messages * so it can be saved in cl->name here. */ result = 0; if (op == LD_OP_QUIT) { log_debug("op quit"); pthread_mutex_lock(&lockspaces_mutex); if (list_empty(&lockspaces)) daemon_quit = 1; else result = -EBUSY; pthread_mutex_unlock(&lockspaces_mutex); } buffer_init(&res.buffer); res = daemon_reply_simple("OK", "result = " FMTd64, (int64_t) result, "protocol = %s", lvmlockd_protocol, "version = " FMTd64, (int64_t) lvmlockd_protocol_version, NULL); buffer_write(cl->fd, &res.buffer); buffer_destroy(&res.buffer); dm_config_destroy(req.cft); buffer_destroy(&req.buffer); client_resume(cl); return; } cl_name = daemon_request_str(req, "cmd", NULL); cl_pid = daemon_request_int(req, "pid", 0); vg_name = daemon_request_str(req, "vg_name", NULL); vg_uuid = daemon_request_str(req, "vg_uuid", NULL); vg_sysid = daemon_request_str(req, "vg_sysid", NULL); str = daemon_request_str(req, "mode", NULL); mode = str_to_mode(str); str = daemon_request_str(req, "opts", NULL); opts = str_to_opts(str); str = daemon_request_str(req, "vg_lock_type", NULL); lm = str_to_lm(str); if (cl_pid && cl_pid != cl->pid) log_error("client recv bad message pid %d client %d", cl_pid, cl->pid); /* FIXME: do this in hello message instead */ if (!cl->name[0] && cl_name) strncpy(cl->name, cl_name, MAX_NAME); if (!gl_use_dlm && !gl_use_sanlock && (lm > 0)) { if (lm == LD_LM_DLM && lm_support_dlm()) gl_use_dlm = 1; else if (lm == LD_LM_SANLOCK && lm_support_sanlock()) gl_use_sanlock = 1; log_debug("set gl_use_%s", lm_str(lm)); } if (!(act = alloc_action())) { log_error("No memory for action"); dm_config_destroy(req.cft); buffer_destroy(&req.buffer); client_resume(cl); return; } act->client_id = cl->id; act->op = op; act->rt = rt; act->mode = mode; act->flags = opts; act->lm_type = lm; if (vg_name && strcmp(vg_name, "none")) strncpy(act->vg_name, vg_name, MAX_NAME); if (vg_uuid && strcmp(vg_uuid, "none")) strncpy(act->vg_uuid, vg_uuid, 64); if (vg_sysid && strcmp(vg_sysid, "none")) strncpy(act->vg_sysid, vg_sysid, MAX_NAME); str = daemon_request_str(req, "lv_name", NULL); if (str && strcmp(str, "none")) strncpy(act->lv_name, str, MAX_NAME); str = daemon_request_str(req, "lv_uuid", NULL); if (str && strcmp(str, "none")) strncpy(act->lv_uuid, str, MAX_NAME); val = daemon_request_int(req, "version", 0); if (val) act->version = (uint32_t)val; str = daemon_request_str(req, "vg_lock_args", NULL); if (str && strcmp(str, "none")) strncpy(act->vg_args, str, MAX_ARGS); str = daemon_request_str(req, "lv_lock_args", NULL); if (str && strcmp(str, "none")) strncpy(act->lv_args, str, MAX_ARGS); /* start_vg will include lvmlocal.conf local/host_id here */ val = daemon_request_int(req, "host_id", 0); if (val) act->host_id = val; act->max_retries = daemon_request_int(req, "max_retries", DEFAULT_MAX_RETRIES); dm_config_destroy(req.cft); buffer_destroy(&req.buffer); log_debug("recv %s[%d] cl %u %s %s \"%s\" mode %s flags %x", cl->name[0] ? 
cl->name : "client", cl->pid, cl->id, op_str(act->op), rt_str(act->rt), act->vg_name, mode_str(act->mode), opts); if (lm == LD_LM_DLM && !lm_support_dlm()) { log_debug("dlm not supported"); rv = -EPROTONOSUPPORT; goto out; } if (lm == LD_LM_SANLOCK && !lm_support_sanlock()) { log_debug("sanlock not supported"); rv = -EPROTONOSUPPORT; goto out; } if (act->op == LD_OP_LOCK && act->mode != LD_LK_UN) cl->lock_ops = 1; switch (act->op) { case LD_OP_START: rv = add_lockspace(act); break; case LD_OP_STOP: rv = rem_lockspace(act); break; case LD_OP_DUMP_LOG: case LD_OP_DUMP_INFO: /* The client thread reply will copy and send the dump. */ add_client_result(act); rv = 0; break; case LD_OP_INIT: case LD_OP_START_WAIT: case LD_OP_STOP_ALL: case LD_OP_RENAME_FINAL: case LD_OP_RUNNING_LM: add_work_action(act); rv = 0; break; case LD_OP_LOCK: case LD_OP_UPDATE: case LD_OP_ENABLE: case LD_OP_DISABLE: case LD_OP_FREE: case LD_OP_RENAME_BEFORE: case LD_OP_FIND_FREE_LOCK: case LD_OP_KILL_VG: case LD_OP_DROP_VG: case LD_OP_BUSY: rv = add_lock_action(act); break; default: rv = -EINVAL; }; out: if (rv < 0) { act->result = rv; add_client_result(act); } } static void *client_thread_main(void *arg_in) { struct client *cl; struct action *act; struct action *act_un; int rv; while (1) { pthread_mutex_lock(&client_mutex); while (!client_work && list_empty(&client_results)) { if (client_stop) { pthread_mutex_unlock(&client_mutex); goto out; } pthread_cond_wait(&client_cond, &client_mutex); } /* * Send outgoing results back to clients */ if (!list_empty(&client_results)) { act = list_first_entry(&client_results, struct action, list); list_del(&act->list); cl = find_client_id(act->client_id); pthread_mutex_unlock(&client_mutex); if (cl) { pthread_mutex_lock(&cl->mutex); rv = client_send_result(cl, act); pthread_mutex_unlock(&cl->mutex); } else { log_debug("no client %u for result", act->client_id); rv = -1; } /* * The client failed after we acquired an LV lock for * it, but before getting this reply saying it's done. * So the lv will not be active and we should release * the lv lock it requested. */ if ((rv < 0) && (act->flags & LD_AF_LV_LOCK)) { log_debug("auto unlock lv for failed client %u", act->client_id); if ((act_un = alloc_action())) { memcpy(act_un, act, sizeof(struct action)); act_un->mode = LD_LK_UN; act_un->flags |= LD_AF_LV_UNLOCK; act_un->flags &= ~LD_AF_LV_LOCK; add_lock_action(act_un); } } free_action(act); continue; } /* * Queue incoming actions for lockspace threads */ if (client_work) { cl = find_client_work(); if (!cl) client_work = 0; pthread_mutex_unlock(&client_mutex); if (!cl) continue; pthread_mutex_lock(&cl->mutex); if (cl->recv) { cl->recv = 0; client_recv_action(cl); } if (cl->dead) { /* log_debug("client rem %d pi %d fd %d ig %d", cl->id, cl->pi, cl->fd, cl->poll_ignore); */ /* * If cl->dead was set in main_loop, then the * fd has already been closed and the pollfd * entry is already unused. * main_loop set dead=1, ignore=0, pi=-1, fd=-1 * * if cl->dead was not set in main_loop, but * set in client_recv_action, then the main_loop * should be ignoring this client fd. 
* main_loop set ignore=1 */ if (cl->poll_ignore) { log_debug("client close %d pi %d fd %d", cl->id, cl->pi, cl->fd); /* assert cl->pi != -1 */ /* assert pollfd[pi].fd == FD_IGNORE */ if (close(cl->fd)) log_error("client close %d pi %d fd %d failed", cl->id, cl->pi, cl->fd); rem_pollfd(cl->pi); cl->pi = -1; cl->fd = -1; cl->poll_ignore = 0; } else { /* main thread should have closed */ if (cl->pi != -1 || cl->fd != -1) { log_error("client %d bad state pi %d fd %d", cl->id, cl->pi, cl->fd); } } pthread_mutex_unlock(&cl->mutex); pthread_mutex_lock(&client_mutex); list_del(&cl->list); pthread_mutex_unlock(&client_mutex); client_purge(cl); free_client(cl); } else { pthread_mutex_unlock(&cl->mutex); } } else pthread_mutex_unlock(&client_mutex); } out: return NULL; } static int setup_client_thread(void) { int rv; INIT_LIST_HEAD(&client_list); INIT_LIST_HEAD(&client_results); pthread_mutex_init(&client_mutex, NULL); pthread_cond_init(&client_cond, NULL); rv = pthread_create(&client_thread, NULL, client_thread_main, NULL); if (rv) return -1; return 0; } static void close_client_thread(void) { int perrno; pthread_mutex_lock(&client_mutex); client_stop = 1; pthread_cond_signal(&client_cond); pthread_mutex_unlock(&client_mutex); if ((perrno = pthread_join(client_thread, NULL))) log_error("pthread_join client_thread error %d", perrno); } /* * Get a list of all VGs with a lockd type (sanlock|dlm) from lvmetad. * We'll match this list against a list of existing lockspaces that are * found in the lock manager. * * For each of these VGs, also create a struct resource on ls->resources to * represent each LV in the VG that uses a lock. For each of these LVs * that are active, we'll attempt to adopt a lock. */ static int get_lockd_vgs(struct list_head *vg_lockd) { struct list_head update_vgs; daemon_reply reply; struct dm_config_node *cn; struct dm_config_node *metadata; struct dm_config_node *md_cn; struct dm_config_node *lv_cn; struct lockspace *ls, *safe; struct resource *r; const char *vg_name; const char *vg_uuid; const char *lv_uuid; const char *lock_type; const char *lock_args; char find_str_path[PATH_MAX]; int rv = 0; INIT_LIST_HEAD(&update_vgs); reply = send_lvmetad("vg_list", "token = %s", "skip", NULL); if (reply.error || strcmp(daemon_reply_str(reply, "response", ""), "OK")) { log_error("vg_list from lvmetad failed %d", reply.error); rv = -EINVAL; goto destroy; } if (!(cn = dm_config_find_node(reply.cft->root, "volume_groups"))) { log_error("get_lockd_vgs no vgs"); rv = -EINVAL; goto destroy; } /* create an update_vgs list of all vg uuids */ for (cn = cn->child; cn; cn = cn->sib) { vg_uuid = cn->key; if (!(ls = alloc_lockspace())) { rv = -ENOMEM; break; } strncpy(ls->vg_uuid, vg_uuid, 64); list_add_tail(&ls->list, &update_vgs); log_debug("get_lockd_vgs %s", vg_uuid); } destroy: daemon_reply_destroy(reply); if (rv < 0) goto out; /* get vg_name and lock_type for each vg uuid entry in update_vgs */ list_for_each_entry(ls, &update_vgs, list) { reply = send_lvmetad("vg_lookup", "token = %s", "skip", "uuid = %s", ls->vg_uuid, NULL); if (reply.error || strcmp(daemon_reply_str(reply, "response", ""), "OK")) { log_error("vg_lookup from lvmetad failed %d", reply.error); rv = -EINVAL; goto next; } vg_name = daemon_reply_str(reply, "name", NULL); if (!vg_name) { log_error("get_lockd_vgs %s no name", ls->vg_uuid); rv = -EINVAL; goto next; } strncpy(ls->vg_name, vg_name, MAX_NAME); metadata = dm_config_find_node(reply.cft->root, "metadata"); if (!metadata) { log_error("get_lockd_vgs %s name %s no metadata", 
ls->vg_uuid, ls->vg_name); rv = -EINVAL; goto next; } lock_type = dm_config_find_str(metadata, "metadata/lock_type", NULL); ls->lm_type = str_to_lm(lock_type); if ((ls->lm_type != LD_LM_SANLOCK) && (ls->lm_type != LD_LM_DLM)) { log_debug("get_lockd_vgs %s not lockd type", ls->vg_name); continue; } lock_args = dm_config_find_str(metadata, "metadata/lock_args", NULL); if (lock_args) strncpy(ls->vg_args, lock_args, MAX_ARGS); log_debug("get_lockd_vgs %s lock_type %s lock_args %s", ls->vg_name, lock_type, lock_args ?: "none"); /* * Make a record (struct resource) of each lv that uses a lock. * For any lv that uses a lock, we'll check if the lv is active * and if so try to adopt a lock for it. */ for (md_cn = metadata->child; md_cn; md_cn = md_cn->sib) { if (strcmp(md_cn->key, "logical_volumes")) continue; for (lv_cn = md_cn->child; lv_cn; lv_cn = lv_cn->sib) { snprintf(find_str_path, PATH_MAX, "%s/lock_args", lv_cn->key); lock_args = dm_config_find_str(lv_cn, find_str_path, NULL); if (!lock_args) continue; snprintf(find_str_path, PATH_MAX, "%s/id", lv_cn->key); lv_uuid = dm_config_find_str(lv_cn, find_str_path, NULL); if (!lv_uuid) { log_error("get_lock_vgs no lv id for name %s", lv_cn->key); continue; } if (!(r = alloc_resource())) { rv = -ENOMEM; goto next; } r->use_vb = 0; r->type = LD_RT_LV; strncpy(r->name, lv_uuid, MAX_NAME); if (lock_args) strncpy(r->lv_args, lock_args, MAX_ARGS); list_add_tail(&r->list, &ls->resources); log_debug("get_lockd_vgs %s lv %s %s (name %s)", ls->vg_name, r->name, lock_args ? lock_args : "", lv_cn->key); } } next: daemon_reply_destroy(reply); if (rv < 0) break; } out: /* Return lockd VG's on the vg_lockd list. */ list_for_each_entry_safe(ls, safe, &update_vgs, list) { list_del(&ls->list); if ((ls->lm_type == LD_LM_SANLOCK) || (ls->lm_type == LD_LM_DLM)) list_add_tail(&ls->list, vg_lockd); else free(ls); } return rv; } static char _dm_uuid[DM_UUID_LEN]; static char *get_dm_uuid(char *dm_name) { struct dm_info info; struct dm_task *dmt; const char *uuid; if (!(dmt = dm_task_create(DM_DEVICE_INFO))) goto fail_out; if (!dm_task_set_name(dmt, dm_name)) goto fail; if (!dm_task_run(dmt)) goto fail; if (!dm_task_get_info(dmt, &info)) goto fail; if (!info.exists) goto fail; uuid = dm_task_get_uuid(dmt); if (!uuid) { log_error("Failed to get uuid for device %s", dm_name); goto fail; } if (strncmp(uuid, "LVM", 3)) { log_debug("dm device %s is not from LVM", dm_name); goto fail; } memset(_dm_uuid, 0, sizeof(_dm_uuid)); strncpy(_dm_uuid, uuid, sizeof(_dm_uuid)-1); dm_task_destroy(dmt); return _dm_uuid; fail: dm_task_destroy(dmt); fail_out: return NULL; } /* * dm reports the LV uuid as: * LVM-ydpRIdDWBDX25upmj2k0D4deat6oxH8er03T0f4xM8rPIV8XqIhwv3h8Y7xRWjMr * * the lock name for the LV is: * r03T0f-4xM8-rPIV-8XqI-hwv3-h8Y7-xRWjMr * * This function formats both as: * r03T0f4xM8rPIV8XqIhwv3h8Y7xRWjMr * * and returns 1 if they match. */ static int match_dm_uuid(char *dm_uuid, char *lv_lock_uuid) { char buf1[64]; char buf2[64]; int i, j; memset(buf1, 0, sizeof(buf1)); memset(buf2, 0, sizeof(buf2)); for (i = 0, j = 0; i < strlen(lv_lock_uuid); i++) { if (lv_lock_uuid[i] == '-') continue; buf1[j] = lv_lock_uuid[i]; j++; } for (i = 36, j = 0; i < 69; i++) { buf2[j] = dm_uuid[i]; j++; } if (!strcmp(buf1, buf2)) return 1; return 0; } /* * All LVs with a lock_type are on ls->resources. * Remove any that are not active. The remaining * will have locks adopted. 
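* Activity is detected by listing dm devices, keeping only those
* whose dm uuid starts with "LVM", and comparing the LV uuid embedded
* in that dm uuid against each resource (lock) name with
* match_dm_uuid(); resources with no matching active dm device are
* freed, the rest are marked r->adopt.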
*/ static int remove_inactive_lvs(struct list_head *vg_lockd) { struct lockspace *ls; struct resource *r, *rsafe; struct dm_names *names; struct dm_task *dmt; char *dm_uuid; char *vgname, *lvname, *layer; char namebuf[MAX_NAME+1]; unsigned next = 0; int rv = 0; if (!(dmt = dm_task_create(DM_DEVICE_LIST))) return -1; if (!dm_task_run(dmt)) { log_error("Failed to get dm devices"); rv = -1; goto ret; } if (!(names = dm_task_get_names(dmt))) { log_error("Failed to get dm names"); rv = -1; goto ret; } if (!names->dev) { log_debug("dm names none found"); goto out; } /* * For each dm name, compare it to each lv in each lockd vg. */ do { names = (struct dm_names *)((char *) names + next); dm_uuid = get_dm_uuid(names->name); if (!dm_uuid) goto next_dmname; vgname = NULL; lvname = NULL; layer = NULL; memset(namebuf, 0, sizeof(namebuf)); strncpy(namebuf, names->name, MAX_NAME); vgname = namebuf; if (!dm_split_lvm_name(NULL, namebuf, &vgname, &lvname, &layer)) { log_error("failed to split dm name %s", namebuf); goto next_dmname; } log_debug("adopt remove_inactive dm name %s dm uuid %s vgname %s lvname %s", names->name, dm_uuid, vgname, lvname); if (!vgname || !lvname) { log_debug("dm name %s invalid split vg %s lv %s layer %s", names->name, vgname ? vgname : "", lvname ? lvname : "", layer ? layer : ""); goto next_dmname; } list_for_each_entry(ls, vg_lockd, list) { if (strcmp(vgname, ls->vg_name)) continue; if (!strcmp(lvname, "lvmlock")) continue; list_for_each_entry(r, &ls->resources, list) { if (!match_dm_uuid(dm_uuid, r->name)) continue; /* Found an active LV in a lockd VG. */ log_debug("dm device %s adopt in vg %s lv %s", names->name, ls->vg_name, r->name); r->adopt = 1; goto next_dmname; } } next_dmname: next = names->next; } while (next); out: /* Remove any struct resources that do not need locks adopted. */ list_for_each_entry(ls, vg_lockd, list) { list_for_each_entry_safe(r, rsafe, &ls->resources, list) { if (r->adopt) { r->adopt = 0; } else { log_debug("lockd vg %s remove inactive lv %s", ls->vg_name, r->name); list_del(&r->list); free_resource(r); } } } ret: dm_task_destroy(dmt); return rv; } static void adopt_locks(void) { struct list_head ls_found; struct list_head vg_lockd; struct list_head to_unlock; struct lockspace *ls, *lsafe; struct lockspace *ls1, *l1safe; struct lockspace *ls2, *l2safe; struct resource *r, *rsafe; struct action *act, *asafe; int count_start = 0, count_start_done = 0, count_start_fail = 0; int count_adopt = 0, count_adopt_done = 0, count_adopt_fail = 0; int found, rv; INIT_LIST_HEAD(&adopt_results); INIT_LIST_HEAD(&ls_found); INIT_LIST_HEAD(&vg_lockd); INIT_LIST_HEAD(&to_unlock); /* * Get list of lockspaces from lock managers. * Get list of VGs from lvmetad with a lockd type. * Get list of active lockd type LVs from /dev. */ if (lm_support_dlm() && lm_is_running_dlm()) { rv = lm_get_lockspaces_dlm(&ls_found); if (rv < 0) goto fail; } if (lm_support_sanlock() && lm_is_running_sanlock()) { rv = lm_get_lockspaces_sanlock(&ls_found); if (rv < 0) goto fail; } if (list_empty(&ls_found)) { log_debug("No lockspaces found to adopt"); return; } /* * Adds a struct lockspace to vg_lockd for each lockd VG. * Adds a struct resource to ls->resources for each LV. */ rv = get_lockd_vgs(&vg_lockd); if (rv < 0) { log_error("adopt_locks get_lockd_vgs failed"); goto fail; } /* * For each resource on each lockspace, check if the * corresponding LV is active. If so, leave the * resource struct, if not free the resource struct. * The remain entries need to have locks adopted. 
*/ rv = remove_inactive_lvs(&vg_lockd); if (rv < 0) { log_error("adopt_locks remove_inactive_lvs failed"); goto fail; } list_for_each_entry(ls, &ls_found, list) { if (ls->lm_type == LD_LM_DLM) gl_use_dlm = 1; log_debug("adopt %s lockspace %s vg %s", lm_str(ls->lm_type), ls->name, ls->vg_name); } if (!gl_use_dlm) gl_use_sanlock = 1; list_for_each_entry(ls, &vg_lockd, list) { log_debug("adopt lvmetad vg %s lock_type %s lock_args %s", ls->vg_name, lm_str(ls->lm_type), ls->vg_args); list_for_each_entry(r, &ls->resources, list) log_debug("adopt lv %s %s", ls->vg_name, r->name); } /* * Compare and merge the list of lockspaces in ls_found * and the list of lockd VGs in vg_lockd. * * An ls from ls_found may not have had any active lvs when * previous lvmlockd died, but the ls should still be joined, * and checked for GL/VG locks. * * An ls from vg_lockd with active lvs should be in ls_found. * If it's not then we might want to join the ls and acquire locks * for the active lvs (as opposed to adopting orphans for them.) * The orphan lock in the ls should have prevented the ls in * the lock manager from going away. * * If an ls in vg_lockd has no active lvs and does not have * a matching entry in ls_found, then skip it. * * An ls in ls_found should always have a matching ls in * vg_lockd. If it doesn't, then maybe the vg has been * removed even though the lockspace for the vg is still * in the lock manager. Just leave the ls in the lm * alone, and skip the ls_found entry. */ list_for_each_entry_safe(ls1, l1safe, &ls_found, list) { /* The dlm global lockspace is special and doesn't match a VG. */ if ((ls1->lm_type == LD_LM_DLM) && !strcmp(ls1->name, gl_lsname_dlm)) { list_del(&ls1->list); free(ls1); continue; } found = 0; list_for_each_entry_safe(ls2, l2safe, &vg_lockd, list) { if (strcmp(ls1->vg_name, ls2->vg_name)) continue; /* * LS in both ls_found and vg_lockd. */ log_debug("ls %s matches vg %s", ls1->name, ls2->vg_name); memcpy(ls1->vg_uuid, ls2->vg_uuid, 64); memcpy(ls1->vg_args, ls2->vg_args, MAX_ARGS); list_for_each_entry_safe(r, rsafe, &ls2->resources, list) { list_del(&r->list); list_add(&r->list, &ls1->resources); } list_del(&ls2->list); free(ls2); found = 1; break; } /* * LS in ls_found, not in vg_lockd. * An lvm lockspace found in the lock manager has no * corresponding VG in lvmetad. This shouldn't usually * happen, but it's possible the VG could have been removed * while the orphaned lockspace from it was still around. * Report an error and leave the ls in the lm alone. */ if (!found) { log_error("No VG %s found for lockspace %s %s", ls1->vg_name, ls1->name, lm_str(ls1->lm_type)); list_del(&ls1->list); free(ls1); } } /* * LS in vg_lockd, not in ls_found. * lockd vgs from lvmetad that do not have an existing lockspace. * This wouldn't be unusual; we just skip the vg. * But, if the vg has active lvs, then it should have had locks * and a lockspace. Should we attempt to join the lockspace and * acquire (not adopt) locks for these LVs? */ list_for_each_entry_safe(ls, lsafe, &vg_lockd, list) { if (!list_empty(&ls->resources)) { /* We should have found a lockspace. */ /* add this ls and acquire locks for ls->resources? */ log_error("No lockspace %s %s found for VG %s with active LVs", ls->name, lm_str(ls->lm_type), ls->vg_name); } else { /* The VG wasn't started in the previous lvmlockd. */ log_debug("No ls found for vg %s", ls->vg_name); } list_del(&ls->list); free(ls); } /* * Create and queue start actions to add lockspaces. 
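 * Each start action is flagged LD_AF_ADOPT | LD_AF_WAIT and handed to
 * add_lockspace_thread(); when a lockspace thread finishes the start it
 * moves the action onto adopt_results, which is collected below.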
*/ if (gl_use_dlm) { if (!(act = alloc_action())) goto fail; log_debug("adopt add dlm global lockspace"); act->op = LD_OP_START; act->flags = (LD_AF_ADOPT | LD_AF_WAIT); act->rt = LD_RT_GL; act->lm_type = LD_LM_DLM; act->client_id = INTERNAL_CLIENT_ID; add_dlm_global_lockspace(act); count_start++; } list_for_each_entry_safe(ls, lsafe, &ls_found, list) { if (!(act = alloc_action())) goto fail; act->op = LD_OP_START; act->flags = (LD_AF_ADOPT | LD_AF_WAIT); act->rt = LD_RT_VG; act->lm_type = ls->lm_type; act->client_id = INTERNAL_CLIENT_ID; strncpy(act->vg_name, ls->vg_name, MAX_NAME); memcpy(act->vg_uuid, ls->vg_uuid, 64); memcpy(act->vg_args, ls->vg_args, MAX_ARGS); act->host_id = ls->host_id; /* set act->version from lvmetad data? */ log_debug("adopt add %s vg lockspace %s", lm_str(act->lm_type), act->vg_name); rv = add_lockspace_thread(ls->name, act->vg_name, act->vg_uuid, act->lm_type, act->vg_args, act); if (rv < 0) { log_error("Failed to create lockspace thread for VG %s", ls->vg_name); list_del(&ls->list); free(ls); free_action(act); count_start_fail++; continue; } /* * When the lockspace_thread is done with the start act, * it will see the act ADOPT flag and move the act onto * the adopt_results list for us to collect below. */ count_start++; } log_debug("adopt starting %d lockspaces", count_start); /* * Wait for all start/rejoin actions to complete. Each start action * queued above will appear on the adopt_results list when finished. */ while (count_start_done < count_start) { sleep(1); act = NULL; pthread_mutex_lock(&client_mutex); if (!list_empty(&adopt_results)) { act = list_first_entry(&adopt_results, struct action, list); list_del(&act->list); } pthread_mutex_unlock(&client_mutex); if (!act) continue; if (act->result < 0) { log_error("adopt add lockspace failed vg %s %d", act->vg_name, act->result); count_start_fail++; } free_action(act); count_start_done++; } log_debug("adopt started %d lockspaces done %d fail %d", count_start, count_start_done, count_start_fail); /* * Create lock-adopt actions for active LVs (ls->resources), * and GL/VG locks (we don't know if these locks were held * and orphaned by the last lvmlockd, so try to adopt them * to see.) * * A proper struct lockspace now exists on the lockspaces list * for each ls in ls_found. Lock ops for one of those * lockspaces can be done as OP_LOCK actions queued using * add_lock_action(); * * Start by attempting to adopt the lock in the most likely * mode it was left in (ex for lvs, sh for vg/gl). If * the mode is wrong, the lm will return an error and we * try again with the other mode. */ list_for_each_entry(ls, &ls_found, list) { /* * Adopt orphan LV locks. */ list_for_each_entry(r, &ls->resources, list) { if (!(act = alloc_action())) goto fail; act->op = LD_OP_LOCK; act->rt = LD_RT_LV; act->mode = LD_LK_EX; act->flags = (LD_AF_ADOPT | LD_AF_PERSISTENT); act->client_id = INTERNAL_CLIENT_ID; act->lm_type = ls->lm_type; strncpy(act->vg_name, ls->vg_name, MAX_NAME); strncpy(act->lv_uuid, r->name, MAX_NAME); strncpy(act->lv_args, r->lv_args, MAX_ARGS); log_debug("adopt lock for lv %s %s", act->vg_name, act->lv_uuid); rv = add_lock_action(act); if (rv < 0) { log_error("adopt add_lock_action lv %s %s error %d", act->vg_name, act->lv_uuid, rv); count_adopt_fail++; free_action(act); } else { count_adopt++; } } /* * Adopt orphan VG lock. 
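 * This is tried in sh mode first; if the orphan was actually left in ex
 * mode, the -EUCLEAN handling below retries the adopt in ex mode (LV locks
 * above use the opposite order, ex first).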
*/ if (!(act = alloc_action())) goto fail; act->op = LD_OP_LOCK; act->rt = LD_RT_VG; act->mode = LD_LK_SH; act->flags = LD_AF_ADOPT; act->client_id = INTERNAL_CLIENT_ID; act->lm_type = ls->lm_type; strncpy(act->vg_name, ls->vg_name, MAX_NAME); log_debug("adopt lock for vg %s", act->vg_name); rv = add_lock_action(act); if (rv < 0) { log_error("adopt add_lock_action vg %s error %d", act->vg_name, rv); count_adopt_fail++; free_action(act); } else { count_adopt++; } } /* * Adopt orphan GL lock. */ if (!(act = alloc_action())) goto fail; act->op = LD_OP_LOCK; act->rt = LD_RT_GL; act->mode = LD_LK_SH; act->flags = LD_AF_ADOPT; act->client_id = INTERNAL_CLIENT_ID; act->lm_type = (gl_use_sanlock ? LD_LM_SANLOCK : LD_LM_DLM); log_debug("adopt lock for gl"); rv = add_lock_action(act); if (rv < 0) { log_error("adopt add_lock_action gl %s error %d", act->vg_name, rv); count_adopt_fail++; free_action(act); } else { count_adopt++; } /* * Wait for lock-adopt actions to complete. The completed * actions are passed back here via the adopt_results list. */ while (count_adopt_done < count_adopt) { sleep(1); act = NULL; pthread_mutex_lock(&client_mutex); if (!list_empty(&adopt_results)) { act = list_first_entry(&adopt_results, struct action, list); list_del(&act->list); } pthread_mutex_unlock(&client_mutex); if (!act) continue; /* * lock adopt results */ if (act->result == -EUCLEAN) { /* * Adopt failed because the orphan has a different mode * than initially requested. Repeat the lock-adopt operation * with the other mode. N.B. this logic depends on first * trying sh then ex for GL/VG locks, and ex then sh for * LV locks. */ if ((act->rt != LD_RT_LV) && (act->mode == LD_LK_SH)) { /* GL/VG locks: attempt to adopt ex after sh failed. */ act->mode = LD_LK_EX; rv = add_lock_action(act); } else if ((act->rt == LD_RT_LV) && (act->mode == LD_LK_EX)) { /* LV locks: attempt to adopt sh after ex failed. */ act->mode = LD_LK_SH; rv = add_lock_action(act); } else { log_error("Failed to adopt %s lock in vg %s error %d", rt_str(act->rt), act->vg_name, act->result); count_adopt_fail++; count_adopt_done++; free_action(act); rv = 0; } if (rv < 0) { log_error("adopt add_lock_action again %s", act->vg_name); count_adopt_fail++; count_adopt_done++; free_action(act); } } else if (act->result == -ENOENT) { /* * No orphan lock exists. This is common for GL/VG locks * because they may not have been held when lvmlockd exited. * It's also expected for LV types that do not use a lock. */ if (act->rt == LD_RT_LV) { /* Unexpected, we should have found an orphan. */ log_error("Failed to adopt LV lock for %s %s error %d", act->vg_name, act->lv_uuid, act->result); count_adopt_fail++; } else { /* Normal, no GL/VG lock was orphaned. */ log_debug("Did not adopt %s lock in vg %s error %d", rt_str(act->rt), act->vg_name, act->result); } count_adopt_done++; free_action(act); } else if (act->result < 0) { /* * Some unexpected error. */ log_error("adopt lock rt %s vg %s lv %s error %d", rt_str(act->rt), act->vg_name, act->lv_uuid, act->result); count_adopt_fail++; count_adopt_done++; free_action(act); } else { /* * Adopt success. 
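 * Adopted LV locks stay held (their actions carried LD_AF_PERSISTENT),
 * while the actions for adopted VG/GL locks are parked on to_unlock so
 * those locks can be released once all adopts have completed.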
*/ if (act->rt == LD_RT_LV) { log_debug("adopt success lv %s %s %s", act->vg_name, act->lv_uuid, mode_str(act->mode)); free_action(act); } else if (act->rt == LD_RT_VG) { log_debug("adopt success vg %s %s", act->vg_name, mode_str(act->mode)); list_add_tail(&act->list, &to_unlock); } else if (act->rt == LD_RT_GL) { log_debug("adopt success gl %s %s", act->vg_name, mode_str(act->mode)); list_add_tail(&act->list, &to_unlock); } count_adopt_done++; } } /* * Release adopted GL/VG locks. * The to_unlock actions were the ones used to lock-adopt the GL/VG locks; * now use them to do the unlocks. These actions will again be placed * on adopt_results for us to collect because they have the ADOPT flag set. */ count_adopt = 0; count_adopt_done = 0; list_for_each_entry_safe(act, asafe, &to_unlock, list) { list_del(&act->list); if (act->mode == LD_LK_EX) { /* * FIXME: we probably want to check somehow that * there's no lvm command still running that's * using this ex lock and changing things. */ log_warn("adopt releasing ex %s lock %s", rt_str(act->rt), act->vg_name); } act->mode = LD_LK_UN; log_debug("adopt unlock for %s %s", rt_str(act->rt), act->vg_name); rv = add_lock_action(act); if (rv < 0) { log_error("adopt unlock add_lock_action error %d", rv); free_action(act); } else { count_adopt++; } } /* Wait for the unlocks to complete. */ while (count_adopt_done < count_adopt) { sleep(1); act = NULL; pthread_mutex_lock(&client_mutex); if (!list_empty(&adopt_results)) { act = list_first_entry(&adopt_results, struct action, list); list_del(&act->list); } pthread_mutex_unlock(&client_mutex); if (!act) continue; if (act->result < 0) log_error("adopt unlock error %d", act->result); count_adopt_done++; free_action(act); } /* FIXME: purge any remaining orphan locks in each rejoined ls? */ if (count_start_fail || count_adopt_fail) goto fail; log_debug("adopt_locks done"); return; fail: log_error("adopt_locks failed, reset host"); } static int get_peer_pid(int fd) { struct ucred cred; unsigned int len = sizeof(cred); if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cred, &len) != 0) return -1; return cred.pid; } static void process_listener(int poll_fd) { struct client *cl; int fd, pi; /* assert poll_fd == listen_fd */ fd = accept(listen_fd, NULL, NULL); if (fd < 0) return; if (!(cl = alloc_client())) { if (!close(fd)) log_error("failed to close lockd poll fd"); return; } pi = add_pollfd(fd); if (pi < 0) { log_error("process_listener add_pollfd error %d", pi); free_client(cl); return; } cl->pi = pi; cl->fd = fd; cl->pid = get_peer_pid(fd); pthread_mutex_init(&cl->mutex, NULL); pthread_mutex_lock(&client_mutex); client_ids++; if (client_ids == INTERNAL_CLIENT_ID) client_ids++; if (!client_ids) client_ids++; cl->id = client_ids; list_add_tail(&cl->list, &client_list); pthread_mutex_unlock(&client_mutex); log_debug("new cl %u pi %d fd %d", cl->id, cl->pi, cl->fd); } /* * main loop polls on pipe[0] so that a thread can * restart the poll by writing to pipe[1]. */ static int setup_restart(void) { if (pipe(restart_fds)) { log_error("setup_restart pipe error %d", errno); return -1; } restart_pi = add_pollfd(restart_fds[0]); if (restart_pi < 0) return restart_pi; return 0; } /* * thread wrote 'w' to restart_fds[1] to restart poll() * after adding an fd back into pollfd. 
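 *
 * This is the usual self-pipe trick.  A compiled-out, self-contained
 * sketch of the idea (illustration only, not code used by lvmlockd):
 */
#if 0
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	struct pollfd pfd;
	char wake[1];

	if (pipe(fds))
		return 1;

	/* Another thread would do this write to interrupt a blocking poll(). */
	if (write(fds[1], "w", 1) != 1)
		return 1;

	pfd.fd = fds[0];
	pfd.events = POLLIN;

	/* poll() returns at once because the wake byte is already pending. */
	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN) &&
	    read(fds[0], wake, 1) == 1)
		printf("poll restarted by '%c'\n", wake[0]);

	if (close(fds[0]) || close(fds[1]))
		return 1;
	return 0;
}
#endif

/* process_restart() drains the wake byte so the main loop can poll again.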
*/ static void process_restart(int fd) { char wake[1]; int rv; /* assert fd == restart_fds[0] */ rv = read(restart_fds[0], wake, 1); if (!rv || rv < 0) log_debug("process_restart error %d", errno); } static void sigterm_handler(int sig __attribute__((unused))) { daemon_quit = 1; } static int main_loop(daemon_state *ds_arg) { struct client *cl; int i, rv, is_recv, is_dead; signal(SIGTERM, &sigterm_handler); rv = setup_structs(); if (rv < 0) { log_error("Can't allocate memory"); return rv; } strcpy(gl_lsname_dlm, S_NAME_GL_DLM); INIT_LIST_HEAD(&lockspaces); pthread_mutex_init(&lockspaces_mutex, NULL); pthread_mutex_init(&pollfd_mutex, NULL); pthread_mutex_init(&log_mutex, NULL); openlog("lvmlockd", LOG_CONS | LOG_PID, LOG_DAEMON); log_warn("lvmlockd started"); listen_fd = ds_arg->socket_fd; listen_pi = add_pollfd(listen_fd); setup_client_thread(); setup_worker_thread(); setup_restart(); pthread_mutex_init(&lvmetad_mutex, NULL); lvmetad_handle = lvmetad_open(NULL); if (lvmetad_handle.error || lvmetad_handle.socket_fd < 0) log_error("lvmetad_open error %d", lvmetad_handle.error); else lvmetad_connected = 1; /* * Attempt to rejoin lockspaces and adopt locks from a previous * instance of lvmlockd that left behind lockspaces/locks. */ if (adopt_opt) adopt_locks(); while (1) { rv = poll(pollfd, pollfd_maxi + 1, -1); if ((rv == -1 && errno == EINTR) || daemon_quit) { if (daemon_quit) { int count; /* first sigterm would trigger stops, and second sigterm may finish the joins. */ count = for_each_lockspace(DO_STOP, DO_FREE, NO_FORCE); if (!count) break; log_debug("ignore shutdown for %d lockspaces", count); daemon_quit = 0; } continue; } if (rv < 0) { log_error("poll errno %d", errno); break; } for (i = 0; i <= pollfd_maxi; i++) { if (pollfd[i].fd < 0) continue; is_recv = 0; is_dead = 0; if (pollfd[i].revents & POLLIN) is_recv = 1; if (pollfd[i].revents & (POLLERR | POLLHUP | POLLNVAL)) is_dead = 1; if (!is_recv && !is_dead) continue; if (i == listen_pi) { process_listener(pollfd[i].fd); continue; } if (i == restart_pi) { process_restart(pollfd[i].fd); continue; } /* log_debug("poll pi %d fd %d revents %x", i, pollfd[i].fd, pollfd[i].revents); */ pthread_mutex_lock(&client_mutex); cl = find_client_pi(i); if (cl) { pthread_mutex_lock(&cl->mutex); if (cl->recv) { /* should not happen */ log_error("main client %u already recv", cl->id); } else if (cl->dead) { /* should not happen */ log_error("main client %u already dead", cl->id); } else if (is_dead) { log_debug("close %s[%d] cl %u fd %d", cl->name[0] ? cl->name : "client", cl->pid, cl->id, cl->fd); cl->dead = 1; cl->pi = -1; cl->fd = -1; cl->poll_ignore = 0; if (close(pollfd[i].fd)) log_error("close fd %d failed", pollfd[i].fd); pollfd[i].fd = POLL_FD_UNUSED; pollfd[i].events = 0; pollfd[i].revents = 0; } else if (is_recv) { cl->recv = 1; cl->poll_ignore = 1; pollfd[i].fd = POLL_FD_IGNORE; pollfd[i].events = 0; pollfd[i].revents = 0; } pthread_mutex_unlock(&cl->mutex); client_work = 1; pthread_cond_signal(&client_cond); /* client_thread will pick up and work on any client with cl->recv or cl->dead set */ } else { /* don't think this can happen */ log_error("no client for index %d fd %d", i, pollfd[i].fd); if (close(pollfd[i].fd)) log_error("close fd %d failed", pollfd[i].fd); pollfd[i].fd = POLL_FD_UNUSED; pollfd[i].events = 0; pollfd[i].revents = 0; } pthread_mutex_unlock(&client_mutex); /* After set_dead, should we scan pollfd for last unused slot and reduce pollfd_maxi? 
*/ } } for_each_lockspace_retry(DO_STOP, DO_FREE, DO_FORCE); close_worker_thread(); close_client_thread(); closelog(); daemon_close(lvmetad_handle); return 1; /* libdaemon uses 1 for success */ } static void usage(char *prog, FILE *file) { fprintf(file, "Usage:\n"); fprintf(file, "%s [options]\n\n", prog); fprintf(file, " --help | -h\n"); fprintf(file, " Show this help information.\n"); fprintf(file, " --version | -V\n"); fprintf(file, " Show version of lvmlockd.\n"); fprintf(file, " --test | -T\n"); fprintf(file, " Test mode, do not call lock manager.\n"); fprintf(file, " --foreground | -f\n"); fprintf(file, " Don't fork.\n"); fprintf(file, " --daemon-debug | -D\n"); fprintf(file, " Don't fork and print debugging to stdout.\n"); fprintf(file, " --pid-file | -p \n"); fprintf(file, " Set path to the pid file. [%s]\n", LVMLOCKD_PIDFILE); fprintf(file, " --socket-path | -s \n"); fprintf(file, " Set path to the socket to listen on. [%s]\n", LVMLOCKD_SOCKET); fprintf(file, " --syslog-priority | -S err|warning|debug\n"); fprintf(file, " Write log messages from this level up to syslog. [%s]\n", _syslog_num_to_name(LOG_SYSLOG_PRIO)); fprintf(file, " --gl-type | -g \n"); fprintf(file, " Set global lock type to be dlm|sanlock.\n"); fprintf(file, " --host-id | -i \n"); fprintf(file, " Set the local sanlock host id.\n"); fprintf(file, " --host-id-file | -F \n"); fprintf(file, " A file containing the local sanlock host_id.\n"); fprintf(file, " --sanlock-timeout | -o \n"); fprintf(file, " Set the sanlock lockspace I/O timeout.\n"); fprintf(file, " --adopt | -A 0|1\n"); fprintf(file, " Adopt locks from a previous instance of lvmlockd.\n"); } int main(int argc, char *argv[]) { daemon_state ds = { .daemon_main = main_loop, .daemon_init = NULL, .daemon_fini = NULL, .pidfile = getenv("LVM_LVMLOCKD_PIDFILE"), .socket_path = getenv("LVM_LVMLOCKD_SOCKET"), .protocol = lvmlockd_protocol, .protocol_version = lvmlockd_protocol_version, .name = "lvmlockd", }; static struct option long_options[] = { {"help", no_argument, 0, 'h' }, {"version", no_argument, 0, 'V' }, {"test", no_argument, 0, 'T' }, {"foreground", no_argument, 0, 'f' }, {"daemon-debug", no_argument, 0, 'D' }, {"pid-file", required_argument, 0, 'p' }, {"socket-path", required_argument, 0, 's' }, {"gl-type", required_argument, 0, 'g' }, {"host-id", required_argument, 0, 'i' }, {"host-id-file", required_argument, 0, 'F' }, {"adopt", required_argument, 0, 'A' }, {"syslog-priority", required_argument, 0, 'S' }, {"sanlock-timeout", required_argument, 0, 'o' }, {0, 0, 0, 0 } }; while (1) { int c; int lm; int option_index = 0; c = getopt_long(argc, argv, "hVTfDp:s:l:g:S:I:A:o:", long_options, &option_index); if (c == -1) break; switch (c) { case '0': break; case 'h': usage(argv[0], stdout); exit(EXIT_SUCCESS); case 'V': printf("lvmlockd version: " LVM_VERSION "\n"); exit(EXIT_SUCCESS); case 'T': daemon_test = 1; break; case 'f': ds.foreground = 1; break; case 'D': ds.foreground = 1; daemon_debug = 1; break; case 'p': ds.pidfile = strdup(optarg); break; case 's': ds.socket_path = strdup(optarg); break; case 'g': lm = str_to_lm(optarg); if (lm == LD_LM_DLM && lm_support_dlm()) gl_use_dlm = 1; else if (lm == LD_LM_SANLOCK && lm_support_sanlock()) gl_use_sanlock = 1; else { fprintf(stderr, "invalid gl-type option\n"); exit(EXIT_FAILURE); } break; case 'i': daemon_host_id = atoi(optarg); break; case 'F': daemon_host_id_file = strdup(optarg); break; case 'o': sanlock_io_timeout = atoi(optarg); break; case 'A': adopt_opt = atoi(optarg); break; case 'S': 
syslog_priority = _syslog_name_to_num(optarg); break; case '?': default: usage(argv[0], stdout); exit(EXIT_FAILURE); } } if (!ds.pidfile) ds.pidfile = LVMLOCKD_PIDFILE; if (!ds.socket_path) ds.socket_path = LVMLOCKD_SOCKET; /* runs daemon_main/main_loop */ daemon_start(ds); return 0; } LVM2.2.02.176/daemons/lvmlockd/lvmlockd-internal.h0000644000000000000120000003652013176752421020371 0ustar rootwheel/* * Copyright (C) 2014-2015 Red Hat, Inc. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. */ #ifndef _LVM_LVMLOCKD_INTERNAL_H #define _LVM_LVMLOCKD_INTERNAL_H #define MAX_NAME 64 #define MAX_ARGS 64 #define R_NAME_GL_DISABLED "_GLLK_disabled" #define R_NAME_GL "GLLK" #define R_NAME_VG "VGLK" #define S_NAME_GL_DLM "lvm_global" #define LVM_LS_PREFIX "lvm_" /* ls name is prefix + vg_name */ /* global lockspace name for sanlock is a vg name */ /* lock manager types */ enum { LD_LM_NONE = 0, LD_LM_UNUSED = 1, /* place holder so values match lib/locking/lvmlockd.h */ LD_LM_DLM = 2, LD_LM_SANLOCK = 3, }; /* operation types */ enum { LD_OP_HELLO = 1, LD_OP_QUIT, LD_OP_INIT, LD_OP_FREE, LD_OP_START, LD_OP_STOP, LD_OP_LOCK, LD_OP_UPDATE, LD_OP_CLOSE, LD_OP_ENABLE, LD_OP_DISABLE, LD_OP_START_WAIT, LD_OP_STOP_ALL, LD_OP_DUMP_INFO, LD_OP_DUMP_LOG, LD_OP_RENAME_BEFORE, LD_OP_RENAME_FINAL, LD_OP_RUNNING_LM, LD_OP_FIND_FREE_LOCK, LD_OP_KILL_VG, LD_OP_DROP_VG, LD_OP_BUSY, }; /* resource types */ enum { LD_RT_GL = 1, LD_RT_VG, LD_RT_LV, }; /* lock modes, more restrictive must be larger value */ enum { LD_LK_IV = -1, LD_LK_UN = 0, LD_LK_NL = 1, LD_LK_SH = 2, LD_LK_EX = 3, }; struct list_head { struct list_head *next, *prev; }; struct client { struct list_head list; pthread_mutex_t mutex; int pid; int fd; int pi; uint32_t id; unsigned int recv : 1; unsigned int dead : 1; unsigned int poll_ignore : 1; unsigned int lock_ops : 1; char name[MAX_NAME+1]; }; #define LD_AF_PERSISTENT 0x00000001 #define LD_AF_NO_CLIENT 0x00000002 #define LD_AF_UNLOCK_CANCEL 0x00000004 #define LD_AF_NEXT_VERSION 0x00000008 #define LD_AF_WAIT 0x00000010 #define LD_AF_FORCE 0x00000020 #define LD_AF_EX_DISABLE 0x00000040 #define LD_AF_ENABLE 0x00000080 #define LD_AF_DISABLE 0x00000100 #define LD_AF_SEARCH_LS 0x00000200 #define LD_AF_WAIT_STARTING 0x00001000 #define LD_AF_DUP_GL_LS 0x00002000 #define LD_AF_ADOPT 0x00010000 #define LD_AF_WARN_GL_REMOVED 0x00020000 #define LD_AF_LV_LOCK 0x00040000 #define LD_AF_LV_UNLOCK 0x00080000 /* * Number of times to repeat a lock request after * a lock conflict (-EAGAIN) if unspecified in the * request. 
*/ #define DEFAULT_MAX_RETRIES 4 struct action { struct list_head list; uint32_t client_id; uint32_t flags; /* LD_AF_ */ uint32_t version; uint64_t host_id; int8_t op; /* operation type LD_OP_ */ int8_t rt; /* resource type LD_RT_ */ int8_t mode; /* lock mode LD_LK_ */ int8_t lm_type; /* lock manager: LM_DLM, LM_SANLOCK */ int retries; int max_retries; int result; int lm_rv; /* return value from lm_ function */ char vg_uuid[64]; char vg_name[MAX_NAME+1]; char lv_name[MAX_NAME+1]; char lv_uuid[MAX_NAME+1]; char vg_args[MAX_ARGS+1]; char lv_args[MAX_ARGS+1]; char vg_sysid[MAX_NAME+1]; }; struct resource { struct list_head list; /* lockspace.resources */ char name[MAX_NAME+1]; /* vg name or lv name */ int8_t type; /* resource type LD_RT_ */ int8_t mode; unsigned int sh_count; /* number of sh locks on locks list */ uint32_t version; uint32_t last_client_id; /* last client_id to lock or unlock resource */ unsigned int lm_init : 1; /* lm_data is initialized */ unsigned int adopt : 1; /* temp flag in remove_inactive_lvs */ unsigned int version_zero_valid : 1; unsigned int use_vb : 1; struct list_head locks; struct list_head actions; char lv_args[MAX_ARGS+1]; char lm_data[0]; /* lock manager specific data */ }; #define LD_LF_PERSISTENT 0x00000001 struct lock { struct list_head list; /* resource.locks */ int8_t mode; /* lock mode LD_LK_ */ uint32_t version; uint32_t flags; /* LD_LF_ */ uint32_t client_id; /* may be 0 for persistent or internal locks */ }; struct lockspace { struct list_head list; /* lockspaces */ char name[MAX_NAME+1]; char vg_name[MAX_NAME+1]; char vg_uuid[64]; char vg_args[MAX_ARGS+1]; /* lock manager specific args */ char vg_sysid[MAX_NAME+1]; int8_t lm_type; /* lock manager: LM_DLM, LM_SANLOCK */ void *lm_data; uint64_t host_id; uint64_t free_lock_offset; /* start search for free lock here */ uint32_t start_client_id; /* client_id that started the lockspace */ pthread_t thread; /* makes synchronous lock requests */ pthread_cond_t cond; pthread_mutex_t mutex; unsigned int create_fail : 1; unsigned int create_done : 1; unsigned int thread_work : 1; unsigned int thread_stop : 1; unsigned int thread_done : 1; unsigned int sanlock_gl_enabled: 1; unsigned int sanlock_gl_dup: 1; unsigned int free_vg: 1; unsigned int kill_vg: 1; unsigned int drop_vg: 1; struct list_head actions; /* new client actions */ struct list_head resources; /* resource/lock state for gl/vg/lv */ }; /* val_blk version */ #define VAL_BLK_VERSION 0x0101 /* val_blk flags */ #define VBF_REMOVED 0x0001 struct val_blk { uint16_t version; uint16_t flags; uint32_t r_version; }; /* lm_unlock flags */ #define LMUF_FREE_VG 0x00000001 #define container_of(ptr, type, member) ({ \ const typeof( ((type *)0)->member ) *__mptr = (ptr); \ (type *)( (char *)__mptr - offsetof(type,member) );}) static inline void INIT_LIST_HEAD(struct list_head *list) { list->next = list; list->prev = list; } static inline void __list_add(struct list_head *new, struct list_head *prev, struct list_head *next) { next->prev = new; new->next = next; new->prev = prev; prev->next = new; } static inline void __list_del(struct list_head *prev, struct list_head *next) { next->prev = prev; prev->next = next; } static inline void list_add(struct list_head *new, struct list_head *head) { __list_add(new, head, head->next); } static inline void list_add_tail(struct list_head *new, struct list_head *head) { __list_add(new, head->prev, head); } static inline void list_del(struct list_head *entry) { __list_del(entry->prev, entry->next); } static inline int 
list_empty(const struct list_head *head) { return head->next == head; } #define list_entry(ptr, type, member) \ container_of(ptr, type, member) #define list_first_entry(ptr, type, member) \ list_entry((ptr)->next, type, member) #define list_for_each_entry(pos, head, member) \ for (pos = list_entry((head)->next, typeof(*pos), member); \ &pos->member != (head); \ pos = list_entry(pos->member.next, typeof(*pos), member)) #define list_for_each_entry_safe(pos, n, head, member) \ for (pos = list_entry((head)->next, typeof(*pos), member), \ n = list_entry(pos->member.next, typeof(*pos), member); \ &pos->member != (head); \ pos = n, n = list_entry(n->member.next, typeof(*n), member)) /* to improve readability */ #define WAIT 1 #define NO_WAIT 0 #define FORCE 1 #define NO_FORCE 0 /* * global variables */ #ifndef EXTERN #define EXTERN extern #define INIT(X) #else #undef EXTERN #define EXTERN #define INIT(X) =X #endif /* * gl_type_static and gl_use_ are set by command line or config file * to specify whether the global lock comes from dlm or sanlock. * Without a static setting, lvmlockd will figure out where the * global lock should be (but it could get mixed up in cases where * both sanlock and dlm vgs exist.) * * gl_use_dlm means that the gl should come from lockspace gl_lsname_dlm * gl_use_sanlock means that the gl should come from lockspace gl_lsname_sanlock * * gl_use_dlm has precedence over gl_use_sanlock, so if a node sees both * dlm and sanlock vgs, it will use the dlm gl. * * gl_use_ is set when the first evidence of that lm_type is seen * in any command. * * gl_lsname_sanlock is set when the first vg is seen in which an * enabled gl is exists, or when init_vg creates a vg with gl enabled, * or when enable_gl is used. * * gl_lsname_sanlock is cleared when free_vg deletes a vg with gl enabled * or when disable_gl matches. */ EXTERN int gl_type_static; EXTERN int gl_use_dlm; EXTERN int gl_use_sanlock; EXTERN int gl_vg_removed; EXTERN char gl_lsname_dlm[MAX_NAME+1]; EXTERN char gl_lsname_sanlock[MAX_NAME+1]; EXTERN int global_dlm_lockspace_exists; EXTERN int daemon_test; /* run as much as possible without a live lock manager */ EXTERN int daemon_debug; EXTERN int daemon_host_id; EXTERN const char *daemon_host_id_file; EXTERN int sanlock_io_timeout; /* * This flag is set to 1 if we see multiple vgs with the global * lock enabled. While this is set, we return a special flag * with the vg lock result indicating to the lvm command that * there is a duplicate gl in the vg which should be resolved. * While this is set, find_lockspace_name has the side job of * counting the number of lockspaces with enabled gl's so that * this can be set back to zero when the duplicates are disabled. */ EXTERN int sanlock_gl_dup; void log_level(int level, const char *fmt, ...) __attribute__((format(printf, 2, 3))); #define log_debug(fmt, args...) log_level(LOG_DEBUG, fmt, ##args) #define log_error(fmt, args...) log_level(LOG_ERR, fmt, ##args) #define log_warn(fmt, args...) 
log_level(LOG_WARNING, fmt, ##args) struct lockspace *alloc_lockspace(void); int lockspaces_empty(void); int last_string_from_args(char *args_in, char *last); int version_from_args(char *args, unsigned int *major, unsigned int *minor, unsigned int *patch); static inline const char *mode_str(int x) { switch (x) { case LD_LK_IV: return "iv"; case LD_LK_UN: return "un"; case LD_LK_NL: return "nl"; case LD_LK_SH: return "sh"; case LD_LK_EX: return "ex"; default: return "."; }; } #ifdef LOCKDDLM_SUPPORT int lm_init_vg_dlm(char *ls_name, char *vg_name, uint32_t flags, char *vg_args); int lm_prepare_lockspace_dlm(struct lockspace *ls); int lm_add_lockspace_dlm(struct lockspace *ls, int adopt); int lm_rem_lockspace_dlm(struct lockspace *ls, int free_vg); int lm_lock_dlm(struct lockspace *ls, struct resource *r, int ld_mode, struct val_blk *vb_out, int adopt); int lm_convert_dlm(struct lockspace *ls, struct resource *r, int ld_mode, uint32_t r_version); int lm_unlock_dlm(struct lockspace *ls, struct resource *r, uint32_t r_version, uint32_t lmu_flags); int lm_rem_resource_dlm(struct lockspace *ls, struct resource *r); int lm_get_lockspaces_dlm(struct list_head *ls_rejoin); int lm_data_size_dlm(void); int lm_is_running_dlm(void); int lm_hosts_dlm(struct lockspace *ls, int notify); static inline int lm_support_dlm(void) { return 1; } #else static inline int lm_init_vg_dlm(char *ls_name, char *vg_name, uint32_t flags, char *vg_args) { return -1; } static inline int lm_prepare_lockspace_dlm(struct lockspace *ls) { return -1; } static inline int lm_add_lockspace_dlm(struct lockspace *ls, int adopt) { return -1; } static inline int lm_rem_lockspace_dlm(struct lockspace *ls, int free_vg) { return -1; } static inline int lm_lock_dlm(struct lockspace *ls, struct resource *r, int ld_mode, struct val_blk *vb_out, int adopt) { return -1; } static inline int lm_convert_dlm(struct lockspace *ls, struct resource *r, int ld_mode, uint32_t r_version) { return -1; } static inline int lm_unlock_dlm(struct lockspace *ls, struct resource *r, uint32_t r_version, uint32_t lmu_flags) { return -1; } static inline int lm_rem_resource_dlm(struct lockspace *ls, struct resource *r) { return -1; } static inline int lm_get_lockspaces_dlm(struct list_head *ls_rejoin) { return -1; } static inline int lm_data_size_dlm(void) { return -1; } static inline int lm_is_running_dlm(void) { return 0; } static inline int lm_support_dlm(void) { return 0; } static inline int lm_hosts_dlm(struct lockspace *ls, int notify) { return 0; } #endif /* dlm support */ #ifdef LOCKDSANLOCK_SUPPORT int lm_init_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_args); int lm_init_lv_sanlock(char *ls_name, char *vg_name, char *lv_name, char *vg_args, char *lv_args, uint64_t free_offset); int lm_free_lv_sanlock(struct lockspace *ls, struct resource *r); int lm_rename_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_args); int lm_prepare_lockspace_sanlock(struct lockspace *ls); int lm_add_lockspace_sanlock(struct lockspace *ls, int adopt); int lm_rem_lockspace_sanlock(struct lockspace *ls, int free_vg); int lm_lock_sanlock(struct lockspace *ls, struct resource *r, int ld_mode, struct val_blk *vb_out, int *retry, int adopt); int lm_convert_sanlock(struct lockspace *ls, struct resource *r, int ld_mode, uint32_t r_version); int lm_unlock_sanlock(struct lockspace *ls, struct resource *r, uint32_t r_version, uint32_t lmu_flags); int lm_able_gl_sanlock(struct lockspace *ls, int enable); int lm_ex_disable_gl_sanlock(struct lockspace 
*ls); int lm_hosts_sanlock(struct lockspace *ls, int notify); int lm_rem_resource_sanlock(struct lockspace *ls, struct resource *r); int lm_gl_is_enabled(struct lockspace *ls); int lm_get_lockspaces_sanlock(struct list_head *ls_rejoin); int lm_data_size_sanlock(void); int lm_is_running_sanlock(void); int lm_find_free_lock_sanlock(struct lockspace *ls, uint64_t *free_offset); static inline int lm_support_sanlock(void) { return 1; } #else static inline int lm_init_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_args) { return -1; } static inline int lm_init_lv_sanlock(char *ls_name, char *vg_name, char *lv_name, char *vg_args, char *lv_args, uint64_t free_offset) { return -1; } static inline int lm_free_lv_sanlock(struct lockspace *ls, struct resource *r) { return -1; } static inline int lm_rename_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_args) { return -1; } static inline int lm_prepare_lockspace_sanlock(struct lockspace *ls) { return -1; } static inline int lm_add_lockspace_sanlock(struct lockspace *ls, int adopt) { return -1; } static inline int lm_rem_lockspace_sanlock(struct lockspace *ls, int free_vg) { return -1; } static inline int lm_lock_sanlock(struct lockspace *ls, struct resource *r, int ld_mode, struct val_blk *vb_out, int *retry, int adopt) { return -1; } static inline int lm_convert_sanlock(struct lockspace *ls, struct resource *r, int ld_mode, uint32_t r_version) { return -1; } static inline int lm_unlock_sanlock(struct lockspace *ls, struct resource *r, uint32_t r_version, uint32_t lmu_flags) { return -1; } static inline int lm_able_gl_sanlock(struct lockspace *ls, int enable) { return -1; } static inline int lm_ex_disable_gl_sanlock(struct lockspace *ls) { return -1; } static inline int lm_hosts_sanlock(struct lockspace *ls, int notify) { return -1; } static inline int lm_rem_resource_sanlock(struct lockspace *ls, struct resource *r) { return -1; } static inline int lm_gl_is_enabled(struct lockspace *ls) { return -1; } static inline int lm_get_lockspaces_sanlock(struct list_head *ls_rejoin) { return -1; } static inline int lm_data_size_sanlock(void) { return -1; } static inline int lm_is_running_sanlock(void) { return 0; } static inline int lm_find_free_lock_sanlock(struct lockspace *ls, uint64_t *free_offset) { return -1; } static inline int lm_support_sanlock(void) { return 0; } #endif /* sanlock support */ #endif /* _LVM_LVMLOCKD_INTERNAL_H */ LVM2.2.02.176/daemons/dmeventd/0000755000000000000120000000000013176752421014560 5ustar rootwheelLVM2.2.02.176/daemons/dmeventd/Makefile.in0000644000000000000120000000626113176752421016632 0ustar rootwheel# # Copyright (C) 2005-2011 Red Hat, Inc. All rights reserved. # # This file is part of the device-mapper userspace tools. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU Lesser General Public License v.2.1. 
# # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ SOURCES = libdevmapper-event.c SOURCES2 = dmeventd.c TARGETS = dmeventd .PHONY: install_lib_dynamic install_lib_static install_include \ install_pkgconfig install_dmeventd_dynamic install_dmeventd_static \ install_lib install_dmeventd INSTALL_DMEVENTD_TARGETS = install_dmeventd_dynamic INSTALL_LIB_TARGETS = install_lib_dynamic LIB_NAME = libdevmapper-event ifeq ("@STATIC_LINK@", "yes") LIB_STATIC = $(LIB_NAME).a TARGETS += $(LIB_STATIC) dmeventd.static INSTALL_DMEVENTD_TARGETS += install_dmeventd_static INSTALL_LIB_TARGETS += install_lib_static endif LIB_VERSION = $(LIB_VERSION_DM) LIB_SHARED = $(LIB_NAME).$(LIB_SUFFIX) CLEAN_TARGETS = dmeventd.static $(LIB_NAME).a ifneq ($(MAKECMDGOALS),device-mapper) SUBDIRS+=plugins endif CFLOW_LIST = $(SOURCES) CFLOW_LIST_TARGET = $(LIB_NAME).cflow CFLOW_TARGET = dmeventd EXPORTED_HEADER = $(srcdir)/libdevmapper-event.h EXPORTED_FN_PREFIX = dm_event include $(top_builddir)/make.tmpl all: device-mapper device-mapper: $(TARGETS) CFLAGS_dmeventd.o += $(EXTRA_EXEC_CFLAGS) LIBS += -ldevmapper $(PTHREAD_LIBS) dmeventd: $(LIB_SHARED) dmeventd.o $(CC) $(CFLAGS) -L. $(LDFLAGS) $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS) dmeventd.o \ -o $@ $(DL_LIBS) $(DMEVENT_LIBS) $(LIBS) dmeventd.static: $(LIB_STATIC) dmeventd.o $(interfacebuilddir)/libdevmapper.a $(CC) $(CFLAGS) $(LDFLAGS) -static -L. -L$(interfacebuilddir) dmeventd.o \ -o $@ $(DL_LIBS) $(DMEVENT_LIBS) $(LIBS) $(STATIC_LIBS) ifeq ("@PKGCONFIG@", "yes") INSTALL_LIB_TARGETS += install_pkgconfig endif ifneq ("$(CFLOW_CMD)", "") CFLOW_SOURCES = $(addprefix $(srcdir)/, $(SOURCES)) -include $(top_builddir)/libdm/libdevmapper.cflow -include $(top_builddir)/lib/liblvm-internal.cflow -include $(top_builddir)/lib/liblvm2cmd.cflow -include $(top_builddir)/daemons/dmeventd/$(LIB_NAME).cflow -include $(top_builddir)/daemons/dmeventd/plugins/mirror/$(LIB_NAME)-lvm2mirror.cflow endif install_include: $(srcdir)/libdevmapper-event.h $(INSTALL_DATA) -D $< $(includedir)/$( #include /* TODO - move this mountinfo code into library to be reusable */ #ifdef __linux__ # include "kdev_t.h" #else # define MAJOR(x) major((x)) # define MINOR(x) minor((x)) #endif /* First warning when thin data or metadata is 80% full. */ #define WARNING_THRESH (DM_PERCENT_1 * 80) /* Umount thin LVs when thin data or metadata LV is >= * and lvextend --use-policies has failed. */ #define UMOUNT_THRESH (DM_PERCENT_1 * 95) /* Run a check every 5%. */ #define CHECK_STEP (DM_PERCENT_1 * 5) /* Do not bother checking thin data or metadata is less than 50% full. 
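 * (no policy action is attempted while usage stays below that floor).
 *
 * The thresholds here are in dm_percent_t units (DM_PERCENT_1 per whole
 * percent).  The compiled-out sketch below illustrates the 5% stepping
 * that process_event() applies to its next-check threshold, using plain
 * integer percents for readability; it is an illustration only, not code
 * used by the plugin.
 */
#if 0
#include <stdio.h>

#define STEP 5	/* mirrors CHECK_STEP, in whole percents */

int main(void)
{
	int used, next_check = 50;	/* mirrors CHECK_MINIMUM */

	for (used = 50; used <= 100; used += 3) {
		if (used > next_check) {
			/* Same arithmetic as process_event(): round up to the next 5% step. */
			next_check = (used / STEP + 1) * STEP;
			printf("usage %d%% -> run policy, next check above %d%%\n",
			       used, next_check);
		}
	}
	return 0;
}
#endif

/* The 50% floor referred to above: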
*/ #define CHECK_MINIMUM (DM_PERCENT_1 * 50) #define UMOUNT_COMMAND "/bin/umount" #define MAX_FAILS (256) /* ~42 mins between cmd call retry with 10s delay */ #define THIN_DEBUG 0 struct dso_state { struct dm_pool *mem; int metadata_percent_check; int metadata_percent; int data_percent_check; int data_percent; uint64_t known_metadata_size; uint64_t known_data_size; unsigned fails; unsigned max_fails; int restore_sigset; sigset_t old_sigset; pid_t pid; char *argv[3]; char *cmd_str; }; DM_EVENT_LOG_FN("thin") #define UUID_PREFIX "LVM-" static int _run_command(struct dso_state *state) { char val[3][36]; char *env[] = { val[0], val[1], val[2], NULL }; int i; /* Mark for possible lvm2 command we are running from dmeventd * lvm2 will not try to talk back to dmeventd while processing it */ (void) dm_snprintf(val[0], sizeof(val[0]), "LVM_RUN_BY_DMEVENTD=1"); if (state->data_percent) { /* Prepare some known data to env vars for easy use */ (void) dm_snprintf(val[1], sizeof(val[1]), "DMEVENTD_THIN_POOL_DATA=%d", state->data_percent / DM_PERCENT_1); (void) dm_snprintf(val[2], sizeof(val[2]), "DMEVENTD_THIN_POOL_METADATA=%d", state->metadata_percent / DM_PERCENT_1); } else { /* For an error event it's for a user to check status and decide */ env[1] = NULL; log_debug("Error event processing."); } log_verbose("Executing command: %s", state->cmd_str); /* TODO: * Support parallel run of 'task' and it's waitpid maintainence * ATM we can't handle signaling of SIGALRM * as signalling is not allowed while 'process_event()' is running */ if (!(state->pid = fork())) { /* child */ (void) close(0); for (i = 3; i < 255; ++i) (void) close(i); execve(state->argv[0], state->argv, env); _exit(errno); } else if (state->pid == -1) { log_error("Can't fork command %s.", state->cmd_str); state->fails = 1; return 0; } return 1; } static int _use_policy(struct dm_task *dmt, struct dso_state *state) { #if THIN_DEBUG log_debug("dmeventd executes: %s.", state->cmd_str); #endif if (state->argv[0]) return _run_command(state); if (!dmeventd_lvm2_run_with_lock(state->cmd_str)) { log_error("Failed command for %s.", dm_task_get_name(dmt)); state->fails = 1; return 0; } state->fails = 0; return 1; } /* Check if executed command has finished * Only 1 command may run */ static int _wait_for_pid(struct dso_state *state) { int status = 0; if (state->pid == -1) return 1; if (!waitpid(state->pid, &status, WNOHANG)) return 0; /* Wait for finish */ if (WIFEXITED(status)) { log_verbose("Child %d exited with status %d.", state->pid, WEXITSTATUS(status)); state->fails = WEXITSTATUS(status) ? 
1 : 0; } else { if (WIFSIGNALED(status)) log_verbose("Child %d was terminated with status %d.", state->pid, WTERMSIG(status)); state->fails = 1; } state->pid = -1; return 1; } void process_event(struct dm_task *dmt, enum dm_event_mask event __attribute__((unused)), void **user) { const char *device = dm_task_get_name(dmt); struct dso_state *state = *user; struct dm_status_thin_pool *tps = NULL; void *next = NULL; uint64_t start, length; char *target_type = NULL; char *params; int needs_policy = 0; struct dm_task *new_dmt = NULL; #if THIN_DEBUG log_debug("Watch for tp-data:%.2f%% tp-metadata:%.2f%%.", dm_percent_to_round_float(state->data_percent_check, 2), dm_percent_to_round_float(state->metadata_percent_check, 2)); #endif if (!_wait_for_pid(state)) { log_warn("WARNING: Skipping event, child %d is still running (%s).", state->pid, state->cmd_str); return; } if (event & DM_EVENT_DEVICE_ERROR) { /* Error -> no need to check and do instant resize */ state->data_percent = state->metadata_percent = 0; if (_use_policy(dmt, state)) goto out; stack; /* * Rather update oldish status * since after 'command' processing * percentage info could have changed a lot. * If we would get above UMOUNT_THRESH * we would wait for next sigalarm. */ if (!(new_dmt = dm_task_create(DM_DEVICE_STATUS))) goto_out; if (!dm_task_set_uuid(new_dmt, dm_task_get_uuid(dmt))) goto_out; /* Non-blocking status read */ if (!dm_task_no_flush(new_dmt)) log_warn("WARNING: Can't set no_flush for dm status."); if (!dm_task_run(new_dmt)) goto_out; dmt = new_dmt; } dm_get_next_target(dmt, next, &start, &length, &target_type, ¶ms); if (!target_type || (strcmp(target_type, "thin-pool") != 0)) { log_error("Invalid target type."); goto out; } if (!dm_get_status_thin_pool(state->mem, params, &tps)) { log_error("Failed to parse status."); goto out; } #if THIN_DEBUG log_debug("Thin pool status " FMTu64 "/" FMTu64 " " FMTu64 "/" FMTu64 ".", tps->used_metadata_blocks, tps->total_metadata_blocks, tps->used_data_blocks, tps->total_data_blocks); #endif /* Thin pool size had changed. Clear the threshold. */ if (state->known_metadata_size != tps->total_metadata_blocks) { state->metadata_percent_check = CHECK_MINIMUM; state->known_metadata_size = tps->total_metadata_blocks; state->fails = 0; } if (state->known_data_size != tps->total_data_blocks) { state->data_percent_check = CHECK_MINIMUM; state->known_data_size = tps->total_data_blocks; state->fails = 0; } /* * Trigger action when threshold boundary is exceeded. * Report 80% threshold warning when it's used above 80%. * Only 100% is exception as it cannot be surpased so policy * action is called for: >50%, >55% ... 
>95%, 100% */ state->metadata_percent = dm_make_percent(tps->used_metadata_blocks, tps->total_metadata_blocks); if ((state->metadata_percent > WARNING_THRESH) && (state->metadata_percent > state->metadata_percent_check)) log_warn("WARNING: Thin pool %s metadata is now %.2f%% full.", device, dm_percent_to_round_float(state->metadata_percent, 2)); if (state->metadata_percent > CHECK_MINIMUM) { /* Run action when usage raised more than CHECK_STEP since the last time */ if (state->metadata_percent > state->metadata_percent_check) needs_policy = 1; state->metadata_percent_check = (state->metadata_percent / CHECK_STEP + 1) * CHECK_STEP; if (state->metadata_percent_check == DM_PERCENT_100) state->metadata_percent_check--; /* Can't get bigger then 100% */ } else state->metadata_percent_check = CHECK_MINIMUM; state->data_percent = dm_make_percent(tps->used_data_blocks, tps->total_data_blocks); if ((state->data_percent > WARNING_THRESH) && (state->data_percent > state->data_percent_check)) log_warn("WARNING: Thin pool %s data is now %.2f%% full.", device, dm_percent_to_round_float(state->data_percent, 2)); if (state->data_percent > CHECK_MINIMUM) { /* Run action when usage raised more than CHECK_STEP since the last time */ if (state->data_percent > state->data_percent_check) needs_policy = 1; state->data_percent_check = (state->data_percent / CHECK_STEP + 1) * CHECK_STEP; if (state->data_percent_check == DM_PERCENT_100) state->data_percent_check--; /* Can't get bigger then 100% */ } else state->data_percent_check = CHECK_MINIMUM; /* Reduce number of _use_policy() calls by power-of-2 factor till frequency of MAX_FAILS is reached. * Avoids too high number of error retries, yet shows some status messages in log regularly. * i.e. PV could have been pvmoved and VG/LV was locked for a while... */ if (state->fails) { if (state->fails++ <= state->max_fails) { log_debug("Postponing frequently failing policy (%u <= %u).", state->fails - 1, state->max_fails); return; } if (state->max_fails < MAX_FAILS) state->max_fails <<= 1; state->fails = needs_policy = 1; /* Retry failing command */ } else state->max_fails = 1; /* Reset on success */ if (needs_policy) _use_policy(dmt, state); out: if (tps) dm_pool_free(state->mem, tps); if (new_dmt) dm_task_destroy(new_dmt); } /* Handle SIGCHLD for a thread */ static void _sig_child(int signum __attribute__((unused))) { /* empty SIG_IGN */; } /* Setup handler for SIGCHLD when executing external command * to get quick 'waitpid()' reaction * It will interrupt syscall just like SIGALRM and * invoke process_event(). 
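 *
 * The pattern is: fork() the command and return, then reap it later with a
 * non-blocking waitpid() once SIGCHLD has interrupted the wait (see
 * _run_command() and _wait_for_pid() above).  A compiled-out, self-contained
 * sketch of that pattern (illustration only; the handler name is made up):
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static void child_handler(int sig) { (void) sig; /* only interrupts syscalls */ }

int main(void)
{
	struct sigaction act = { .sa_handler = child_handler };
	pid_t pid;
	int status = 0;

	if (sigaction(SIGCHLD, &act, NULL))
		return 1;

	if (!(pid = fork())) {
		/* child: run a trivial command */
		execlp("true", "true", (char *) NULL);
		_exit(127);
	}
	if (pid < 0)
		return 1;

	/* Parent: a blocking call such as sleep()/poll() is interrupted by
	 * SIGCHLD, so the non-blocking reap below runs promptly. */
	while (waitpid(pid, &status, WNOHANG) == 0)
		sleep(1);

	if (WIFEXITED(status))
		printf("child exited with status %d\n", WEXITSTATUS(status));
	return 0;
}
#endif

/* _init_thread_signals() installs the real SIGCHLD handler for this thread
 * and unblocks the signal.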
*/ static void _init_thread_signals(struct dso_state *state) { struct sigaction act = { .sa_handler = _sig_child }; sigset_t my_sigset; sigemptyset(&my_sigset); if (sigaction(SIGCHLD, &act, NULL)) log_warn("WARNING: Failed to set SIGCHLD action."); else if (sigaddset(&my_sigset, SIGCHLD)) log_warn("WARNING: Failed to add SIGCHLD to set."); else if (pthread_sigmask(SIG_UNBLOCK, &my_sigset, &state->old_sigset)) log_warn("WARNING: Failed to unblock SIGCHLD."); else state->restore_sigset = 1; } static void _restore_thread_signals(struct dso_state *state) { if (state->restore_sigset && pthread_sigmask(SIG_SETMASK, &state->old_sigset, NULL)) log_warn("WARNING: Failed to block SIGCHLD."); } int register_device(const char *device, const char *uuid __attribute__((unused)), int major __attribute__((unused)), int minor __attribute__((unused)), void **user) { struct dso_state *state; char *str; char cmd_str[PATH_MAX + 128 + 2]; /* cmd ' ' vg/lv \0 */ if (!dmeventd_lvm2_init_with_pool("thin_pool_state", state)) goto_bad; if (!dmeventd_lvm2_command(state->mem, cmd_str, sizeof(cmd_str), "_dmeventd_thin_command", device)) goto_bad; if (strncmp(cmd_str, "lvm ", 4) == 0) { if (!(state->cmd_str = dm_pool_strdup(state->mem, cmd_str + 4))) { log_error("Failed to copy lvm command."); goto bad; } } else if (cmd_str[0] == '/') { if (!(state->cmd_str = dm_pool_strdup(state->mem, cmd_str))) { log_error("Failed to copy thin command."); goto bad; } /* Find last space before 'vg/lv' */ if (!(str = strrchr(state->cmd_str, ' '))) goto inval; if (!(state->argv[0] = dm_pool_strndup(state->mem, state->cmd_str, str - state->cmd_str))) { log_error("Failed to copy command."); goto bad; } state->argv[1] = str + 1; /* 1 argument - vg/lv */ _init_thread_signals(state); } else /* Unuspported command format */ goto inval; state->pid = -1; *user = state; log_info("Monitoring thin pool %s.", device); return 1; inval: log_error("Invalid command for monitoring: %s.", cmd_str); bad: log_error("Failed to monitor thin pool %s.", device); if (state) dmeventd_lvm2_exit_with_pool(state); return 0; } int unregister_device(const char *device, const char *uuid __attribute__((unused)), int major __attribute__((unused)), int minor __attribute__((unused)), void **user) { struct dso_state *state = *user; int i; for (i = 0; !_wait_for_pid(state) && (i < 6); ++i) { if (i == 0) /* Give it 2 seconds, then try to terminate & kill it */ log_verbose("Child %d still not finished (%s) waiting.", state->pid, state->cmd_str); else if (i == 3) { log_warn("WARNING: Terminating child %d.", state->pid); kill(state->pid, SIGINT); kill(state->pid, SIGTERM); } else if (i == 5) { log_warn("WARNING: Killing child %d.", state->pid); kill(state->pid, SIGKILL); } sleep(1); } if (state->pid != -1) log_warn("WARNING: Cannot kill child %d!", state->pid); _restore_thread_signals(state); dmeventd_lvm2_exit_with_pool(state); log_info("No longer monitoring thin pool %s.", device); return 1; } LVM2.2.02.176/daemons/dmeventd/plugins/thin/.exported_symbols0000644000000000000120000000006013176752421022602 0ustar rootwheelprocess_event register_device unregister_device LVM2.2.02.176/daemons/dmeventd/plugins/raid/0000755000000000000120000000000013176752421017160 5ustar rootwheelLVM2.2.02.176/daemons/dmeventd/plugins/raid/Makefile.in0000644000000000000120000000177613176752421021240 0ustar rootwheel# # Copyright (C) 2011-2014 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ INCLUDES += -I$(top_srcdir)/daemons/dmeventd/plugins/lvm2 CLDFLAGS += -L$(top_builddir)/daemons/dmeventd/plugins/lvm2 SOURCES = dmeventd_raid.c LIB_NAME = libdevmapper-event-lvm2raid LIB_SHARED = $(LIB_NAME).$(LIB_SUFFIX) LIB_VERSION = $(LIB_VERSION_LVM) CFLOW_LIST = $(SOURCES) CFLOW_LIST_TARGET = $(LIB_NAME).cflow include $(top_builddir)/make.tmpl LIBS += -ldevmapper-event-lvm2 -ldevmapper install_lvm2: install_dm_plugin install: install_lvm2 LVM2.2.02.176/daemons/dmeventd/plugins/raid/.exported_symbols0000644000000000000120000000006013176752421022557 0ustar rootwheelprocess_event register_device unregister_device LVM2.2.02.176/daemons/dmeventd/plugins/raid/dmeventd_raid.c0000644000000000000120000001153313176752421022134 0ustar rootwheel/* * Copyright (C) 2005-2017 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "lib.h" #include "defaults.h" #include "dmeventd_lvm.h" #include "libdevmapper-event.h" /* Hold enough elements for the mximum number of RAID images */ #define RAID_DEVS_ELEMS ((DEFAULT_RAID_MAX_IMAGES + 63) / 64) struct dso_state { struct dm_pool *mem; char cmd_lvconvert[512]; uint64_t raid_devs[RAID_DEVS_ELEMS]; int failed; int warned; }; DM_EVENT_LOG_FN("raid") /* FIXME Reformat to 80 char lines. */ static int _process_raid_event(struct dso_state *state, char *params, const char *device) { struct dm_status_raid *status; const char *d; int dead = 0, r = 1; uint32_t dev; if (!dm_get_status_raid(state->mem, params, &status)) { log_error("Failed to process status line for %s.", device); return 0; } d = status->dev_health; while ((d = strchr(d, 'D'))) { dev = (uint32_t)(d - status->dev_health); if (!(state->raid_devs[dev / 64] & (UINT64_C(1) << (dev % 64)))) { state->raid_devs[dev / 64] |= (UINT64_C(1) << (dev % 64)); log_warn("WARNING: Device #%u of %s array, %s, has failed.", dev, status->raid_type, device); } d++; dead = 1; } /* * if we are converting from non-RAID to RAID (e.g. linear -> raid1) * and too many original devices die, such that we cannot continue * the "recover" operation, the sync action will go to "idle", the * unsynced devs will remain at 'a', and the original devices will * NOT SWITCH TO 'D', but will remain at 'A' - hoping to be revived. * * This is simply the way the kernel works... 
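 *
 * That is why the check below treats sync_action "idle" with the first
 * device still at 'a' and insync_regions < total_regions as a failure of
 * the primary sources, even though no device has reported 'D'.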
*/ if (!strcmp(status->sync_action, "idle") && (status->dev_health[0] == 'a') && (status->insync_regions < status->total_regions)) { log_error("Primary sources for new RAID, %s, have failed.", device); dead = 1; /* run it through LVM repair */ } if (dead) { if (status->insync_regions < status->total_regions) { if (!state->warned) { state->warned = 1; log_warn("WARNING: waiting for resynchronization to finish " "before initiating repair on RAID device %s.", device); } goto out; /* Not yet done syncing with accessible devices */ } if (state->failed) goto out; /* already reported */ state->failed = 1; /* if repair goes OK, report success even if lvscan has failed */ if (!dmeventd_lvm2_run_with_lock(state->cmd_lvconvert)) { log_error("Repair of RAID device %s failed.", device); r = 0; } } else { state->failed = 0; if (status->insync_regions == status->total_regions) memset(&state->raid_devs, 0, sizeof(state->raid_devs)); log_info("%s array, %s, is %s in-sync.", status->raid_type, device, (status->insync_regions == status->total_regions) ? "now" : "not"); } out: dm_pool_free(state->mem, status); return r; } void process_event(struct dm_task *dmt, enum dm_event_mask event __attribute__((unused)), void **user) { struct dso_state *state = *user; void *next = NULL; uint64_t start, length; char *target_type = NULL; char *params; const char *device = dm_task_get_name(dmt); do { next = dm_get_next_target(dmt, next, &start, &length, &target_type, ¶ms); if (!target_type) { log_info("%s mapping lost.", device); continue; } if (strcmp(target_type, "raid")) { log_info("%s has non-raid portion.", device); continue; } if (!_process_raid_event(state, params, device)) log_error("Failed to process event for %s.", device); } while (next); } int register_device(const char *device, const char *uuid __attribute__((unused)), int major __attribute__((unused)), int minor __attribute__((unused)), void **user) { struct dso_state *state; if (!dmeventd_lvm2_init_with_pool("raid_state", state)) goto_bad; if (!dmeventd_lvm2_command(state->mem, state->cmd_lvconvert, sizeof(state->cmd_lvconvert), "lvconvert --repair --use-policies", device)) goto_bad; *user = state; log_info("Monitoring RAID device %s for events.", device); return 1; bad: log_error("Failed to monitor RAID %s.", device); if (state) dmeventd_lvm2_exit_with_pool(state); return 0; } int unregister_device(const char *device, const char *uuid __attribute__((unused)), int major __attribute__((unused)), int minor __attribute__((unused)), void **user) { struct dso_state *state = *user; dmeventd_lvm2_exit_with_pool(state); log_info("No longer monitoring RAID device %s for events.", device); return 1; } LVM2.2.02.176/daemons/dmeventd/plugins/snapshot/0000755000000000000120000000000013176752421020100 5ustar rootwheelLVM2.2.02.176/daemons/dmeventd/plugins/snapshot/Makefile.in0000644000000000000120000000177213176752421022154 0ustar rootwheel# # Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved. # Copyright (C) 2004-2014 Red Hat, Inc. All rights reserved. # # This file is part of the LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ INCLUDES += -I$(top_srcdir)/daemons/dmeventd/plugins/lvm2 CLDFLAGS += -L$(top_builddir)/daemons/dmeventd/plugins/lvm2 SOURCES = dmeventd_snapshot.c LIB_SHARED = libdevmapper-event-lvm2snapshot.$(LIB_SUFFIX) LIB_VERSION = $(LIB_VERSION_LVM) include $(top_builddir)/make.tmpl LIBS += -ldevmapper-event-lvm2 -ldevmapper install_lvm2: install_dm_plugin install: install_lvm2 LVM2.2.02.176/daemons/dmeventd/plugins/snapshot/dmeventd_snapshot.c0000644000000000000120000001665713176752421024010 0ustar rootwheel/* * Copyright (C) 2007-2015 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "lib.h" #include "dmeventd_lvm.h" #include "libdevmapper-event.h" #include #include #include #include /* First warning when snapshot is 80% full. */ #define WARNING_THRESH (DM_PERCENT_1 * 80) /* Run a check every 5%. */ #define CHECK_STEP (DM_PERCENT_1 * 5) /* Do not bother checking snapshots less than 50% full. */ #define CHECK_MINIMUM (DM_PERCENT_1 * 50) #define UMOUNT_COMMAND "/bin/umount" struct dso_state { struct dm_pool *mem; dm_percent_t percent_check; uint64_t known_size; char cmd_lvextend[512]; }; DM_EVENT_LOG_FN("snap") static int _run(const char *cmd, ...) { va_list ap; int argc = 1; /* for argv[0], i.e. 
cmd */ int i = 0; const char **argv; pid_t pid = fork(); int status; if (pid == 0) { /* child */ va_start(ap, cmd); while (va_arg(ap, const char *)) ++ argc; va_end(ap); /* + 1 for the terminating NULL */ argv = alloca(sizeof(const char *) * (argc + 1)); argv[0] = cmd; va_start(ap, cmd); while ((argv[++i] = va_arg(ap, const char *))); va_end(ap); execvp(cmd, (char **)argv); log_sys_error("exec", cmd); exit(127); } if (pid > 0) { /* parent */ if (waitpid(pid, &status, 0) != pid) return 0; /* waitpid failed */ if (!WIFEXITED(status) || WEXITSTATUS(status)) return 0; /* the child failed */ } if (pid < 0) return 0; /* fork failed */ return 1; /* all good */ } static int _extend(const char *cmd) { log_debug("Extending snapshot via %s.", cmd); return dmeventd_lvm2_run_with_lock(cmd); } #ifdef SNAPSHOT_REMOVE /* Remove invalid snapshot from dm-table */ /* Experimental for now and not used by default */ static int _remove(const char *uuid) { int r = 1; uint32_t cookie = 0; struct dm_task *dmt; if (!(dmt = dm_task_create(DM_DEVICE_REMOVE))) return 0; if (!dm_task_set_uuid(dmt, uuid)) { r = 0; goto_out; } dm_task_retry_remove(dmt); if (!dm_task_set_cookie(dmt, &cookie, 0)) { r = 0; goto_out; } if (!dm_task_run(dmt)) { r = 0; goto_out; } out: dm_task_destroy(dmt); return r; } #endif /* SNAPSHOT_REMOVE */ static void _umount(const char *device, int major, int minor) { FILE *mounts; char buffer[4096]; char *words[3]; struct stat st; const char procmounts[] = "/proc/mounts"; if (!(mounts = fopen(procmounts, "r"))) { log_sys_error("fopen", procmounts); log_error("Not umounting %s.", device); return; } while (!feof(mounts)) { /* read a line of /proc/mounts */ if (!fgets(buffer, sizeof(buffer), mounts)) break; /* eof, likely */ /* words[0] is the device path and words[1] is the mount point */ if (dm_split_words(buffer, 3, 0, words) < 2) continue; /* find the major/minor of the device */ if (stat(words[0], &st)) continue; /* can't stat, skip this one */ if (S_ISBLK(st.st_mode) && (int) major(st.st_rdev) == major && (int) minor(st.st_rdev) == minor) { log_error("Unmounting invalid snapshot %s from %s.", device, words[1]); if (!_run(UMOUNT_COMMAND, "-fl", words[1], NULL)) log_error("Failed to umount snapshot %s from %s: %s.", device, words[1], strerror(errno)); } } if (fclose(mounts)) log_sys_error("close", procmounts); } void process_event(struct dm_task *dmt, enum dm_event_mask event __attribute__((unused)), void **user) { struct dso_state *state = *user; void *next = NULL; uint64_t start, length; char *target_type = NULL; char *params; struct dm_status_snapshot *status = NULL; const char *device = dm_task_get_name(dmt); int percent; struct dm_info info; /* No longer monitoring, waiting for remove */ if (!state->percent_check) return; dm_get_next_target(dmt, next, &start, &length, &target_type, &params); if (!target_type || strcmp(target_type, "snapshot")) { log_error("Target %s is not snapshot.", target_type); return; } if (!dm_get_status_snapshot(state->mem, params, &status)) { log_error("Cannot parse snapshot %s state: %s.", device, params); return; } /* * If the snapshot has been invalidated or we failed to parse * the status string, report the full status string to syslog. */ if (status->invalid || status->overflow || !status->total_sectors) { log_warn("WARNING: Snapshot %s changed state to: %s and should be removed.", device, params); state->percent_check = 0; if (dm_task_get_info(dmt, &info)) _umount(device, info.major, info.minor); #ifdef SNAPSHOT_REMOVE /* Maybe configurable ?
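For now this path is compiled in only when SNAPSHOT_REMOVE is defined at build time (for example, a hypothetical build passing -DSNAPSHOT_REMOVE in CFLAGS); when enabled, the invalidated snapshot is also dropped from the device-mapper table here: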
*/ _remove(dm_task_get_uuid(dmt)); #endif pthread_kill(pthread_self(), SIGALRM); goto out; } if (length <= (status->used_sectors - status->metadata_sectors)) { /* TODO eventually recognize earlier when room is enough */ log_info("Dropping monitoring of fully provisioned snapshot %s.", device); pthread_kill(pthread_self(), SIGALRM); goto out; } /* Snapshot size had changed. Clear the threshold. */ if (state->known_size != status->total_sectors) { state->percent_check = CHECK_MINIMUM; state->known_size = status->total_sectors; } percent = dm_make_percent(status->used_sectors, status->total_sectors); if (percent >= state->percent_check) { /* Usage has raised more than CHECK_STEP since the last time. Run actions. */ state->percent_check = (percent / CHECK_STEP) * CHECK_STEP + CHECK_STEP; if (percent >= WARNING_THRESH) /* Print a warning to syslog. */ log_warn("WARNING: Snapshot %s is now %.2f%% full.", device, dm_percent_to_round_float(percent, 2)); /* Try to extend the snapshot, in accord with user-set policies */ if (!_extend(state->cmd_lvextend)) log_error("Failed to extend snapshot %s.", device); } out: dm_pool_free(state->mem, status); } int register_device(const char *device, const char *uuid __attribute__((unused)), int major __attribute__((unused)), int minor __attribute__((unused)), void **user) { struct dso_state *state; if (!dmeventd_lvm2_init_with_pool("snapshot_state", state)) goto_bad; if (!dmeventd_lvm2_command(state->mem, state->cmd_lvextend, sizeof(state->cmd_lvextend), "lvextend --use-policies", device)) goto_bad; state->percent_check = CHECK_MINIMUM; *user = state; log_info("Monitoring snapshot %s.", device); return 1; bad: log_error("Failed to monitor snapshot %s.", device); if (state) dmeventd_lvm2_exit_with_pool(state); return 0; } int unregister_device(const char *device, const char *uuid __attribute__((unused)), int major __attribute__((unused)), int minor __attribute__((unused)), void **user) { struct dso_state *state = *user; dmeventd_lvm2_exit_with_pool(state); log_info("No longer monitoring snapshot %s.", device); return 1; } LVM2.2.02.176/daemons/dmeventd/plugins/snapshot/.exported_symbols0000644000000000000120000000006013176752421023477 0ustar rootwheelprocess_event register_device unregister_device LVM2.2.02.176/daemons/dmeventd/plugins/mirror/0000755000000000000120000000000013176752421017553 5ustar rootwheelLVM2.2.02.176/daemons/dmeventd/plugins/mirror/Makefile.in0000644000000000000120000000212313176752421021616 0ustar rootwheel# # Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved. # Copyright (C) 2004-2005, 2008-2014 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ INCLUDES += -I$(top_srcdir)/daemons/dmeventd/plugins/lvm2 CLDFLAGS += -L$(top_builddir)/daemons/dmeventd/plugins/lvm2 SOURCES = dmeventd_mirror.c LIB_NAME = libdevmapper-event-lvm2mirror LIB_SHARED = $(LIB_NAME).$(LIB_SUFFIX) LIB_VERSION = $(LIB_VERSION_LVM) CFLOW_LIST = $(SOURCES) CFLOW_LIST_TARGET = $(LIB_NAME).cflow include $(top_builddir)/make.tmpl LIBS += -ldevmapper-event-lvm2 -ldevmapper install_lvm2: install_dm_plugin install: install_lvm2 LVM2.2.02.176/daemons/dmeventd/plugins/mirror/dmeventd_mirror.c0000644000000000000120000001271213176752421023122 0ustar rootwheel/* * Copyright (C) 2005-2017 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "lib.h" #include "libdevmapper-event.h" #include "dmeventd_lvm.h" #include "activate.h" /* For TARGET_NAME* */ /* FIXME Reformat to 80 char lines. */ #define ME_IGNORE 0 #define ME_INSYNC 1 #define ME_FAILURE 2 struct dso_state { struct dm_pool *mem; char cmd_lvconvert[512]; }; DM_EVENT_LOG_FN("mirr") static void _process_status_code(dm_status_mirror_health_t health, uint32_t major, uint32_t minor, const char *dev_type, int *r) { /* * A => Alive - No failures * D => Dead - A write failure occurred leaving mirror out-of-sync * F => Flush failed. * S => Sync - A sychronization failure occurred, mirror out-of-sync * R => Read - A read failure occurred, mirror data unaffected * U => Unclassified failure (bug) */ switch (health) { case DM_STATUS_MIRROR_ALIVE: return; case DM_STATUS_MIRROR_FLUSH_FAILED: log_error("%s device %u:%u flush failed.", dev_type, major, minor); *r = ME_FAILURE; break; case DM_STATUS_MIRROR_SYNC_FAILED: log_error("%s device %u:%u sync failed.", dev_type, major, minor); break; case DM_STATUS_MIRROR_READ_FAILED: log_error("%s device %u:%u read failed.", dev_type, major, minor); break; default: log_error("%s device %u:%u has failed (%c).", dev_type, major, minor, (char)health); *r = ME_FAILURE; break; } } static int _get_mirror_event(struct dso_state *state, char *params) { int r = ME_INSYNC; unsigned i; struct dm_status_mirror *ms; if (!dm_get_status_mirror(state->mem, params, &ms)) { log_error("Unable to parse mirror status string."); return ME_IGNORE; } /* Check for bad mirror devices */ for (i = 0; i < ms->dev_count; ++i) _process_status_code(ms->devs[i].health, ms->devs[i].major, ms->devs[i].minor, i ? 
"Secondary mirror" : "Primary mirror", &r); /* Check for bad disk log device */ for (i = 0; i < ms->log_count; ++i) _process_status_code(ms->logs[i].health, ms->logs[i].major, ms->logs[i].minor, "Log", &r); /* Ignore if not in-sync */ if ((r == ME_INSYNC) && (ms->insync_regions != ms->total_regions)) r = ME_IGNORE; dm_pool_free(state->mem, ms); return r; } static int _remove_failed_devices(const char *cmd_lvconvert, const char *device) { /* if repair goes OK, report success even if lvscan has failed */ if (!dmeventd_lvm2_run_with_lock(cmd_lvconvert)) { log_error("Repair of mirrored device %s failed.", device); return 0; } log_info("Repair of mirrored device %s finished successfully.", device); return 1; } void process_event(struct dm_task *dmt, enum dm_event_mask event __attribute__((unused)), void **user) { struct dso_state *state = *user; void *next = NULL; uint64_t start, length; char *target_type = NULL; char *params; const char *device = dm_task_get_name(dmt); do { next = dm_get_next_target(dmt, next, &start, &length, &target_type, ¶ms); if (!target_type) { log_info("%s mapping lost.", device); continue; } if (strcmp(target_type, TARGET_NAME_MIRROR)) { log_info("%s has unmirrored portion.", device); continue; } switch(_get_mirror_event(state, params)) { case ME_INSYNC: /* FIXME: all we really know is that this _part_ of the device is in sync Also, this is not an error */ log_notice("%s is now in-sync.", device); break; case ME_FAILURE: log_error("Device failure in %s.", device); if (!_remove_failed_devices(state->cmd_lvconvert, device)) /* FIXME Why are all the error return codes unused? Get rid of them? */ log_error("Failed to remove faulty devices in %s.", device); /* Should check before warning user that device is now linear else log_notice("%s is now a linear device.", device); */ break; case ME_IGNORE: break; default: /* FIXME Provide value then! */ log_warn("WARNING: %s received unknown event.", device); } } while (next); } int register_device(const char *device, const char *uuid __attribute__((unused)), int major __attribute__((unused)), int minor __attribute__((unused)), void **user) { struct dso_state *state; if (!dmeventd_lvm2_init_with_pool("mirror_state", state)) goto_bad; /* CANNOT use --config as this disables cached content */ if (!dmeventd_lvm2_command(state->mem, state->cmd_lvconvert, sizeof(state->cmd_lvconvert), "lvconvert --repair --use-policies", device)) goto_bad; *user = state; log_info("Monitoring mirror device %s for events.", device); return 1; bad: log_error("Failed to monitor mirror %s.", device); if (state) dmeventd_lvm2_exit_with_pool(state); return 0; } int unregister_device(const char *device, const char *uuid __attribute__((unused)), int major __attribute__((unused)), int minor __attribute__((unused)), void **user) { struct dso_state *state = *user; dmeventd_lvm2_exit_with_pool(state); log_info("No longer monitoring mirror device %s for events.", device); return 1; } LVM2.2.02.176/daemons/dmeventd/plugins/mirror/.exported_symbols0000644000000000000120000000006013176752421023152 0ustar rootwheelprocess_event register_device unregister_device LVM2.2.02.176/daemons/dmeventd/plugins/lvm2/0000755000000000000120000000000013176752421017121 5ustar rootwheelLVM2.2.02.176/daemons/dmeventd/plugins/lvm2/Makefile.in0000644000000000000120000000153113176752421021166 0ustar rootwheel# # Copyright (C) 2010-2014 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ CLDFLAGS += -L$(top_builddir)/tools SOURCES = dmeventd_lvm.c LIB_SHARED = libdevmapper-event-lvm2.$(LIB_SUFFIX) LIB_VERSION = $(LIB_VERSION_LVM) include $(top_builddir)/make.tmpl LIBS += @LVM2CMD_LIB@ -ldevmapper $(PTHREAD_LIBS) install_lvm2: install_lib_shared install: install_lvm2 LVM2.2.02.176/daemons/dmeventd/plugins/lvm2/dmeventd_lvm.c0000644000000000000120000000737213176752421021762 0ustar rootwheel/* * Copyright (C) 2010-2015 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "lib.h" #include "dmeventd_lvm.h" #include "libdevmapper-event.h" #include "lvm2cmd.h" #include /* * register_device() is called first and performs initialisation. * Only one device may be registered or unregistered at a time. */ static pthread_mutex_t _register_mutex = PTHREAD_MUTEX_INITIALIZER; /* * Number of active registrations. */ static int _register_count = 0; static struct dm_pool *_mem_pool = NULL; static void *_lvm_handle = NULL; DM_EVENT_LOG_FN("#lvm") static void _lvm2_print_log(int level, const char *file, int line, int dm_errno_or_class, const char *msg) { print_log(level, file, line, dm_errno_or_class, "%s", msg); } /* * Currently only one event can be processed at a time. */ static pthread_mutex_t _event_mutex = PTHREAD_MUTEX_INITIALIZER; void dmeventd_lvm2_lock(void) { pthread_mutex_lock(&_event_mutex); } void dmeventd_lvm2_unlock(void) { pthread_mutex_unlock(&_event_mutex); } int dmeventd_lvm2_init(void) { int r = 0; pthread_mutex_lock(&_register_mutex); if (!_lvm_handle) { lvm2_log_fn(_lvm2_print_log); if (!(_lvm_handle = lvm2_init())) goto out; /* * Need some space for allocations. 
1024 should be more * than enough for what we need (device mapper name splitting) */ if (!_mem_pool && !(_mem_pool = dm_pool_create("mirror_dso", 1024))) { lvm2_exit(_lvm_handle); _lvm_handle = NULL; goto out; } lvm2_disable_dmeventd_monitoring(_lvm_handle); /* FIXME Temporary: move to dmeventd core */ lvm2_run(_lvm_handle, "_memlock_inc"); log_debug("lvm plugin initialized."); } _register_count++; r = 1; out: pthread_mutex_unlock(&_register_mutex); return r; } void dmeventd_lvm2_exit(void) { pthread_mutex_lock(&_register_mutex); if (!--_register_count) { log_debug("lvm plugin shutting down."); lvm2_run(_lvm_handle, "_memlock_dec"); dm_pool_destroy(_mem_pool); _mem_pool = NULL; lvm2_exit(_lvm_handle); _lvm_handle = NULL; log_debug("lvm plugin exited."); } pthread_mutex_unlock(&_register_mutex); } struct dm_pool *dmeventd_lvm2_pool(void) { return _mem_pool; } int dmeventd_lvm2_run(const char *cmdline) { return (lvm2_run(_lvm_handle, cmdline) == LVM2_COMMAND_SUCCEEDED); } int dmeventd_lvm2_command(struct dm_pool *mem, char *buffer, size_t size, const char *cmd, const char *device) { static char _internal_prefix[] = "_dmeventd_"; char *vg = NULL, *lv = NULL, *layer; int r; if (!dm_split_lvm_name(mem, device, &vg, &lv, &layer)) { log_error("Unable to determine VG name from %s.", device); return 0; } /* strip off the mirror component designations */ if ((layer = strstr(lv, "_mimagetmp")) || (layer = strstr(lv, "_mlog"))) *layer = '\0'; if (!strncmp(cmd, _internal_prefix, sizeof(_internal_prefix) - 1)) { dmeventd_lvm2_lock(); /* output of internal command passed via env var */ if (!dmeventd_lvm2_run(cmd)) cmd = NULL; else if ((cmd = getenv(cmd))) cmd = dm_pool_strdup(mem, cmd); /* copy with lock */ dmeventd_lvm2_unlock(); if (!cmd) { log_error("Unable to find configured command."); return 0; } } r = dm_snprintf(buffer, size, "%s %s/%s", cmd, vg, lv); dm_pool_free(mem, vg); if (r < 0) { log_error("Unable to form LVM command (too long)."); return 0; } return 1; } LVM2.2.02.176/daemons/dmeventd/plugins/lvm2/.exported_symbols0000644000000000000120000000021113176752421022516 0ustar rootwheeldmeventd_lvm2_init dmeventd_lvm2_exit dmeventd_lvm2_lock dmeventd_lvm2_unlock dmeventd_lvm2_pool dmeventd_lvm2_run dmeventd_lvm2_command LVM2.2.02.176/daemons/dmeventd/plugins/lvm2/dmeventd_lvm.h0000644000000000000120000000360413176752421021761 0ustar rootwheel/* * Copyright (C) 2010-2015 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* * Wrappers around liblvm2cmd functions for dmeventd plug-ins. * * liblvm2cmd is not thread-safe so the locking in this library helps dmeventd * threads to co-operate in sharing a single instance. * * FIXME Either support this properly as a generic liblvm2cmd wrapper or make * liblvm2cmd thread-safe so this can go away.
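 *
 * Typical plug-in usage (an illustrative sketch only; the dso_state layout,
 * the "foo_state" pool name and the lvconvert command string are placeholders
 * modelled on the raid, mirror and snapshot plug-ins in this directory):
 *
 *   struct dso_state *state;
 *
 *   if (!dmeventd_lvm2_init_with_pool("foo_state", state))
 *           goto bad;
 *   if (!dmeventd_lvm2_command(state->mem, state->cmd_lvconvert,
 *                              sizeof(state->cmd_lvconvert),
 *                              "lvconvert --repair --use-policies", device))
 *           goto bad;
 *   ...
 *   dmeventd_lvm2_run_with_lock(state->cmd_lvconvert);
 *   ...
 *   dmeventd_lvm2_exit_with_pool(state);
 *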
*/ #ifndef _DMEVENTD_LVMWRAP_H #define _DMEVENTD_LVMWRAP_H struct dm_pool; int dmeventd_lvm2_init(void); void dmeventd_lvm2_exit(void); int dmeventd_lvm2_run(const char *cmdline); void dmeventd_lvm2_lock(void); void dmeventd_lvm2_unlock(void); struct dm_pool *dmeventd_lvm2_pool(void); int dmeventd_lvm2_command(struct dm_pool *mem, char *buffer, size_t size, const char *cmd, const char *device); #define dmeventd_lvm2_run_with_lock(cmdline) \ ({\ int rc;\ dmeventd_lvm2_lock();\ rc = dmeventd_lvm2_run(cmdline);\ dmeventd_lvm2_unlock();\ rc;\ }) #define dmeventd_lvm2_init_with_pool(name, st) \ ({\ struct dm_pool *mem;\ st = NULL;\ if (dmeventd_lvm2_init()) {\ if ((mem = dm_pool_create(name, 2048)) &&\ (st = dm_pool_zalloc(mem, sizeof(*st))))\ st->mem = mem;\ else {\ if (mem)\ dm_pool_destroy(mem);\ dmeventd_lvm2_exit();\ }\ }\ st;\ }) #define dmeventd_lvm2_exit_with_pool(pool) \ do {\ dm_pool_destroy(pool->mem);\ dmeventd_lvm2_exit();\ } while(0) #endif /* _DMEVENTD_LVMWRAP_H */ LVM2.2.02.176/daemons/dmeventd/libdevmapper-event.c0000644000000000000120000005620313176752421020523 0ustar rootwheel/* * Copyright (C) 2005-2015 Red Hat, Inc. All rights reserved. * * This file is part of the device-mapper userspace tools. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "dm-logging.h" #include "dmlib.h" #include "libdevmapper-event.h" #include "dmeventd.h" #include #include #include #include #include #include /* for htonl, ntohl */ #include #include static int _debug_level = 0; static int _use_syslog = 0; static int _sequence_nr = 0; struct dm_event_handler { char *dso; char *dmeventd_path; char *dev_name; char *uuid; int major; int minor; uint32_t timeout; enum dm_event_mask mask; }; static void _dm_event_handler_clear_dev_info(struct dm_event_handler *dmevh) { dm_free(dmevh->dev_name); dm_free(dmevh->uuid); dmevh->dev_name = dmevh->uuid = NULL; dmevh->major = dmevh->minor = 0; } struct dm_event_handler *dm_event_handler_create(void) { struct dm_event_handler *dmevh; if (!(dmevh = dm_zalloc(sizeof(*dmevh)))) { log_error("Failed to allocate event handler."); return NULL; } return dmevh; } void dm_event_handler_destroy(struct dm_event_handler *dmevh) { _dm_event_handler_clear_dev_info(dmevh); dm_free(dmevh->dso); dm_free(dmevh->dmeventd_path); dm_free(dmevh); } int dm_event_handler_set_dmeventd_path(struct dm_event_handler *dmevh, const char *dmeventd_path) { if (!dmeventd_path) /* noop */ return 0; dm_free(dmevh->dmeventd_path); if (!(dmevh->dmeventd_path = dm_strdup(dmeventd_path))) return -ENOMEM; return 0; } int dm_event_handler_set_dso(struct dm_event_handler *dmevh, const char *path) { if (!path) /* noop */ return 0; dm_free(dmevh->dso); if (!(dmevh->dso = dm_strdup(path))) return -ENOMEM; return 0; } int dm_event_handler_set_dev_name(struct dm_event_handler *dmevh, const char *dev_name) { if (!dev_name) return 0; _dm_event_handler_clear_dev_info(dmevh); if (!(dmevh->dev_name = dm_strdup(dev_name))) return -ENOMEM; return 0; } int dm_event_handler_set_uuid(struct dm_event_handler *dmevh, const char *uuid) { if (!uuid) return 0; _dm_event_handler_clear_dev_info(dmevh); if (!(dmevh->uuid = dm_strdup(uuid))) return -ENOMEM; 
return 0; } void dm_event_handler_set_major(struct dm_event_handler *dmevh, int major) { int minor = dmevh->minor; _dm_event_handler_clear_dev_info(dmevh); dmevh->major = major; dmevh->minor = minor; } void dm_event_handler_set_minor(struct dm_event_handler *dmevh, int minor) { int major = dmevh->major; _dm_event_handler_clear_dev_info(dmevh); dmevh->major = major; dmevh->minor = minor; } void dm_event_handler_set_event_mask(struct dm_event_handler *dmevh, enum dm_event_mask evmask) { dmevh->mask = evmask; } void dm_event_handler_set_timeout(struct dm_event_handler *dmevh, int timeout) { dmevh->timeout = timeout; } const char *dm_event_handler_get_dso(const struct dm_event_handler *dmevh) { return dmevh->dso; } const char *dm_event_handler_get_dev_name(const struct dm_event_handler *dmevh) { return dmevh->dev_name; } const char *dm_event_handler_get_uuid(const struct dm_event_handler *dmevh) { return dmevh->uuid; } int dm_event_handler_get_major(const struct dm_event_handler *dmevh) { return dmevh->major; } int dm_event_handler_get_minor(const struct dm_event_handler *dmevh) { return dmevh->minor; } int dm_event_handler_get_timeout(const struct dm_event_handler *dmevh) { return dmevh->timeout; } enum dm_event_mask dm_event_handler_get_event_mask(const struct dm_event_handler *dmevh) { return dmevh->mask; } static int _check_message_id(struct dm_event_daemon_message *msg) { int pid, seq_nr; if ((sscanf(msg->data, "%d:%d", &pid, &seq_nr) != 2) || (pid != getpid()) || (seq_nr != _sequence_nr)) { log_error("Ignoring out-of-sequence reply from dmeventd. " "Expected %d:%d but received %s.", getpid(), _sequence_nr, msg->data); return 0; } return 1; } /* * daemon_read * @fifos * @msg * * Read message from daemon. * * Returns: 0 on failure, 1 on success */ static int _daemon_read(struct dm_event_fifos *fifos, struct dm_event_daemon_message *msg) { unsigned bytes = 0; int ret, i; fd_set fds; size_t size = 2 * sizeof(uint32_t); /* status + size */ uint32_t *header = alloca(size); char *buf = (char *)header; while (bytes < size) { for (i = 0, ret = 0; (i < 20) && (ret < 1); i++) { /* Watch daemon read FIFO for input. */ struct timeval tval = { .tv_sec = 1 }; FD_ZERO(&fds); FD_SET(fifos->server, &fds); ret = select(fifos->server + 1, &fds, NULL, NULL, &tval); if (ret < 0 && errno != EINTR) { log_error("Unable to read from event server."); return 0; } if ((ret == 0) && (i > 4) && !bytes) { log_error("No input from event server."); return 0; } } if (ret < 1) { log_error("Unable to read from event server."); return 0; } ret = read(fifos->server, buf + bytes, size); if (ret < 0) { if ((errno == EINTR) || (errno == EAGAIN)) continue; log_error("Unable to read from event server."); return 0; } bytes += ret; if (header && (bytes == 2 * sizeof(uint32_t))) { msg->cmd = ntohl(header[0]); msg->size = ntohl(header[1]); buf = msg->data = dm_malloc(msg->size); size = msg->size; bytes = 0; header = 0; } } if (bytes != size) { dm_free(msg->data); msg->data = NULL; } return bytes == size; } /* Write message to daemon. 
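The framing mirrors _daemon_read() above: a header of two 32-bit big-endian words (cmd and payload size) followed by the ASCII payload, which daemon_talk() formats as "<pid>:<sequence> <dso> <device> <mask> <timeout>" (or "<pid>:<sequence> HELLO").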
*/ static int _daemon_write(struct dm_event_fifos *fifos, struct dm_event_daemon_message *msg) { int ret; fd_set fds; size_t bytes = 0; size_t size = 2 * sizeof(uint32_t) + msg->size; uint32_t *header = alloca(size); char *buf = (char *)header; char drainbuf[128]; header[0] = htonl(msg->cmd); header[1] = htonl(msg->size); memcpy(buf + 2 * sizeof(uint32_t), msg->data, msg->size); /* drain the answer fifo */ while (1) { struct timeval tval = { .tv_usec = 100 }; FD_ZERO(&fds); FD_SET(fifos->server, &fds); ret = select(fifos->server + 1, &fds, NULL, NULL, &tval); if (ret < 0) { if (errno == EINTR) continue; log_error("Unable to talk to event daemon."); return 0; } if (ret == 0) break; ret = read(fifos->server, drainbuf, sizeof(drainbuf)); if (ret < 0) { if ((errno == EINTR) || (errno == EAGAIN)) continue; log_error("Unable to talk to event daemon."); return 0; } } while (bytes < size) { do { /* Watch daemon write FIFO to be ready for output. */ FD_ZERO(&fds); FD_SET(fifos->client, &fds); ret = select(fifos->client + 1, NULL, &fds, NULL, NULL); if ((ret < 0) && (errno != EINTR)) { log_error("Unable to talk to event daemon."); return 0; } } while (ret < 1); ret = write(fifos->client, buf + bytes, size - bytes); if (ret < 0) { if ((errno == EINTR) || (errno == EAGAIN)) continue; log_error("Unable to talk to event daemon."); return 0; } bytes += ret; } return bytes == size; } int daemon_talk(struct dm_event_fifos *fifos, struct dm_event_daemon_message *msg, int cmd, const char *dso_name, const char *dev_name, enum dm_event_mask evmask, uint32_t timeout) { int msg_size; memset(msg, 0, sizeof(*msg)); /* * Set command and pack the arguments * into ASCII message string. */ if ((msg_size = ((cmd == DM_EVENT_CMD_HELLO) ? dm_asprintf(&(msg->data), "%d:%d HELLO", getpid(), _sequence_nr) : dm_asprintf(&(msg->data), "%d:%d %s %s %u %" PRIu32, getpid(), _sequence_nr, dso_name ? : "-", dev_name ? : "-", evmask, timeout))) < 0) { log_error("_daemon_talk: message allocation failed."); return -ENOMEM; } msg->cmd = cmd; msg->size = msg_size; /* * Write command and message to and * read status return code from daemon. */ if (!_daemon_write(fifos, msg)) { stack; dm_free(msg->data); msg->data = NULL; return -EIO; } do { dm_free(msg->data); msg->data = NULL; if (!_daemon_read(fifos, msg)) { stack; return -EIO; } } while (!_check_message_id(msg)); _sequence_nr++; return (int32_t) msg->cmd; } /* * start_daemon * * This function forks off a process (dmeventd) that will handle * the events. I am currently test opening one of the fifos to * ensure that the daemon is running and listening... I thought * this would be less expensive than fork/exec'ing every time. * Perhaps there is an even quicker/better way (no, checking the * lock file is _not_ a better way). * * Returns: 1 on success, 0 otherwise */ static int _start_daemon(char *dmeventd_path, struct dm_event_fifos *fifos) { int pid, ret = 0; int status; struct stat statbuf; char default_dmeventd_path[] = DMEVENTD_PATH; char *args[] = { dmeventd_path ? : default_dmeventd_path, NULL }; /* * FIXME Explicitly verify the code's requirement that client_path is secure: * - All parent directories owned by root without group/other write access unless sticky. */ /* If client fifo path exists, only use it if it is root-owned fifo mode 0600 */ if ((lstat(fifos->client_path, &statbuf) < 0)) { if (errno == ENOENT) /* Jump ahead if fifo does not already exist. 
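In that case dmeventd is started below and is expected to create its fifos itself.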
*/ goto start_server; else { log_sys_error("stat", fifos->client_path); return 0; } } else if (!S_ISFIFO(statbuf.st_mode)) { log_error("%s must be a fifo.", fifos->client_path); return 0; } else if (statbuf.st_uid) { log_error("%s must be owned by uid 0.", fifos->client_path); return 0; } else if (statbuf.st_mode & (S_IEXEC | S_IRWXG | S_IRWXO)) { log_error("%s must have mode 0600.", fifos->client_path); return 0; } /* Anyone listening? If not, errno will be ENXIO */ fifos->client = open(fifos->client_path, O_WRONLY | O_NONBLOCK); if (fifos->client >= 0) { /* Should never happen if all the above checks passed. */ if ((fstat(fifos->client, &statbuf) < 0) || !S_ISFIFO(statbuf.st_mode) || statbuf.st_uid || (statbuf.st_mode & (S_IEXEC | S_IRWXG | S_IRWXO))) { log_error("%s is no longer a secure root-owned fifo with mode 0600.", fifos->client_path); if (close(fifos->client)) log_sys_debug("close", fifos->client_path); return 0; } /* server is running and listening */ if (close(fifos->client)) log_sys_debug("close", fifos->client_path); return 1; } if (errno != ENXIO && errno != ENOENT) { /* problem */ log_sys_error("open", fifos->client_path); return 0; } start_server: /* server is not running */ if ((args[0][0] == '/') && stat(args[0], &statbuf)) { log_sys_error("stat", args[0]); return 0; } pid = fork(); if (pid < 0) log_sys_error("fork", ""); else if (!pid) { execvp(args[0], args); log_error("Unable to exec dmeventd: %s.", strerror(errno)); _exit(EXIT_FAILURE); } else { if (waitpid(pid, &status, 0) < 0) log_error("Unable to start dmeventd: %s.", strerror(errno)); else if (WEXITSTATUS(status)) log_error("Unable to start dmeventd."); else ret = 1; } return ret; } int init_fifos(struct dm_event_fifos *fifos) { /* FIXME? Is fifo the most suitable method? Why not share comms/daemon code with something else e.g. multipath? */ /* Open the fifo used to read from the daemon. */ if ((fifos->server = open(fifos->server_path, O_RDWR)) < 0) { log_sys_error("open", fifos->server_path); return 0; } /* Lock out anyone else trying to do communication with the daemon. */ if (flock(fifos->server, LOCK_EX) < 0) { log_sys_error("flock", fifos->server_path); goto bad; } /* if ((fifos->client = open(fifos->client_path, O_WRONLY | O_NONBLOCK)) < 0) {*/ if ((fifos->client = open(fifos->client_path, O_RDWR | O_NONBLOCK)) < 0) { log_sys_error("open", fifos->client_path); goto bad; } return 1; bad: if (close(fifos->server)) log_sys_debug("close", fifos->server_path); fifos->server = -1; return 0; } /* Initialize client. 
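Ensures a dmeventd instance is listening, starting one if necessary, and then opens and locks the communication fifos.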
*/ static int _init_client(char *dmeventd_path, struct dm_event_fifos *fifos) { if (!_start_daemon(dmeventd_path, fifos)) return_0; return init_fifos(fifos); } void fini_fifos(struct dm_event_fifos *fifos) { if (fifos->client >= 0 && close(fifos->client)) log_sys_debug("close", fifos->client_path); if (fifos->server >= 0) { if (flock(fifos->server, LOCK_UN)) log_sys_debug("flock unlock", fifos->server_path); if (close(fifos->server)) log_sys_debug("close", fifos->server_path); } } /* Get uuid of a device */ static struct dm_task *_get_device_info(const struct dm_event_handler *dmevh) { struct dm_task *dmt; struct dm_info info; if (!(dmt = dm_task_create(DM_DEVICE_INFO))) { log_error("_get_device_info: dm_task creation for info failed."); return NULL; } if (dmevh->uuid) { if (!dm_task_set_uuid(dmt, dmevh->uuid)) goto_bad; } else if (dmevh->dev_name) { if (!dm_task_set_name(dmt, dmevh->dev_name)) goto_bad; } else if (dmevh->major && dmevh->minor) { if (!dm_task_set_major(dmt, dmevh->major) || !dm_task_set_minor(dmt, dmevh->minor)) goto_bad; } /* FIXME Add name or uuid or devno to messages */ if (!dm_task_run(dmt)) { log_error("_get_device_info: dm_task_run() failed."); goto bad; } if (!dm_task_get_info(dmt, &info)) { log_error("_get_device_info: failed to get info for device."); goto bad; } if (!info.exists) { log_error("_get_device_info: %s%s%s%.0d%s%.0d%s%s: device not found.", dmevh->uuid ? : "", (!dmevh->uuid && dmevh->dev_name) ? dmevh->dev_name : "", (!dmevh->uuid && !dmevh->dev_name && dmevh->major > 0) ? "(" : "", (!dmevh->uuid && !dmevh->dev_name && dmevh->major > 0) ? dmevh->major : 0, (!dmevh->uuid && !dmevh->dev_name && dmevh->major > 0) ? ":" : "", (!dmevh->uuid && !dmevh->dev_name && dmevh->minor > 0) ? dmevh->minor : 0, (!dmevh->uuid && !dmevh->dev_name && dmevh->major > 0) && dmevh->minor == 0 ? "0" : "", (!dmevh->uuid && !dmevh->dev_name && dmevh->major > 0) ? ") " : ""); goto bad; } return dmt; bad: dm_task_destroy(dmt); return NULL; } /* Handle the event (de)registration call and return negative error codes. */ static int _do_event(int cmd, char *dmeventd_path, struct dm_event_daemon_message *msg, const char *dso_name, const char *dev_name, enum dm_event_mask evmask, uint32_t timeout) { int ret; struct dm_event_fifos fifos = { .server = -1, .client = -1, /* FIXME Make these either configurable or depend directly on dmeventd_path */ .client_path = DM_EVENT_FIFO_CLIENT, .server_path = DM_EVENT_FIFO_SERVER }; if (!_init_client(dmeventd_path, &fifos)) { ret = -ESRCH; goto_out; } ret = daemon_talk(&fifos, msg, DM_EVENT_CMD_HELLO, NULL, NULL, 0, 0); dm_free(msg->data); msg->data = 0; if (!ret) ret = daemon_talk(&fifos, msg, cmd, dso_name, dev_name, evmask, timeout); out: /* what is the opposite of init? */ fini_fifos(&fifos); return ret; } /* External library interface. 
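 *
 * A minimal caller sequence might look like the sketch below (error handling
 * omitted; the DSO path is only an example, and uuid is the device-mapper
 * UUID of the volume to watch):
 *
 *   struct dm_event_handler *dmevh = dm_event_handler_create();
 *
 *   dm_event_handler_set_dso(dmevh, "libdevmapper-event-lvm2raid.so");
 *   dm_event_handler_set_uuid(dmevh, uuid);
 *   dm_event_handler_set_event_mask(dmevh, DM_EVENT_ALL_ERRORS | DM_EVENT_TIMEOUT);
 *   dm_event_register_handler(dmevh);
 *   dm_event_handler_destroy(dmevh);
 *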
*/ int dm_event_register_handler(const struct dm_event_handler *dmevh) { int ret = 1, err; const char *uuid; struct dm_task *dmt; struct dm_event_daemon_message msg = { 0 }; if (!(dmt = _get_device_info(dmevh))) return_0; uuid = dm_task_get_uuid(dmt); if (!strstr(dmevh->dso, "libdevmapper-event-lvm2thin.so") && !strstr(dmevh->dso, "libdevmapper-event-lvm2snapshot.so") && !strstr(dmevh->dso, "libdevmapper-event-lvm2mirror.so") && !strstr(dmevh->dso, "libdevmapper-event-lvm2raid.so")) log_warn("WARNING: %s: dmeventd plugins are deprecated.", dmevh->dso); if ((err = _do_event(DM_EVENT_CMD_REGISTER_FOR_EVENT, dmevh->dmeventd_path, &msg, dmevh->dso, uuid, dmevh->mask, dmevh->timeout)) < 0) { log_error("%s: event registration failed: %s.", dm_task_get_name(dmt), msg.data ? msg.data : strerror(-err)); ret = 0; } dm_free(msg.data); dm_task_destroy(dmt); return ret; } int dm_event_unregister_handler(const struct dm_event_handler *dmevh) { int ret = 1, err; const char *uuid; struct dm_task *dmt; struct dm_event_daemon_message msg = { 0 }; if (!(dmt = _get_device_info(dmevh))) return_0; uuid = dm_task_get_uuid(dmt); if ((err = _do_event(DM_EVENT_CMD_UNREGISTER_FOR_EVENT, dmevh->dmeventd_path, &msg, dmevh->dso, uuid, dmevh->mask, dmevh->timeout)) < 0) { log_error("%s: event deregistration failed: %s.", dm_task_get_name(dmt), msg.data ? msg.data : strerror(-err)); ret = 0; } dm_free(msg.data); dm_task_destroy(dmt); return ret; } /* Fetch a string off src and duplicate it into *dest. */ /* FIXME: move to separate module to share with the daemon. */ static char *_fetch_string(char **src, const int delimiter) { char *p, *ret; if ((p = strchr(*src, delimiter))) *p = 0; if ((ret = dm_strdup(*src))) *src += strlen(ret) + 1; if (p) *p = delimiter; return ret; } /* Parse a device message from the daemon. */ static int _parse_message(struct dm_event_daemon_message *msg, char **dso_name, char **uuid, enum dm_event_mask *evmask) { char *id; char *p = msg->data; if ((id = _fetch_string(&p, ' ')) && (*dso_name = _fetch_string(&p, ' ')) && (*uuid = _fetch_string(&p, ' '))) { *evmask = atoi(p); dm_free(id); return 0; } dm_free(id); return -ENOMEM; } /* * Returns 0 if handler found; error (-ENOMEM, -ENOENT) otherwise. */ int dm_event_get_registered_device(struct dm_event_handler *dmevh, int next) { int ret = 0; const char *uuid = NULL; char *reply_dso = NULL, *reply_uuid = NULL; enum dm_event_mask reply_mask = 0; struct dm_task *dmt = NULL; struct dm_event_daemon_message msg = { 0 }; struct dm_info info; if (!(dmt = _get_device_info(dmevh))) { log_debug("Device does not exists (uuid=%s, name=%s, %d:%d).", dmevh->uuid, dmevh->dev_name, dmevh->major, dmevh->minor); ret = -ENODEV; goto fail; } uuid = dm_task_get_uuid(dmt); /* FIXME Distinguish errors connecting to daemon */ if (_do_event(next ? 
DM_EVENT_CMD_GET_NEXT_REGISTERED_DEVICE : DM_EVENT_CMD_GET_REGISTERED_DEVICE, dmevh->dmeventd_path, &msg, dmevh->dso, uuid, dmevh->mask, 0)) { log_debug("%s: device not registered.", dm_task_get_name(dmt)); ret = -ENOENT; goto fail; } /* FIXME this will probably horribly break if we get ill-formatted reply */ ret = _parse_message(&msg, &reply_dso, &reply_uuid, &reply_mask); dm_task_destroy(dmt); dmt = NULL; dm_free(msg.data); msg.data = NULL; _dm_event_handler_clear_dev_info(dmevh); if (!reply_uuid) { ret = -ENXIO; /* dmeventd probably gave us bogus uuid back */ goto fail; } if (!(dmevh->uuid = dm_strdup(reply_uuid))) { ret = -ENOMEM; goto fail; } if (!(dmt = _get_device_info(dmevh))) { ret = -ENXIO; /* dmeventd probably gave us bogus uuid back */ goto fail; } dm_event_handler_set_dso(dmevh, reply_dso); dm_event_handler_set_event_mask(dmevh, reply_mask); dm_free(reply_dso); reply_dso = NULL; dm_free(reply_uuid); reply_uuid = NULL; if (!(dmevh->dev_name = dm_strdup(dm_task_get_name(dmt)))) { ret = -ENOMEM; goto fail; } if (!dm_task_get_info(dmt, &info)) { ret = -1; goto fail; } dmevh->major = info.major; dmevh->minor = info.minor; dm_task_destroy(dmt); return ret; fail: dm_free(msg.data); dm_free(reply_dso); dm_free(reply_uuid); _dm_event_handler_clear_dev_info(dmevh); if (dmt) dm_task_destroy(dmt); return ret; } /* * You can (and have to) call this at the stage of the protocol where * daemon_talk(fifos, &msg, DM_EVENT_CMD_HELLO, NULL, NULL, 0, 0) * * would be normally sent. This call will parse the version reply from * dmeventd, in addition to above call. It is not safe to call this at any * other place in the protocol. * * This is an internal function, not exposed in the public API. */ int dm_event_get_version(struct dm_event_fifos *fifos, int *version) { char *p; struct dm_event_daemon_message msg = { 0 }; if (daemon_talk(fifos, &msg, DM_EVENT_CMD_HELLO, NULL, NULL, 0, 0)) return 0; p = msg.data; *version = 0; if (!p || !(p = strchr(p, ' '))) /* Message ID */ return 0; if (!(p = strchr(p + 1, ' '))) /* HELLO */ return 0; if ((p = strchr(p + 1, ' '))) /* HELLO, once more */ *version = atoi(p); return 1; } void dm_event_log_set(int debug_log_level, int use_syslog) { _debug_level = debug_log_level; _use_syslog = use_syslog; } void dm_event_log(const char *subsys, int level, const char *file, int line, int dm_errno_or_class, const char *format, va_list ap) { static int _abort_on_internal_errors = -1; static pthread_mutex_t _log_mutex = PTHREAD_MUTEX_INITIALIZER; static time_t start = 0; const char *indent = ""; FILE *stream = log_stderr(level) ? stderr : stdout; int prio; time_t now; int log_with_debug = 0; if (subsys[0] == '#') { /* Subsystems starting with '#' are logged * only when debugging is enabled. 
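(For example, the "#dm" subsystem used for libdm messages in dmeventd.c and the "#lvm" subsystem used by the lvm2 plug-in wrapper.)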
*/ log_with_debug++; subsys++; } switch (log_level(level)) { case _LOG_DEBUG: /* Never shown without -ddd */ if (_debug_level < 3) return; prio = LOG_DEBUG; indent = " "; break; case _LOG_INFO: if (log_with_debug && _debug_level < 2) return; prio = LOG_INFO; indent = " "; break; case _LOG_NOTICE: if (log_with_debug && _debug_level < 1) return; prio = LOG_NOTICE; indent = " "; break; case _LOG_WARN: prio = LOG_WARNING; break; case _LOG_ERR: prio = LOG_ERR; stream = stderr; break; default: prio = LOG_CRIT; } /* Serialize to keep lines readable */ pthread_mutex_lock(&_log_mutex); if (_use_syslog) { vsyslog(prio, format, ap); } else { now = time(NULL); if (!start) start = now; now -= start; if (_debug_level) fprintf(stream, "[%2d:%02d] %8x:%-6s%s", (int)now / 60, (int)now % 60, // TODO: Maybe use shorter ID // ((int)(pthread_self()) >> 6) & 0xffff, (int)pthread_self(), subsys, (_debug_level > 3) ? "" : indent); if (_debug_level > 3) fprintf(stream, "%28s:%4d %s", file, line, indent); vfprintf(stream, _(format), ap); fputc('\n', stream); fflush(stream); } pthread_mutex_unlock(&_log_mutex); if (_abort_on_internal_errors < 0) /* Set when env DM_ABORT_ON_INTERNAL_ERRORS is not "0" */ _abort_on_internal_errors = strcmp(getenv("DM_ABORT_ON_INTERNAL_ERRORS") ? : "0", "0"); if (_abort_on_internal_errors && !strncmp(format, INTERNAL_ERROR, sizeof(INTERNAL_ERROR) - 1)) abort(); } #if 0 /* left out for now */ static char *_skip_string(char *src, const int delimiter) { src = srtchr(src, delimiter); if (src && *(src + 1)) return src + 1; return NULL; } int dm_event_set_timeout(const char *device_path, uint32_t timeout) { struct dm_event_daemon_message msg = { 0 }; if (!device_exists(device_path)) return -ENODEV; return _do_event(DM_EVENT_CMD_SET_TIMEOUT, &msg, NULL, device_path, 0, timeout); } int dm_event_get_timeout(const char *device_path, uint32_t *timeout) { int ret; struct dm_event_daemon_message msg = { 0 }; if (!device_exists(device_path)) return -ENODEV; if (!(ret = _do_event(DM_EVENT_CMD_GET_TIMEOUT, &msg, NULL, device_path, 0, 0))) { char *p = _skip_string(msg.data, ' '); if (!p) { log_error("Malformed reply from dmeventd '%s'.", msg.data); dm_free(msg.data); return -EIO; } *timeout = atoi(p); } dm_free(msg.data); return ret; } #endif LVM2.2.02.176/daemons/dmeventd/dmeventd.h0000644000000000000120000000460713176752421016546 0ustar rootwheel/* * Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved. * * This file is part of the device-mapper userspace tools. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef __DMEVENTD_DOT_H__ #define __DMEVENTD_DOT_H__ /* FIXME This stuff must be configurable. */ #define DM_EVENT_FIFO_CLIENT DEFAULT_DM_RUN_DIR "/dmeventd-client" #define DM_EVENT_FIFO_SERVER DEFAULT_DM_RUN_DIR "/dmeventd-server" #define DM_EVENT_DEFAULT_TIMEOUT 10 /* Commands for the daemon passed in the message below. 
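Each request and reply is a struct dm_event_daemon_message whose cmd field holds one of these values; the remaining parameters travel in the ASCII payload, e.g. a registration request of the form "<pid>:<sequence> <dso> <uuid> <mask> <timeout>".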
*/ enum dm_event_command { DM_EVENT_CMD_ACTIVE = 1, DM_EVENT_CMD_REGISTER_FOR_EVENT, DM_EVENT_CMD_UNREGISTER_FOR_EVENT, DM_EVENT_CMD_GET_REGISTERED_DEVICE, DM_EVENT_CMD_GET_NEXT_REGISTERED_DEVICE, DM_EVENT_CMD_SET_TIMEOUT, DM_EVENT_CMD_GET_TIMEOUT, DM_EVENT_CMD_HELLO, DM_EVENT_CMD_DIE, DM_EVENT_CMD_GET_STATUS, DM_EVENT_CMD_GET_PARAMETERS, }; /* Message passed between client and daemon. */ struct dm_event_daemon_message { uint32_t cmd; uint32_t size; char *data; }; /* FIXME Is this meant to be exported? I can't see where the interface uses it. */ /* Fifos for client/daemon communication. */ struct dm_event_fifos { int client; int server; const char *client_path; const char *server_path; }; /* EXIT_SUCCESS 0 -- stdlib.h */ /* EXIT_FAILURE 1 -- stdlib.h */ /* EXIT_LOCKFILE_INUSE 2 -- obsoleted */ #define EXIT_DESC_CLOSE_FAILURE 3 #define EXIT_DESC_OPEN_FAILURE 4 /* EXIT_OPEN_PID_FAILURE 5 -- obsoleted */ #define EXIT_FIFO_FAILURE 6 #define EXIT_CHDIR_FAILURE 7 /* Implemented in libdevmapper-event.c, but not part of public API. */ // FIXME misuse of bitmask as enum int daemon_talk(struct dm_event_fifos *fifos, struct dm_event_daemon_message *msg, int cmd, const char *dso_name, const char *dev_name, enum dm_event_mask evmask, uint32_t timeout); int init_fifos(struct dm_event_fifos *fifos); void fini_fifos(struct dm_event_fifos *fifos); int dm_event_get_version(struct dm_event_fifos *fifos, int *version); #endif /* __DMEVENTD_DOT_H__ */ LVM2.2.02.176/daemons/dmeventd/libdevmapper-event.pc.in0000644000000000000120000000040713176752421021303 0ustar rootwheelprefix=@prefix@ exec_prefix=@exec_prefix@ libdir=@libdir@ includedir=@includedir@ Name: devmapper-event Description: device-mapper event library Version: @DM_LIB_PATCHLEVEL@ Cflags: -I${includedir} Libs: -L${libdir} -ldevmapper-event Requires.private: devmapper LVM2.2.02.176/daemons/dmeventd/.exported_symbols0000644000000000000120000000006713176752421020166 0ustar rootwheelinit_fifos fini_fifos daemon_talk dm_event_get_version LVM2.2.02.176/daemons/dmeventd/libdevmapper-event.h0000644000000000000120000001167113176752421020530 0ustar rootwheel/* * Copyright (C) 2005-2015 Red Hat, Inc. All rights reserved. * * This file is part of the device-mapper userspace tools. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* * Note that this file is released only as part of a technology preview * and its contents may change in future updates in ways that do not * preserve compatibility. */ #ifndef LIB_DMEVENT_H #define LIB_DMEVENT_H #include /* * Event library interface. */ enum dm_event_mask { DM_EVENT_SETTINGS_MASK = 0x0000FF, DM_EVENT_SINGLE = 0x000001, /* Report multiple errors just once. */ DM_EVENT_MULTI = 0x000002, /* Report all of them. */ DM_EVENT_ERROR_MASK = 0x00FF00, DM_EVENT_SECTOR_ERROR = 0x000100, /* Failure on a particular sector. */ DM_EVENT_DEVICE_ERROR = 0x000200, /* Device failure. */ DM_EVENT_PATH_ERROR = 0x000400, /* Failure on an io path. */ DM_EVENT_ADAPTOR_ERROR = 0x000800, /* Failure of a host adaptor. */ DM_EVENT_STATUS_MASK = 0xFF0000, DM_EVENT_SYNC_STATUS = 0x010000, /* Mirror synchronization completed/failed. 
*/ DM_EVENT_TIMEOUT = 0x020000, /* Timeout has occured */ DM_EVENT_REGISTRATION_PENDING = 0x1000000, /* Monitor thread is setting-up/shutting-down */ }; #define DM_EVENT_ALL_ERRORS DM_EVENT_ERROR_MASK #define DM_EVENT_PROTOCOL_VERSION 2 struct dm_task; struct dm_event_handler; struct dm_event_handler *dm_event_handler_create(void); void dm_event_handler_destroy(struct dm_event_handler *dmevh); /* * Path of shared library to handle events. * * All of dmeventd, dso, device_name and uuid strings are duplicated so * you do not need to keep the pointers valid after the call succeeds. * They may return -ENOMEM though. */ int dm_event_handler_set_dso(struct dm_event_handler *dmevh, const char *path); /* * Path of dmeventd binary. */ int dm_event_handler_set_dmeventd_path(struct dm_event_handler *dmevh, const char *dmeventd_path); /* * Identify the device to monitor by exactly one of device_name, uuid or * device number. String arguments are duplicated, see above. */ int dm_event_handler_set_dev_name(struct dm_event_handler *dmevh, const char *device_name); int dm_event_handler_set_uuid(struct dm_event_handler *dmevh, const char *uuid); void dm_event_handler_set_major(struct dm_event_handler *dmevh, int major); void dm_event_handler_set_minor(struct dm_event_handler *dmevh, int minor); void dm_event_handler_set_timeout(struct dm_event_handler *dmevh, int timeout); /* * Specify mask for events to monitor. */ // FIXME misuse of bitmask as enum void dm_event_handler_set_event_mask(struct dm_event_handler *dmevh, enum dm_event_mask evmask); const char *dm_event_handler_get_dso(const struct dm_event_handler *dmevh); const char *dm_event_handler_get_dev_name(const struct dm_event_handler *dmevh); const char *dm_event_handler_get_uuid(const struct dm_event_handler *dmevh); int dm_event_handler_get_major(const struct dm_event_handler *dmevh); int dm_event_handler_get_minor(const struct dm_event_handler *dmevh); int dm_event_handler_get_timeout(const struct dm_event_handler *dmevh); // FIXME misuse of bitmask as enum enum dm_event_mask dm_event_handler_get_event_mask(const struct dm_event_handler *dmevh); /* FIXME Review interface (what about this next thing?) */ int dm_event_get_registered_device(struct dm_event_handler *dmevh, int next); /* * Initiate monitoring using dmeventd. */ int dm_event_register_handler(const struct dm_event_handler *dmevh); int dm_event_unregister_handler(const struct dm_event_handler *dmevh); /* Set debug level for logging, and whether to log on stdout/stderr or syslog */ void dm_event_log_set(int debug_log_level, int use_syslog); /* Log messages acroding to current debug level */ __attribute__((format(printf, 6, 0))) void dm_event_log(const char *subsys, int level, const char *file, int line, int dm_errno_or_class, const char *format, va_list ap); /* Macro to route print_log do dm_event_log() */ #define DM_EVENT_LOG_FN(subsys) \ void print_log(int level, const char *file, int line, int dm_errno_or_class,\ const char *format, ...)\ {\ va_list ap;\ va_start(ap, format);\ dm_event_log(subsys, level, file, line, dm_errno_or_class, format, ap);\ va_end(ap);\ } /* Prototypes for DSO interface, see dmeventd.c, struct dso_data for detailed descriptions. 
*/ // FIXME misuse of bitmask as enum void process_event(struct dm_task *dmt, enum dm_event_mask evmask, void **user); int register_device(const char *device_name, const char *uuid, int major, int minor, void **user); int unregister_device(const char *device_name, const char *uuid, int major, int minor, void **user); #endif LVM2.2.02.176/daemons/dmeventd/dmeventd.c0000644000000000000120000015643113176752421016544 0ustar rootwheel/* * Copyright (C) 2005-2015 Red Hat, Inc. All rights reserved. * * This file is part of the device-mapper userspace tools. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* * dmeventd - dm event daemon to monitor active mapped devices */ #include "dm-logging.h" #include "libdevmapper-event.h" #include "dmeventd.h" #include "tool.h" #include #include #include #include #include #include #include #include #include /* for htonl, ntohl */ #include /* for musl libc */ #ifdef __linux__ /* * Kernel version 2.6.36 and higher has * new OOM killer adjustment interface. */ # define OOM_ADJ_FILE_OLD "/proc/self/oom_adj" # define OOM_ADJ_FILE "/proc/self/oom_score_adj" /* From linux/oom.h */ /* Old interface */ # define OOM_DISABLE (-17) # define OOM_ADJUST_MIN (-16) /* New interface */ # define OOM_SCORE_ADJ_MIN (-1000) /* Systemd on-demand activation support */ # define SD_RUNTIME_UNIT_FILE_DIR DEFAULT_DM_RUN_DIR "/systemd/system/" # define SD_ACTIVATION_ENV_VAR_NAME "SD_ACTIVATION" # define SD_LISTEN_PID_ENV_VAR_NAME "LISTEN_PID" # define SD_LISTEN_FDS_ENV_VAR_NAME "LISTEN_FDS" # define SD_LISTEN_FDS_START 3 # define SD_FD_FIFO_SERVER SD_LISTEN_FDS_START # define SD_FD_FIFO_CLIENT (SD_LISTEN_FDS_START + 1) #endif #include #define DM_SIGNALED_EXIT 1 #define DM_SCHEDULED_EXIT 2 static volatile sig_atomic_t _exit_now = 0; /* set to '1' when signal is given to exit */ /* List (un)link macros. */ #define LINK(x, head) dm_list_add(head, &(x)->list) #define LINK_DSO(dso) LINK(dso, &_dso_registry) #define LINK_THREAD(thread) LINK(thread, &_thread_registry) #define UNLINK(x) dm_list_del(&(x)->list) #define UNLINK_DSO(x) UNLINK(x) #define UNLINK_THREAD(x) UNLINK(x) #define DAEMON_NAME "dmeventd" /* Global mutex for thread list access. Has to be held when: - iterating thread list - adding or removing elements from thread list - changing or reading thread_status's fields: processing, status, events Use _lock_mutex() and _unlock_mutex() to hold/release it */ static pthread_mutex_t _global_mutex; static const size_t THREAD_STACK_SIZE = 300 * 1024; /* Default idle exit timeout 1 hour (in seconds) */ static const time_t DMEVENTD_IDLE_EXIT_TIMEOUT = 60 * 60; static int _debug_level = 0; static int _use_syslog = 1; static int _systemd_activation = 0; static int _foreground = 0; static int _restart = 0; static time_t _idle_since = 0; static char **_initial_registrations = 0; /* FIXME Make configurable at runtime */ /* All libdm messages */ __attribute__((format(printf, 5, 6))) static void _libdm_log(int level, const char *file, int line, int dm_errno_or_class, const char *format, ...) 
{ va_list ap; va_start(ap, format); dm_event_log("#dm", level, file, line, dm_errno_or_class, format, ap); va_end(ap); } /* All dmeventd messages */ #undef LOG_MESG #define LOG_MESG(l, f, ln, e, x...) _dmeventd_log(l, f, ln, e, ## x) __attribute__((format(printf, 5, 6))) static void _dmeventd_log(int level, const char *file, int line, int dm_errno_or_class, const char *format, ...) { va_list ap; va_start(ap, format); dm_event_log("dmeventd", level, file, line, dm_errno_or_class, format, ap); va_end(ap); } #ifdef DEBUG # define DEBUGLOG log_debug static const char *decode_cmd(uint32_t cmd) { switch (cmd) { case DM_EVENT_CMD_ACTIVE: return "ACTIVE"; case DM_EVENT_CMD_REGISTER_FOR_EVENT: return "REGISTER_FOR_EVENT"; case DM_EVENT_CMD_UNREGISTER_FOR_EVENT: return "UNREGISTER_FOR_EVENT"; case DM_EVENT_CMD_GET_REGISTERED_DEVICE: return "GET_REGISTERED_DEVICE"; case DM_EVENT_CMD_GET_NEXT_REGISTERED_DEVICE: return "GET_NEXT_REGISTERED_DEVICE"; case DM_EVENT_CMD_SET_TIMEOUT: return "SET_TIMEOUT"; case DM_EVENT_CMD_GET_TIMEOUT: return "GET_TIMEOUT"; case DM_EVENT_CMD_HELLO: return "HELLO"; case DM_EVENT_CMD_DIE: return "DIE"; case DM_EVENT_CMD_GET_STATUS: return "GET_STATUS"; case DM_EVENT_CMD_GET_PARAMETERS: return "GET_PARAMETERS"; default: return "unknown"; } } #else # define DEBUGLOG(fmt, args...) do { } while (0) #endif /* Data kept about a DSO. */ struct dso_data { struct dm_list list; char *dso_name; /* DSO name (eg, "evms", "dmraid", "lvm2"). */ void *dso_handle; /* Opaque handle as returned from dlopen(). */ unsigned int ref_count; /* Library reference count. */ /* * Event processing. * * The DSO can do whatever appropriate steps if an event * happens such as changing the mapping in case a mirror * fails, update the application metadata etc. * * This function gets a dm_task that is a result of * DM_DEVICE_WAITEVENT ioctl (results equivalent to * DM_DEVICE_STATUS). It should not destroy it. * The caller must dispose of the task. */ void (*process_event)(struct dm_task *dmt, enum dm_event_mask event, void **user); /* * Device registration. * * When an application registers a device for an event, the DSO * can carry out appropriate steps so that a later call to * the process_event() function is sane (eg, read metadata * and activate a mapping). */ int (*register_device)(const char *device, const char *uuid, int major, int minor, void **user); /* * Device unregistration. * * In case all devices of a mapping (eg, RAID10) are unregistered * for events, the DSO can recognize this and carry out appropriate * steps (eg, deactivate mapping, metadata update). */ int (*unregister_device)(const char *device, const char *uuid, int major, int minor, void **user); }; static DM_LIST_INIT(_dso_registry); /* Structure to keep parsed register variables from client message. */ struct message_data { char *id; char *dso_name; /* Name of DSO. */ char *device_uuid; /* Mapped device path. */ char *events_str; /* Events string as fetched from message. */ enum dm_event_mask events_field; /* Events bitfield. */ char *timeout_str; uint32_t timeout_secs; struct dm_event_daemon_message *msg; /* Pointer to message buffer. */ }; /* There are three states a thread can attain. */ enum { DM_THREAD_REGISTERING, /* Registering, transitions to RUNNING */ DM_THREAD_RUNNING, /* Working on events, transitions to DONE */ DM_THREAD_DONE /* Terminated and cleanup is pending */ }; /* * Housekeeping of thread+device states. 
* * One thread per mapped device which can block on it until an event * occurs and the event processing function of the DSO gets called. */ struct thread_status { struct dm_list list; pthread_t thread; struct dso_data *dso_data; /* DSO this thread accesses. */ struct { char *uuid; char *name; int major, minor; } device; int processing; /* Set when event is being processed */ int status; /* See DM_THREAD_{REGISTERING,RUNNING,DONE} */ int events; /* bitfield for event filter. */ int current_events; /* bitfield for occured events. */ struct dm_task *wait_task; int pending; /* Set when event filter change is pending */ time_t next_time; uint32_t timeout; struct dm_list timeout_list; void *dso_private; /* dso per-thread status variable */ /* TODO per-thread mutex */ }; static DM_LIST_INIT(_thread_registry); static DM_LIST_INIT(_thread_registry_unused); static int _timeout_running; static DM_LIST_INIT(_timeout_registry); static pthread_mutex_t _timeout_mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_cond_t _timeout_cond = PTHREAD_COND_INITIALIZER; /********** * DSO **********/ /* DSO data allocate/free. */ static void _free_dso_data(struct dso_data *data) { dm_free(data->dso_name); dm_free(data); } static struct dso_data *_alloc_dso_data(struct message_data *data) { struct dso_data *ret = (typeof(ret)) dm_zalloc(sizeof(*ret)); if (!ret) return_NULL; if (!(ret->dso_name = dm_strdup(data->dso_name))) { dm_free(ret); return_NULL; } return ret; } /* DSO reference counting. */ static void _lib_get(struct dso_data *data) { data->ref_count++; } static void _lib_put(struct dso_data *data) { if (!--data->ref_count) { dlclose(data->dso_handle); UNLINK_DSO(data); _free_dso_data(data); /* Close control device if there is no plugin in-use */ if (dm_list_empty(&_dso_registry)) { DEBUGLOG("Unholding control device."); dm_hold_control_dev(0); dm_lib_release(); _idle_since = time(NULL); } } } /* Find DSO data. */ static struct dso_data *_lookup_dso(struct message_data *data) { struct dso_data *dso_data, *ret = NULL; dm_list_iterate_items(dso_data, &_dso_registry) if (!strcmp(data->dso_name, dso_data->dso_name)) { ret = dso_data; break; } return ret; } /* Lookup DSO symbols we need. */ static int _lookup_symbol(void *dl, void **symbol, const char *name) { if (!(*symbol = dlsym(dl, name))) return_0; return 1; } static int _lookup_symbols(void *dl, struct dso_data *data) { return _lookup_symbol(dl, (void *) &data->process_event, "process_event") && _lookup_symbol(dl, (void *) &data->register_device, "register_device") && _lookup_symbol(dl, (void *) &data->unregister_device, "unregister_device"); } /* Load an application specific DSO. */ static struct dso_data *_load_dso(struct message_data *data) { void *dl; struct dso_data *ret; const char *dlerr; if (!(dl = dlopen(data->dso_name, RTLD_NOW))) { dlerr = dlerror(); goto_bad; } if (!(ret = _alloc_dso_data(data))) { dlclose(dl); dlerr = "no memory"; goto_bad; } if (!(_lookup_symbols(dl, ret))) { _free_dso_data(ret); dlclose(dl); dlerr = "symbols missing"; goto_bad; } /* Keep control device open until last user closes */ if (dm_list_empty(&_dso_registry)) { DEBUGLOG("Holding control device open."); dm_hold_control_dev(1); _idle_since = 0; } /* * Keep handle to close the library once * we've got no references to it any more. 
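The count is maintained by _lib_get() and _lib_put() above: when it drops to zero, _lib_put() dlclose()s the handle, unlinks and frees the DSO data and, once no plug-ins remain loaded, releases the control device and starts the idle-exit clock (_idle_since).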
*/ ret->dso_handle = dl; LINK_DSO(ret); return ret; bad: log_error("dmeventd %s dlopen failed: %s.", data->dso_name, dlerr); data->msg->size = dm_asprintf(&(data->msg->data), "%s %s dlopen failed: %s", data->id, data->dso_name, dlerr); return NULL; } /************ * THREAD ************/ /* Allocate/free the thread status structure for a monitoring thread. */ static void _free_thread_status(struct thread_status *thread) { _lib_put(thread->dso_data); if (thread->wait_task) dm_task_destroy(thread->wait_task); dm_free(thread->device.uuid); dm_free(thread->device.name); dm_free(thread); } /* Note: events_field must not be 0, ensured by caller */ static struct thread_status *_alloc_thread_status(const struct message_data *data, struct dso_data *dso_data) { struct thread_status *thread; if (!(thread = dm_zalloc(sizeof(*thread)))) { log_error("Cannot create new thread, out of memory."); return NULL; } _lib_get(dso_data); thread->dso_data = dso_data; if (!(thread->wait_task = dm_task_create(DM_DEVICE_WAITEVENT))) goto_out; if (!dm_task_set_uuid(thread->wait_task, data->device_uuid)) goto_out; if (!(thread->device.uuid = dm_strdup(data->device_uuid))) goto_out; /* Until real name resolved, use UUID */ if (!(thread->device.name = dm_strdup(data->device_uuid))) goto_out; /* runs ioctl and may register lvm2 pluging */ thread->processing = 1; thread->status = DM_THREAD_REGISTERING; thread->events = data->events_field; thread->pending = DM_EVENT_REGISTRATION_PENDING; thread->timeout = data->timeout_secs; dm_list_init(&thread->timeout_list); return thread; out: _free_thread_status(thread); return NULL; } /* * Create a device monitoring thread. * N.B. Error codes returned are positive. */ static int _pthread_create_smallstack(pthread_t *t, void *(*fun)(void *), void *arg) { int r; pthread_t tmp; pthread_attr_t attr; /* * From pthread_attr_init man page: * POSIX.1-2001 documents an ENOMEM error for pthread_attr_init(); on * Linux these functions always succeed (but portable and future-proof * applications should nevertheless handle a possible error return). */ if ((r = pthread_attr_init(&attr)) != 0) { log_sys_error("pthread_attr_init", ""); return r; } /* * We use a smaller stack since it gets preallocated in its entirety */ pthread_attr_setstacksize(&attr, THREAD_STACK_SIZE + getpagesize()); /* * If no-one will be waiting, we need to detach. */ if (!t) { pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); t = &tmp; } if ((r = pthread_create(t, &attr, fun, arg))) log_sys_error("pthread_create", ""); pthread_attr_destroy(&attr); return r; } /* * Fetch a string off src and duplicate it into *ptr. * Pay attention to zero-length and 'empty' strings ('-'). */ /* FIXME? 
move to libdevmapper to share with the client lib (need to make delimiter a parameter then) */ static int _fetch_string(char **ptr, char **src, const int delimiter) { int ret = 1; char *p; size_t len; *ptr = NULL; /* Empty field returns NULL pointer */ if ((*src)[0] == '-') { /* Could be empty field '-', handle without allocation */ if ((*src)[1] == '\0') { (*src)++; goto out; } else if ((*src)[1] == delimiter) { (*src) += 2; goto out; } } if ((p = strchr(*src, delimiter))) { if (*src < p) { *p = 0; /* Temporary exit with \0 */ if (!(*ptr = dm_strdup(*src))) { log_error("Failed to fetch item %s.", *src); ret = 0; /* Allocation fail */ } *p = delimiter; *src = p; } (*src)++; /* Skip delmiter, next field */ } else if ((len = strlen(*src))) { /* No delimiter, item ends with '\0' */ if (!(*ptr = dm_strdup(*src))) { log_error("Failed to fetch last item %s.", *src); ret = 0; /* Fail */ } *src += len + 1; } out: return ret; } /* Free message memory. */ static void _free_message(struct message_data *message_data) { dm_free(message_data->id); dm_free(message_data->dso_name); dm_free(message_data->device_uuid); dm_free(message_data->events_str); dm_free(message_data->timeout_str); } /* Parse a register message from the client. */ static int _parse_message(struct message_data *message_data) { int ret = 0; struct dm_event_daemon_message *msg = message_data->msg; char *p = msg->data; if (!msg->data) return 0; /* * Retrieve application identifier, mapped device * path and events # string from message. */ if (_fetch_string(&message_data->id, &p, ' ') && _fetch_string(&message_data->dso_name, &p, ' ') && _fetch_string(&message_data->device_uuid, &p, ' ') && _fetch_string(&message_data->events_str, &p, ' ') && _fetch_string(&message_data->timeout_str, &p, ' ')) { if (message_data->events_str) message_data->events_field = atoi(message_data->events_str); if (message_data->timeout_str) message_data->timeout_secs = atoi(message_data->timeout_str) ? : DM_EVENT_DEFAULT_TIMEOUT; ret = 1; } dm_free(msg->data); msg->data = NULL; return ret; } /* Global mutex to lock access to lists et al. See _global_mutex above. */ static int _lock_mutex(void) { return pthread_mutex_lock(&_global_mutex); } static int _unlock_mutex(void) { return pthread_mutex_unlock(&_global_mutex); } /* Check, if a device exists. */ static int _fill_device_data(struct thread_status *ts) { struct dm_task *dmt; struct dm_info dmi; int ret = 0; if (!(dmt = dm_task_create(DM_DEVICE_INFO))) return 0; if (!dm_task_set_uuid(dmt, ts->device.uuid)) goto fail; if (!dm_task_run(dmt)) goto fail; dm_free(ts->device.name); if (!(ts->device.name = dm_strdup(dm_task_get_name(dmt)))) goto fail; if (!dm_task_get_info(dmt, &dmi)) goto fail; ts->device.major = dmi.major; ts->device.minor = dmi.minor; dm_task_set_event_nr(ts->wait_task, dmi.event_nr); ret = 1; fail: dm_task_destroy(dmt); return ret; } static struct dm_task *_get_device_status(struct thread_status *ts) { struct dm_task *dmt = dm_task_create(DM_DEVICE_STATUS); if (!dmt) return_NULL; if (!dm_task_set_uuid(dmt, ts->device.uuid)) { dm_task_destroy(dmt); return_NULL; } /* Non-blocking status read */ if (!dm_task_no_flush(dmt)) log_warn("WARNING: Can't set no_flush for dm status."); if (!dm_task_run(dmt)) { dm_task_destroy(dmt); return_NULL; } return dmt; } /* * Find an existing thread for a device. * * Mutex must be held when calling this. 
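 *
 * (The lookup key is the mapped device UUID, so at most one monitoring
 * thread exists per device; _register_for_event() relies on this and ORs
 * additional event bits into an existing thread's filter rather than
 * creating a second thread for the same device.)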
*/ static struct thread_status *_lookup_thread_status(struct message_data *data) { struct thread_status *thread; dm_list_iterate_items(thread, &_thread_registry) if (!strcmp(data->device_uuid, thread->device.uuid)) return thread; return NULL; } static int _get_status(struct message_data *message_data) { struct dm_event_daemon_message *msg = message_data->msg; struct thread_status *thread; int i = 0, j; int ret = -ENOMEM; int count; int size = 0, current; size_t len; char **buffers; char *message; _lock_mutex(); count = dm_list_size(&_thread_registry); buffers = alloca(sizeof(char*) * count); dm_list_iterate_items(thread, &_thread_registry) { if ((current = dm_asprintf(buffers + i, "0:%d %s %s %u %" PRIu32 ";", i, thread->dso_data->dso_name, thread->device.uuid, thread->events, thread->timeout)) < 0) { _unlock_mutex(); goto out; } ++i; size += current; /* count with trailing '\0' */ } _unlock_mutex(); len = strlen(message_data->id); msg->size = size + len + 1; dm_free(msg->data); if (!(msg->data = dm_malloc(msg->size))) goto out; memcpy(msg->data, message_data->id, len); message = msg->data + len; *message++ = ' '; for (j = 0; j < i; ++j) { len = strlen(buffers[j]); memcpy(message, buffers[j], len); message += len; } ret = 0; out: for (j = 0; j < i; ++j) dm_free(buffers[j]); return ret; } static int _get_parameters(struct message_data *message_data) { struct dm_event_daemon_message *msg = message_data->msg; int size; dm_free(msg->data); if ((size = dm_asprintf(&msg->data, "%s pid=%d daemon=%s exec_method=%s", message_data->id, getpid(), _foreground ? "no" : "yes", _systemd_activation ? "systemd" : "direct")) < 0) { stack; return -ENOMEM; } msg->size = (uint32_t) size; return 0; } /* Cleanup at exit. */ static void _exit_dm_lib(void) { dm_lib_release(); dm_lib_exit(); } static void _exit_timeout(void *unused __attribute__((unused))) { _timeout_running = 0; pthread_mutex_unlock(&_timeout_mutex); } /* Wake up monitor threads every so often. 
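 *
 * (How the timeout path fits together, as implemented below: this thread
 * walks _timeout_registry and sends SIGALRM to every registered thread
 * whose next_time has passed, unless that thread is currently processing
 * an event.  The signal interrupts the DM_DEVICE_WAITEVENT ioctl with
 * EINTR, which _event_wait() turns into DM_EVENT_TIMEOUT so the DSO's
 * process_event() gets called with a timeout event.)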
*/ static void *_timeout_thread(void *unused __attribute__((unused))) { struct thread_status *thread; struct timespec timeout; time_t curr_time; DEBUGLOG("Timeout thread starting."); pthread_cleanup_push(_exit_timeout, NULL); pthread_mutex_lock(&_timeout_mutex); while (!dm_list_empty(&_timeout_registry)) { timeout.tv_sec = 0; timeout.tv_nsec = 0; curr_time = time(NULL); dm_list_iterate_items_gen(thread, &_timeout_registry, timeout_list) { if (thread->next_time <= curr_time) { thread->next_time = curr_time + thread->timeout; _lock_mutex(); if (thread->processing) { /* Cannot signal processing monitoring thread */ log_debug("Skipping SIGALRM to processing Thr %x for timeout.", (int) thread->thread); } else { DEBUGLOG("Sending SIGALRM to Thr %x for timeout.", (int) thread->thread); pthread_kill(thread->thread, SIGALRM); } _unlock_mutex(); } if (thread->next_time < timeout.tv_sec || !timeout.tv_sec) timeout.tv_sec = thread->next_time; } pthread_cond_timedwait(&_timeout_cond, &_timeout_mutex, &timeout); } DEBUGLOG("Timeout thread finished."); pthread_cleanup_pop(1); return NULL; } static int _register_for_timeout(struct thread_status *thread) { int ret = 0; pthread_mutex_lock(&_timeout_mutex); if (dm_list_empty(&thread->timeout_list)) { thread->next_time = time(NULL) + thread->timeout; dm_list_add(&_timeout_registry, &thread->timeout_list); if (_timeout_running) pthread_cond_signal(&_timeout_cond); } if (!_timeout_running && !(ret = _pthread_create_smallstack(NULL, _timeout_thread, NULL))) _timeout_running = 1; pthread_mutex_unlock(&_timeout_mutex); return ret; } static void _unregister_for_timeout(struct thread_status *thread) { pthread_mutex_lock(&_timeout_mutex); if (!dm_list_empty(&thread->timeout_list)) { dm_list_del(&thread->timeout_list); dm_list_init(&thread->timeout_list); if (dm_list_empty(&_timeout_registry)) /* No more work -> wakeup to finish quickly */ pthread_cond_signal(&_timeout_cond); } pthread_mutex_unlock(&_timeout_mutex); } #ifdef DEBUG_SIGNALS /* Print list of signals within a signal set */ static void _print_sigset(const char *prefix, const sigset_t *sigset) { int sig, cnt = 0; for (sig = 1; sig < NSIG; sig++) if (!sigismember(sigset, sig)) { cnt++; log_debug("%s%d (%s)", prefix, sig, strsignal(sig)); } if (!cnt) log_debug("%s", prefix); } #endif enum { DM_WAIT_RETRY, DM_WAIT_INTR, DM_WAIT_FATAL }; /* Wait on a device until an event occurs. */ static int _event_wait(struct thread_status *thread) { sigset_t set, old; int ret = DM_WAIT_RETRY; struct dm_info info; /* TODO: audit libdm thread usage */ /* * This is so that you can break out of waiting on an event, * either for a timeout event, or to cancel the thread. 
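 *
 * (Outcome mapping, as coded below: a successful dm_task_run() is treated
 * as a device event - DM_EVENT_DEVICE_ERROR is ORed into current_events
 * and the event_nr is advanced; ENXIO means the device has disappeared and
 * the thread detaches (DM_WAIT_FATAL); EINTR - normally the timeout
 * thread's SIGALRM - becomes DM_EVENT_TIMEOUT; anything else is retried
 * (DM_WAIT_RETRY).)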
*/ sigemptyset(&set); sigaddset(&set, SIGALRM); if (pthread_sigmask(SIG_UNBLOCK, &set, &old) != 0) { log_sys_error("pthread_sigmask", "unblock alarm"); return ret; /* What better */ } if (dm_task_run(thread->wait_task)) { thread->current_events |= DM_EVENT_DEVICE_ERROR; ret = DM_WAIT_INTR; /* Update event_nr */ if (dm_task_get_info(thread->wait_task, &info)) dm_task_set_event_nr(thread->wait_task, info.event_nr); } else { switch (dm_task_get_errno(thread->wait_task)) { case ENXIO: log_error("%s disappeared, detaching.", thread->device.name); ret = DM_WAIT_FATAL; break; case EINTR: thread->current_events |= DM_EVENT_TIMEOUT; ret = DM_WAIT_INTR; break; default: log_sys_error("dm_task_run", "waitevent"); } } if (pthread_sigmask(SIG_SETMASK, &old, NULL) != 0) log_sys_error("pthread_sigmask", "block alarm"); #ifdef DEBUG_SIGNALS _print_sigset("dmeventd blocking ", &old); #endif DEBUGLOG("Completed waitevent task for %s.", thread->device.name); return ret; } /* Register a device with the DSO. */ static int _do_register_device(struct thread_status *thread) { return thread->dso_data->register_device(thread->device.name, thread->device.uuid, thread->device.major, thread->device.minor, &(thread->dso_private)); } /* Unregister a device with the DSO. */ static int _do_unregister_device(struct thread_status *thread) { return thread->dso_data->unregister_device(thread->device.name, thread->device.uuid, thread->device.major, thread->device.minor, &(thread->dso_private)); } /* Process an event in the DSO. */ static void _do_process_event(struct thread_status *thread) { struct dm_task *task; /* NOTE: timeout event gets status */ task = (thread->current_events & DM_EVENT_TIMEOUT) ? _get_device_status(thread) : thread->wait_task; if (!task) log_error("Lost event in Thr %x.", (int)thread->thread); else { thread->dso_data->process_event(task, thread->current_events, &(thread->dso_private)); if (task != thread->wait_task) dm_task_destroy(task); } } static void _thread_unused(struct thread_status *thread) { UNLINK_THREAD(thread); LINK(thread, &_thread_registry_unused); } /* Thread cleanup handler to unregister device. */ static void _monitor_unregister(void *arg) { struct thread_status *thread = arg, *thread_iter; dm_list_iterate_items(thread_iter, &_thread_registry) if (thread_iter == thread) { /* Relink to _unused */ _thread_unused(thread); break; } thread->events = 0; /* Filter is now empty */ thread->pending = 0; /* Event pending resolved */ thread->processing = 1; /* Process unregistering */ _unlock_mutex(); DEBUGLOG("Unregistering monitor for %s.", thread->device.name); _unregister_for_timeout(thread); if ((thread->status != DM_THREAD_REGISTERING) && !_do_unregister_device(thread)) log_error("%s: %s unregister failed.", __func__, thread->device.name); DEBUGLOG("Marking Thr %x as DONE and unused.", (int)thread->thread); _lock_mutex(); thread->status = DM_THREAD_DONE; /* Last access to thread memory! */ _unlock_mutex(); } /* Device monitoring thread. 
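 *
 * The loop below alternates between waiting for an event (_event_wait) and
 * handing events that match the registration mask to the DSO
 * (_do_process_event).  It keeps running while thread->events is non-zero;
 * once the mask is cleared, e.g. by _unregister_for_event(), the loop ends
 * and the _monitor_unregister() cleanup handler takes over.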
*/ static void *_monitor_thread(void *arg) { struct thread_status *thread = arg; int ret; sigset_t pendmask; pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL); pthread_cleanup_push(_monitor_unregister, thread); if (!_fill_device_data(thread)) { log_error("Failed to fill device data for %s.", thread->device.uuid); _lock_mutex(); goto out; } if (!_do_register_device(thread)) { log_error("Failed to register device %s.", thread->device.name); _lock_mutex(); goto out; } _lock_mutex(); thread->status = DM_THREAD_RUNNING; thread->processing = 0; /* Loop awaiting/analyzing device events. */ while (thread->events) { thread->pending = 0; /* Event is no longer pending... */ /* * Check against bitmask filter. * * If there's current events delivered from _event_wait() AND * the device got registered for those events AND * those events haven't been processed yet, call * the DSO's process_event() handler. */ if (thread->events & thread->current_events) { thread->processing = 1; /* Cannot be removed/signaled */ _unlock_mutex(); _do_process_event(thread); thread->current_events = 0; /* Current events processed */ _lock_mutex(); thread->processing = 0; /* * Thread can terminate itself from plugin via SIGALRM * Timer thread will not send signal while processing * TODO: maybe worth API change and return value for * _do_process_event() instead of this signal solution */ if (sigpending(&pendmask) < 0) log_sys_error("sigpending", ""); else if (sigismember(&pendmask, SIGALRM)) break; } else { _unlock_mutex(); if ((ret = _event_wait(thread)) == DM_WAIT_RETRY) usleep(100); /* Avoid busy loop, wait without mutex */ _lock_mutex(); if (ret == DM_WAIT_FATAL) break; } } out: /* ';' fixes gcc compilation problem with older pthread macros * "label at end of compound statement" */ ; pthread_cleanup_pop(1); return NULL; } /* Create a device monitoring thread. */ static int _create_thread(struct thread_status *thread) { return _pthread_create_smallstack(&thread->thread, _monitor_thread, thread); } /* Update events - needs to be locked */ static int _update_events(struct thread_status *thread, int events) { int ret = 0; if (thread->events == events) return 0; /* Nothing has changed */ thread->events = events; thread->pending = DM_EVENT_REGISTRATION_PENDING; /* Only non-processing threads can be notified */ if (!thread->processing) { DEBUGLOG("Sending SIGALRM to wakeup Thr %x.", (int)thread->thread); /* Notify thread waiting in ioctl (to speed-up) */ if ((ret = pthread_kill(thread->thread, SIGALRM))) { if (ret == ESRCH) thread->events = 0; /* thread is gone */ else log_error("Unable to wakeup thread: %s", strerror(ret)); } } /* Threads with no events has to be moved to unused */ if (!thread->events) _thread_unused(thread); return -ret; } /* Return success on daemon active check. */ static int _active(struct message_data *message_data) { return 0; } /* * Unregister for an event. * * Only one caller at a time here as with register_for_event(). */ static int _unregister_for_event(struct message_data *message_data) { struct thread_status *thread; int ret; /* * Clear event in bitfield and deactivate * monitoring thread in case bitfield is 0. */ _lock_mutex(); if (!(thread = _lookup_thread_status(message_data))) { _unlock_mutex(); return -ENODEV; } /* AND mask event ~# from events bitfield. 
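 * For example, a thread registered for DM_EVENT_DEVICE_ERROR and
 * DM_EVENT_TIMEOUT that is asked to drop DM_EVENT_TIMEOUT keeps watching
 * for errors only; if the resulting mask is 0, _update_events() parks the
 * thread on the unused list and _cleanup_unused_threads() reaps it later.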
*/ ret = _update_events(thread, (thread->events & ~message_data->events_field)); _unlock_mutex(); /* If there are no events, thread is later garbage * collected by _cleanup_unused_threads */ if (message_data->events_field & DM_EVENT_TIMEOUT) _unregister_for_timeout(thread); DEBUGLOG("Unregistered event for %s.", thread->device.name); return ret; } /* * Register for an event. * * Only one caller at a time here, because we use * a FIFO and lock it against multiple accesses. */ static int _register_for_event(struct message_data *message_data) { int ret = 0; struct thread_status *thread; struct dso_data *dso_data; if (!(dso_data = _lookup_dso(message_data)) && !(dso_data = _load_dso(message_data))) { stack; #ifdef ELIBACC ret = ELIBACC; #else ret = ENODEV; #endif return ret; } _lock_mutex(); if ((thread = _lookup_thread_status(message_data))) { /* OR event # into events bitfield. */ ret = _update_events(thread, (thread->events | message_data->events_field)); } else { _unlock_mutex(); /* Only creating thread during event processing * Remaining initialization happens within monitoring thread */ if (!(thread = _alloc_thread_status(message_data, dso_data))) { stack; return -ENOMEM; } if ((ret = _create_thread(thread))) { stack; _free_thread_status(thread); return -ret; } _lock_mutex(); /* Note: same uuid can't be added in parallel */ LINK_THREAD(thread); } _unlock_mutex(); /* If creation of timeout thread fails (as it may), we fail here completely. The client is responsible for either retrying later or trying to register without timeout events. However, if timeout thread cannot be started, it usually means we are so starved on resources that we are almost as good as dead already... */ if ((message_data->events_field & DM_EVENT_TIMEOUT) && (ret = _register_for_timeout(thread))) { stack; _unregister_for_event(message_data); } return -ret; } /* * Get registered device. * * Only one caller at a time here as with register_for_event(). */ static int _registered_device(struct message_data *message_data, struct thread_status *thread) { int r; struct dm_event_daemon_message *msg = message_data->msg; dm_free(msg->data); if ((r = dm_asprintf(&(msg->data), "%s %s %s %u", message_data->id, thread->dso_data->dso_name, thread->device.uuid, thread->events | thread->pending)) < 0) return -ENOMEM; msg->size = (uint32_t) r; DEBUGLOG("Registered %s.", msg->data); return 0; } static int _want_registered_device(char *dso_name, char *device_uuid, struct thread_status *thread) { /* If DSO names and device paths are equal. */ if (dso_name && device_uuid) return !strcmp(dso_name, thread->dso_data->dso_name) && !strcmp(device_uuid, thread->device.uuid); /* If DSO names are equal. */ if (dso_name) return !strcmp(dso_name, thread->dso_data->dso_name); /* If device paths are equal. */ if (device_uuid) return !strcmp(device_uuid, thread->device.uuid); return 1; } static int _get_registered_dev(struct message_data *message_data, int next) { struct thread_status *thread, *hit = NULL; int ret = -ENOENT; DEBUGLOG("Get%s dso:%s uuid:%s.", next ? "" : "Next", message_data->dso_name, message_data->device_uuid); _lock_mutex(); /* Iterate list of threads checking if we want a particular one. */ dm_list_iterate_items(thread, &_thread_registry) if (_want_registered_device(message_data->dso_name, message_data->device_uuid, thread)) { hit = thread; break; } /* * If we got a registered device and want the next one -> * fetch next conforming element off the list. 
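 * (When 'next' is not set, the first match found above is returned as is;
 * the while(1) loop below runs only in the 'next' case and advances past
 * the current hit until another thread with a matching DSO name is found.)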
*/ if (hit && !next) goto reg; /* * If we didn't get a match, try the threads waiting to be deleted. * FIXME Do something similar if 'next' is set. */ if (!hit && !next) dm_list_iterate_items(thread, &_thread_registry_unused) if (_want_registered_device(message_data->dso_name, message_data->device_uuid, thread)) { hit = thread; goto reg; } if (!hit) { DEBUGLOG("Get%s not registered", next ? "" : "Next"); goto out; } while (1) { if (dm_list_end(&_thread_registry, &thread->list)) goto out; thread = dm_list_item(thread->list.n, struct thread_status); if (_want_registered_device(message_data->dso_name, NULL, thread)) { hit = thread; break; } } reg: ret = _registered_device(message_data, hit); out: _unlock_mutex(); return ret; } static int _get_registered_device(struct message_data *message_data) { return _get_registered_dev(message_data, 0); } static int _get_next_registered_device(struct message_data *message_data) { return _get_registered_dev(message_data, 1); } static int _set_timeout(struct message_data *message_data) { struct thread_status *thread; _lock_mutex(); thread = _lookup_thread_status(message_data); _unlock_mutex(); if (!thread) return -ENODEV; /* Lets reprogram timer */ pthread_mutex_lock(&_timeout_mutex); thread->timeout = message_data->timeout_secs; thread->next_time = 0; pthread_cond_signal(&_timeout_cond); pthread_mutex_unlock(&_timeout_mutex); return 0; } static int _get_timeout(struct message_data *message_data) { struct thread_status *thread; struct dm_event_daemon_message *msg = message_data->msg; _lock_mutex(); thread = _lookup_thread_status(message_data); _unlock_mutex(); if (!thread) return -ENODEV; dm_free(msg->data); msg->size = dm_asprintf(&(msg->data), "%s %" PRIu32, message_data->id, thread->timeout); return (msg->data && msg->size) ? 0 : -ENOMEM; } static int _open_fifo(const char *path) { struct stat st; int fd = -1; /* * FIXME Explicitly verify the code's requirement that path is secure: * - All parent directories owned by root without group/other write access unless sticky. */ /* If path exists, only use it if it is root-owned fifo mode 0600 */ if ((lstat(path, &st) < 0)) { if (errno != ENOENT) { log_sys_error("stat", path); return -1; } } else if (!S_ISFIFO(st.st_mode) || st.st_uid || (st.st_mode & (S_IEXEC | S_IRWXG | S_IRWXO))) { log_warn("WARNING: %s has wrong attributes: Replacing.", path); if (unlink(path)) { log_sys_error("unlink", path); return -1; } } /* Create fifo. */ (void) dm_prepare_selinux_context(path, S_IFIFO); if ((mkfifo(path, 0600) == -1) && errno != EEXIST) { log_sys_error("mkfifo", path); (void) dm_prepare_selinux_context(NULL, 0); goto fail; } (void) dm_prepare_selinux_context(NULL, 0); /* Need to open read+write or we will block or fail */ if ((fd = open(path, O_RDWR)) < 0) { log_sys_error("open", path); goto fail; } /* Warn about wrong permissions if applicable */ if (fstat(fd, &st)) { log_sys_error("fstat", path); goto fail; } if (!S_ISFIFO(st.st_mode) || st.st_uid || (st.st_mode & (S_IEXEC | S_IRWXG | S_IRWXO))) { log_error("%s: fifo has incorrect attributes", path); goto fail; } if (fcntl(fd, F_SETFD, FD_CLOEXEC)) { log_sys_error("fcntl(FD_CLOEXEC)", path); goto fail; } return fd; fail: if ((fd >= 0) && close(fd)) log_sys_error("close", path); return -1; } /* Open fifos used for client communication. */ static int _open_fifos(struct dm_event_fifos *fifos) { /* Create client fifo. */ if ((fifos->client = _open_fifo(fifos->client_path)) < 0) goto fail; /* Create server fifo. 
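 * Both FIFOs carry the same simple framing, read by _client_read() and
 * written by _client_write() below: two uint32_t values in network byte
 * order (command, then payload size), followed by 'size' bytes of payload.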
*/ if ((fifos->server = _open_fifo(fifos->server_path)) < 0) goto fail; return 1; fail: if (fifos->client >= 0 && close(fifos->client)) log_sys_error("close", fifos->client_path); return 0; } /* * Read message from client making sure that data is available * and a complete message is read. Must not block indefinitely. */ static int _client_read(struct dm_event_fifos *fifos, struct dm_event_daemon_message *msg) { struct timeval t; unsigned bytes = 0; int ret = 0; fd_set fds; size_t size = 2 * sizeof(uint32_t); /* status + size */ uint32_t *header = alloca(size); char *buf = (char *)header; msg->data = NULL; errno = 0; while (bytes < size && errno != EOF) { /* Watch client read FIFO for input. */ FD_ZERO(&fds); FD_SET(fifos->client, &fds); t.tv_sec = 1; t.tv_usec = 0; ret = select(fifos->client + 1, &fds, NULL, NULL, &t); if (!ret && !bytes) /* nothing to read */ return 0; if (!ret) /* trying to finish read */ continue; if (ret < 0) /* error */ return 0; ret = read(fifos->client, buf + bytes, size - bytes); bytes += ret > 0 ? ret : 0; if (header && (bytes == 2 * sizeof(uint32_t))) { msg->cmd = ntohl(header[0]); size = msg->size = ntohl(header[1]); bytes = 0; if (!size) break; /* No data -> error */ buf = msg->data = dm_malloc(msg->size); if (!buf) break; /* No mem -> error */ header = 0; } } if (bytes != size) { dm_free(msg->data); msg->data = NULL; return 0; } return 1; } /* * Write a message to the client making sure that it is ready to write. */ static int _client_write(struct dm_event_fifos *fifos, struct dm_event_daemon_message *msg) { uint32_t temp[2]; unsigned bytes = 0; int ret = 0; fd_set fds; size_t size = 2 * sizeof(uint32_t) + ((msg->data) ? msg->size : 0); uint32_t *header = dm_malloc(size); char *buf = (char *)header; if (!header) { /* Reply with ENOMEM message */ header = temp; size = sizeof(temp); header[0] = htonl(-ENOMEM); header[1] = 0; } else { header[0] = htonl(msg->cmd); header[1] = htonl((msg->data) ? msg->size : 0); if (msg->data) memcpy(buf + 2 * sizeof(uint32_t), msg->data, msg->size); } while (bytes < size) { do { /* Watch client write FIFO to be ready for output. */ FD_ZERO(&fds); FD_SET(fifos->server, &fds); } while (select(fifos->server + 1, NULL, &fds, NULL, NULL) != 1); if ((ret = write(fifos->server, buf + bytes, size - bytes)) > 0) bytes += ret; else if (errno == EIO) break; } if (header != temp) dm_free(header); return (bytes == size); } /* * Handle a client request. * * We put the request handling functions into * a list because of the growing number. */ static int _handle_request(struct dm_event_daemon_message *msg, struct message_data *message_data) { switch (msg->cmd) { case DM_EVENT_CMD_REGISTER_FOR_EVENT: if (!message_data->events_field) return -EINVAL; return _register_for_event(message_data); case DM_EVENT_CMD_UNREGISTER_FOR_EVENT: return _unregister_for_event(message_data); case DM_EVENT_CMD_GET_REGISTERED_DEVICE: return _get_registered_device(message_data); case DM_EVENT_CMD_GET_NEXT_REGISTERED_DEVICE: return _get_next_registered_device(message_data); case DM_EVENT_CMD_SET_TIMEOUT: return _set_timeout(message_data); case DM_EVENT_CMD_GET_TIMEOUT: return _get_timeout(message_data); case DM_EVENT_CMD_ACTIVE: return _active(message_data); case DM_EVENT_CMD_GET_STATUS: return _get_status(message_data); /* dmeventd parameters of running dmeventd, * returns 'pid= daemon= exec_method=' * pid - pidfile of running dmeventd * daemon - running as a daemon or not (foreground)? 
* exec_method - "direct" if executed directly or * "systemd" if executed via systemd */ case DM_EVENT_CMD_GET_PARAMETERS: return _get_parameters(message_data); default: return -EINVAL; } } /* Process a request passed from the communication thread. */ static int _do_process_request(struct dm_event_daemon_message *msg) { int ret; char *answer; struct message_data message_data = { .msg = msg }; /* Parse the message. */ if (msg->cmd == DM_EVENT_CMD_HELLO || msg->cmd == DM_EVENT_CMD_DIE) { ret = 0; answer = msg->data; if (answer) { msg->size = dm_asprintf(&(msg->data), "%s %s %d", answer, (msg->cmd == DM_EVENT_CMD_DIE) ? "DYING" : "HELLO", DM_EVENT_PROTOCOL_VERSION); dm_free(answer); } } else if (msg->cmd != DM_EVENT_CMD_ACTIVE && !_parse_message(&message_data)) { stack; ret = -EINVAL; } else ret = _handle_request(msg, &message_data); msg->cmd = ret; if (!msg->data) msg->size = dm_asprintf(&(msg->data), "%s %s", message_data.id, strerror(-ret)); _free_message(&message_data); return ret; } /* Only one caller at a time. */ static void _process_request(struct dm_event_fifos *fifos) { struct dm_event_daemon_message msg = { 0 }; int cmd; /* * Read the request from the client (client_read, client_write * give true on success and false on failure). */ if (!_client_read(fifos, &msg)) return; cmd = msg.cmd; DEBUGLOG(">>> CMD:%s (0x%x) processing...", decode_cmd(cmd), cmd); /* _do_process_request fills in msg (if memory allows for data, otherwise just cmd and size = 0) */ _do_process_request(&msg); if (!_client_write(fifos, &msg)) stack; DEBUGLOG("<<< CMD:%s (0x%x) completed (result %d).", decode_cmd(cmd), cmd, msg.cmd); dm_free(msg.data); if (cmd == DM_EVENT_CMD_DIE) { if (unlink(DMEVENTD_PIDFILE)) log_sys_error("unlink", DMEVENTD_PIDFILE); _exit(0); } } static void _process_initial_registrations(void) { int i; char *reg; struct dm_event_daemon_message msg = { 0 }; for (i = 0; (reg = _initial_registrations[i]); ++i) { msg.cmd = DM_EVENT_CMD_REGISTER_FOR_EVENT; if ((msg.size = strlen(reg))) { msg.data = reg; _do_process_request(&msg); } } } static void _cleanup_unused_threads(void) { struct dm_list *l; struct thread_status *thread; int ret; _lock_mutex(); while ((l = dm_list_first(&_thread_registry_unused))) { thread = dm_list_item(l, struct thread_status); if (thread->status != DM_THREAD_DONE) { if (thread->processing) break; /* cleanup on the next round */ /* Signal possibly sleeping thread */ ret = pthread_kill(thread->thread, SIGALRM); if (!ret || (ret != ESRCH)) break; /* check again on the next round */ /* thread is likely gone */ } dm_list_del(l); _unlock_mutex(); DEBUGLOG("Destroying Thr %x.", (int)thread->thread); if (pthread_join(thread->thread, NULL)) log_sys_error("pthread_join", ""); _free_thread_status(thread); _lock_mutex(); } _unlock_mutex(); } static void _sig_alarm(int signum __attribute__((unused))) { /* empty SIG_IGN */; } /* Init thread signal handling. */ static void _init_thread_signals(void) { sigset_t my_sigset; struct sigaction act = { .sa_handler = _sig_alarm }; sigaction(SIGALRM, &act, NULL); sigfillset(&my_sigset); /* These are used for exiting */ sigdelset(&my_sigset, SIGTERM); sigdelset(&my_sigset, SIGINT); sigdelset(&my_sigset, SIGHUP); sigdelset(&my_sigset, SIGQUIT); pthread_sigmask(SIG_BLOCK, &my_sigset, NULL); } /* * exit_handler * @sig * * Set the global variable which the process should * be watching to determine when to exit. 
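 *
 * (The main loop treats this as a two-step shutdown: a signal sets
 * DM_SIGNALED_EXIT, and if monitoring threads are still busy main()
 * converts it to DM_SCHEDULED_EXIT and keeps serving requests until the
 * daemon is idle again, at which point it exits.)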
*/ static void _exit_handler(int sig __attribute__((unused))) { _exit_now = DM_SIGNALED_EXIT; } #ifdef __linux__ static int _set_oom_adj(const char *oom_adj_path, int val) { FILE *fp; if (!(fp = fopen(oom_adj_path, "w"))) { log_sys_error("open", oom_adj_path); return 0; } fprintf(fp, "%i", val); if (dm_fclose(fp)) log_sys_error("fclose", oom_adj_path); return 1; } /* * Protection against OOM killer if kernel supports it */ static int _protect_against_oom_killer(void) { struct stat st; if (stat(OOM_ADJ_FILE, &st) == -1) { if (errno != ENOENT) log_sys_error("stat", OOM_ADJ_FILE); /* Try old oom_adj interface as a fallback */ if (stat(OOM_ADJ_FILE_OLD, &st) == -1) { log_sys_error("stat", OOM_ADJ_FILE_OLD); return 1; } return _set_oom_adj(OOM_ADJ_FILE_OLD, OOM_DISABLE) || _set_oom_adj(OOM_ADJ_FILE_OLD, OOM_ADJUST_MIN); } return _set_oom_adj(OOM_ADJ_FILE, OOM_SCORE_ADJ_MIN); } static int _handle_preloaded_fifo(int fd, const char *path) { struct stat st_fd, st_path; int flags; if ((flags = fcntl(fd, F_GETFD)) < 0) return 0; if (flags & FD_CLOEXEC) return 0; if (fstat(fd, &st_fd) < 0 || !S_ISFIFO(st_fd.st_mode)) return 0; if (stat(path, &st_path) < 0 || st_path.st_dev != st_fd.st_dev || st_path.st_ino != st_fd.st_ino) return 0; if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0) return 0; return 1; } static int _systemd_handover(struct dm_event_fifos *fifos) { const char *e; char *p; unsigned long env_pid, env_listen_fds; int r = 0; /* SD_ACTIVATION must be set! */ if (!(e = getenv(SD_ACTIVATION_ENV_VAR_NAME)) || strcmp(e, "1")) goto out; /* LISTEN_PID must be equal to our PID! */ if (!(e = getenv(SD_LISTEN_PID_ENV_VAR_NAME))) goto out; errno = 0; env_pid = strtoul(e, &p, 10); if (errno || !p || *p || env_pid <= 0 || getpid() != (pid_t) env_pid) goto out; /* LISTEN_FDS must be 2 and the fds must be FIFOSs! */ if (!(e = getenv(SD_LISTEN_FDS_ENV_VAR_NAME))) goto out; errno = 0; env_listen_fds = strtoul(e, &p, 10); if (errno || !p || *p || env_listen_fds != 2) goto out; /* Check and handle the FIFOs passed in */ r = (_handle_preloaded_fifo(SD_FD_FIFO_SERVER, DM_EVENT_FIFO_SERVER) && _handle_preloaded_fifo(SD_FD_FIFO_CLIENT, DM_EVENT_FIFO_CLIENT)); if (r) { fifos->server = SD_FD_FIFO_SERVER; fifos->server_path = DM_EVENT_FIFO_SERVER; fifos->client = SD_FD_FIFO_CLIENT; fifos->client_path = DM_EVENT_FIFO_CLIENT; } out: unsetenv(SD_ACTIVATION_ENV_VAR_NAME); unsetenv(SD_LISTEN_PID_ENV_VAR_NAME); unsetenv(SD_LISTEN_FDS_ENV_VAR_NAME); return r; } #endif static void _remove_files_on_exit(void) { if (unlink(DMEVENTD_PIDFILE)) log_sys_error("unlink", DMEVENTD_PIDFILE); if (!_systemd_activation) { if (unlink(DM_EVENT_FIFO_CLIENT)) log_sys_error("unlink", DM_EVENT_FIFO_CLIENT); if (unlink(DM_EVENT_FIFO_SERVER)) log_sys_error("unlink", DM_EVENT_FIFO_SERVER); } } static void _daemonize(void) { int child_status; int fd; pid_t pid; struct rlimit rlim; struct timeval tval; sigset_t my_sigset; sigemptyset(&my_sigset); if (sigprocmask(SIG_SETMASK, &my_sigset, NULL) < 0) { fprintf(stderr, "Unable to restore signals.\n"); exit(EXIT_FAILURE); } signal(SIGTERM, &_exit_handler); switch (pid = fork()) { case -1: log_sys_error("fork", ""); exit(EXIT_FAILURE); case 0: /* Child */ break; default: /* Wait for response from child */ while (!waitpid(pid, &child_status, WNOHANG) && !_exit_now) { tval.tv_sec = 0; tval.tv_usec = 250000; /* .25 sec */ select(0, NULL, NULL, NULL, &tval); } if (_exit_now) /* Child has signaled it is ok - we can exit now */ exit(EXIT_SUCCESS); /* Problem with child. 
Determine what it is by exit code */ switch (WEXITSTATUS(child_status)) { case EXIT_DESC_CLOSE_FAILURE: case EXIT_DESC_OPEN_FAILURE: case EXIT_FIFO_FAILURE: case EXIT_CHDIR_FAILURE: default: fprintf(stderr, "Child exited with code %d\n", WEXITSTATUS(child_status)); break; } exit(WEXITSTATUS(child_status)); } if (chdir("/")) exit(EXIT_CHDIR_FAILURE); if (getrlimit(RLIMIT_NOFILE, &rlim) < 0) fd = 256; /* just have to guess */ else fd = rlim.rlim_cur; for (--fd; fd >= 0; fd--) { #ifdef __linux__ /* Do not close fds preloaded by systemd! */ if (_systemd_activation && (fd == SD_FD_FIFO_SERVER || fd == SD_FD_FIFO_CLIENT)) continue; #endif (void) close(fd); } if ((open("/dev/null", O_RDONLY) < 0) || (open("/dev/null", O_WRONLY) < 0) || (open("/dev/null", O_WRONLY) < 0)) exit(EXIT_DESC_OPEN_FAILURE); setsid(); } static int _reinstate_registrations(struct dm_event_fifos *fifos) { static const char _failed_parsing_msg[] = "Failed to parse existing event registration.\n"; static const char *_delim = " "; struct dm_event_daemon_message msg = { 0 }; char *endp, *dso_name, *dev_name, *mask, *timeout; unsigned long mask_value, timeout_value; int i, ret; ret = daemon_talk(fifos, &msg, DM_EVENT_CMD_HELLO, NULL, NULL, 0, 0); dm_free(msg.data); msg.data = NULL; if (ret) { fprintf(stderr, "Failed to communicate with new instance of dmeventd.\n"); return 0; } for (i = 0; _initial_registrations[i]; ++i) { if (!(strtok(_initial_registrations[i], _delim)) || !(dso_name = strtok(NULL, _delim)) || !(dev_name = strtok(NULL, _delim)) || !(mask = strtok(NULL, _delim)) || !(timeout = strtok(NULL, _delim))) { fputs(_failed_parsing_msg, stderr); continue; } errno = 0; mask_value = strtoul(mask, &endp, 10); if (errno || !endp || *endp) { fputs(_failed_parsing_msg, stderr); continue; } errno = 0; timeout_value = strtoul(timeout, &endp, 10); if (errno || !endp || *endp) { fputs(_failed_parsing_msg, stderr); continue; } if (daemon_talk(fifos, &msg, DM_EVENT_CMD_REGISTER_FOR_EVENT, dso_name, dev_name, (enum dm_event_mask) mask_value, timeout_value)) fprintf(stderr, "Failed to reinstate monitoring for device %s.\n", dev_name); } return 1; } static void _restart_dmeventd(void) { struct dm_event_fifos fifos = { .server = -1, .client = -1, /* FIXME Make these either configurable or depend directly on dmeventd_path */ .client_path = DM_EVENT_FIFO_CLIENT, .server_path = DM_EVENT_FIFO_SERVER }; struct dm_event_daemon_message msg = { 0 }; int i, count = 0; char *message; int version; const char *e; /* Get the list of registrations from the running daemon. */ if (!init_fifos(&fifos)) { fprintf(stderr, "WARNING: Could not initiate communication with existing dmeventd.\n"); exit(EXIT_FAILURE); } if (!dm_event_get_version(&fifos, &version)) { fprintf(stderr, "WARNING: Could not communicate with existing dmeventd.\n"); goto bad; } if (version < 1) { fprintf(stderr, "WARNING: The running dmeventd instance is too old.\n" "Protocol version %d (required: 1). 
Action cancelled.\n", version); goto bad; } if (daemon_talk(&fifos, &msg, DM_EVENT_CMD_GET_STATUS, "-", "-", 0, 0)) goto bad; message = strchr(msg.data, ' ') + 1; for (i = 0; msg.data[i]; ++i) if (msg.data[i] == ';') { msg.data[i] = 0; ++count; } if (!(_initial_registrations = dm_malloc(sizeof(char*) * (count + 1)))) { fprintf(stderr, "Memory allocation registration failed.\n"); goto bad; } for (i = 0; i < count; ++i) { if (!(_initial_registrations[i] = dm_strdup(message))) { fprintf(stderr, "Memory allocation for message failed.\n"); goto bad; } message += strlen(message) + 1; } _initial_registrations[count] = NULL; if (version >= 2) { if (daemon_talk(&fifos, &msg, DM_EVENT_CMD_GET_PARAMETERS, "-", "-", 0, 0)) { fprintf(stderr, "Failed to acquire parameters from old dmeventd.\n"); goto bad; } if (strstr(msg.data, "exec_method=systemd")) _systemd_activation = 1; } #ifdef __linux__ /* * If the protocol version is old, just assume that if systemd is running, * the dmeventd is also run as a systemd service via fifo activation. */ if (version < 2) { /* This check is copied from sd-daemon.c. */ struct stat st; if (!lstat(SD_RUNTIME_UNIT_FILE_DIR, &st) && !!S_ISDIR(st.st_mode)) _systemd_activation = 1; } #endif if (daemon_talk(&fifos, &msg, DM_EVENT_CMD_DIE, "-", "-", 0, 0)) { fprintf(stderr, "Old dmeventd refused to die.\n"); goto bad; } if (!_systemd_activation && ((e = getenv(SD_ACTIVATION_ENV_VAR_NAME)) && strcmp(e, "1"))) _systemd_activation = 1; for (i = 0; i < 10; ++i) { if ((access(DMEVENTD_PIDFILE, F_OK) == -1) && (errno == ENOENT)) break; usleep(10); } if (!_systemd_activation) { fini_fifos(&fifos); return; } /* Reopen fifos. */ fini_fifos(&fifos); if (!init_fifos(&fifos)) { fprintf(stderr, "Could not initiate communication with new instance of dmeventd.\n"); exit(EXIT_FAILURE); } if (!_reinstate_registrations(&fifos)) { fprintf(stderr, "Failed to reinstate monitoring with new instance of dmeventd.\n"); goto bad; } fini_fifos(&fifos); exit(EXIT_SUCCESS); bad: fini_fifos(&fifos); exit(EXIT_FAILURE); } static void _usage(char *prog, FILE *file) { fprintf(file, "Usage:\n" "%s [-d [-d [-d]]] [-f] [-h] [-l] [-R] [-V] [-?]\n\n" " -d Log debug messages to syslog (-d, -dd, -ddd)\n" " -f Don't fork, run in the foreground\n" " -h Show this help information\n" " -l Log to stdout,stderr instead of syslog\n" " -? Show this help information on stderr\n" " -R Restart dmeventd\n" " -V Show version of dmeventd\n\n", prog); } int main(int argc, char *argv[]) { signed char opt; struct dm_event_fifos fifos = { .client = -1, .server = -1, .client_path = DM_EVENT_FIFO_CLIENT, .server_path = DM_EVENT_FIFO_SERVER }; time_t now, idle_exit_timeout = DMEVENTD_IDLE_EXIT_TIMEOUT; opterr = 0; optind = 0; while ((opt = getopt(argc, argv, "?fhVdlR")) != EOF) { switch (opt) { case 'h': _usage(argv[0], stdout); exit(EXIT_SUCCESS); case '?': _usage(argv[0], stderr); exit(EXIT_SUCCESS); case 'R': _restart++; break; case 'f': _foreground++; break; case 'd': _debug_level++; break; case 'l': _use_syslog = 0; break; case 'V': printf("dmeventd version: %s\n", DM_LIB_VERSION); exit(EXIT_SUCCESS); } } if (!_foreground && !_use_syslog) { printf("WARNING: Ignoring logging to stdout, needs options -f\n"); _use_syslog = 1; } /* * Switch to C locale to avoid reading large locale-archive file * used by some glibc (on some distributions it takes over 100MB). * Daemon currently needs to use mlockall(). 
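 * (Presumably the concern is that with mlockall() in effect a mapped
 * locale-archive would be pinned in locked memory too; forcing the
 * built-in C locale avoids mapping it at all.)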
*/ if (setenv("LC_ALL", "C", 1)) perror("Cannot set LC_ALL to C"); if (_restart) _restart_dmeventd(); #ifdef __linux__ _systemd_activation = _systemd_handover(&fifos); #endif if (!_foreground) _daemonize(); if (_use_syslog) openlog("dmeventd", LOG_PID, LOG_DAEMON); dm_event_log_set(_debug_level, _use_syslog); dm_log_with_errno_init(_libdm_log); (void) dm_prepare_selinux_context(DMEVENTD_PIDFILE, S_IFREG); if (dm_create_lockfile(DMEVENTD_PIDFILE) == 0) exit(EXIT_FAILURE); atexit(_remove_files_on_exit); (void) dm_prepare_selinux_context(NULL, 0); /* Set the rest of the signals to cause '_exit_now' to be set */ signal(SIGTERM, &_exit_handler); signal(SIGINT, &_exit_handler); signal(SIGHUP, &_exit_handler); signal(SIGQUIT, &_exit_handler); #ifdef __linux__ /* Systemd has adjusted oom killer for us already */ if (!_systemd_activation && !_protect_against_oom_killer()) log_warn("WARNING: Failed to protect against OOM killer."); #endif _init_thread_signals(); pthread_mutex_init(&_global_mutex, NULL); if (!_systemd_activation && !_open_fifos(&fifos)) exit(EXIT_FIFO_FAILURE); /* Signal parent, letting them know we are ready to go. */ if (!_foreground) kill(getppid(), SIGTERM); log_notice("dmeventd ready for processing."); _idle_since = time(NULL); if (_initial_registrations) _process_initial_registrations(); for (;;) { if (_idle_since) { if (_exit_now) { if (_exit_now == DM_SCHEDULED_EXIT) break; /* Only prints shutdown message */ log_info("dmeventd detected break while being idle " "for %ld second(s), exiting.", (long) (time(NULL) - _idle_since)); break; } if (idle_exit_timeout) { now = time(NULL); if (now < _idle_since) _idle_since = now; /* clock change? */ now -= _idle_since; if (now >= idle_exit_timeout) { log_info("dmeventd was idle for %ld second(s), " "exiting.", (long) now); break; } } } else if (_exit_now == DM_SIGNALED_EXIT) { _exit_now = DM_SCHEDULED_EXIT; /* * When '_exit_now' is set, signal has been received, * but can not simply exit unless all * threads are done processing. */ log_info("dmeventd received break, scheduling exit."); } _process_request(&fifos); _cleanup_unused_threads(); } pthread_mutex_destroy(&_global_mutex); log_notice("dmeventd shutting down."); if (fifos.client >= 0 && close(fifos.client)) log_sys_error("client close", fifos.client_path); if (fifos.server >= 0 && close(fifos.server)) log_sys_error("server close", fifos.server_path); if (_use_syslog) closelog(); _exit_dm_lib(); exit(EXIT_SUCCESS); } LVM2.2.02.176/daemons/clvmd/0000755000000000000120000000000013176752421014057 5ustar rootwheelLVM2.2.02.176/daemons/clvmd/Makefile.in0000644000000000000120000000470713176752421016134 0ustar rootwheel# # Copyright (C) 2004 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ CMAN_LIBS = @CMAN_LIBS@ CMAN_CFLAGS = @CMAN_CFLAGS@ CMAP_LIBS = @CMAP_LIBS@ CMAP_CFLAGS = @CMAP_CFLAGS@ CONFDB_LIBS = @CONFDB_LIBS@ CONFDB_CFLAGS = @CONFDB_CFLAGS@ CPG_LIBS = @CPG_LIBS@ CPG_CFLAGS = @CPG_CFLAGS@ DLM_LIBS = @DLM_LIBS@ DLM_CFLAGS = @DLM_CFLAGS@ QUORUM_LIBS = @QUORUM_LIBS@ QUORUM_CFLAGS = @QUORUM_CFLAGS@ SALCK_LIBS = @SALCK_LIBS@ SALCK_CFLAGS = @SALCK_CFLAGS@ SOURCES = \ clvmd-command.c\ clvmd.c\ lvm-functions.c\ refresh_clvmd.c ifneq (,$(findstring cman,, "@CLVMD@,")) SOURCES += clvmd-cman.c LMLIBS += $(CMAN_LIBS) $(CONFDB_LIBS) $(DLM_LIBS) CFLAGS += $(CMAN_CFLAGS) $(CONFDB_CFLAGS) $(DLM_CFLAGS) DEFS += -DUSE_CMAN endif ifneq (,$(findstring openais,, "@CLVMD@,")) SOURCES += clvmd-openais.c LMLIBS += $(CONFDB_LIBS) $(CPG_LIBS) $(SALCK_LIBS) CFLAGS += $(CONFDB_CFLAGS) $(CPG_CFLAGS) $(SALCK_CFLAGS) DEFS += -DUSE_OPENAIS endif ifneq (,$(findstring corosync,, "@CLVMD@,")) SOURCES += clvmd-corosync.c LMLIBS += $(CMAP_LIBS) $(CONFDB_LIBS) $(CPG_LIBS) $(DLM_LIBS) $(QUORUM_LIBS) CFLAGS += $(CMAP_CFLAGS) $(CONFDB_CFLAGS) $(CPG_CFLAGS) $(DLM_CFLAGS) $(QUORUM_CFLAGS) DEFS += -DUSE_COROSYNC endif ifneq (,$(findstring singlenode,, "@CLVMD@,")) SOURCES += clvmd-singlenode.c DEFS += -DUSE_SINGLENODE endif ifeq ($(MAKECMDGOALS),distclean) SOURCES += clvmd-cman.c SOURCES += clvmd-openais.c SOURCES += clvmd-corosync.c SOURCES += clvmd-singlenode.c endif TARGETS = \ clvmd include $(top_builddir)/make.tmpl LIBS += $(LVMINTERNAL_LIBS) -ldevmapper $(PTHREAD_LIBS) CFLAGS += -fno-strict-aliasing $(EXTRA_EXEC_CFLAGS) INSTALL_TARGETS = \ install_clvmd clvmd: $(OBJECTS) $(top_builddir)/lib/liblvm-internal.a $(CC) $(CFLAGS) $(LDFLAGS) $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS) \ -o clvmd $(OBJECTS) $(LMLIBS) $(LIBS) .PHONY: install_clvmd install_clvmd: $(TARGETS) $(INSTALL_PROGRAM) -D clvmd $(usrsbindir)/clvmd install: $(INSTALL_TARGETS) install_cluster: $(INSTALL_TARGETS) LVM2.2.02.176/daemons/clvmd/lvm-functions.c0000644000000000000120000005627413176752421017045 0ustar rootwheel/* * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved. * Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "clvmd-common.h" #include #include "clvm.h" #include "clvmd-comms.h" #include "clvmd.h" #include "lvm-functions.h" /* LVM2 headers */ #include "toolcontext.h" #include "lvmcache.h" #include "lvm-globals.h" #include "activate.h" #include "archiver.h" #include "memlock.h" #include static struct cmd_context *cmd = NULL; static struct dm_hash_table *lv_hash = NULL; static pthread_mutex_t lv_hash_lock; static pthread_mutex_t lvm_lock; static char last_error[1024]; struct lv_info { int lock_id; int lock_mode; }; static const char *decode_full_locking_cmd(uint32_t cmdl) { static char buf[128]; const char *type; const char *scope; const char *command; switch (cmdl & LCK_TYPE_MASK) { case LCK_NULL: type = "NULL"; break; case LCK_READ: type = "READ"; break; case LCK_PREAD: type = "PREAD"; break; case LCK_WRITE: type = "WRITE"; break; case LCK_EXCL: type = "EXCL"; break; case LCK_UNLOCK: type = "UNLOCK"; break; default: type = "unknown"; break; } switch (cmdl & LCK_SCOPE_MASK) { case LCK_VG: scope = "VG"; command = "LCK_VG"; break; case LCK_LV: scope = "LV"; switch (cmdl & LCK_MASK) { case LCK_LV_EXCLUSIVE & LCK_MASK: command = "LCK_LV_EXCLUSIVE"; break; case LCK_LV_SUSPEND & LCK_MASK: command = "LCK_LV_SUSPEND"; break; case LCK_LV_RESUME & LCK_MASK: command = "LCK_LV_RESUME"; break; case LCK_LV_ACTIVATE & LCK_MASK: command = "LCK_LV_ACTIVATE"; break; case LCK_LV_DEACTIVATE & LCK_MASK: command = "LCK_LV_DEACTIVATE"; break; default: command = "unknown"; break; } break; default: scope = "unknown"; command = "unknown"; break; } sprintf(buf, "0x%x %s (%s|%s%s%s%s%s)", cmdl, command, type, scope, cmdl & LCK_NONBLOCK ? "|NONBLOCK" : "", cmdl & LCK_HOLD ? "|HOLD" : "", cmdl & LCK_CLUSTER_VG ? "|CLUSTER_VG" : "", cmdl & LCK_CACHE ? "|CACHE" : ""); return buf; } /* * Only processes 8 bits: excludes LCK_CACHE. */ static const char *decode_locking_cmd(unsigned char cmdl) { return decode_full_locking_cmd((uint32_t) cmdl); } static const char *decode_flags(unsigned char flags) { static char buf[128]; int len; len = sprintf(buf, "0x%x ( %s%s%s%s%s%s%s%s)", flags, flags & LCK_PARTIAL_MODE ? "PARTIAL_MODE|" : "", flags & LCK_MIRROR_NOSYNC_MODE ? "MIRROR_NOSYNC|" : "", flags & LCK_DMEVENTD_MONITOR_MODE ? "DMEVENTD_MONITOR|" : "", flags & LCK_ORIGIN_ONLY_MODE ? "ORIGIN_ONLY|" : "", flags & LCK_TEST_MODE ? "TEST|" : "", flags & LCK_CONVERT_MODE ? "CONVERT|" : "", flags & LCK_DMEVENTD_MONITOR_IGNORE ? "DMEVENTD_MONITOR_IGNORE|" : "", flags & LCK_REVERT_MODE ? 
"REVERT|" : ""); if (len > 1) buf[len - 2] = ' '; else buf[0] = '\0'; return buf; } char *get_last_lvm_error(void) { return last_error; } /* * Hash lock info helpers */ static struct lv_info *lookup_info(const char *resource) { struct lv_info *lvi; pthread_mutex_lock(&lv_hash_lock); lvi = dm_hash_lookup(lv_hash, resource); pthread_mutex_unlock(&lv_hash_lock); return lvi; } static int insert_info(const char *resource, struct lv_info *lvi) { int ret; pthread_mutex_lock(&lv_hash_lock); ret = dm_hash_insert(lv_hash, resource, lvi); pthread_mutex_unlock(&lv_hash_lock); return ret; } static void remove_info(const char *resource) { int num_open; pthread_mutex_lock(&lv_hash_lock); dm_hash_remove(lv_hash, resource); /* When last lock is remove, validate there are not left opened devices */ if (!dm_hash_get_first(lv_hash)) { if (critical_section()) log_error(INTERNAL_ERROR "No volumes are locked however clvmd is in activation mode critical section."); if ((num_open = dev_cache_check_for_open_devices())) log_error(INTERNAL_ERROR "No volumes are locked however %d devices are still open.", num_open); } pthread_mutex_unlock(&lv_hash_lock); } /* * Return the mode a lock is currently held at (or -1 if not held) */ static int get_current_lock(char *resource) { struct lv_info *lvi; if ((lvi = lookup_info(resource))) return lvi->lock_mode; return -1; } void init_lvhash(void) { /* Create hash table for keeping LV locks & status */ lv_hash = dm_hash_create(1024); pthread_mutex_init(&lv_hash_lock, NULL); pthread_mutex_init(&lvm_lock, NULL); } /* Called at shutdown to tidy the lockspace */ void destroy_lvhash(void) { struct dm_hash_node *v; struct lv_info *lvi; char *resource; int status; pthread_mutex_lock(&lv_hash_lock); dm_hash_iterate(v, lv_hash) { lvi = dm_hash_get_data(lv_hash, v); resource = dm_hash_get_key(lv_hash, v); if ((status = sync_unlock(resource, lvi->lock_id))) DEBUGLOG("unlock_all. unlock failed(%d): %s\n", status, strerror(errno)); dm_free(lvi); } dm_hash_destroy(lv_hash); lv_hash = NULL; pthread_mutex_unlock(&lv_hash_lock); } /* Gets a real lock and keeps the info in the hash table */ static int hold_lock(char *resource, int mode, int flags) { int status; int saved_errno; struct lv_info *lvi; /* Mask off invalid options */ flags &= LCKF_NOQUEUE | LCKF_CONVERT; lvi = lookup_info(resource); if (lvi) { if (lvi->lock_mode == mode) { DEBUGLOG("hold_lock, lock mode %d already held\n", mode); return 0; } if ((lvi->lock_mode == LCK_EXCL) && (mode == LCK_WRITE)) { DEBUGLOG("hold_lock, lock already held LCK_EXCL, " "ignoring LCK_WRITE request\n"); return 0; } } /* Only allow explicit conversions */ if (lvi && !(flags & LCKF_CONVERT)) { errno = EBUSY; return -1; } if (lvi) { /* Already exists - convert it */ status = sync_lock(resource, mode, flags, &lvi->lock_id); saved_errno = errno; if (!status) lvi->lock_mode = mode; else DEBUGLOG("hold_lock. convert to %d failed: %s\n", mode, strerror(errno)); errno = saved_errno; } else { if (!(lvi = dm_malloc(sizeof(struct lv_info)))) { errno = ENOMEM; return -1; } lvi->lock_mode = mode; lvi->lock_id = 0; status = sync_lock(resource, mode, flags & ~LCKF_CONVERT, &lvi->lock_id); saved_errno = errno; if (status) { dm_free(lvi); DEBUGLOG("hold_lock. 
lock at %d failed: %s\n", mode, strerror(errno)); } else if (!insert_info(resource, lvi)) { errno = ENOMEM; return -1; } errno = saved_errno; } return status; } /* Unlock and remove it from the hash table */ static int hold_unlock(char *resource) { struct lv_info *lvi; int status; int saved_errno; if (!(lvi = lookup_info(resource))) { DEBUGLOG("hold_unlock, lock not already held\n"); return 0; } status = sync_unlock(resource, lvi->lock_id); saved_errno = errno; if (!status) { remove_info(resource); dm_free(lvi); } else { DEBUGLOG("hold_unlock. unlock failed(%d): %s\n", status, strerror(errno)); } errno = saved_errno; return status; } /* Watch the return codes here. liblvm API functions return 1(true) for success, 0(false) for failure and don't set errno. libdlm API functions return 0 for success, -1 for failure and do set errno. These functions here return 0 for success or >0 for failure (where the retcode is errno) */ /* Activate LV exclusive or non-exclusive */ static int do_activate_lv(char *resource, unsigned char command, unsigned char lock_flags, int mode) { int oldmode; int status; int activate_lv; int exclusive = 0; struct lvinfo lvi; /* Is it already open ? */ oldmode = get_current_lock(resource); if (oldmode == mode && (command & LCK_CLUSTER_VG)) { DEBUGLOG("do_activate_lv, lock already held at %d\n", oldmode); return 0; /* Nothing to do */ } /* Does the config file want us to activate this LV ? */ if (!lv_activation_filter(cmd, resource, &activate_lv, NULL)) return EIO; if (!activate_lv) return 0; /* Success, we did nothing! */ /* Do we need to activate exclusively? */ if ((activate_lv == 2) || (mode == LCK_EXCL)) { exclusive = 1; mode = LCK_EXCL; } /* * Try to get the lock if it's a clustered volume group. * Use lock conversion only if requested, to prevent implicit conversion * of exclusive lock to shared one during activation. */ if (!test_mode() && command & LCK_CLUSTER_VG) { status = hold_lock(resource, mode, LCKF_NOQUEUE | ((lock_flags & LCK_CONVERT_MODE) ? LCKF_CONVERT:0)); if (status) { /* Return an LVM-sensible error for this. * Forcing EIO makes the upper level return this text * rather than the strerror text for EAGAIN. */ if (errno == EAGAIN) { sprintf(last_error, "Volume is busy on another node"); errno = EIO; } return errno; } } /* If it's suspended then resume it */ if (!lv_info_by_lvid(cmd, resource, 0, &lvi, 0, 0)) goto error; if (lvi.suspended) { critical_section_inc(cmd, "resuming"); if (!lv_resume(cmd, resource, 0, NULL)) { critical_section_dec(cmd, "resumed"); goto error; } } /* Now activate it */ if (!lv_activate(cmd, resource, exclusive, 0, 0, NULL)) goto error; return 0; error: if (!test_mode() && (oldmode == -1 || oldmode != mode)) (void)hold_unlock(resource); return EIO; } /* Resume the LV if it was active */ static int do_resume_lv(char *resource, unsigned char command, unsigned char lock_flags) { int oldmode, origin_only, exclusive, revert; /* Is it open ? */ oldmode = get_current_lock(resource); if (oldmode == -1 && (command & LCK_CLUSTER_VG)) { DEBUGLOG("do_resume_lv, lock not already held\n"); return 0; /* We don't need to do anything */ } origin_only = (lock_flags & LCK_ORIGIN_ONLY_MODE) ? 1 : 0; exclusive = (oldmode == LCK_EXCL) ? 1 : 0; revert = (lock_flags & LCK_REVERT_MODE) ? 
1 : 0; if (!lv_resume_if_active(cmd, resource, origin_only, exclusive, revert, NULL)) return EIO; return 0; } /* Suspend the device if active */ static int do_suspend_lv(char *resource, unsigned char command, unsigned char lock_flags) { int oldmode; unsigned origin_only = (lock_flags & LCK_ORIGIN_ONLY_MODE) ? 1 : 0; unsigned exclusive; /* Is it open ? */ oldmode = get_current_lock(resource); if (oldmode == -1 && (command & LCK_CLUSTER_VG)) { DEBUGLOG("do_suspend_lv, lock not already held\n"); return 0; /* Not active, so it's OK */ } exclusive = (oldmode == LCK_EXCL) ? 1 : 0; /* Always call lv_suspend to read commited and precommited data */ if (!lv_suspend_if_active(cmd, resource, origin_only, exclusive, NULL, NULL)) return EIO; return 0; } static int do_deactivate_lv(char *resource, unsigned char command, unsigned char lock_flags) { int oldmode; int status; /* Is it open ? */ oldmode = get_current_lock(resource); if (oldmode == -1 && (command & LCK_CLUSTER_VG)) { DEBUGLOG("do_deactivate_lock, lock not already held\n"); return 0; /* We don't need to do anything */ } if (!lv_deactivate(cmd, resource, NULL)) return EIO; if (!test_mode() && command & LCK_CLUSTER_VG) { status = hold_unlock(resource); if (status) return errno; } return 0; } const char *do_lock_query(char *resource) { int mode; const char *type; mode = get_current_lock(resource); switch (mode) { case LCK_NULL: type = "NL"; break; case LCK_READ: type = "CR"; break; case LCK_PREAD:type = "PR"; break; case LCK_WRITE:type = "PW"; break; case LCK_EXCL: type = "EX"; break; default: type = NULL; } DEBUGLOG("do_lock_query: resource '%s', mode %i (%s)\n", resource, mode, type ?: "--"); return type; } /* This is the LOCK_LV part that happens on all nodes in the cluster - it is responsible for the interaction with device-mapper and LVM */ int do_lock_lv(unsigned char command, unsigned char lock_flags, char *resource) { int status = 0; DEBUGLOG("do_lock_lv: resource '%s', cmd = %s, flags = %s, critical_section = %d\n", resource, decode_locking_cmd(command), decode_flags(lock_flags), critical_section()); if (!cmd->initialized.config || config_files_changed(cmd)) { /* Reinitialise various settings inc. logging, filters */ if (do_refresh_cache()) { log_error("Updated config file invalid. Aborting."); return EINVAL; } } pthread_mutex_lock(&lvm_lock); init_test((lock_flags & LCK_TEST_MODE) ? 1 : 0); if (lock_flags & LCK_MIRROR_NOSYNC_MODE) init_mirror_in_sync(1); if (lock_flags & LCK_DMEVENTD_MONITOR_IGNORE) init_dmeventd_monitor(DMEVENTD_MONITOR_IGNORE); else { if (lock_flags & LCK_DMEVENTD_MONITOR_MODE) init_dmeventd_monitor(1); else init_dmeventd_monitor(0); } cmd->partial_activation = (lock_flags & LCK_PARTIAL_MODE) ? 
1 : 0; /* clvmd should never try to read suspended device */ init_ignore_suspended_devices(1); switch (command & LCK_MASK) { case LCK_LV_EXCLUSIVE: status = do_activate_lv(resource, command, lock_flags, LCK_EXCL); break; case LCK_LV_SUSPEND: status = do_suspend_lv(resource, command, lock_flags); break; case LCK_UNLOCK: case LCK_LV_RESUME: /* if active */ status = do_resume_lv(resource, command, lock_flags); break; case LCK_LV_ACTIVATE: status = do_activate_lv(resource, command, lock_flags, LCK_READ); break; case LCK_LV_DEACTIVATE: status = do_deactivate_lv(resource, command, lock_flags); break; default: DEBUGLOG("Invalid LV command 0x%x\n", command); status = EINVAL; break; } if (lock_flags & LCK_MIRROR_NOSYNC_MODE) init_mirror_in_sync(0); cmd->partial_activation = 0; /* clean the pool for another command */ dm_pool_empty(cmd->mem); init_test(0); pthread_mutex_unlock(&lvm_lock); DEBUGLOG("Command return is %d, critical_section is %d\n", status, critical_section()); return status; } /* Functions to do on the local node only BEFORE the cluster-wide stuff above happens */ int pre_lock_lv(unsigned char command, unsigned char lock_flags, char *resource) { /* Nearly all the stuff happens cluster-wide. Apart from SUSPEND. Here we get the lock out on this node (because we are the node modifying the metadata) before suspending cluster-wide. LCKF_CONVERT is used always, local node is going to modify metadata */ if ((command & (LCK_SCOPE_MASK | LCK_TYPE_MASK)) == LCK_LV_SUSPEND && (command & LCK_CLUSTER_VG)) { DEBUGLOG("pre_lock_lv: resource '%s', cmd = %s, flags = %s\n", resource, decode_locking_cmd(command), decode_flags(lock_flags)); if (!(lock_flags & LCK_TEST_MODE) && hold_lock(resource, LCK_WRITE, LCKF_NOQUEUE | LCKF_CONVERT)) return errno; } return 0; } /* Functions to do on the local node only AFTER the cluster-wide stuff above happens */ int post_lock_lv(unsigned char command, unsigned char lock_flags, char *resource) { int status; unsigned origin_only = (lock_flags & LCK_ORIGIN_ONLY_MODE) ? 1 : 0; /* Opposite of above, done on resume after a metadata update */ if ((command & (LCK_SCOPE_MASK | LCK_TYPE_MASK)) == LCK_LV_RESUME && (command & LCK_CLUSTER_VG)) { int oldmode; DEBUGLOG("post_lock_lv: resource '%s', cmd = %s, flags = %s\n", resource, decode_locking_cmd(command), decode_flags(lock_flags)); /* If the lock state is PW then restore it to what it was */ oldmode = get_current_lock(resource); if (oldmode == LCK_WRITE) { struct lvinfo lvi; pthread_mutex_lock(&lvm_lock); status = lv_info_by_lvid(cmd, resource, origin_only, &lvi, 0, 0); pthread_mutex_unlock(&lvm_lock); if (!status) return EIO; if (!(lock_flags & LCK_TEST_MODE)) { if (lvi.exists) { if (hold_lock(resource, LCK_READ, LCKF_CONVERT)) return errno; } else if (hold_unlock(resource)) return errno; } } } return 0; } /* Check if a VG is in use by LVM1 so we don't stomp on it */ int do_check_lvm1(const char *vgname) { int status; status = check_lvm1_vg_inactive(cmd, vgname); return status == 1 ? 
0 : EBUSY; } int do_refresh_cache(void) { DEBUGLOG("Refreshing context\n"); log_notice("Refreshing context"); pthread_mutex_lock(&lvm_lock); if (!refresh_toolcontext(cmd)) { pthread_mutex_unlock(&lvm_lock); return -1; } init_full_scan_done(0); init_ignore_suspended_devices(1); lvmcache_force_next_label_scan(); lvmcache_label_scan(cmd); dm_pool_empty(cmd->mem); pthread_mutex_unlock(&lvm_lock); return 0; } /* * Handle VG lock - drop metadata or update lvmcache state */ void do_lock_vg(unsigned char command, unsigned char lock_flags, char *resource) { uint32_t lock_cmd = command; char *vgname = resource + 2; lock_cmd &= (LCK_SCOPE_MASK | LCK_TYPE_MASK | LCK_HOLD); /* * Check if LCK_CACHE should be set. All P_ locks except # are cache related. */ if (strncmp(resource, "P_#", 3) && !strncmp(resource, "P_", 2)) lock_cmd |= LCK_CACHE; DEBUGLOG("do_lock_vg: resource '%s', cmd = %s, flags = %s, critical_section = %d\n", resource, decode_full_locking_cmd(lock_cmd), decode_flags(lock_flags), critical_section()); /* P_#global causes a full cache refresh */ if (!strcmp(resource, "P_" VG_GLOBAL)) { do_refresh_cache(); return; } pthread_mutex_lock(&lvm_lock); init_test((lock_flags & LCK_TEST_MODE) ? 1 : 0); switch (lock_cmd) { case LCK_VG_COMMIT: DEBUGLOG("vg_commit notification for VG %s\n", vgname); lvmcache_commit_metadata(vgname); break; case LCK_VG_REVERT: DEBUGLOG("vg_revert notification for VG %s\n", vgname); lvmcache_drop_metadata(vgname, 1); break; case LCK_VG_DROP_CACHE: default: DEBUGLOG("Invalidating cached metadata for VG %s\n", vgname); lvmcache_drop_metadata(vgname, 0); } init_test(0); pthread_mutex_unlock(&lvm_lock); } /* * Ideally, clvmd should be started before any LVs are active * but this may not be the case... * I suppose this also comes in handy if clvmd crashes, not that it would! */ static int get_initial_state(struct dm_hash_table *excl_uuid) { int lock_mode; char lv[65], vg[65], flags[26], vg_flags[26]; /* with space for '\0' */ char uuid[65]; char line[255]; char *lvs_cmd; const char *lvm_binary = getenv("LVM_BINARY") ? : LVM_PATH; FILE *lvs; if (dm_asprintf(&lvs_cmd, "%s lvs --config 'log{command_names=0 prefix=\"\"}' " "--nolocking --noheadings -o vg_uuid,lv_uuid,lv_attr,vg_attr", lvm_binary) < 0) return_0; /* FIXME: Maybe link and use liblvm2cmd directly instead of fork */ if (!(lvs = popen(lvs_cmd, "r"))) { dm_free(lvs_cmd); return 0; } while (fgets(line, sizeof(line), lvs)) { if (sscanf(line, "%64s %64s %25s %25s\n", vg, lv, flags, vg_flags) == 4) { /* States: s:suspended a:active S:dropped snapshot I:invalid snapshot */ if (strlen(vg) == 38 && /* is is a valid UUID ? */ (flags[4] == 'a' || flags[4] == 's') && /* is it active or suspended? */ vg_flags[5] == 'c') { /* is it clustered ? */ /* Convert hyphen-separated UUIDs into one */ memcpy(&uuid[0], &vg[0], 6); memcpy(&uuid[6], &vg[7], 4); memcpy(&uuid[10], &vg[12], 4); memcpy(&uuid[14], &vg[17], 4); memcpy(&uuid[18], &vg[22], 4); memcpy(&uuid[22], &vg[27], 4); memcpy(&uuid[26], &vg[32], 6); memcpy(&uuid[32], &lv[0], 6); memcpy(&uuid[38], &lv[7], 4); memcpy(&uuid[42], &lv[12], 4); memcpy(&uuid[46], &lv[17], 4); memcpy(&uuid[50], &lv[22], 4); memcpy(&uuid[54], &lv[27], 4); memcpy(&uuid[58], &lv[32], 6); uuid[64] = '\0'; /* Look for this lock in the list of EX locks we were passed on the command-line */ lock_mode = (dm_hash_lookup(excl_uuid, uuid)) ? 
LCK_EXCL : LCK_READ; DEBUGLOG("getting initial lock for %s\n", uuid); if (hold_lock(uuid, lock_mode, LCKF_NOQUEUE)) DEBUGLOG("Failed to hold lock %s\n", uuid); } } } if (pclose(lvs)) DEBUGLOG("lvs pclose failed: %s\n", strerror(errno)); dm_free(lvs_cmd); return 1; } static void lvm2_log_fn(int level, const char *file, int line, int dm_errno, const char *message) { /* Send messages to the normal LVM2 logging system too, so we get debug output when it's asked for. We need to NULL the function ptr otherwise it will just call back into here! */ init_log_fn(NULL); print_log(level, file, line, dm_errno, "%s", message); init_log_fn(lvm2_log_fn); /* * Ignore non-error messages, but store the latest one for returning * to the user. */ if (level != _LOG_ERR && level != _LOG_FATAL) return; strncpy(last_error, message, sizeof(last_error)); last_error[sizeof(last_error)-1] = '\0'; } /* This checks some basic cluster-LVM configuration stuff */ static void check_config(void) { int locking_type; locking_type = find_config_tree_int(cmd, global_locking_type_CFG, NULL); if (locking_type == 3) /* compiled-in cluster support */ return; if (locking_type == 2) { /* External library, check name */ const char *libname; libname = find_config_tree_str(cmd, global_locking_library_CFG, NULL); if (libname && strstr(libname, "liblvm2clusterlock.so")) return; log_error("Incorrect LVM locking library specified in lvm.conf, cluster operations may not work."); return; } log_error("locking_type not set correctly in lvm.conf, cluster operations will not work."); } /* Backups up the LVM metadata if it's changed */ void lvm_do_backup(const char *vgname) { struct volume_group * vg; int consistent = 0; DEBUGLOG("Triggering backup of VG metadata for %s.\n", vgname); pthread_mutex_lock(&lvm_lock); vg = vg_read_internal(cmd, vgname, NULL /*vgid*/, WARN_PV_READ, &consistent); if (vg && consistent) check_current_backup(vg); else log_error("Error backing up metadata, can't find VG for group %s", vgname); release_vg(vg); dm_pool_empty(cmd->mem); pthread_mutex_unlock(&lvm_lock); } struct dm_hash_node *get_next_excl_lock(struct dm_hash_node *v, char **name) { struct lv_info *lvi; *name = NULL; if (!v) v = dm_hash_get_first(lv_hash); do { if (v) { lvi = dm_hash_get_data(lv_hash, v); DEBUGLOG("Looking for EX locks. 
found %x mode %d\n", lvi->lock_id, lvi->lock_mode); if (lvi->lock_mode == LCK_EXCL) { *name = dm_hash_get_key(lv_hash, v); } v = dm_hash_get_next(lv_hash, v); } } while (v && !*name); if (*name) DEBUGLOG("returning EXclusive UUID %s\n", *name); return v; } void lvm_do_fs_unlock(void) { pthread_mutex_lock(&lvm_lock); DEBUGLOG("Syncing device names\n"); fs_unlock(); pthread_mutex_unlock(&lvm_lock); } /* Called to initialise the LVM context of the daemon */ int init_clvm(struct dm_hash_table *excl_uuid) { /* Use LOG_DAEMON for syslog messages instead of LOG_USER */ init_syslog(LOG_DAEMON); openlog("clvmd", LOG_PID, LOG_DAEMON); /* Initialise already held locks */ if (!get_initial_state(excl_uuid)) log_error("Cannot load initial lock states."); if (!udev_init_library_context()) stack; if (!(cmd = create_toolcontext(1, NULL, 0, 1, 1, 1))) { log_error("Failed to allocate command context"); udev_fin_library_context(); return 0; } if (stored_errno()) { destroy_toolcontext(cmd); return 0; } cmd->cmd_line = "clvmd"; /* Check lvm.conf is setup for cluster-LVM */ check_config(); init_ignore_suspended_devices(1); /* Trap log messages so we can pass them back to the user */ init_log_fn(lvm2_log_fn); memlock_inc_daemon(cmd); return 1; } void destroy_lvm(void) { if (cmd) { memlock_dec_daemon(cmd); destroy_toolcontext(cmd); udev_fin_library_context(); cmd = NULL; } } LVM2.2.02.176/daemons/clvmd/clvmd-openais.c0000644000000000000120000003767013176752421017001 0ustar rootwheel/* * Copyright (C) 2007-2009 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* * This provides the interface between clvmd and OpenAIS as the cluster * and lock manager. 
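 * It talks to OpenAIS through the CPG service for cluster messaging and the
 * SaLck service for locking (see _cluster_send_message and _lock_resource below).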
*/ #include "clvmd-common.h" #include #include #include #include #include #include #include #include "locking.h" #include "clvm.h" #include "clvmd-comms.h" #include "lvm-functions.h" #include "clvmd.h" /* Timeout value for several openais calls */ #define TIMEOUT 10 static void openais_cpg_deliver_callback (cpg_handle_t handle, const struct cpg_name *groupName, uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len); static void openais_cpg_confchg_callback(cpg_handle_t handle, const struct cpg_name *groupName, const struct cpg_address *member_list, size_t member_list_entries, const struct cpg_address *left_list, size_t left_list_entries, const struct cpg_address *joined_list, size_t joined_list_entries); static void _cluster_closedown(void); /* Hash list of nodes in the cluster */ static struct dm_hash_table *node_hash; /* For associating lock IDs & resource handles */ static struct dm_hash_table *lock_hash; /* Number of active nodes */ static int num_nodes; static unsigned int our_nodeid; static struct local_client *cluster_client; /* OpenAIS handles */ static cpg_handle_t cpg_handle; static SaLckHandleT lck_handle; static struct cpg_name cpg_group_name; /* Openais callback structs */ cpg_callbacks_t openais_cpg_callbacks = { .cpg_deliver_fn = openais_cpg_deliver_callback, .cpg_confchg_fn = openais_cpg_confchg_callback, }; struct node_info { enum {NODE_UNKNOWN, NODE_DOWN, NODE_UP, NODE_CLVMD} state; int nodeid; }; struct lock_info { SaLckResourceHandleT res_handle; SaLckLockIdT lock_id; SaNameT lock_name; }; /* Set errno to something approximating the right value and return 0 or -1 */ static int ais_to_errno(SaAisErrorT err) { switch(err) { case SA_AIS_OK: return 0; case SA_AIS_ERR_LIBRARY: errno = EINVAL; break; case SA_AIS_ERR_VERSION: errno = EINVAL; break; case SA_AIS_ERR_INIT: errno = EINVAL; break; case SA_AIS_ERR_TIMEOUT: errno = ETIME; break; case SA_AIS_ERR_TRY_AGAIN: errno = EAGAIN; break; case SA_AIS_ERR_INVALID_PARAM: errno = EINVAL; break; case SA_AIS_ERR_NO_MEMORY: errno = ENOMEM; break; case SA_AIS_ERR_BAD_HANDLE: errno = EINVAL; break; case SA_AIS_ERR_BUSY: errno = EBUSY; break; case SA_AIS_ERR_ACCESS: errno = EPERM; break; case SA_AIS_ERR_NOT_EXIST: errno = ENOENT; break; case SA_AIS_ERR_NAME_TOO_LONG: errno = ENAMETOOLONG; break; case SA_AIS_ERR_EXIST: errno = EEXIST; break; case SA_AIS_ERR_NO_SPACE: errno = ENOSPC; break; case SA_AIS_ERR_INTERRUPT: errno = EINTR; break; case SA_AIS_ERR_NAME_NOT_FOUND: errno = ENOENT; break; case SA_AIS_ERR_NO_RESOURCES: errno = ENOMEM; break; case SA_AIS_ERR_NOT_SUPPORTED: errno = EOPNOTSUPP; break; case SA_AIS_ERR_BAD_OPERATION: errno = EINVAL; break; case SA_AIS_ERR_FAILED_OPERATION: errno = EIO; break; case SA_AIS_ERR_MESSAGE_ERROR: errno = EIO; break; case SA_AIS_ERR_QUEUE_FULL: errno = EXFULL; break; case SA_AIS_ERR_QUEUE_NOT_AVAILABLE: errno = EINVAL; break; case SA_AIS_ERR_BAD_FLAGS: errno = EINVAL; break; case SA_AIS_ERR_TOO_BIG: errno = E2BIG; break; case SA_AIS_ERR_NO_SECTIONS: errno = ENOMEM; break; default: errno = EINVAL; break; } return -1; } static char *print_openais_csid(const char *csid) { static char buf[128]; int id; memcpy(&id, csid, sizeof(int)); sprintf(buf, "%d", id); return buf; } static int add_internal_client(int fd, fd_callback_t callback) { struct local_client *client; DEBUGLOG("Add_internal_client, fd = %d\n", fd); if (!(client = dm_zalloc(sizeof(*client)))) { DEBUGLOG("malloc failed\n"); return -1; } client->fd = fd; client->type = CLUSTER_INTERNAL; client->callback = callback; add_client(client); /* 
Set Close-on-exec */ fcntl(fd, F_SETFD, 1); return 0; } static void openais_cpg_deliver_callback (cpg_handle_t handle, const struct cpg_name *groupName, uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len) { int target_nodeid; memcpy(&target_nodeid, msg, OPENAIS_CSID_LEN); DEBUGLOG("%u got message from nodeid %d for %d. len %" PRIsize_t "\n", our_nodeid, nodeid, target_nodeid, msg_len-4); if (nodeid != our_nodeid) if (target_nodeid == our_nodeid || target_nodeid == 0) process_message(cluster_client, (char *)msg+OPENAIS_CSID_LEN, msg_len-OPENAIS_CSID_LEN, (char*)&nodeid); } static void openais_cpg_confchg_callback(cpg_handle_t handle, const struct cpg_name *groupName, const struct cpg_address *member_list, size_t member_list_entries, const struct cpg_address *left_list, size_t left_list_entries, const struct cpg_address *joined_list, size_t joined_list_entries) { int i; struct node_info *ninfo; DEBUGLOG("confchg callback. %" PRIsize_t " joined, " FMTsize_t " left, %" PRIsize_t " members\n", joined_list_entries, left_list_entries, member_list_entries); for (i=0; inodeid = joined_list[i].nodeid; dm_hash_insert_binary(node_hash, (char *)&ninfo->nodeid, OPENAIS_CSID_LEN, ninfo); } } ninfo->state = NODE_CLVMD; } for (i=0; istate = NODE_DOWN; } for (i=0; inodeid = member_list[i].nodeid; dm_hash_insert_binary(node_hash, (char *)&ninfo->nodeid, OPENAIS_CSID_LEN, ninfo); } } ninfo->state = NODE_CLVMD; } num_nodes = member_list_entries; } static int lck_dispatch(struct local_client *client, char *buf, int len, const char *csid, struct local_client **new_client) { *new_client = NULL; saLckDispatch(lck_handle, SA_DISPATCH_ONE); return 1; } static int _init_cluster(void) { SaAisErrorT err; SaVersionT ver = { 'B', 1, 1 }; int select_fd; node_hash = dm_hash_create(100); lock_hash = dm_hash_create(10); err = cpg_initialize(&cpg_handle, &openais_cpg_callbacks); if (err != SA_AIS_OK) { syslog(LOG_ERR, "Cannot initialise OpenAIS CPG service: %d", err); DEBUGLOG("Cannot initialise OpenAIS CPG service: %d", err); return ais_to_errno(err); } err = saLckInitialize(&lck_handle, NULL, &ver); if (err != SA_AIS_OK) { cpg_initialize(&cpg_handle, &openais_cpg_callbacks); syslog(LOG_ERR, "Cannot initialise OpenAIS lock service: %d", err); DEBUGLOG("Cannot initialise OpenAIS lock service: %d\n\n", err); return ais_to_errno(err); } /* Connect to the clvmd group */ strcpy((char *)cpg_group_name.value, "clvmd"); cpg_group_name.length = strlen((char *)cpg_group_name.value); err = cpg_join(cpg_handle, &cpg_group_name); if (err != SA_AIS_OK) { cpg_finalize(cpg_handle); saLckFinalize(lck_handle); syslog(LOG_ERR, "Cannot join clvmd process group"); DEBUGLOG("Cannot join clvmd process group: %d\n", err); return ais_to_errno(err); } err = cpg_local_get(cpg_handle, &our_nodeid); if (err != SA_AIS_OK) { cpg_finalize(cpg_handle); saLckFinalize(lck_handle); syslog(LOG_ERR, "Cannot get local node id\n"); return ais_to_errno(err); } DEBUGLOG("Our local node id is %d\n", our_nodeid); saLckSelectionObjectGet(lck_handle, (SaSelectionObjectT *)&select_fd); add_internal_client(select_fd, lck_dispatch); DEBUGLOG("Connected to OpenAIS\n"); return 0; } static void _cluster_closedown(void) { saLckFinalize(lck_handle); cpg_finalize(cpg_handle); } static void _get_our_csid(char *csid) { memcpy(csid, &our_nodeid, sizeof(int)); } /* OpenAIS doesn't really have nmode names so we just use the node ID in hex instead */ static int _csid_from_name(char *csid, const char *name) { int nodeid; struct node_info *ninfo; if (sscanf(name, "%x", &nodeid) == 
1) { ninfo = dm_hash_lookup_binary(node_hash, csid, OPENAIS_CSID_LEN); if (ninfo) return nodeid; } return -1; } static int _name_from_csid(const char *csid, char *name) { struct node_info *ninfo; ninfo = dm_hash_lookup_binary(node_hash, csid, OPENAIS_CSID_LEN); if (!ninfo) { sprintf(name, "UNKNOWN %s", print_openais_csid(csid)); return -1; } sprintf(name, "%x", ninfo->nodeid); return 0; } static int _get_num_nodes() { DEBUGLOG("num_nodes = %d\n", num_nodes); return num_nodes; } /* Node is now known to be running a clvmd */ static void _add_up_node(const char *csid) { struct node_info *ninfo; ninfo = dm_hash_lookup_binary(node_hash, csid, OPENAIS_CSID_LEN); if (!ninfo) { DEBUGLOG("openais_add_up_node no node_hash entry for csid %s\n", print_openais_csid(csid)); return; } DEBUGLOG("openais_add_up_node %d\n", ninfo->nodeid); ninfo->state = NODE_CLVMD; } /* Call a callback for each node, so the caller knows whether it's up or down */ static int _cluster_do_node_callback(struct local_client *master_client, void (*callback)(struct local_client *, const char *csid, int node_up)) { struct dm_hash_node *hn; struct node_info *ninfo; int somedown = 0; dm_hash_iterate(hn, node_hash) { char csid[OPENAIS_CSID_LEN]; ninfo = dm_hash_get_data(node_hash, hn); memcpy(csid, dm_hash_get_key(node_hash, hn), OPENAIS_CSID_LEN); DEBUGLOG("down_callback. node %d, state = %d\n", ninfo->nodeid, ninfo->state); if (ninfo->state != NODE_DOWN) callback(master_client, csid, ninfo->state == NODE_CLVMD); if (ninfo->state != NODE_CLVMD) somedown = -1; } return somedown; } /* Real locking */ static int _lock_resource(char *resource, int mode, int flags, int *lockid) { struct lock_info *linfo; SaLckResourceHandleT res_handle; SaAisErrorT err; SaLckLockIdT lock_id; SaLckLockStatusT lockStatus; /* This needs to be converted from DLM/LVM2 value for OpenAIS LCK */ if (flags & LCK_NONBLOCK) flags = SA_LCK_LOCK_NO_QUEUE; linfo = malloc(sizeof(struct lock_info)); if (!linfo) return -1; DEBUGLOG("lock_resource '%s', flags=%d, mode=%d\n", resource, flags, mode); linfo->lock_name.length = strlen(resource)+1; strcpy((char *)linfo->lock_name.value, resource); err = saLckResourceOpen(lck_handle, &linfo->lock_name, SA_LCK_RESOURCE_CREATE, TIMEOUT, &res_handle); if (err != SA_AIS_OK) { DEBUGLOG("ResourceOpen returned %d\n", err); free(linfo); return ais_to_errno(err); } err = saLckResourceLock( res_handle, &lock_id, mode, flags, 0, SA_TIME_END, &lockStatus); if (err != SA_AIS_OK && lockStatus != SA_LCK_LOCK_GRANTED) { free(linfo); saLckResourceClose(res_handle); return ais_to_errno(err); } /* Wait for it to complete */ DEBUGLOG("lock_resource returning %d, lock_id=%" PRIx64 "\n", err, lock_id); linfo->lock_id = lock_id; linfo->res_handle = res_handle; dm_hash_insert(lock_hash, resource, linfo); return ais_to_errno(err); } static int _unlock_resource(char *resource, int lockid) { SaAisErrorT err; struct lock_info *linfo; DEBUGLOG("unlock_resource %s\n", resource); linfo = dm_hash_lookup(lock_hash, resource); if (!linfo) return 0; DEBUGLOG("unlock_resource: lockid: %" PRIx64 "\n", linfo->lock_id); err = saLckResourceUnlock(linfo->lock_id, SA_TIME_END); if (err != SA_AIS_OK) { DEBUGLOG("Unlock returned %d\n", err); return ais_to_errno(err); } /* Release the resource */ dm_hash_remove(lock_hash, resource); saLckResourceClose(linfo->res_handle); free(linfo); return ais_to_errno(err); } static int _sync_lock(const char *resource, int mode, int flags, int *lockid) { int status; char lock1[strlen(resource)+3]; char lock2[strlen(resource)+3]; 
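/*
 * Descriptive note: clvmd's DLM-style modes are emulated with two SaLck
 * resources per clvmd resource, "<name>-1" and "<name>-2", as the switch
 * below shows: LCK_EXCL takes both exclusively (the second with NO_QUEUE,
 * so it fails with EAGAIN if another holder has it), LCK_READ/LCK_PREAD
 * take "-1" shared and drop "-2", and LCK_WRITE takes "-2" exclusively
 * and drops "-1".
 */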
snprintf(lock1, sizeof(lock1), "%s-1", resource); snprintf(lock2, sizeof(lock2), "%s-2", resource); switch (mode) { case LCK_EXCL: status = _lock_resource(lock1, SA_LCK_EX_LOCK_MODE, flags, lockid); if (status) goto out; /* If we can't get this lock too then bail out */ status = _lock_resource(lock2, SA_LCK_EX_LOCK_MODE, LCK_NONBLOCK, lockid); if (status == SA_LCK_LOCK_NOT_QUEUED) { _unlock_resource(lock1, *lockid); status = -1; errno = EAGAIN; } break; case LCK_PREAD: case LCK_READ: status = _lock_resource(lock1, SA_LCK_PR_LOCK_MODE, flags, lockid); if (status) goto out; _unlock_resource(lock2, *lockid); break; case LCK_WRITE: status = _lock_resource(lock2, SA_LCK_EX_LOCK_MODE, flags, lockid); if (status) goto out; _unlock_resource(lock1, *lockid); break; default: status = -1; errno = EINVAL; break; } out: *lockid = mode; return status; } static int _sync_unlock(const char *resource, int lockid) { int status = 0; char lock1[strlen(resource)+3]; char lock2[strlen(resource)+3]; snprintf(lock1, sizeof(lock1), "%s-1", resource); snprintf(lock2, sizeof(lock2), "%s-2", resource); _unlock_resource(lock1, lockid); _unlock_resource(lock2, lockid); return status; } /* We are always quorate ! */ static int _is_quorate() { return 1; } static int _get_main_cluster_fd(void) { int select_fd; cpg_fd_get(cpg_handle, &select_fd); return select_fd; } static int _cluster_fd_callback(struct local_client *fd, char *buf, int len, const char *csid, struct local_client **new_client) { cluster_client = fd; *new_client = NULL; cpg_dispatch(cpg_handle, SA_DISPATCH_ONE); return 1; } static int _cluster_send_message(const void *buf, int msglen, const char *csid, const char *errtext) { struct iovec iov[2]; SaAisErrorT err; int target_node; if (csid) memcpy(&target_node, csid, OPENAIS_CSID_LEN); else target_node = 0; iov[0].iov_base = &target_node; iov[0].iov_len = sizeof(int); iov[1].iov_base = (char *)buf; iov[1].iov_len = msglen; err = cpg_mcast_joined(cpg_handle, CPG_TYPE_AGREED, iov, 2); return ais_to_errno(err); } /* We don't have a cluster name to report here */ static int _get_cluster_name(char *buf, int buflen) { strncpy(buf, "OpenAIS", buflen); return 0; } static struct cluster_ops _cluster_openais_ops = { .name = "openais", .cluster_init_completed = NULL, .cluster_send_message = _cluster_send_message, .name_from_csid = _name_from_csid, .csid_from_name = _csid_from_name, .get_num_nodes = _get_num_nodes, .cluster_fd_callback = _cluster_fd_callback, .get_main_cluster_fd = _get_main_cluster_fd, .cluster_do_node_callback = _cluster_do_node_callback, .is_quorate = _is_quorate, .get_our_csid = _get_our_csid, .add_up_node = _add_up_node, .reread_config = NULL, .cluster_closedown = _cluster_closedown, .get_cluster_name = _get_cluster_name, .sync_lock = _sync_lock, .sync_unlock = _sync_unlock, }; struct cluster_ops *init_openais_cluster(void) { if (!_init_cluster()) return &_cluster_openais_ops; return NULL; } LVM2.2.02.176/daemons/clvmd/refresh_clvmd.h0000644000000000000120000000115213176752421017052 0ustar rootwheel/* * Copyright (C) 2007 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ int refresh_clvmd(int all_nodes); int restart_clvmd(int all_nodes); int debug_clvmd(int level, int clusterwide); LVM2.2.02.176/daemons/clvmd/clvmd-cman.c0000644000000000000120000002717513176752421016260 0ustar rootwheel/* * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved. * Copyright (C) 2004 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* * CMAN communication layer for clvmd. */ #include "clvmd-common.h" #include #include "clvmd-comms.h" #include "clvm.h" #include "clvmd.h" #include "lvm-functions.h" #include #include #define LOCKSPACE_NAME "clvmd" struct clvmd_node { struct cman_node *node; int clvmd_up; }; static int num_nodes; static struct cman_node *nodes = NULL; static struct cman_node this_node; static int count_nodes; /* size of allocated nodes array */ static struct dm_hash_table *node_updown_hash; static dlm_lshandle_t *lockspace; static cman_handle_t c_handle; static void count_clvmds_running(void); static void get_members(void); static int nodeid_from_csid(const char *csid); static int name_from_nodeid(int nodeid, char *name); static void event_callback(cman_handle_t handle, void *private, int reason, int arg); static void data_callback(cman_handle_t handle, void *private, char *buf, int len, uint8_t port, int nodeid); struct lock_wait { pthread_cond_t cond; pthread_mutex_t mutex; struct dlm_lksb lksb; }; static int _init_cluster(void) { node_updown_hash = dm_hash_create(100); /* Open the cluster communication socket */ c_handle = cman_init(NULL); if (!c_handle) { syslog(LOG_ERR, "Can't open cluster manager socket: %m"); return -1; } DEBUGLOG("Connected to CMAN\n"); if (cman_start_recv_data(c_handle, data_callback, CLUSTER_PORT_CLVMD)) { syslog(LOG_ERR, "Can't bind cluster socket: %m"); return -1; } if (cman_start_notification(c_handle, event_callback)) { syslog(LOG_ERR, "Can't start cluster event listening"); return -1; } /* Get the cluster members list */ get_members(); count_clvmds_running(); DEBUGLOG("CMAN initialisation complete\n"); /* Create a lockspace for LV & VG locks to live in */ lockspace = dlm_open_lockspace(LOCKSPACE_NAME); if (!lockspace) { lockspace = dlm_create_lockspace(LOCKSPACE_NAME, 0600); if (!lockspace) { syslog(LOG_ERR, "Unable to create DLM lockspace for CLVM: %m"); return -1; } DEBUGLOG("Created DLM lockspace for CLVMD.\n"); } else DEBUGLOG("Opened existing DLM lockspace for CLVMD.\n"); dlm_ls_pthread_init(lockspace); DEBUGLOG("DLM initialisation complete\n"); return 0; } static void _cluster_init_completed(void) { clvmd_cluster_init_completed(); } static int _get_main_cluster_fd(void) { return cman_get_fd(c_handle); } static int _get_num_nodes(void) { int i; int nnodes = 0; /* return number of ACTIVE nodes */ for (i=0; i= 2 case CMAN_REASON_PORTOPENED: /* Ignore this, wait for startup message from clvmd itself */ break; case CMAN_REASON_TRY_SHUTDOWN: DEBUGLOG("Got try shutdown, sending OK\n"); 
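/* Reply to cman that it is OK to proceed with the shutdown. */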
cman_replyto_shutdown(c_handle, 1); break; #endif default: /* ERROR */ DEBUGLOG("Got unknown event callback message: %d\n", reason); break; } } static struct local_client *cman_client; static int _cluster_fd_callback(struct local_client *fd, char *buf, int len, const char *csid, struct local_client **new_client) { /* Save this for data_callback */ cman_client = fd; /* We never return a new client */ *new_client = NULL; return cman_dispatch(c_handle, 0); } static void data_callback(cman_handle_t handle, void *private, char *buf, int len, uint8_t port, int nodeid) { /* Ignore looped back messages */ if (nodeid == this_node.cn_nodeid) return; process_message(cman_client, buf, len, (char *)&nodeid); } static void _add_up_node(const char *csid) { /* It's up ! */ int nodeid = nodeid_from_csid(csid); dm_hash_insert_binary(node_updown_hash, (char *)&nodeid, sizeof(int), (void *)1); DEBUGLOG("Added new node %d to updown list\n", nodeid); } static void _cluster_closedown(void) { dlm_release_lockspace(LOCKSPACE_NAME, lockspace, 1); cman_finish(c_handle); } static int is_listening(int nodeid) { int status; do { status = cman_is_listening(c_handle, nodeid, CLUSTER_PORT_CLVMD); if (status < 0 && errno == EBUSY) { /* Don't busywait */ sleep(1); errno = EBUSY; /* In case sleep trashes it */ } } while (status < 0 && errno == EBUSY); return status; } /* Populate the list of CLVMDs running. called only at startup time */ static void count_clvmds_running(void) { int i; for (i = 0; i < num_nodes; i++) { int nodeid = nodes[i].cn_nodeid; if (is_listening(nodeid) == 1) dm_hash_insert_binary(node_updown_hash, (void *)&nodeid, sizeof(int), (void*)1); else dm_hash_insert_binary(node_updown_hash, (void *)&nodeid, sizeof(int), (void*)0); } } /* Get a list of active cluster members */ static void get_members(void) { int retnodes; int status; int i; int high_nodeid = 0; num_nodes = cman_get_node_count(c_handle); if (num_nodes == -1) { log_error("Unable to get node count"); return; } /* Not enough room for new nodes list ? */ if (num_nodes > count_nodes && nodes) { free(nodes); nodes = NULL; } if (nodes == NULL) { count_nodes = num_nodes + 10; /* Overallocate a little */ nodes = malloc(count_nodes * sizeof(struct cman_node)); if (!nodes) { log_error("Unable to allocate nodes array\n"); exit(5); } } status = cman_get_nodes(c_handle, count_nodes, &retnodes, nodes); if (status < 0) { log_error("Unable to get node details"); exit(6); } /* Get the highest nodeid */ for (i=0; i high_nodeid) high_nodeid = nodes[i].cn_nodeid; } } /* Convert a node name to a CSID */ static int _csid_from_name(char *csid, const char *name) { int i; for (i = 0; i < num_nodes; i++) { if (strcmp(name, nodes[i].cn_name) == 0) { memcpy(csid, &nodes[i].cn_nodeid, CMAN_MAX_CSID_LEN); return 0; } } return -1; } /* Convert a CSID to a node name */ static int _name_from_csid(const char *csid, char *name) { int i; for (i = 0; i < num_nodes; i++) { if (memcmp(csid, &nodes[i].cn_nodeid, CMAN_MAX_CSID_LEN) == 0) { strcpy(name, nodes[i].cn_name); return 0; } } /* Who?? */ strcpy(name, "Unknown"); return -1; } /* Convert a node ID to a node name */ static int name_from_nodeid(int nodeid, char *name) { int i; for (i = 0; i < num_nodes; i++) { if (nodeid == nodes[i].cn_nodeid) { strcpy(name, nodes[i].cn_name); return 0; } } /* Who?? 
*/ strcpy(name, "Unknown"); return -1; } /* Convert a CSID to a node ID */ static int nodeid_from_csid(const char *csid) { int nodeid; memcpy(&nodeid, csid, CMAN_MAX_CSID_LEN); return nodeid; } static int _is_quorate(void) { return cman_is_quorate(c_handle); } static void sync_ast_routine(void *arg) { struct lock_wait *lwait = arg; pthread_mutex_lock(&lwait->mutex); pthread_cond_signal(&lwait->cond); pthread_mutex_unlock(&lwait->mutex); } static int _sync_lock(const char *resource, int mode, int flags, int *lockid) { int status; struct lock_wait lwait; if (!lockid) { errno = EINVAL; return -1; } DEBUGLOG("sync_lock: '%s' mode:%d flags=%d\n", resource,mode,flags); /* Conversions need the lockid in the LKSB */ if (flags & LKF_CONVERT) lwait.lksb.sb_lkid = *lockid; pthread_cond_init(&lwait.cond, NULL); pthread_mutex_init(&lwait.mutex, NULL); pthread_mutex_lock(&lwait.mutex); status = dlm_ls_lock(lockspace, mode, &lwait.lksb, flags, resource, strlen(resource), 0, sync_ast_routine, &lwait, NULL, NULL); if (status) return status; /* Wait for it to complete */ pthread_cond_wait(&lwait.cond, &lwait.mutex); pthread_mutex_unlock(&lwait.mutex); *lockid = lwait.lksb.sb_lkid; errno = lwait.lksb.sb_status; DEBUGLOG("sync_lock: returning lkid %x\n", *lockid); if (lwait.lksb.sb_status) return -1; else return 0; } static int _sync_unlock(const char *resource /* UNUSED */, int lockid) { int status; struct lock_wait lwait; DEBUGLOG("sync_unlock: '%s' lkid:%x\n", resource, lockid); pthread_cond_init(&lwait.cond, NULL); pthread_mutex_init(&lwait.mutex, NULL); pthread_mutex_lock(&lwait.mutex); status = dlm_ls_unlock(lockspace, lockid, 0, &lwait.lksb, &lwait); if (status) return status; /* Wait for it to complete */ pthread_cond_wait(&lwait.cond, &lwait.mutex); pthread_mutex_unlock(&lwait.mutex); errno = lwait.lksb.sb_status; if (lwait.lksb.sb_status != EUNLOCK) return -1; else return 0; } static int _get_cluster_name(char *buf, int buflen) { cman_cluster_t cluster_info; int status; status = cman_get_cluster(c_handle, &cluster_info); if (!status) { strncpy(buf, cluster_info.ci_name, buflen); } return status; } static struct cluster_ops _cluster_cman_ops = { .name = "cman", .cluster_init_completed = _cluster_init_completed, .cluster_send_message = _cluster_send_message, .name_from_csid = _name_from_csid, .csid_from_name = _csid_from_name, .get_num_nodes = _get_num_nodes, .cluster_fd_callback = _cluster_fd_callback, .get_main_cluster_fd = _get_main_cluster_fd, .cluster_do_node_callback = _cluster_do_node_callback, .is_quorate = _is_quorate, .get_our_csid = _get_our_csid, .add_up_node = _add_up_node, .cluster_closedown = _cluster_closedown, .get_cluster_name = _get_cluster_name, .sync_lock = _sync_lock, .sync_unlock = _sync_unlock, }; struct cluster_ops *init_cman_cluster(void) { if (!_init_cluster()) return &_cluster_cman_ops; else return NULL; } LVM2.2.02.176/daemons/clvmd/clvmd-comms.h0000644000000000000120000000715313176752421016457 0ustar rootwheel/* * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved. * Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* * Abstraction layer for clvmd cluster communications */ #ifndef _CLVMD_COMMS_H #define _CLVMD_COMMS_H struct local_client; struct cluster_ops { const char *name; void (*cluster_init_completed) (void); int (*cluster_send_message) (const void *buf, int msglen, const char *csid, const char *errtext); int (*name_from_csid) (const char *csid, char *name); int (*csid_from_name) (char *csid, const char *name); int (*get_num_nodes) (void); int (*cluster_fd_callback) (struct local_client *fd, char *buf, int len, const char *csid, struct local_client **new_client); int (*get_main_cluster_fd) (void); /* gets accept FD or cman cluster socket */ int (*cluster_do_node_callback) (struct local_client *client, void (*callback) (struct local_client *, const char *csid, int node_up)); int (*is_quorate) (void); void (*get_our_csid) (char *csid); void (*add_up_node) (const char *csid); void (*reread_config) (void); void (*cluster_closedown) (void); int (*get_cluster_name)(char *buf, int buflen); int (*sync_lock) (const char *resource, int mode, int flags, int *lockid); int (*sync_unlock) (const char *resource, int lockid); }; #ifdef USE_CMAN # include # include "libcman.h" # define CMAN_MAX_CSID_LEN 4 # ifndef MAX_CSID_LEN # define MAX_CSID_LEN CMAN_MAX_CSID_LEN # endif # undef MAX_CLUSTER_MEMBER_NAME_LEN # define MAX_CLUSTER_MEMBER_NAME_LEN CMAN_MAX_NODENAME_LEN # define CMAN_MAX_CLUSTER_MESSAGE 1500 # define CLUSTER_PORT_CLVMD 11 struct cluster_ops *init_cman_cluster(void); #endif #ifdef USE_OPENAIS # include # include # define OPENAIS_CSID_LEN (sizeof(int)) # define OPENAIS_MAX_CLUSTER_MESSAGE MESSAGE_SIZE_MAX # define OPENAIS_MAX_CLUSTER_MEMBER_NAME_LEN SA_MAX_NAME_LENGTH # ifndef MAX_CLUSTER_MEMBER_NAME_LEN # define MAX_CLUSTER_MEMBER_NAME_LEN SA_MAX_NAME_LENGTH # endif # ifndef CMAN_MAX_CLUSTER_MESSAGE # define CMAN_MAX_CLUSTER_MESSAGE MESSAGE_SIZE_MAX # endif # ifndef MAX_CSID_LEN # define MAX_CSID_LEN sizeof(int) # endif struct cluster_ops *init_openais_cluster(void); #endif #ifdef USE_COROSYNC # include # define COROSYNC_CSID_LEN (sizeof(int)) # define COROSYNC_MAX_CLUSTER_MESSAGE 65535 # define COROSYNC_MAX_CLUSTER_MEMBER_NAME_LEN CS_MAX_NAME_LENGTH # ifndef MAX_CLUSTER_MEMBER_NAME_LEN # define MAX_CLUSTER_MEMBER_NAME_LEN CS_MAX_NAME_LENGTH # endif # ifndef CMAN_MAX_CLUSTER_MESSAGE # define CMAN_MAX_CLUSTER_MESSAGE 65535 # endif # ifndef MAX_CSID_LEN # define MAX_CSID_LEN sizeof(int) # endif struct cluster_ops *init_corosync_cluster(void); #endif #ifdef USE_SINGLENODE # define SINGLENODE_CSID_LEN (sizeof(int)) # ifndef MAX_CLUSTER_MEMBER_NAME_LEN # define MAX_CLUSTER_MEMBER_NAME_LEN 64 # endif # define SINGLENODE_MAX_CLUSTER_MESSAGE 65535 # ifndef MAX_CSID_LEN # define MAX_CSID_LEN sizeof(int) # endif struct cluster_ops *init_singlenode_cluster(void); #endif #endif LVM2.2.02.176/daemons/clvmd/clvmd.c0000644000000000000120000020201313176752421015326 0ustar rootwheel/* * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved. * Copyright (C) 2004-2014 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* * CLVMD: Cluster LVM daemon */ #include "clvmd-common.h" #include "clvmd-comms.h" #include "clvm.h" #include "clvmd.h" #include "lvm-functions.h" #include "lvm-version.h" #include "refresh_clvmd.h" #ifdef HAVE_COROSYNC_CONFDB_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #ifndef TRUE #define TRUE 1 #endif #ifndef FALSE #define FALSE 0 #endif #define MAX_RETRIES 4 #define MAX_MISSING_LEN 8000 /* Max supported clvmd message size ? */ #define ISLOCAL_CSID(c) (memcmp(c, our_csid, max_csid_len) == 0) /* Head of the fd list. Also contains the cluster_socket details */ static struct local_client local_client_head; static int _local_client_count = 0; static unsigned short global_xid = 0; /* Last transaction ID issued */ struct cluster_ops *clops = NULL; static char our_csid[MAX_CSID_LEN]; static unsigned max_csid_len; static unsigned max_cluster_message; static unsigned max_cluster_member_name_len; static void _add_client(struct local_client *new_client, struct local_client *existing_client) { _local_client_count++; DEBUGLOG("(%p) Adding listener for fd %d. (Now %d monitored fds.)\n", new_client, new_client->fd, _local_client_count); new_client->next = existing_client->next; existing_client->next = new_client; } int add_client(struct local_client *new_client) { _add_client(new_client, &local_client_head); return 0; } /* Returns 0 if delfd is found and removed from list */ static int _del_client(struct local_client *delfd) { struct local_client *lastfd, *thisfd; for (lastfd = &local_client_head; (thisfd = lastfd->next); lastfd = thisfd) if (thisfd == delfd) { DEBUGLOG("(%p) Removing listener for fd %d\n", thisfd, thisfd->fd); lastfd->next = delfd->next; _local_client_count--; return 0; } return 1; } /* Structure of items on the LVM thread list */ struct lvm_thread_cmd { struct dm_list list; struct local_client *client; struct clvm_header *msg; char csid[MAX_CSID_LEN]; int remote; /* Flag */ int msglen; unsigned short xid; }; struct lvm_startup_params { struct dm_hash_table *excl_uuid; }; static debug_t debug = DEBUG_OFF; static int foreground_mode = 0; static pthread_t lvm_thread; /* Stack size 128KiB for thread, must be bigger then DEFAULT_RESERVED_STACK */ static const size_t STACK_SIZE = 128 * 1024; static pthread_attr_t stack_attr; static int lvm_thread_exit = 0; static pthread_mutex_t lvm_thread_mutex; static pthread_mutex_t _debuglog_mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_cond_t lvm_thread_cond; static pthread_barrier_t lvm_start_barrier; static struct dm_list lvm_cmd_head; static volatile sig_atomic_t quit = 0; static volatile sig_atomic_t reread_config = 0; static int child_pipe[2]; /* Reasons the daemon failed initialisation */ #define DFAIL_INIT 1 #define DFAIL_LOCAL_SOCK 2 #define DFAIL_CLUSTER_IF 3 #define DFAIL_MALLOC 4 #define DFAIL_TIMEOUT 5 #define SUCCESS 0 typedef enum {IF_AUTO, IF_CMAN, IF_OPENAIS, IF_COROSYNC, IF_SINGLENODE} if_type_t; /* Prototypes for code further down */ static void sigusr2_handler(int sig); static void sighup_handler(int sig); static void sigterm_handler(int sig); static void send_local_reply(struct local_client *client, int status, int clientid); static void free_reply(struct local_client *client); static void send_version_message(void); static void 
*pre_and_post_thread(void *arg); static int send_message(void *buf, int msglen, const char *csid, int fd, const char *errtext); static int read_from_local_sock(struct local_client *thisfd); static int cleanup_zombie(struct local_client *thisfd); static int process_local_command(struct clvm_header *msg, int msglen, struct local_client *client, unsigned short xid); static void process_remote_command(struct clvm_header *msg, int msglen, int fd, const char *csid); static int process_reply(const struct clvm_header *msg, int msglen, const char *csid); static int open_local_sock(void); static void close_local_sock(int local_socket); static int check_local_clvmd(void); static struct local_client *find_client(int clientid); static void main_loop(int cmd_timeout); static void be_daemon(int start_timeout); static int check_all_clvmds_running(struct local_client *client); static int local_rendezvous_callback(struct local_client *thisfd, char *buf, int len, const char *csid, struct local_client **new_client); static void *lvm_thread_fn(void *) __attribute__((noreturn)); static int add_to_lvmqueue(struct local_client *client, struct clvm_header *msg, int msglen, const char *csid); static int distribute_command(struct local_client *thisfd); static void hton_clvm(struct clvm_header *hdr); static void ntoh_clvm(struct clvm_header *hdr); static void add_reply_to_list(struct local_client *client, int status, const char *csid, const char *buf, int len); static if_type_t parse_cluster_interface(char *ifname); static if_type_t get_cluster_type(void); static void usage(const char *prog, FILE *file) { fprintf(file, "Usage: %s [options]\n" " -C Sets debug level (from -d) on all clvmd instances clusterwide\n" " -d[] Set debug logging (0:none, 1:stderr (implies -f option), 2:syslog)\n" " -E Take this lock uuid as exclusively locked resource (for restart)\n" " -f Don't fork, run in the foreground\n" " -h Show this help information\n" " -I Cluster manager (default: auto)\n" " Available cluster managers: " #ifdef USE_COROSYNC "corosync " #endif #ifdef USE_CMAN "cman " #endif #ifdef USE_OPENAIS "openais " #endif #ifdef USE_SINGLENODE "singlenode " #endif "\n" " -R Tell all running clvmds in the cluster to reload their device cache\n" " -S Restart clvmd, preserving exclusive locks\n" " -t Command timeout (default: 60 seconds)\n" " -T Startup timeout (default: 0 seconds)\n" " -V Show version of clvmd\n" "\n", prog); } /* Called to signal the parent how well we got on during initialisation */ static void child_init_signal(int status) { if (child_pipe[1]) { /* FIXME Use a proper wrapper around write */ if (write(child_pipe[1], &status, sizeof(status)) < 0) log_sys_error("write", "child_pipe"); if (close(child_pipe[1])) log_sys_error("close", "child_pipe"); } } static __attribute__((noreturn)) void child_init_signal_and_exit(int status) { child_init_signal(status); exit(status); } static void safe_close(int *fd) { if (*fd >= 0) { int to_close = *fd; *fd = -1; if (close(to_close)) log_sys_error("close", ""); /* path */ } } void debuglog(const char *fmt, ...) 
{ time_t P; va_list ap; static int syslog_init = 0; char buf_ctime[64]; switch (clvmd_get_debug()) { case DEBUG_STDERR: pthread_mutex_lock(&_debuglog_mutex); va_start(ap,fmt); time(&P); fprintf(stderr, "CLVMD[%x]: %.15s ", (int)pthread_self(), ctime_r(&P, buf_ctime) + 4); vfprintf(stderr, fmt, ap); va_end(ap); fflush(stderr); pthread_mutex_unlock(&_debuglog_mutex); break; case DEBUG_SYSLOG: pthread_mutex_lock(&_debuglog_mutex); if (!syslog_init) { openlog("clvmd", LOG_PID, LOG_DAEMON); syslog_init = 1; } va_start(ap,fmt); vsyslog(LOG_DEBUG, fmt, ap); va_end(ap); pthread_mutex_unlock(&_debuglog_mutex); break; case DEBUG_OFF: break; } } void clvmd_set_debug(debug_t new_debug) { if (!foreground_mode && new_debug == DEBUG_STDERR) new_debug = DEBUG_SYSLOG; if (new_debug > DEBUG_SYSLOG) new_debug = DEBUG_SYSLOG; debug = new_debug; } debug_t clvmd_get_debug(void) { return debug; } int clvmd_get_foreground(void) { return foreground_mode; } static const char *decode_cmd(unsigned char cmdl) { static char buf[128]; const char *command; switch (cmdl) { case CLVMD_CMD_TEST: command = "TEST"; break; case CLVMD_CMD_LOCK_VG: command = "LOCK_VG"; break; case CLVMD_CMD_LOCK_LV: command = "LOCK_LV"; break; case CLVMD_CMD_REFRESH: command = "REFRESH"; break; case CLVMD_CMD_SET_DEBUG: command = "SET_DEBUG"; break; case CLVMD_CMD_GET_CLUSTERNAME: command = "GET_CLUSTERNAME"; break; case CLVMD_CMD_VG_BACKUP: command = "VG_BACKUP"; break; case CLVMD_CMD_REPLY: command = "REPLY"; break; case CLVMD_CMD_VERSION: command = "VERSION"; break; case CLVMD_CMD_GOAWAY: command = "GOAWAY"; break; case CLVMD_CMD_LOCK: command = "LOCK"; break; case CLVMD_CMD_UNLOCK: command = "UNLOCK"; break; case CLVMD_CMD_LOCK_QUERY: command = "LOCK_QUERY"; break; case CLVMD_CMD_RESTART: command = "RESTART"; break; case CLVMD_CMD_SYNC_NAMES: command = "SYNC_NAMES"; break; default: command = "unknown"; break; } snprintf(buf, sizeof(buf), "%s (0x%x)", command, cmdl); return buf; } static void remove_lockfile(void) { if (unlink(CLVMD_PIDFILE)) log_sys_error("unlink", CLVMD_PIDFILE); } /* * clvmd require dm-ioctl capability for operation */ static void check_permissions(void) { if (getuid() || geteuid()) { log_error("Cannot run as a non-root user."); /* * Fail cleanly here if not run as root, instead of failing * later when attempting a root-only operation * Preferred exit code from an initscript for this. */ exit(4); } } int main(int argc, char *argv[]) { int local_sock; struct local_client *newfd, *delfd; struct lvm_startup_params lvm_params; int opt; int cmd_timeout = DEFAULT_CMD_TIMEOUT; int start_timeout = 0; if_type_t cluster_iface = IF_AUTO; sigset_t ss; debug_t debug_opt = DEBUG_OFF; debug_t debug_arg = DEBUG_OFF; int clusterwide_opt = 0; mode_t old_mask; int ret = 1; struct option longopts[] = { { "help", 0, 0, 'h' }, { NULL, 0, 0, 0 } }; if (!(lvm_params.excl_uuid = dm_hash_create(128))) { fprintf(stderr, "Failed to allocate hash table\n"); return 1; } /* Deal with command-line arguments */ opterr = 0; optind = 0; while ((opt = getopt_long(argc, argv, "Vhfd:t:RST:CI:E:", longopts, NULL)) != -1) { switch (opt) { case 'h': usage(argv[0], stdout); exit(0); case 'R': check_permissions(); ret = (refresh_clvmd(1) == 1) ? 0 : 1; goto out; case 'S': check_permissions(); ret = (restart_clvmd(clusterwide_opt) == 1) ? 
0 : 1; goto out; case 'C': clusterwide_opt = 1; break; case 'd': debug_opt = DEBUG_STDERR; debug_arg = (debug_t) atoi(optarg); if (debug_arg == DEBUG_STDERR) foreground_mode = 1; break; case 'f': foreground_mode = 1; break; case 't': cmd_timeout = atoi(optarg); if (!cmd_timeout) { fprintf(stderr, "command timeout is invalid\n"); usage(argv[0], stderr); exit(1); } break; case 'I': cluster_iface = parse_cluster_interface(optarg); break; case 'E': if (!dm_hash_insert(lvm_params.excl_uuid, optarg, optarg)) { fprintf(stderr, "Failed to allocate hash entry\n"); goto out; } break; case 'T': start_timeout = atoi(optarg); if (start_timeout <= 0) { fprintf(stderr, "startup timeout is invalid\n"); usage(argv[0], stderr); exit(1); } break; case 'V': printf("Cluster LVM daemon version: %s\n", LVM_VERSION); printf("Protocol version: %d.%d.%d\n", CLVMD_MAJOR_VERSION, CLVMD_MINOR_VERSION, CLVMD_PATCH_VERSION); exit(0); break; default: usage(argv[0], stderr); exit(2); } } check_permissions(); /* * Switch to C locale to avoid reading large locale-archive file * used by some glibc (on some distributions it takes over 100MB). * Daemon currently needs to use mlockall(). */ if (setenv("LC_ALL", "C", 1)) perror("Cannot set LC_ALL to C"); /* Setting debug options on an existing clvmd */ if (debug_opt && !check_local_clvmd()) { dm_hash_destroy(lvm_params.excl_uuid); return debug_clvmd(debug_arg, clusterwide_opt)==1?0:1; } clvmd_set_debug(debug_arg); /* Fork into the background (unless requested not to) */ if (!foreground_mode) be_daemon(start_timeout); (void) dm_prepare_selinux_context(DEFAULT_RUN_DIR, S_IFDIR); old_mask = umask(0077); if (dm_create_dir(DEFAULT_RUN_DIR) == 0) { DEBUGLOG("clvmd: unable to create %s directory\n", DEFAULT_RUN_DIR); umask(old_mask); exit(1); } umask(old_mask); /* Create pidfile */ (void) dm_prepare_selinux_context(CLVMD_PIDFILE, S_IFREG); if (dm_create_lockfile(CLVMD_PIDFILE) == 0) { DEBUGLOG("clvmd: unable to create lockfile\n"); exit(1); } (void) dm_prepare_selinux_context(NULL, 0); atexit(remove_lockfile); DEBUGLOG("CLVMD started\n"); /* Open the Unix socket we listen for commands on. We do this before opening the cluster socket so that potential clients will block rather than error if we are running but the cluster is not ready yet */ local_sock = open_local_sock(); if (local_sock < 0) { child_init_signal_and_exit(DFAIL_LOCAL_SOCK); /* NOTREACHED */ } /* Set up signal handlers, USR1 is for cluster change notifications (in cman) USR2 causes child threads to exit. (HUP used to cause gulm to re-read the nodes list from CCS.) 
PIPE should be ignored */ signal(SIGUSR2, sigusr2_handler); signal(SIGHUP, sighup_handler); signal(SIGPIPE, SIG_IGN); /* Block SIGUSR2/SIGINT/SIGTERM in process */ sigemptyset(&ss); sigaddset(&ss, SIGUSR2); sigaddset(&ss, SIGINT); sigaddset(&ss, SIGTERM); sigprocmask(SIG_BLOCK, &ss, NULL); /* Initialise the LVM thread variables */ dm_list_init(&lvm_cmd_head); if (pthread_attr_init(&stack_attr) || pthread_attr_setstacksize(&stack_attr, STACK_SIZE + getpagesize())) { log_sys_error("pthread_attr_init", ""); exit(1); } pthread_mutex_init(&lvm_thread_mutex, NULL); pthread_cond_init(&lvm_thread_cond, NULL); pthread_barrier_init(&lvm_start_barrier, NULL, 2); init_lvhash(); /* Start the cluster interface */ if (cluster_iface == IF_AUTO) cluster_iface = get_cluster_type(); #ifdef USE_CMAN if ((cluster_iface == IF_AUTO || cluster_iface == IF_CMAN) && (clops = init_cman_cluster())) { max_csid_len = CMAN_MAX_CSID_LEN; max_cluster_message = CMAN_MAX_CLUSTER_MESSAGE; max_cluster_member_name_len = CMAN_MAX_NODENAME_LEN; syslog(LOG_NOTICE, "Cluster LVM daemon started - connected to CMAN"); } #endif #ifdef USE_COROSYNC if (!clops) if (((cluster_iface == IF_AUTO || cluster_iface == IF_COROSYNC) && (clops = init_corosync_cluster()))) { max_csid_len = COROSYNC_CSID_LEN; max_cluster_message = COROSYNC_MAX_CLUSTER_MESSAGE; max_cluster_member_name_len = COROSYNC_MAX_CLUSTER_MEMBER_NAME_LEN; syslog(LOG_NOTICE, "Cluster LVM daemon started - connected to Corosync"); } #endif #ifdef USE_OPENAIS if (!clops) if ((cluster_iface == IF_AUTO || cluster_iface == IF_OPENAIS) && (clops = init_openais_cluster())) { max_csid_len = OPENAIS_CSID_LEN; max_cluster_message = OPENAIS_MAX_CLUSTER_MESSAGE; max_cluster_member_name_len = OPENAIS_MAX_CLUSTER_MEMBER_NAME_LEN; syslog(LOG_NOTICE, "Cluster LVM daemon started - connected to OpenAIS"); } #endif #ifdef USE_SINGLENODE if (!clops) if (cluster_iface == IF_SINGLENODE && (clops = init_singlenode_cluster())) { max_csid_len = SINGLENODE_CSID_LEN; max_cluster_message = SINGLENODE_MAX_CLUSTER_MESSAGE; max_cluster_member_name_len = MAX_CLUSTER_MEMBER_NAME_LEN; syslog(LOG_NOTICE, "Cluster LVM daemon started - running in single-node mode"); } #endif if (!clops) { DEBUGLOG("Can't initialise cluster interface\n"); log_error("Can't initialise cluster interface."); child_init_signal_and_exit(DFAIL_CLUSTER_IF); /* NOTREACHED */ } DEBUGLOG("Cluster ready, doing some more initialisation\n"); /* Save our CSID */ clops->get_our_csid(our_csid); /* Initialise the FD list head */ local_client_head.fd = clops->get_main_cluster_fd(); local_client_head.type = CLUSTER_MAIN_SOCK; local_client_head.callback = clops->cluster_fd_callback; _local_client_count++; /* Add the local socket to the list */ if (!(newfd = dm_zalloc(sizeof(struct local_client)))) { child_init_signal_and_exit(DFAIL_MALLOC); /* NOTREACHED */ } newfd->fd = local_sock; newfd->type = LOCAL_RENDEZVOUS; newfd->callback = local_rendezvous_callback; (void) add_client(newfd); /* This needs to be started after cluster initialisation as it may need to take out locks */ DEBUGLOG("Starting LVM thread\n"); DEBUGLOG("(%p) Main cluster socket fd %d with local socket %d (%p)\n", &local_client_head, local_client_head.fd, newfd->fd, newfd); /* Don't let anyone else to do work until we are started */ if (pthread_create(&lvm_thread, &stack_attr, lvm_thread_fn, &lvm_params)) { log_sys_error("pthread_create", ""); goto out; } /* Don't start until the LVM thread is ready */ pthread_barrier_wait(&lvm_start_barrier); /* Tell the rest of the cluster our version 
number */ if (clops->cluster_init_completed) clops->cluster_init_completed(); DEBUGLOG("clvmd ready for work\n"); child_init_signal(SUCCESS); /* Try to shutdown neatly */ signal(SIGTERM, sigterm_handler); signal(SIGINT, sigterm_handler); /* Do some work */ main_loop(cmd_timeout); pthread_mutex_lock(&lvm_thread_mutex); lvm_thread_exit = 1; pthread_cond_signal(&lvm_thread_cond); pthread_mutex_unlock(&lvm_thread_mutex); if ((errno = pthread_join(lvm_thread, NULL))) log_sys_error("pthread_join", ""); close_local_sock(local_sock); while ((delfd = local_client_head.next)) { local_client_head.next = delfd->next; _local_client_count--; /* Failing cleanup_zombie leaks... */ if (delfd->type == LOCAL_SOCK && !cleanup_zombie(delfd)) cmd_client_cleanup(delfd); /* calls sync_unlock */ if (delfd->fd != local_sock) safe_close(&(delfd->fd)); dm_free(delfd); } DEBUGLOG("cluster_closedown\n"); destroy_lvhash(); clops->cluster_closedown(); ret = 0; out: dm_hash_destroy(lvm_params.excl_uuid); return ret; } /* Called when the cluster layer has completed initialisation. We send the version message */ void clvmd_cluster_init_completed(void) { send_version_message(); } /* Data on a connected socket */ static int local_sock_callback(struct local_client *thisfd, char *buf, int len, const char *csid, struct local_client **new_client) { *new_client = NULL; return read_from_local_sock(thisfd); } /* Data on a connected socket */ static int local_rendezvous_callback(struct local_client *thisfd, char *buf, int len, const char *csid, struct local_client **new_client) { /* Someone connected to our local socket, accept it. */ struct sockaddr_un socka; struct local_client *newfd; socklen_t sl = sizeof(socka); int client_fd = accept(thisfd->fd, (struct sockaddr *) &socka, &sl); if (client_fd == -1 && errno == EINTR) return 1; if (client_fd >= 0) { if (!(newfd = dm_zalloc(sizeof(*newfd)))) { if (close(client_fd)) log_sys_error("close", "socket"); return 1; } pthread_cond_init(&newfd->bits.localsock.cond, NULL); pthread_mutex_init(&newfd->bits.localsock.mutex, NULL); if (fcntl(client_fd, F_SETFD, 1)) DEBUGLOG("(%p) Setting CLOEXEC on client fd %d failed: %s\n", thisfd, client_fd, strerror(errno)); newfd->fd = client_fd; newfd->type = LOCAL_SOCK; newfd->callback = local_sock_callback; newfd->bits.localsock.all_success = 1; DEBUGLOG("(%p) Got new connection on fd %d\n", newfd, newfd->fd); *new_client = newfd; } return 1; } static int local_pipe_callback(struct local_client *thisfd, char *buf, int maxlen, const char *csid, struct local_client **new_client) { int len; char buffer[PIPE_BUF]; struct local_client *sock_client = thisfd->bits.pipe.client; int status = -1; /* in error by default */ len = read(thisfd->fd, buffer, sizeof(int)); if (len == -1 && errno == EINTR) return 1; if (len == sizeof(int)) memcpy(&status, buffer, sizeof(int)); DEBUGLOG("(%p) Read on pipe %d, %d bytes, status %d\n", thisfd, thisfd->fd, len, status); /* EOF on pipe or an error, close it */ if (len <= 0) { void *ret = &status; if (close(thisfd->fd)) log_sys_error("close", "local_pipe"); /* Clear out the cross-link */ if (thisfd->bits.pipe.client) thisfd->bits.pipe.client->bits.localsock.pipe_client = NULL; /* Reap child thread */ if (thisfd->bits.pipe.threadid) { if ((errno = pthread_join(thisfd->bits.pipe.threadid, &ret))) log_sys_error("pthread_join", ""); thisfd->bits.pipe.threadid = 0; if (thisfd->bits.pipe.client) thisfd->bits.pipe.client->bits.localsock.threadid = 0; } return -1; } else { DEBUGLOG("(%p) Background routine status was %d, sock_client 
%p\n", thisfd, status, sock_client); /* But has the client gone away ?? */ if (!sock_client) { DEBUGLOG("(%p) Got pipe response for dead client, ignoring it\n", thisfd); } else { /* If error then just return that code */ if (status) send_local_reply(sock_client, status, sock_client->fd); else { /* FIXME: closer inspect this code since state is write thread protected */ pthread_mutex_lock(&sock_client->bits.localsock.mutex); if (sock_client->bits.localsock.state == POST_COMMAND) { pthread_mutex_unlock(&sock_client->bits.localsock.mutex); send_local_reply(sock_client, 0, sock_client->fd); } else { /* PRE_COMMAND finished. */ pthread_mutex_unlock(&sock_client->bits.localsock.mutex); if ((status = distribute_command(sock_client))) send_local_reply(sock_client, EFBIG, sock_client->fd); } } } return len; } /* If a node is up, look for it in the reply array, if it's not there then add one with "ETIMEDOUT". NOTE: This won't race with real replies because they happen in the same thread. */ static void timedout_callback(struct local_client *client, const char *csid, int node_up) { struct node_reply *reply; char nodename[max_cluster_member_name_len]; if (!node_up) return; clops->name_from_csid(csid, nodename); DEBUGLOG("(%p) Checking for a reply from %s\n", client, nodename); pthread_mutex_lock(&client->bits.localsock.mutex); reply = client->bits.localsock.replies; while (reply && strcmp(reply->node, nodename) != 0) reply = reply->next; pthread_mutex_unlock(&client->bits.localsock.mutex); if (!reply) { DEBUGLOG("(%p) Node %s timed-out\n", client, nodename); add_reply_to_list(client, ETIMEDOUT, csid, "Command timed out", 18); } } /* Called when the request has timed out on at least one node. We fill in the remaining node entries with ETIMEDOUT and return. By the time we get here the node that caused the timeout could have gone down, in which case we will never get the expected number of replies that triggers the post command so we need to do it here */ static void request_timed_out(struct local_client *client) { DEBUGLOG("(%p) Request timed-out. padding\n", client); clops->cluster_do_node_callback(client, timedout_callback); if (!client->bits.localsock.threadid) return; pthread_mutex_lock(&client->bits.localsock.mutex); if (!client->bits.localsock.finished && (client->bits.localsock.num_replies != client->bits.localsock.expected_replies)) { /* Post-process the command */ client->bits.localsock.state = POST_COMMAND; pthread_cond_signal(&client->bits.localsock.cond); } pthread_mutex_unlock(&client->bits.localsock.mutex); } /* This is where the real work happens */ static void main_loop(int cmd_timeout) { sigset_t ss; DEBUGLOG("Using timeout of %d seconds\n", cmd_timeout); sigemptyset(&ss); sigaddset(&ss, SIGINT); sigaddset(&ss, SIGTERM); pthread_sigmask(SIG_UNBLOCK, &ss, NULL); /* Main loop */ while (!quit) { fd_set in; int select_status; struct local_client *thisfd, *nextfd; struct timeval tv = { cmd_timeout, 0 }; int quorate = clops->is_quorate(); int client_count = 0; int max_fd = 0; /* Wait on the cluster FD and all local sockets/pipes */ local_client_head.fd = clops->get_main_cluster_fd(); FD_ZERO(&in); for (thisfd = &local_client_head; thisfd; thisfd = thisfd->next) { client_count++; max_fd = max(max_fd, thisfd->fd); } if (max_fd > FD_SETSIZE - 32) { fprintf(stderr, "WARNING: There are too many connections to clvmd. 
Investigate and take action now!\n"); fprintf(stderr, "WARNING: Your cluster may freeze up if the number of clvmd file descriptors (%d) exceeds %d.\n", max_fd + 1, FD_SETSIZE); } for (thisfd = &local_client_head; thisfd; thisfd = nextfd) { nextfd = thisfd->next; if (thisfd->removeme && !cleanup_zombie(thisfd)) { /* cleanup_zombie might have removed the next list element */ nextfd = thisfd->next; (void) _del_client(thisfd); DEBUGLOG("(%p) removeme set with %d monitored fds remaining\n", thisfd, _local_client_count); /* Queue cleanup, this also frees the client struct */ add_to_lvmqueue(thisfd, NULL, 0, NULL); continue; } if (thisfd->removeme) continue; /* if the cluster is not quorate then don't listen for new requests */ if ((thisfd->type != LOCAL_RENDEZVOUS && thisfd->type != LOCAL_SOCK) || quorate) if (thisfd->fd < FD_SETSIZE) FD_SET(thisfd->fd, &in); } select_status = select(FD_SETSIZE, &in, NULL, NULL, &tv); if (reread_config) { int saved_errno = errno; reread_config = 0; DEBUGLOG("got SIGHUP\n"); if (clops->reread_config) clops->reread_config(); errno = saved_errno; } if (select_status > 0) { char csid[MAX_CSID_LEN]; char buf[max_cluster_message]; for (thisfd = &local_client_head; thisfd; thisfd = thisfd->next) { if (thisfd->fd < FD_SETSIZE && FD_ISSET(thisfd->fd, &in)) { struct local_client *newfd = NULL; int ret; /* FIXME Remove from main thread in case it blocks! */ /* Do callback */ ret = thisfd->callback(thisfd, buf, sizeof(buf), csid, &newfd); /* Ignore EAGAIN */ if (ret < 0 && (errno == EAGAIN || errno == EINTR)) { continue; } /* Got error or EOF: Remove it from the list safely */ if (ret <= 0) { int type = thisfd->type; /* If the cluster socket shuts down, so do we */ if (type == CLUSTER_MAIN_SOCK || type == CLUSTER_INTERNAL) goto closedown; DEBUGLOG("(%p) ret == %d, errno = %d. removing client\n", thisfd, ret, errno); thisfd->removeme = 1; continue; } /* New client...simply add it to the list */ if (newfd) { _add_client(newfd, thisfd); thisfd = newfd; } } } } /* Select timed out. Check for clients that have been waiting too long for a response */ if (select_status == 0) { time_t the_time = time(NULL); for (thisfd = &local_client_head; thisfd; thisfd = thisfd->next) { if (thisfd->type == LOCAL_SOCK && thisfd->bits.localsock.sent_out && (thisfd->bits.localsock.sent_time + cmd_timeout) < the_time && thisfd->bits.localsock.expected_replies != thisfd->bits.localsock.num_replies) { /* Send timed out message + replies we already have */ DEBUGLOG("Request to client %p timed-out (send: %ld, now: %ld)\n", thisfd, thisfd->bits.localsock.sent_time, the_time); thisfd->bits.localsock.all_success = 0; request_timed_out(thisfd); } } } if (select_status < 0) { if (errno == EINTR) continue; #ifdef DEBUG perror("select error"); exit(-1); #endif } } closedown: if (quit) DEBUGLOG("SIGTERM received\n"); } static __attribute__ ((noreturn)) void wait_for_child(int c_pipe, int timeout) { int child_status; fd_set fds; struct timeval tv = {timeout, 0}; FD_ZERO(&fds); FD_SET(c_pipe, &fds); switch (select(c_pipe+1, &fds, NULL, NULL, timeout? 
&tv: NULL)) { case 0: fprintf(stderr, "clvmd startup timed out\n"); exit(DFAIL_TIMEOUT); case 1: if (read(c_pipe, &child_status, sizeof(child_status)) != sizeof(child_status)) { fprintf(stderr, "clvmd failed in initialisation\n"); exit(DFAIL_INIT); } switch (child_status) { case SUCCESS: break; case DFAIL_INIT: fprintf(stderr, "clvmd failed in initialisation\n"); break; case DFAIL_LOCAL_SOCK: fprintf(stderr, "clvmd could not create local socket\n"); fprintf(stderr, "Another clvmd is probably already running\n"); break; case DFAIL_CLUSTER_IF: fprintf(stderr, "clvmd could not connect to cluster manager\n"); fprintf(stderr, "Consult syslog for more information\n"); break; case DFAIL_MALLOC: fprintf(stderr, "clvmd failed, not enough memory\n"); break; default: fprintf(stderr, "clvmd failed, error was %d\n", child_status); break; } exit(child_status); default: fprintf(stderr, "clvmd startup, select failed: %s\n", strerror(errno)); exit(DFAIL_INIT); } } /* * Fork into the background and detach from our parent process. * In the interests of user-friendliness we wait for the daemon * to complete initialisation before returning its status * the the user. */ static void be_daemon(int timeout) { int devnull = open("/dev/null", O_RDWR); if (devnull == -1) { perror("Can't open /dev/null"); exit(3); } if (pipe(child_pipe)) { perror("Error creating pipe"); exit(3); } switch (fork()) { case -1: perror("clvmd: can't fork"); exit(2); case 0: /* Child */ (void) close(child_pipe[0]); break; default: /* Parent */ (void) close(devnull); (void) close(child_pipe[1]); wait_for_child(child_pipe[0], timeout); /* noreturn */ } /* Detach ourself from the calling environment */ if ((dup2(devnull, STDIN_FILENO) == -1) || (dup2(devnull, STDOUT_FILENO) == -1) || (dup2(devnull, STDERR_FILENO) == -1)) { perror("Error setting terminal FDs to /dev/null"); log_error("Error setting terminal FDs to /dev/null: %m"); exit(5); } if ((devnull > STDERR_FILENO) && close(devnull)) { log_sys_error("close", "/dev/null"); exit(7); } if (chdir("/")) { log_error("Error setting current directory to /: %m"); exit(6); } setsid(); } static int verify_message(char *buf, int len) { struct clvm_header *h = (struct clvm_header *)buf; if (len < (int)sizeof(struct clvm_header)) { log_error("verify_message short len %d.", len); return -1; } switch (h->cmd) { case CLVMD_CMD_REPLY: case CLVMD_CMD_VERSION: case CLVMD_CMD_GOAWAY: case CLVMD_CMD_TEST: case CLVMD_CMD_LOCK: case CLVMD_CMD_UNLOCK: case CLVMD_CMD_LOCK_LV: case CLVMD_CMD_LOCK_VG: case CLVMD_CMD_LOCK_QUERY: case CLVMD_CMD_REFRESH: case CLVMD_CMD_GET_CLUSTERNAME: case CLVMD_CMD_SET_DEBUG: case CLVMD_CMD_VG_BACKUP: case CLVMD_CMD_RESTART: case CLVMD_CMD_SYNC_NAMES: break; default: log_error("verify_message bad cmd %x.", h->cmd); return -1; } /* TODO: we may be able to narrow len/flags/clientid/arglen checks based on cmd */ if (h->flags & ~(CLVMD_FLAG_LOCAL | CLVMD_FLAG_SYSTEMLV | CLVMD_FLAG_NODEERRS | CLVMD_FLAG_REMOTE)) { log_error("verify_message bad flags %x.", h->flags); return -1; } if (h->arglen > max_cluster_message) { log_error("verify_message bad arglen %x max %d.", h->arglen, max_cluster_message); return -1; } return 0; } static void dump_message(char *buf, int len) { unsigned char row[8]; char str[9]; int i, j = 0; str[8] = '\0'; if (len > 128) len = 128; for (i = 0; i < len; ++i) { row[j] = buf[i]; str[j] = (isprint(buf[i])) ? 
buf[i] : ' '; if (i + 1 == len) { for (;j < 8; ++j) { row[j] = 0; str[j] = ' '; } log_error("%02x %02x %02x %02x %02x %02x %02x %02x [%s]", row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], str); j = 0; } } } static int cleanup_zombie(struct local_client *thisfd) { int *status; struct local_client *pipe_client; if (thisfd->type != LOCAL_SOCK) return 0; if (!thisfd->bits.localsock.cleanup_needed) return 0; DEBUGLOG("(%p) EOF on local socket %d: inprogress=%d\n", thisfd, thisfd->fd, thisfd->bits.localsock.in_progress); if ((pipe_client = thisfd->bits.localsock.pipe_client)) pipe_client = pipe_client->bits.pipe.client; /* If the client went away in mid command then tidy up */ if (thisfd->bits.localsock.in_progress) { DEBUGLOG("Sending SIGUSR2 to pre&post thread (%p in-progress)\n", pipe_client); pthread_kill(thisfd->bits.localsock.threadid, SIGUSR2); if (pthread_mutex_trylock(&thisfd->bits.localsock.mutex)) return 1; thisfd->bits.localsock.state = POST_COMMAND; thisfd->bits.localsock.finished = 1; pthread_cond_signal(&thisfd->bits.localsock.cond); pthread_mutex_unlock(&thisfd->bits.localsock.mutex); /* Free any unsent buffers */ free_reply(thisfd); } /* Kill the subthread & free resources */ if (thisfd->bits.localsock.threadid) { DEBUGLOG("(%p) Waiting for pre&post thread\n", pipe_client); pthread_mutex_lock(&thisfd->bits.localsock.mutex); thisfd->bits.localsock.state = PRE_COMMAND; thisfd->bits.localsock.finished = 1; pthread_cond_signal(&thisfd->bits.localsock.cond); pthread_mutex_unlock(&thisfd->bits.localsock.mutex); if ((errno = pthread_join(thisfd->bits.localsock.threadid, (void **) &status))) log_sys_error("pthread_join", ""); DEBUGLOG("(%p) Joined pre&post thread\n", pipe_client); thisfd->bits.localsock.threadid = 0; /* Remove the pipe client */ if (thisfd->bits.localsock.pipe_client) { struct local_client *delfd = thisfd->bits.localsock.pipe_client; (void) close(delfd->fd); /* Close pipe */ (void) close(thisfd->bits.localsock.pipe); /* Remove pipe client */ if (!_del_client(delfd)) { dm_free(delfd); thisfd->bits.localsock.pipe_client = NULL; } } } /* Free the command buffer */ dm_free(thisfd->bits.localsock.cmd); safe_close(&(thisfd->fd)); thisfd->bits.localsock.cleanup_needed = 0; return 0; } /* Called when we have a read from the local socket. 
was in the main loop but it's grown up and is a big girl now */ static int read_from_local_sock(struct local_client *thisfd) { int len; int argslen; int missing_len; char buffer[PIPE_BUF + 1]; char csid[MAX_CSID_LEN]; int comms_pipe[2]; struct local_client *newfd; struct clvm_header *inheader = (struct clvm_header *) buffer; int status; len = read(thisfd->fd, buffer, sizeof(buffer) - 1); if (len == -1 && errno == EINTR) return 1; DEBUGLOG("(%p) Read on local socket %d, len = %d\n", thisfd, thisfd->fd, len); if (len && verify_message(buffer, len) < 0) { log_error("read_from_local_sock from %d len %d bad verify.", thisfd->fd, len); dump_message(buffer, len); /* force error handling below */ len = 0; } /* EOF or error on socket */ if (len <= 0) { thisfd->bits.localsock.cleanup_needed = 1; (void) cleanup_zombie(thisfd); /* ignore errors here */ return 0; } buffer[len] = 0; /* Ensure \0 terminated */ /* Fill in the client ID */ inheader->clientid = htonl(thisfd->fd); /* If we are already busy then return an error */ if (thisfd->bits.localsock.in_progress) { struct clvm_header reply = { .cmd = CLVMD_CMD_REPLY, .status = EBUSY }; send_message(&reply, sizeof(reply), our_csid, thisfd->fd, "Error sending EBUSY reply to local user"); return len; } /* See if we have the whole message */ argslen = len - strlen(inheader->node) - sizeof(struct clvm_header); missing_len = inheader->arglen - argslen; if (missing_len < 0) missing_len = 0; /* We need at least sizeof(struct clvm_header) bytes in buffer */ if (len < (int)sizeof(struct clvm_header) || /* Already handled in verify_message() */ argslen < 0 || missing_len > MAX_MISSING_LEN) { struct clvm_header reply = { .cmd = CLVMD_CMD_REPLY, .status = EINVAL }; send_message(&reply, sizeof(reply), our_csid, thisfd->fd, "Error sending EINVAL reply to local user"); return 0; } /* Free any old buffer space */ dm_free(thisfd->bits.localsock.cmd); /* Save the message */ if (!(thisfd->bits.localsock.cmd = dm_malloc(len + missing_len))) { struct clvm_header reply = { .cmd = CLVMD_CMD_REPLY, .status = ENOMEM }; send_message(&reply, sizeof(reply), our_csid, thisfd->fd, "Error sending ENOMEM reply to local user"); return 0; } memcpy(thisfd->bits.localsock.cmd, buffer, len); thisfd->bits.localsock.cmd_len = len + missing_len; inheader = (struct clvm_header *) thisfd->bits.localsock.cmd; /* If we don't have the full message then read the rest now */ if (missing_len) { char *argptr = inheader->node + strlen(inheader->node) + 1; while (missing_len > 0) { DEBUGLOG("(%p) got %d bytes, need another %d (total %d)\n", thisfd, argslen, missing_len, inheader->arglen); len = read(thisfd->fd, argptr + argslen, missing_len); if (len == -1 && errno == EINTR) continue; if (len <= 0) { /* EOF or error on socket */ DEBUGLOG("(%p) EOF on local socket\n", thisfd); dm_free(thisfd->bits.localsock.cmd); thisfd->bits.localsock.cmd = NULL; return 0; } missing_len -= len; argslen += len; } } /* Only run the command if all the cluster nodes are running CLVMD */ if (((inheader->flags & CLVMD_FLAG_LOCAL) == 0) && (check_all_clvmds_running(thisfd) == -1)) { thisfd->bits.localsock.expected_replies = 0; thisfd->bits.localsock.num_replies = 0; send_local_reply(thisfd, EHOSTDOWN, thisfd->fd); return len; } /* Check the node name for validity */ if (inheader->node[0] && clops->csid_from_name(csid, inheader->node)) { /* Error, node is not in the cluster */ struct clvm_header reply = { .cmd = CLVMD_CMD_REPLY, .status = ENOENT }; DEBUGLOG("(%p) Unknown node: '%s'\n", thisfd, inheader->node); 
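/*
 * Illustrative sketch, not part of the original clvmd source: the loop just
 * above that pulls in the remainder of a partially-read request is the usual
 * "read exactly N bytes" idiom (retry on EINTR, accumulate short reads).
 * The helper name below is hypothetical.
 */
#if 0
static int read_exact(int fd, char *buf, size_t want)
{
	ssize_t n;

	while (want > 0) {
		n = read(fd, buf, want);
		if (n == -1 && errno == EINTR)
			continue;		/* interrupted - retry */
		if (n <= 0)
			return -1;		/* EOF or hard error */
		buf += n;
		want -= (size_t) n;
	}

	return 0;			/* got everything */
}
#endif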
send_message(&reply, sizeof(reply), our_csid, thisfd->fd, "Error sending ENOENT reply to local user"); thisfd->bits.localsock.expected_replies = 0; thisfd->bits.localsock.num_replies = 0; thisfd->bits.localsock.in_progress = FALSE; thisfd->bits.localsock.sent_out = FALSE; return len; } /* If we already have a subthread then just signal it to start */ if (thisfd->bits.localsock.threadid) { pthread_mutex_lock(&thisfd->bits.localsock.mutex); thisfd->bits.localsock.state = PRE_COMMAND; pthread_cond_signal(&thisfd->bits.localsock.cond); pthread_mutex_unlock(&thisfd->bits.localsock.mutex); return len; } /* Create a pipe and add the reading end to our FD list */ if (pipe(comms_pipe)) { struct clvm_header reply = { .cmd = CLVMD_CMD_REPLY, .status = EBUSY }; DEBUGLOG("(%p) Creating pipe failed: %s\n", thisfd, strerror(errno)); send_message(&reply, sizeof(reply), our_csid, thisfd->fd, "Error sending EBUSY reply to local user"); return len; } if (!(newfd = dm_zalloc(sizeof(*newfd)))) { struct clvm_header reply = { .cmd = CLVMD_CMD_REPLY, .status = ENOMEM }; (void) close(comms_pipe[0]); (void) close(comms_pipe[1]); send_message(&reply, sizeof(reply), our_csid, thisfd->fd, "Error sending ENOMEM reply to local user"); return len; } DEBUGLOG("(%p) Creating pipe, [%d, %d]\n", thisfd, comms_pipe[0], comms_pipe[1]); if (fcntl(comms_pipe[0], F_SETFD, 1)) DEBUGLOG("setting CLOEXEC on pipe[0] failed: %s\n", strerror(errno)); if (fcntl(comms_pipe[1], F_SETFD, 1)) DEBUGLOG("setting CLOEXEC on pipe[1] failed: %s\n", strerror(errno)); newfd->fd = comms_pipe[0]; newfd->type = THREAD_PIPE; newfd->callback = local_pipe_callback; newfd->bits.pipe.client = thisfd; _add_client(newfd, thisfd); /* Store a cross link to the pipe */ thisfd->bits.localsock.pipe_client = newfd; thisfd->bits.localsock.pipe = comms_pipe[1]; /* Make sure the thread has a copy of it's own ID */ newfd->bits.pipe.threadid = thisfd->bits.localsock.threadid; /* Run the pre routine */ thisfd->bits.localsock.in_progress = TRUE; thisfd->bits.localsock.state = PRE_COMMAND; thisfd->bits.localsock.cleanup_needed = 1; DEBUGLOG("(%p) Creating pre&post thread for pipe fd %d\n", newfd, newfd->fd); status = pthread_create(&thisfd->bits.localsock.threadid, &stack_attr, pre_and_post_thread, thisfd); DEBUGLOG("(%p) Created pre&post thread, state = %d\n", newfd, status); return len; } /* Add a file descriptor from the cluster or comms interface to our list of FDs for select */ /* Called when the pre-command has completed successfully - we now execute the real command on all the requested nodes */ static int distribute_command(struct local_client *thisfd) { struct clvm_header *inheader = (struct clvm_header *) thisfd->bits.localsock.cmd; int len = thisfd->bits.localsock.cmd_len; thisfd->xid = global_xid++; DEBUGLOG("(%p) distribute command: XID = %d, flags=0x%x (%s%s)\n", thisfd, thisfd->xid, inheader->flags, (inheader->flags & CLVMD_FLAG_LOCAL) ? "LOCAL" : "", (inheader->flags & CLVMD_FLAG_REMOTE) ? "REMOTE" : ""); /* Forward it to other nodes in the cluster if needed */ if (!(inheader->flags & CLVMD_FLAG_LOCAL)) { /* if node is empty then do it on the whole cluster */ if (inheader->node[0] == '\0') { thisfd->bits.localsock.expected_replies = clops->get_num_nodes(); thisfd->bits.localsock.num_replies = 0; thisfd->bits.localsock.sent_time = time(NULL); thisfd->bits.localsock.in_progress = TRUE; thisfd->bits.localsock.sent_out = TRUE; /* * Send to local node first, even if CLVMD_FLAG_REMOTE * is set so we still get a reply if this is the * only node. 
*/ add_to_lvmqueue(thisfd, inheader, len, NULL); DEBUGLOG("(%p) Sending message to all cluster nodes\n", thisfd); inheader->xid = thisfd->xid; send_message(inheader, len, NULL, -1, "Error forwarding message to cluster"); } else { /* Do it on a single node */ char csid[MAX_CSID_LEN]; if (clops->csid_from_name(csid, inheader->node)) /* This has already been checked so should not happen */ return 0; /* OK, found a node... */ thisfd->bits.localsock.in_progress = TRUE; thisfd->bits.localsock.expected_replies = 1; thisfd->bits.localsock.num_replies = 0; /* Are we the requested node ?? */ if (memcmp(csid, our_csid, max_csid_len) == 0) { DEBUGLOG("(%p) Doing command on local node only\n", thisfd); add_to_lvmqueue(thisfd, inheader, len, NULL); } else { DEBUGLOG("(%p) Sending message to single node: %s\n", thisfd, inheader->node); inheader->xid = thisfd->xid; send_message(inheader, len, csid, -1, "Error forwarding message to cluster node"); } } } else { /* Local explicitly requested, ignore nodes */ thisfd->bits.localsock.in_progress = TRUE; thisfd->bits.localsock.expected_replies = 1; thisfd->bits.localsock.num_replies = 0; DEBUGLOG("(%p) Doing command explicitly on local node only\n", thisfd); add_to_lvmqueue(thisfd, inheader, len, NULL); } return 0; } /* Process a command from a remote node and return the result */ static void process_remote_command(struct clvm_header *msg, int msglen, int fd, const char *csid) { char *replyargs; char nodename[max_cluster_member_name_len]; int replylen = 0; int buflen = max_cluster_message - sizeof(struct clvm_header) - 1; int status; /* Get the node name as we /may/ need it later */ clops->name_from_csid(csid, nodename); DEBUGLOG("process_remote_command %s for clientid 0x%x XID %d on node %s\n", decode_cmd(msg->cmd), msg->clientid, msg->xid, nodename); /* Check for GOAWAY and sulk */ if (msg->cmd == CLVMD_CMD_GOAWAY) { DEBUGLOG("Told to go away by %s\n", nodename); log_error("Told to go away by %s.", nodename); exit(99); } /* Version check is internal - don't bother exposing it in clvmd-command.c */ if (msg->cmd == CLVMD_CMD_VERSION) { int version_nums[3]; char node[256]; memcpy(version_nums, msg->args, sizeof(version_nums)); clops->name_from_csid(csid, node); DEBUGLOG("Remote node %s is version %d.%d.%d\n", node, ntohl(version_nums[0]), ntohl(version_nums[1]), ntohl(version_nums[2])); if (ntohl(version_nums[0]) != CLVMD_MAJOR_VERSION) { struct clvm_header byebyemsg = { .cmd = CLVMD_CMD_GOAWAY }; DEBUGLOG("Telling node %s to go away because of incompatible version number\n", node); log_notice("Telling node %s to go away because of incompatible version number %d.%d.%d\n", node, ntohl(version_nums[0]), ntohl(version_nums[1]), ntohl(version_nums[2])); clops->cluster_send_message(&byebyemsg, sizeof(byebyemsg), our_csid, "Error Sending GOAWAY message"); } else clops->add_up_node(csid); return; } /* Allocate a default reply buffer */ if ((replyargs = dm_malloc(max_cluster_message - sizeof(struct clvm_header)))) /* Run the command */ /* FIXME: usage of init_test() is unprotected */ status = do_command(NULL, msg, msglen, &replyargs, buflen, &replylen); else status = ENOMEM; /* If it wasn't a reply, then reply */ if (msg->cmd != CLVMD_CMD_REPLY) { char *aggreply; aggreply = dm_realloc(replyargs, replylen + sizeof(struct clvm_header)); if (aggreply) { struct clvm_header *agghead = (struct clvm_header *) aggreply; replyargs = aggreply; /* Move it up so there's room for a header in front of the data */ memmove(aggreply + offsetof(struct clvm_header, args), replyargs, 
replylen); agghead->xid = msg->xid; agghead->cmd = CLVMD_CMD_REPLY; agghead->status = status; agghead->flags = 0; agghead->clientid = msg->clientid; agghead->arglen = replylen; agghead->node[0] = '\0'; send_message(aggreply, sizeof(struct clvm_header) + replylen, csid, fd, "Error sending command reply"); } else { /* Return a failure response */ struct clvm_header reply = { .cmd = CLVMD_CMD_REPLY, .status = ENOMEM, .clientid = msg->clientid }; DEBUGLOG("Error attempting to realloc return buffer\n"); send_message(&reply, sizeof(reply), csid, fd, "Error sending ENOMEM command reply"); } } dm_free(replyargs); } /* Add a reply to a command to the list of replies for this client. If we have got a full set then send them to the waiting client down the local socket */ static void add_reply_to_list(struct local_client *client, int status, const char *csid, const char *buf, int len) { struct node_reply *reply; /* Add it to the list of replies */ if (!(reply = dm_zalloc(sizeof(*reply)))) { /* It's all gone horribly wrong... */ send_local_reply(client, ENOMEM, client->fd); return; } reply->status = status; clops->name_from_csid(csid, reply->node); DEBUGLOG("(%p) Reply from node %s: %d bytes\n", client, reply->node, len); if (len > 0) { if (!(reply->replymsg = dm_malloc(len))) reply->status = ENOMEM; else memcpy(reply->replymsg, buf, len); } else reply->replymsg = NULL; pthread_mutex_lock(&client->bits.localsock.mutex); if (client->bits.localsock.finished) { dm_free(reply->replymsg); dm_free(reply); } else { /* Hook it onto the reply chain */ reply->next = client->bits.localsock.replies; client->bits.localsock.replies = reply; /* If we have the whole lot then do the post-process */ /* Post-process the command */ if (++client->bits.localsock.num_replies == client->bits.localsock.expected_replies) { client->bits.localsock.state = POST_COMMAND; pthread_cond_signal(&client->bits.localsock.cond); } DEBUGLOG("(%p) Got %d replies, expecting: %d\n", client, client->bits.localsock.num_replies, client->bits.localsock.expected_replies); } pthread_mutex_unlock(&client->bits.localsock.mutex); } /* This is the thread that runs the PRE and post commands for a particular connection */ static __attribute__ ((noreturn)) void *pre_and_post_thread(void *arg) { struct local_client *client = (struct local_client *) arg; int status; int write_status; sigset_t ss; int pipe_fd = client->bits.localsock.pipe; DEBUGLOG("(%p) Pre&post thread pipe fd %d\n", client, pipe_fd); pthread_mutex_lock(&client->bits.localsock.mutex); /* Ignore SIGUSR1 (handled by master process) but enable SIGUSR2 (kills subthreads) */ sigemptyset(&ss); sigaddset(&ss, SIGUSR1); pthread_sigmask(SIG_BLOCK, &ss, NULL); sigdelset(&ss, SIGUSR1); sigaddset(&ss, SIGUSR2); pthread_sigmask(SIG_UNBLOCK, &ss, NULL); /* Loop around doing PRE and POST functions until the client goes away */ while (!client->bits.localsock.finished) { /* Execute the code */ /* FIXME: usage of init_test() is unprotected as in do_command() */ if ((status = do_pre_command(client))) client->bits.localsock.all_success = 0; DEBUGLOG("(%p) Pre&post thread writes status %d down to pipe fd %d\n", client, status, pipe_fd); /* Tell the parent process we have finished this bit */ while ((write_status = write(pipe_fd, &status, sizeof(int))) != sizeof(int)) if (write_status >=0 || (errno != EINTR && errno != EAGAIN)) { log_error("Error sending to pipe: %m"); break; } if (status) { client->bits.localsock.state = POST_COMMAND; goto next_pre; } /* We may need to wait for the condition variable before 
running the post command */ if (client->bits.localsock.state != POST_COMMAND && !client->bits.localsock.finished) { DEBUGLOG("(%p) Pre&post thread waiting to do post command, state = %d\n", client, client->bits.localsock.state); pthread_cond_wait(&client->bits.localsock.cond, &client->bits.localsock.mutex); } DEBUGLOG("(%p) Pre&post thread got post command condition...\n", client); /* POST function must always run, even if the client aborts */ status = 0; do_post_command(client); while ((write_status = write(pipe_fd, &status, sizeof(int))) != sizeof(int)) if (write_status >=0 || (errno != EINTR && errno != EAGAIN)) { log_error("Error sending to pipe: %m"); break; } next_pre: if (client->bits.localsock.state != PRE_COMMAND && !client->bits.localsock.finished) { DEBUGLOG("(%p) Pre&post thread waiting for next pre command\n", client); pthread_cond_wait(&client->bits.localsock.cond, &client->bits.localsock.mutex); } DEBUGLOG("(%p) Pre&post thread got pre command condition...\n", client); } pthread_mutex_unlock(&client->bits.localsock.mutex); DEBUGLOG("(%p) Pre&post thread finished\n", client); pthread_exit(NULL); } /* Process a command on the local node and store the result */ static int process_local_command(struct clvm_header *msg, int msglen, struct local_client *client, unsigned short xid) { char *replybuf; int buflen = max_cluster_message - sizeof(struct clvm_header) - 1; int replylen = 0; int status; if (!(replybuf = dm_malloc(max_cluster_message))) return -1; DEBUGLOG("(%p) process_local_command: %s msg=%p, msglen =%d\n", client, decode_cmd(msg->cmd), msg, msglen); /* If remote flag is set, just set a successful status code. */ if (msg->flags & CLVMD_FLAG_REMOTE) status = 0; else status = do_command(client, msg, msglen, &replybuf, buflen, &replylen); if (status) client->bits.localsock.all_success = 0; /* If we took too long then discard the reply */ if (xid == client->xid) add_reply_to_list(client, status, our_csid, replybuf, replylen); else DEBUGLOG("(%p) Local command took too long, discarding xid %d, current is %d\n", client, xid, client->xid); dm_free(replybuf); return status; } static int process_reply(const struct clvm_header *msg, int msglen, const char *csid) { struct local_client *client; if (!(client = find_client(msg->clientid))) { DEBUGLOG("Got message for unknown client 0x%x\n", msg->clientid); log_error("Got message for unknown client 0x%x.", msg->clientid); return -1; } if (msg->status) client->bits.localsock.all_success = 0; /* Gather replies together for this client id */ if (msg->xid == client->xid) add_reply_to_list(client, msg->status, csid, msg->args, msg->arglen); else DEBUGLOG("Discarding reply with old XID %d, current = %d\n", msg->xid, client->xid); return 0; } /* Send an aggregated reply back to the client */ static void send_local_reply(struct local_client *client, int status, int fd) { struct clvm_header *clientreply; struct node_reply *thisreply = client->bits.localsock.replies; char *replybuf; char *ptr; int message_len = 0; DEBUGLOG("(%p) Send local reply\n", client); /* Work out the total size of the reply */ while (thisreply) { if (thisreply->replymsg) message_len += strlen(thisreply->replymsg) + 1; else message_len++; message_len += strlen(thisreply->node) + 1 + sizeof(int); thisreply = thisreply->next; } /* Add in the size of our header */ message_len = message_len + sizeof(struct clvm_header); if (!(replybuf = dm_malloc(message_len))) { DEBUGLOG("(%p) Memory allocation fails\n", client); return; } clientreply = (struct clvm_header *) replybuf; 
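/*
 * Illustrative sketch, not part of the original clvmd source: the aggregated
 * reply assembled below packs, for every node, its name (NUL-terminated), a
 * raw int status and the reply text (NUL-terminated), and is closed by an
 * empty node name.  A client could walk it roughly like this (hypothetical
 * helper; assumes <stdio.h>/<string.h>; "args"/"arglen" would come from the
 * reply header).
 */
#if 0
static void walk_node_replies(const char *args, unsigned int arglen)
{
	const char *ptr = args;
	int status;

	while (ptr < args + arglen && *ptr != '\0') {
		const char *node = ptr;

		ptr += strlen(node) + 1;		/* node name */
		memcpy(&status, ptr, sizeof(int));	/* raw host-order status */
		ptr += sizeof(int);
		printf("%s: status %d: %s\n", node, status, ptr);
		ptr += strlen(ptr) + 1;			/* per-node message */
	}
}
#endif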
clientreply->status = status; clientreply->cmd = CLVMD_CMD_REPLY; clientreply->node[0] = '\0'; clientreply->xid = 0; clientreply->clientid = 0; clientreply->flags = 0; ptr = clientreply->args; /* Add in all the replies, and free them as we go */ thisreply = client->bits.localsock.replies; while (thisreply) { struct node_reply *tempreply = thisreply; strcpy(ptr, thisreply->node); ptr += strlen(thisreply->node) + 1; if (thisreply->status) clientreply->flags |= CLVMD_FLAG_NODEERRS; memcpy(ptr, &thisreply->status, sizeof(int)); ptr += sizeof(int); if (thisreply->replymsg) { strcpy(ptr, thisreply->replymsg); ptr += strlen(thisreply->replymsg) + 1; } else { ptr[0] = '\0'; ptr++; } thisreply = thisreply->next; dm_free(tempreply->replymsg); dm_free(tempreply); } /* Terminate with an empty node name */ *ptr = '\0'; clientreply->arglen = ptr - clientreply->args; /* And send it */ send_message(replybuf, message_len, our_csid, fd, "Error sending REPLY to client"); dm_free(replybuf); /* Reset comms variables */ client->bits.localsock.replies = NULL; client->bits.localsock.expected_replies = 0; client->bits.localsock.in_progress = FALSE; client->bits.localsock.sent_out = FALSE; } /* Just free a reply chain because it wasn't used. */ static void free_reply(struct local_client *client) { /* Add in all the replies, and free them as we go */ struct node_reply *thisreply = client->bits.localsock.replies; while (thisreply) { struct node_reply *tempreply = thisreply; thisreply = thisreply->next; dm_free(tempreply->replymsg); dm_free(tempreply); } client->bits.localsock.replies = NULL; } /* Send our version number to the cluster */ static void send_version_message(void) { char message[sizeof(struct clvm_header) + sizeof(int) * 3]; struct clvm_header *msg = (struct clvm_header *) message; int version_nums[3] = { htonl(CLVMD_MAJOR_VERSION), htonl(CLVMD_MINOR_VERSION), htonl(CLVMD_PATCH_VERSION) }; msg->cmd = CLVMD_CMD_VERSION; msg->status = 0; msg->flags = 0; msg->clientid = 0; msg->arglen = sizeof(version_nums); memcpy(&msg->args, version_nums, sizeof(version_nums)); hton_clvm(msg); clops->cluster_send_message(message, sizeof(message), NULL, "Error Sending version number"); } /* Send a message to either a local client or another server */ static int send_message(void *buf, int msglen, const char *csid, int fd, const char *errtext) { int len = 0; int ptr; struct timespec delay; struct timespec remtime; int retry_cnt = 0; /* Send remote messages down the cluster socket */ if (!csid || !ISLOCAL_CSID(csid)) { hton_clvm((struct clvm_header *) buf); return clops->cluster_send_message(buf, msglen, csid, errtext); } /* Make sure it all goes */ for (ptr = 0; ptr < msglen;) { if ((len = write(fd, (char*)buf + ptr, msglen - ptr)) <= 0) { if (errno == EINTR) continue; if ((errno == EAGAIN || errno == EIO || errno == ENOSPC) && ++retry_cnt < MAX_RETRIES) { delay.tv_sec = 0; delay.tv_nsec = 100000; remtime.tv_sec = 0; remtime.tv_nsec = 0; (void) nanosleep (&delay, &remtime); continue; } DEBUGLOG("%s", errtext); log_error("%s", errtext); break; } ptr += len; } return len; } static int process_work_item(struct lvm_thread_cmd *cmd) { /* If msg is NULL then this is a cleanup request */ if (cmd->msg == NULL) { DEBUGLOG("(%p) process_work_item: free\n", cmd->client); cmd_client_cleanup(cmd->client); pthread_mutex_destroy(&cmd->client->bits.localsock.mutex); pthread_cond_destroy(&cmd->client->bits.localsock.cond); dm_free(cmd->client); return 0; } if (!cmd->remote) { DEBUGLOG("(%p) process_work_item: local\n", cmd->client); 
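/*
 * Illustrative sketch, not part of the original clvmd source: work items like
 * the one handled here are queued by add_to_lvmqueue() and drained by
 * lvm_thread_fn() below, using a dm_list guarded by lvm_thread_mutex and
 * lvm_thread_cond.  Reduced to its essentials the pattern looks like this
 * (hypothetical "struct work" with a "list" member and do_work(); the real
 * code uses struct lvm_thread_cmd).
 */
#if 0
static pthread_mutex_t q_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t q_cond = PTHREAD_COND_INITIALIZER;
static struct dm_list q_head;		/* dm_list_init(&q_head) before use */
static int q_exit;

static void producer(struct work *w)
{
	pthread_mutex_lock(&q_mutex);
	dm_list_add(&q_head, &w->list);	/* append the work item */
	pthread_cond_signal(&q_cond);	/* wake the worker thread */
	pthread_mutex_unlock(&q_mutex);
}

static void *consumer(void *arg)
{
	pthread_mutex_lock(&q_mutex);
	for (;;) {
		while (!dm_list_empty(&q_head)) {
			struct work *w = dm_list_item(dm_list_first(&q_head),
						      struct work);

			dm_list_del(&w->list);
			pthread_mutex_unlock(&q_mutex);
			do_work(w);	/* run without holding the lock */
			pthread_mutex_lock(&q_mutex);
		}
		if (q_exit)
			break;
		pthread_cond_wait(&q_cond, &q_mutex);
	}
	pthread_mutex_unlock(&q_mutex);

	return NULL;
}
#endif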
process_local_command(cmd->msg, cmd->msglen, cmd->client, cmd->xid); } else { DEBUGLOG("(%p) process_work_item: remote\n", cmd->client); process_remote_command(cmd->msg, cmd->msglen, cmd->client->fd, cmd->csid); } return 0; } /* * Routine that runs in the "LVM thread". */ static void *lvm_thread_fn(void *arg) { sigset_t ss; struct lvm_startup_params *lvm_params = arg; struct lvm_thread_cmd *cmd; DEBUGLOG("LVM thread function started\n"); /* Ignore SIGUSR1 & 2 */ sigemptyset(&ss); sigaddset(&ss, SIGUSR1); sigaddset(&ss, SIGUSR2); pthread_sigmask(SIG_BLOCK, &ss, NULL); /* Initialise the interface to liblvm */ init_clvm(lvm_params->excl_uuid); /* Allow others to get moving */ pthread_barrier_wait(&lvm_start_barrier); DEBUGLOG("LVM thread ready for work.\n"); /* Now wait for some actual work */ pthread_mutex_lock(&lvm_thread_mutex); for (;;) { while (!dm_list_empty(&lvm_cmd_head)) { cmd = dm_list_item(dm_list_first(&lvm_cmd_head), struct lvm_thread_cmd); dm_list_del(&cmd->list); pthread_mutex_unlock(&lvm_thread_mutex); process_work_item(cmd); dm_free(cmd->msg); dm_free(cmd); pthread_mutex_lock(&lvm_thread_mutex); } if (lvm_thread_exit) break; DEBUGLOG("LVM thread waiting for work\n"); pthread_cond_wait(&lvm_thread_cond, &lvm_thread_mutex); } pthread_mutex_unlock(&lvm_thread_mutex); DEBUGLOG("LVM thread exits\n"); destroy_lvm(); pthread_exit(NULL); } /* Pass down some work to the LVM thread */ static int add_to_lvmqueue(struct local_client *client, struct clvm_header *msg, int msglen, const char *csid) { struct lvm_thread_cmd *cmd; if (!(cmd = dm_malloc(sizeof(*cmd)))) return ENOMEM; if (msglen) { if (!(cmd->msg = dm_malloc(msglen))) { log_error("Unable to allocate buffer space."); dm_free(cmd); return -1; } memcpy(cmd->msg, msg, msglen); } else cmd->msg = NULL; cmd->client = client; cmd->msglen = msglen; cmd->xid = client->xid; if (csid) { memcpy(cmd->csid, csid, max_csid_len); cmd->remote = 1; } else cmd->remote = 0; DEBUGLOG("(%p) add_to_lvmqueue: cmd=%p, msg=%p, len=%d, csid=%p, xid=%d\n", client, cmd, msg, msglen, csid, cmd->xid); pthread_mutex_lock(&lvm_thread_mutex); if (lvm_thread_exit) { pthread_mutex_unlock(&lvm_thread_mutex); dm_free(cmd->msg); dm_free(cmd); return -1; /* We are about to exit */ } dm_list_add(&lvm_cmd_head, &cmd->list); pthread_cond_signal(&lvm_thread_cond); pthread_mutex_unlock(&lvm_thread_mutex); return 0; } /* Return 0 if we can talk to an existing clvmd */ static int check_local_clvmd(void) { int local_socket; int ret = 0; struct sockaddr_un sockaddr = { .sun_family = AF_UNIX }; if (!dm_strncpy(sockaddr.sun_path, CLVMD_SOCKNAME, sizeof(sockaddr.sun_path))) { log_error("%s: clvmd socket name too long.", CLVMD_SOCKNAME); return -1; } /* Open local socket */ if ((local_socket = socket(PF_UNIX, SOCK_STREAM, 0)) < 0) { log_sys_error("socket", "local socket"); return -1; } if (connect(local_socket,(struct sockaddr *) &sockaddr, sizeof(sockaddr))) { log_sys_error("connect", "local socket"); ret = -1; } if (close(local_socket)) log_sys_error("close", "local socket"); return ret; } static void close_local_sock(int local_socket) { if (local_socket != -1 && close(local_socket)) log_sys_error("close", CLVMD_SOCKNAME); if (CLVMD_SOCKNAME[0] != '\0' && unlink(CLVMD_SOCKNAME)) stack; } /* Open the local socket, that's the one we talk to libclvm down */ static int open_local_sock(void) { mode_t old_mask; int local_socket = -1; struct sockaddr_un sockaddr = { .sun_family = AF_UNIX }; if (!dm_strncpy(sockaddr.sun_path, CLVMD_SOCKNAME, sizeof(sockaddr.sun_path))) { log_error("%s: 
clvmd socket name too long.", CLVMD_SOCKNAME); return -1; } close_local_sock(local_socket); (void) dm_prepare_selinux_context(CLVMD_SOCKNAME, S_IFSOCK); old_mask = umask(0077); /* Open local socket */ if ((local_socket = socket(PF_UNIX, SOCK_STREAM, 0)) < 0) { log_error("Can't create local socket: %m"); goto error; } /* Set Close-on-exec & non-blocking */ if (fcntl(local_socket, F_SETFD, 1)) DEBUGLOG("setting CLOEXEC on local_socket failed: %s\n", strerror(errno)); if (fcntl(local_socket, F_SETFL, fcntl(local_socket, F_GETFL, 0) | O_NONBLOCK)) DEBUGLOG("setting O_NONBLOCK on local_socket failed: %s\n", strerror(errno)); if (bind(local_socket, (struct sockaddr *) &sockaddr, sizeof(sockaddr))) { log_error("can't bind local socket: %m"); goto error; } if (listen(local_socket, 1) != 0) { log_error("listen local: %m"); goto error; } umask(old_mask); (void) dm_prepare_selinux_context(NULL, 0); return local_socket; error: close_local_sock(local_socket); umask(old_mask); (void) dm_prepare_selinux_context(NULL, 0); return -1; } void process_message(struct local_client *client, char *buf, int len, const char *csid) { char nodename[max_cluster_member_name_len]; struct clvm_header *inheader = (struct clvm_header *) buf; ntoh_clvm(inheader); /* Byteswap fields */ if (verify_message(buf, len) < 0) { clops->name_from_csid(csid, nodename); log_error("process_message from %s len %d bad verify.", nodename, len); dump_message(buf, len); return; } if (inheader->cmd == CLVMD_CMD_REPLY) process_reply(inheader, len, csid); else add_to_lvmqueue(client, inheader, len, csid); } static void check_all_callback(struct local_client *client, const char *csid, int node_up) { if (!node_up) add_reply_to_list(client, EHOSTDOWN, csid, "CLVMD not running", 18); } /* Check to see if all CLVMDs are running (ie one on every node in the cluster). If not, returns -1 and prints out a list of errant nodes */ static int check_all_clvmds_running(struct local_client *client) { DEBUGLOG("(%p) check_all_clvmds_running\n", client); return clops->cluster_do_node_callback(client, check_all_callback); } /* Return a local_client struct given a client ID. 
client IDs are in network byte order */ static struct local_client *find_client(int clientid) { struct local_client *thisfd; for (thisfd = &local_client_head; thisfd; thisfd = thisfd->next) if (thisfd->fd == (int)ntohl(clientid)) return thisfd; return NULL; } /* Byte-swapping routines for the header so we work in a heterogeneous environment */ static void hton_clvm(struct clvm_header *hdr) { hdr->status = htonl(hdr->status); hdr->arglen = htonl(hdr->arglen); hdr->xid = htons(hdr->xid); /* Don't swap clientid as it's only a token as far as remote nodes are concerned */ } static void ntoh_clvm(struct clvm_header *hdr) { hdr->status = ntohl(hdr->status); hdr->arglen = ntohl(hdr->arglen); hdr->xid = ntohs(hdr->xid); } /* Handler for SIGUSR2 - sent to kill subthreads */ static void sigusr2_handler(int sig) { DEBUGLOG("SIGUSR2 received\n"); } static void sigterm_handler(int sig) { quit = 1; } static void sighup_handler(int sig) { reread_config = 1; } int sync_lock(const char *resource, int mode, int flags, int *lockid) { return clops->sync_lock(resource, mode, flags, lockid); } int sync_unlock(const char *resource, int lockid) { return clops->sync_unlock(resource, lockid); } static if_type_t parse_cluster_interface(char *ifname) { if_type_t iface = IF_AUTO; if (!strcmp(ifname, "auto")) iface = IF_AUTO; else if (!strcmp(ifname, "cman")) iface = IF_CMAN; else if (!strcmp(ifname, "openais")) iface = IF_OPENAIS; else if (!strcmp(ifname, "corosync")) iface = IF_COROSYNC; else if (!strcmp(ifname, "singlenode")) iface = IF_SINGLENODE; return iface; } /* * Try and find a cluster system in corosync's objdb, if it is running. This is * only called if the command-line option is not present, and if it fails * we still try the interfaces in order. */ static if_type_t get_cluster_type(void) { #ifdef HAVE_COROSYNC_CONFDB_H confdb_handle_t handle; if_type_t type = IF_AUTO; int result; char buf[255]; size_t namelen = sizeof(buf); hdb_handle_t cluster_handle; hdb_handle_t clvmd_handle; confdb_callbacks_t callbacks = { 0 }; result = confdb_initialize (&handle, &callbacks); if (result != CS_OK) return type; result = confdb_object_find_start(handle, OBJECT_PARENT_HANDLE); if (result != CS_OK) goto out; result = confdb_object_find(handle, OBJECT_PARENT_HANDLE, (void *)"cluster", strlen("cluster"), &cluster_handle); if (result != CS_OK) goto out; result = confdb_object_find_start(handle, cluster_handle); if (result != CS_OK) goto out; result = confdb_object_find(handle, cluster_handle, (void *)"clvmd", strlen("clvmd"), &clvmd_handle); if (result != CS_OK) goto out; result = confdb_key_get(handle, clvmd_handle, (void *)"interface", strlen("interface"), buf, &namelen); if (result != CS_OK) goto out; if (namelen >= sizeof(buf)) namelen = sizeof(buf) - 1; buf[namelen] = '\0'; type = parse_cluster_interface(buf); DEBUGLOG("got interface type '%s' from confdb\n", buf); out: confdb_finalize(handle); return type; #else return IF_AUTO; #endif } LVM2.2.02.176/daemons/clvmd/lvm-functions.h0000644000000000000120000000300713176752421017034 0ustar rootwheel/* * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved. * Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* Functions in lvm-functions.c */ #ifndef _LVM_FUNCTIONS_H #define _LVM_FUNCTIONS_H extern int pre_lock_lv(unsigned char lock_cmd, unsigned char lock_flags, char *resource); extern int do_lock_lv(unsigned char lock_cmd, unsigned char lock_flags, char *resource); extern const char *do_lock_query(char *resource); extern int post_lock_lv(unsigned char lock_cmd, unsigned char lock_flags, char *resource); extern int do_check_lvm1(const char *vgname); extern int do_refresh_cache(void); extern int init_clvm(struct dm_hash_table *excl_uuid); extern void destroy_lvm(void); extern void init_lvhash(void); extern void destroy_lvhash(void); extern void lvm_do_backup(const char *vgname); extern char *get_last_lvm_error(void); extern void do_lock_vg(unsigned char command, unsigned char lock_flags, char *resource); extern struct dm_hash_node *get_next_excl_lock(struct dm_hash_node *v, char **name); void lvm_do_fs_unlock(void); #endif LVM2.2.02.176/daemons/clvmd/clvmd-singlenode.c0000644000000000000120000002264713176752421017470 0ustar rootwheel/* * Copyright (C) 2009-2013 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "clvmd-common.h" #include <pthread.h> #include "locking.h" #include "clvm.h" #include "clvmd-comms.h" #include "clvmd.h" #include <sys/un.h> #include <sys/socket.h> #include <fcntl.h> static const char SINGLENODE_CLVMD_SOCKNAME[] = DEFAULT_RUN_DIR "/clvmd_singlenode.sock"; static int listen_fd = -1; static struct dm_hash_table *_locks; static int _lockid; static pthread_mutex_t _lock_mutex = PTHREAD_MUTEX_INITIALIZER; /* Using one common condition for all locks for simplicity */ static pthread_cond_t _lock_cond = PTHREAD_COND_INITIALIZER; struct lock { struct dm_list list; int lockid; int mode; }; static void close_comms(void) { if (listen_fd != -1 && close(listen_fd)) stack; (void)unlink(SINGLENODE_CLVMD_SOCKNAME); listen_fd = -1; } static int init_comms(void) { mode_t old_mask; struct sockaddr_un addr = { .sun_family = AF_UNIX }; if (!dm_strncpy(addr.sun_path, SINGLENODE_CLVMD_SOCKNAME, sizeof(addr.sun_path))) { DEBUGLOG("%s: singlenode socket name too long.", SINGLENODE_CLVMD_SOCKNAME); return -1; } close_comms(); (void) dm_prepare_selinux_context(SINGLENODE_CLVMD_SOCKNAME, S_IFSOCK); old_mask = umask(0077); listen_fd = socket(PF_UNIX, SOCK_STREAM, 0); if (listen_fd < 0) { DEBUGLOG("Can't create local socket: %s\n", strerror(errno)); goto error; } /* Set Close-on-exec */ if (fcntl(listen_fd, F_SETFD, 1)) { DEBUGLOG("Setting CLOEXEC on client fd failed: %s\n", strerror(errno)); goto error; } if (bind(listen_fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) { DEBUGLOG("Can't bind local socket: %s\n", strerror(errno)); goto error; } if (listen(listen_fd, 10) < 0) { DEBUGLOG("Can't listen local socket: %s\n", strerror(errno)); goto error; } umask(old_mask); (void) dm_prepare_selinux_context(NULL, 0); return 0; error: umask(old_mask); (void) dm_prepare_selinux_context(NULL, 0); close_comms();
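/*
 * Illustrative sketch, not part of the original clvmd source: the single-node
 * locking that follows emulates DLM behaviour with a mode-compatibility
 * matrix (_dlm_table).  A new or converted lock is granted only if its mode
 * is compatible with every lock already held on the resource; otherwise the
 * caller waits on _lock_cond, or fails straight away with LCKF_NOQUEUE.  The
 * helper below is hypothetical and only shows the grant test.
 */
#if 0
static int mode_is_compatible(const int table[6][6], int requested,
			      struct dm_list *held)
{
	struct lock *lck;

	dm_list_iterate_items(lck, held)
		if (!table[requested][lck->mode])
			return 0;	/* conflict: wait, or fail if NOQUEUE */

	return 1;			/* compatible with all current holders */
}
#endif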
return -1; } static int _init_cluster(void) { int r; if (!(_locks = dm_hash_create(128))) { DEBUGLOG("Failed to allocate single-node hash table.\n"); return 1; } r = init_comms(); if (r) { dm_hash_destroy(_locks); _locks = NULL; return r; } DEBUGLOG("Single-node cluster initialised.\n"); return 0; } static void _cluster_closedown(void) { close_comms(); /* If there is any awaited resource, kill it softly */ pthread_mutex_lock(&_lock_mutex); dm_hash_destroy(_locks); _locks = NULL; _lockid = 0; pthread_cond_broadcast(&_lock_cond); /* wakeup waiters */ pthread_mutex_unlock(&_lock_mutex); } static void _get_our_csid(char *csid) { int nodeid = 1; memcpy(csid, &nodeid, sizeof(int)); } static int _csid_from_name(char *csid, const char *name) { return 1; } static int _name_from_csid(const char *csid, char *name) { strcpy(name, "SINGLENODE"); return 0; } static int _get_num_nodes(void) { return 1; } /* Node is now known to be running a clvmd */ static void _add_up_node(const char *csid) { } /* Call a callback for each node, so the caller knows whether it's up or down */ static int _cluster_do_node_callback(struct local_client *master_client, void (*callback)(struct local_client *, const char *csid, int node_up)) { return 0; } int _lock_file(const char *file, uint32_t flags); static const char *_get_mode(int mode) { switch (mode) { case LCK_NULL: return "NULL"; case LCK_READ: return "READ"; case LCK_PREAD: return "PREAD"; case LCK_WRITE: return "WRITE"; case LCK_EXCL: return "EXCLUSIVE"; case LCK_UNLOCK: return "UNLOCK"; default: return "????"; } } /* Real locking */ static int _lock_resource(const char *resource, int mode, int flags, int *lockid) { /* DLM table of allowed transition states */ static const int _dlm_table[6][6] = { /* Mode NL CR CW PR PW EX */ /* NL */ { 1, 1, 1, 1, 1, 1}, /* CR */ { 1, 1, 1, 1, 1, 0}, /* CW */ { 1, 1, 1, 0, 0, 0}, /* PR */ { 1, 1, 0, 1, 0, 0}, /* PW */ { 1, 1, 0, 0, 0, 0}, /* EX */ { 1, 0, 0, 0, 0, 0} }; struct lock *lck = NULL, *lckt; struct dm_list *head; DEBUGLOG("Locking resource %s, flags=0x%02x (%s%s%s), mode=%s (%d)\n", resource, flags, (flags & LCKF_NOQUEUE) ? "NOQUEUE" : "", ((flags & (LCKF_NOQUEUE | LCKF_CONVERT)) == (LCKF_NOQUEUE | LCKF_CONVERT)) ? "|" : "", (flags & LCKF_CONVERT) ? "CONVERT" : "", _get_mode(mode), mode); mode &= LCK_TYPE_MASK; pthread_mutex_lock(&_lock_mutex); retry: if (!(head = dm_hash_lookup(_locks, resource))) { if (flags & LCKF_CONVERT) { /* In real DLM, lock is identified only by lockid, resource is not used */ DEBUGLOG("Unlocked resource %s cannot be converted\n", resource); goto_bad; } /* Add new locked resource */ if (!(head = dm_malloc(sizeof(struct dm_list))) || !dm_hash_insert(_locks, resource, head)) { dm_free(head); goto_bad; } dm_list_init(head); } else /* Update/convert locked resource */ dm_list_iterate_items(lck, head) { /* Check is all locks are compatible with requested lock */ if (flags & LCKF_CONVERT) { if (lck->lockid != *lockid) continue; DEBUGLOG("Converting resource %s lockid=%d mode:%s -> %s...\n", resource, lck->lockid, _get_mode(lck->mode), _get_mode(mode)); dm_list_iterate_items(lckt, head) { if ((lckt->lockid != *lockid) && !_dlm_table[mode][lckt->mode]) { if (!(flags & LCKF_NOQUEUE) && /* TODO: Real dlm uses here conversion queues */ !pthread_cond_wait(&_lock_cond, &_lock_mutex) && _locks) /* End of the game? 
*/ goto retry; goto bad; } } lck->mode = mode; /* Lock is now converted */ goto out; } else if (!_dlm_table[mode][lck->mode]) { DEBUGLOG("Resource %s already locked lockid=%d, mode:%s\n", resource, lck->lockid, _get_mode(lck->mode)); if (!(flags & LCKF_NOQUEUE) && !pthread_cond_wait(&_lock_cond, &_lock_mutex) && _locks) { /* End of the game? */ DEBUGLOG("Resource %s retrying lock in mode:%s...\n", resource, _get_mode(mode)); goto retry; } goto bad; } } if (!(flags & LCKF_CONVERT)) { if (!(lck = dm_malloc(sizeof(struct lock)))) goto_bad; *lockid = lck->lockid = ++_lockid; lck->mode = mode; dm_list_add(head, &lck->list); } out: pthread_cond_broadcast(&_lock_cond); /* to wakeup waiters */ pthread_mutex_unlock(&_lock_mutex); DEBUGLOG("Locked resource %s, lockid=%d, mode=%s\n", resource, lck->lockid, _get_mode(lck->mode)); return 0; bad: pthread_cond_broadcast(&_lock_cond); /* to wakeup waiters */ pthread_mutex_unlock(&_lock_mutex); DEBUGLOG("Failed to lock resource %s\n", resource); return 1; /* fail */ } static int _unlock_resource(const char *resource, int lockid) { struct lock *lck; struct dm_list *head; int r = 1; if (lockid < 0) { DEBUGLOG("Not tracking unlock of lockid -1: %s, lockid=%d\n", resource, lockid); return 1; } DEBUGLOG("Unlocking resource %s, lockid=%d\n", resource, lockid); pthread_mutex_lock(&_lock_mutex); pthread_cond_broadcast(&_lock_cond); /* wakeup waiters */ if (!(head = dm_hash_lookup(_locks, resource))) { pthread_mutex_unlock(&_lock_mutex); DEBUGLOG("Resource %s is not locked.\n", resource); return 1; } dm_list_iterate_items(lck, head) if (lck->lockid == lockid) { dm_list_del(&lck->list); dm_free(lck); r = 0; goto out; } DEBUGLOG("Resource %s has wrong lockid %d.\n", resource, lockid); out: if (dm_list_empty(head)) { //DEBUGLOG("Resource %s is no longer hashed (lockid=%d).\n", resource, lockid); dm_hash_remove(_locks, resource); dm_free(head); } pthread_mutex_unlock(&_lock_mutex); return r; } static int _is_quorate(void) { return 1; } static int _get_main_cluster_fd(void) { return listen_fd; } static int _cluster_fd_callback(struct local_client *fd, char *buf, int len, const char *csid, struct local_client **new_client) { return 1; } static int _cluster_send_message(const void *buf, int msglen, const char *csid, const char *errtext) { return 0; } static int _get_cluster_name(char *buf, int buflen) { return dm_strncpy(buf, "localcluster", buflen) ? 0 : 1; } static struct cluster_ops _cluster_singlenode_ops = { .name = "singlenode", .cluster_init_completed = NULL, .cluster_send_message = _cluster_send_message, .name_from_csid = _name_from_csid, .csid_from_name = _csid_from_name, .get_num_nodes = _get_num_nodes, .cluster_fd_callback = _cluster_fd_callback, .get_main_cluster_fd = _get_main_cluster_fd, .cluster_do_node_callback = _cluster_do_node_callback, .is_quorate = _is_quorate, .get_our_csid = _get_our_csid, .add_up_node = _add_up_node, .reread_config = NULL, .cluster_closedown = _cluster_closedown, .get_cluster_name = _get_cluster_name, .sync_lock = _lock_resource, .sync_unlock = _unlock_resource, }; struct cluster_ops *init_singlenode_cluster(void) { if (!_init_cluster()) return &_cluster_singlenode_ops; return NULL; } LVM2.2.02.176/daemons/clvmd/clvmd-common.h0000644000000000000120000000131713176752421016625 0ustar rootwheel/* * Copyright (C) 2010 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. 
* * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* * This file must be included first by every clvmd source file. */ #ifndef _LVM_CLVMD_COMMON_H #define _LVM_CLVMD_COMMON_H #define _REENTRANT #include "tool.h" #include "lvm-logging.h" #endif LVM2.2.02.176/daemons/clvmd/clvmd.h0000644000000000000120000000765313176752421015350 0ustar rootwheel/* * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved. * Copyright (C) 2004 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _CLVMD_H #define _CLVMD_H #define CLVMD_MAJOR_VERSION 0 #define CLVMD_MINOR_VERSION 2 #define CLVMD_PATCH_VERSION 1 /* Default time (in seconds) we will wait for all remote commands to execute before declaring them dead */ #define DEFAULT_CMD_TIMEOUT 60 /* One of these for each reply we get from command execution on a node */ struct node_reply { char node[MAX_CLUSTER_MEMBER_NAME_LEN]; char *replymsg; int status; struct node_reply *next; }; typedef enum {DEBUG_OFF, DEBUG_STDERR, DEBUG_SYSLOG} debug_t; /* * These exist for the use of local sockets only when we are * collecting responses from all cluster nodes */ struct localsock_bits { struct node_reply *replies; int num_replies; int expected_replies; time_t sent_time; /* So we can check for timeouts */ int in_progress; /* Only execute one cmd at a time per client */ int sent_out; /* Flag to indicate that a command was sent to remote nodes */ void *private; /* Private area for command processor use */ void *cmd; /* Whole command as passed down local socket */ int cmd_len; /* Length of above */ int pipe; /* Pipe to send PRE completion status down */ int finished; /* Flag to tell subthread to exit */ int all_success; /* Set to 0 if any node (or the pre_command) failed */ int cleanup_needed; /* helper for cleanup_zombie */ struct local_client *pipe_client; pthread_t threadid; enum { PRE_COMMAND, POST_COMMAND } state; pthread_mutex_t mutex; /* Main thread and worker synchronisation */ pthread_cond_t cond; }; /* Entries for PIPE clients */ struct pipe_bits { struct local_client *client; /* Actual (localsock) client */ pthread_t threadid; /* Our own copy of the thread id */ }; /* Entries for Network socket clients */ struct netsock_bits { void *private; int flags; }; typedef int (*fd_callback_t) (struct local_client * fd, char *buf, int len, const char *csid, struct local_client ** new_client); /* One of these for each fd we are listening on */ struct local_client { int fd; enum { CLUSTER_MAIN_SOCK, CLUSTER_DATA_SOCK, LOCAL_RENDEZVOUS, LOCAL_SOCK, THREAD_PIPE, CLUSTER_INTERNAL } type; struct local_client *next; unsigned short xid; fd_callback_t callback; uint8_t removeme; union { struct localsock_bits localsock; struct pipe_bits pipe; struct netsock_bits net; } bits; }; #define DEBUGLOG(fmt, 
args...) debuglog(fmt, ## args) #ifndef max #define max(a,b) ((a)>(b)?(a):(b)) #endif /* The real command processor is in clvmd-command.c */ extern int do_command(struct local_client *client, struct clvm_header *msg, int msglen, char **buf, int buflen, int *retlen); /* Pre and post command routines are called only on the local node */ extern int do_pre_command(struct local_client *client); extern int do_post_command(struct local_client *client); extern void cmd_client_cleanup(struct local_client *client); extern int add_client(struct local_client *new_client); extern void clvmd_cluster_init_completed(void); extern void process_message(struct local_client *client, char *buf, int len, const char *csid); extern void debuglog(const char *fmt, ... ) __attribute__ ((format(printf, 1, 2))); void clvmd_set_debug(debug_t new_de); debug_t clvmd_get_debug(void); int clvmd_get_foreground(void); int sync_lock(const char *resource, int mode, int flags, int *lockid); int sync_unlock(const char *resource, int lockid); #endif LVM2.2.02.176/daemons/clvmd/clvmd-command.c0000644000000000000120000002570213176752421016752 0ustar rootwheel/* * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved. * Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* CLVMD Cluster LVM daemon command processor. To add commands to the daemon simply add a processor in do_command and return and messages back in buf and the length in *retlen. The initial value of buflen is the maximum size of the buffer. if buf is not large enough then it may be reallocated by the functions in here to a suitable size bearing in mind that anything larger than the passed-in size will have to be returned using the system LV and so performance will suffer. The status return will be negated and passed back to the originating node. pre- and post- command routines are called only on the local node. The purpose is primarily to get and release locks, though the pre- routine should also do any other local setups required by the command (if any) and can return a failure code that prevents the command from being distributed around the cluster The pre- and post- routines are run in their own thread so can block as long they like, do_command is run in the main clvmd thread so should not block for too long. If the pre-command returns an error code (!=0) then the command will not be propogated around the cluster but the post-command WILL be called Also note that the pre and post routine are *always* called on the local node, even if the command to be executed was only requested to run on a remote node. It may peek inside the client structure to check the status of the command. The clients of the daemon must, naturally, understand the return messages and codes. Routines in here may only READ the values in the client structure passed in apart from client->private which they are free to do what they like with. 
*/ #include "clvmd-common.h" #include "clvmd-comms.h" #include "clvm.h" #include "clvmd.h" #include "lvm-globals.h" #include "lvm-functions.h" #include "locking.h" #include extern struct cluster_ops *clops; static int restart_clvmd(void); /* This is where all the real work happens: NOTE: client will be NULL when this is executed on a remote node */ int do_command(struct local_client *client, struct clvm_header *msg, int msglen, char **buf, int buflen, int *retlen) { char *args = msg->node + strlen(msg->node) + 1; int arglen = msglen - sizeof(struct clvm_header) - strlen(msg->node); int status = 0; char *lockname; const char *locktype; struct utsname nodeinfo; unsigned char lock_cmd; unsigned char lock_flags; /* Do the command */ switch (msg->cmd) { /* Just a test message */ case CLVMD_CMD_TEST: if (arglen > buflen) { char *new_buf; buflen = arglen + 200; new_buf = realloc(*buf, buflen); if (new_buf == NULL) { status = errno; free (*buf); } *buf = new_buf; } if (*buf) { if (uname(&nodeinfo)) memset(&nodeinfo, 0, sizeof(nodeinfo)); *retlen = 1 + dm_snprintf(*buf, buflen, "TEST from %s: %s v%s", nodeinfo.nodename, args, nodeinfo.release); } break; case CLVMD_CMD_LOCK_VG: lock_cmd = args[0]; lock_flags = args[1]; lockname = &args[2]; /* Check to see if the VG is in use by LVM1 */ status = do_check_lvm1(lockname); do_lock_vg(lock_cmd, lock_flags, lockname); break; case CLVMD_CMD_LOCK_LV: /* This is the biggie */ lock_cmd = args[0]; lock_flags = args[1]; lockname = &args[2]; status = do_lock_lv(lock_cmd, lock_flags, lockname); /* Replace EIO with something less scary */ if (status == EIO) { *retlen = 1 + dm_snprintf(*buf, buflen, "%s", get_last_lvm_error()); return EIO; } break; case CLVMD_CMD_LOCK_QUERY: lockname = &args[2]; if (buflen < 3) return EIO; if ((locktype = do_lock_query(lockname))) *retlen = 1 + dm_snprintf(*buf, buflen, "%s", locktype); break; case CLVMD_CMD_REFRESH: do_refresh_cache(); break; case CLVMD_CMD_SYNC_NAMES: lvm_do_fs_unlock(); break; case CLVMD_CMD_SET_DEBUG: clvmd_set_debug((debug_t) args[0]); break; case CLVMD_CMD_RESTART: status = restart_clvmd(); break; case CLVMD_CMD_GET_CLUSTERNAME: status = clops->get_cluster_name(*buf, buflen); if (!status) *retlen = strlen(*buf)+1; break; case CLVMD_CMD_VG_BACKUP: /* * Do not run backup on local node, caller should do that. */ if (!client) lvm_do_backup(&args[2]); break; default: /* Won't get here because command is validated in pre_command */ break; } /* Check the status of the command and return the error text */ if (status) { if (*buf) *retlen = dm_snprintf(*buf, buflen, "%s", strerror(status)) + 1; else *retlen = 0; } return status; } static int lock_vg(struct local_client *client) { struct dm_hash_table *lock_hash; struct clvm_header *header = (struct clvm_header *) client->bits.localsock.cmd; unsigned char lock_cmd; int lock_mode; char *args = header->node + strlen(header->node) + 1; int lkid; int status; char *lockname; /* * Keep a track of VG locks in our own hash table. 
In current * practice there should only ever be more than two VGs locked * if a user tries to merge lots of them at once */ if (!client->bits.localsock.private) { if (!(lock_hash = dm_hash_create(3))) return ENOMEM; client->bits.localsock.private = (void *) lock_hash; } else lock_hash = (struct dm_hash_table *) client->bits.localsock.private; lock_cmd = args[0] & (LCK_NONBLOCK | LCK_HOLD | LCK_SCOPE_MASK | LCK_TYPE_MASK); lock_mode = ((int) lock_cmd & LCK_TYPE_MASK); /* lock_flags = args[1]; */ lockname = &args[2]; DEBUGLOG("(%p) doing PRE command LOCK_VG '%s' at %x\n", client, lockname, lock_cmd); if (lock_mode == LCK_UNLOCK) { if (!(lkid = (int) (long) dm_hash_lookup(lock_hash, lockname))) return EINVAL; if ((status = sync_unlock(lockname, lkid))) status = errno; else dm_hash_remove(lock_hash, lockname); } else { /* Read locks need to be PR; other modes get passed through */ if (lock_mode == LCK_READ) lock_mode = LCK_PREAD; if ((status = sync_lock(lockname, lock_mode, (lock_cmd & LCK_NONBLOCK) ? LCKF_NOQUEUE : 0, &lkid))) status = errno; else if (!dm_hash_insert(lock_hash, lockname, (void *) (long) lkid)) return ENOMEM; } return status; } /* Pre-command is a good place to get locks that are needed only for the duration of the commands around the cluster (don't forget to free them in post-command), and to sanity check the command arguments */ int do_pre_command(struct local_client *client) { struct clvm_header *header = (struct clvm_header *) client->bits.localsock.cmd; unsigned char lock_cmd; unsigned char lock_flags; char *args = header->node + strlen(header->node) + 1; int lockid = 0; int status = 0; char *lockname; switch (header->cmd) { case CLVMD_CMD_TEST: status = sync_lock("CLVMD_TEST", LCK_EXCL, 0, &lockid); client->bits.localsock.private = (void *)(long)lockid; break; case CLVMD_CMD_LOCK_VG: lockname = &args[2]; /* We take out a real lock unless LCK_CACHE was set */ if (!strncmp(lockname, "V_", 2) || !strncmp(lockname, "P_#", 3)) status = lock_vg(client); break; case CLVMD_CMD_LOCK_LV: lock_cmd = args[0]; lock_flags = args[1]; lockname = &args[2]; status = pre_lock_lv(lock_cmd, lock_flags, lockname); break; case CLVMD_CMD_REFRESH: case CLVMD_CMD_GET_CLUSTERNAME: case CLVMD_CMD_SET_DEBUG: case CLVMD_CMD_VG_BACKUP: case CLVMD_CMD_SYNC_NAMES: case CLVMD_CMD_LOCK_QUERY: case CLVMD_CMD_RESTART: break; default: log_error("Unknown command %d received\n", header->cmd); status = EINVAL; } return status; } /* Note that the post-command routine is called even if the pre-command or the real command failed */ int do_post_command(struct local_client *client) { struct clvm_header *header = (struct clvm_header *) client->bits.localsock.cmd; int status = 0; unsigned char lock_cmd; unsigned char lock_flags; char *args = header->node + strlen(header->node) + 1; char *lockname; switch (header->cmd) { case CLVMD_CMD_TEST: status = sync_unlock("CLVMD_TEST", (int) (long) client->bits.localsock.private); client->bits.localsock.private = NULL; break; case CLVMD_CMD_LOCK_LV: lock_cmd = args[0]; lock_flags = args[1]; lockname = &args[2]; status = post_lock_lv(lock_cmd, lock_flags, lockname); break; default: /* Nothing to do here */ break; } return status; } /* Called when the client is about to be deleted */ void cmd_client_cleanup(struct local_client *client) { struct dm_hash_node *v; struct dm_hash_table *lock_hash; int lkid; char *lockname; DEBUGLOG("(%p) Client thread cleanup\n", client); if (!client->bits.localsock.private) return; lock_hash = (struct dm_hash_table *)client->bits.localsock.private; 
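/* Release any locks this client still holds, then drop its per-client lock table. */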
dm_hash_iterate(v, lock_hash) { lkid = (int)(long)dm_hash_get_data(lock_hash, v); lockname = dm_hash_get_key(lock_hash, v); DEBUGLOG("(%p) Cleanup: Unlocking lock %s %x\n", client, lockname, lkid); (void) sync_unlock(lockname, lkid); } dm_hash_destroy(lock_hash); client->bits.localsock.private = NULL; } static int restart_clvmd(void) { const char **argv; char *lv_name; int argc = 0, max_locks = 0; struct dm_hash_node *hn = NULL; char debug_arg[16]; const char *clvmd = getenv("LVM_CLVMD_BINARY") ? : CLVMD_PATH; DEBUGLOG("clvmd restart requested\n"); /* Count exclusively-open LVs */ do { hn = get_next_excl_lock(hn, &lv_name); if (lv_name) { max_locks++; if (!*lv_name) break; /* FIXME: Is this error ? */ } } while (hn); /* clvmd + locks (-E uuid) + debug (-d X) + NULL */ if (!(argv = malloc((max_locks * 2 + 6) * sizeof(*argv)))) goto_out; /* * Build the command-line */ argv[argc++] = "clvmd"; /* Propagate debug options */ if (clvmd_get_debug()) { if (dm_snprintf(debug_arg, sizeof(debug_arg), "-d%u", clvmd_get_debug()) < 0) goto_out; argv[argc++] = debug_arg; } /* Propagate foreground options */ if (clvmd_get_foreground()) argv[argc++] = "-f"; argv[argc++] = "-I"; argv[argc++] = clops->name; /* Now add the exclusively-open LVs */ hn = NULL; do { hn = get_next_excl_lock(hn, &lv_name); if (lv_name) { if (!*lv_name) break; /* FIXME: Is this error ? */ argv[argc++] = "-E"; argv[argc++] = lv_name; DEBUGLOG("excl lock: %s\n", lv_name); } } while (hn); argv[argc] = NULL; /* Exec new clvmd */ DEBUGLOG("--- Restarting %s ---\n", clvmd); for (argc = 1; argv[argc]; argc++) DEBUGLOG("--- %d: %s\n", argc, argv[argc]); /* NOTE: This will fail when downgrading! */ execvp(clvmd, (char **)argv); out: /* We failed */ DEBUGLOG("Restart of clvmd failed.\n"); free(argv); return EIO; } LVM2.2.02.176/daemons/clvmd/refresh_clvmd.c0000644000000000000120000002167413176752421017060 0ustar rootwheel/* * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved. * Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* FIXME Remove duplicated functions from this file. 
*/ /* * Send a command to a running clvmd from the command-line */ #include "clvmd-common.h" #include "clvm.h" #include "refresh_clvmd.h" #include #include #include typedef struct lvm_response { char node[255]; char *response; int status; int len; } lvm_response_t; /* * This gets stuck at the start of memory we allocate so we * can sanity-check it at deallocation time */ #define LVM_SIGNATURE 0x434C564D static int _clvmd_sock = -1; /* Open connection to the clvm daemon */ static int _open_local_sock(void) { int local_socket; struct sockaddr_un sockaddr = { .sun_family = AF_UNIX }; if (!dm_strncpy(sockaddr.sun_path, CLVMD_SOCKNAME, sizeof(sockaddr.sun_path))) { fprintf(stderr, "%s: clvmd socket name too long.", CLVMD_SOCKNAME); return -1; } /* Open local socket */ if ((local_socket = socket(PF_UNIX, SOCK_STREAM, 0)) < 0) { fprintf(stderr, "Local socket creation failed: %s", strerror(errno)); return -1; } if (connect(local_socket,(struct sockaddr *) &sockaddr, sizeof(sockaddr))) { int saved_errno = errno; fprintf(stderr, "connect() failed on local socket: %s\n", strerror(errno)); if (close(local_socket)) return -1; errno = saved_errno; return -1; } return local_socket; } /* Send a request and return the status */ static int _send_request(const char *inbuf, int inlen, char **retbuf, int no_response) { char outbuf[PIPE_BUF]; struct clvm_header *outheader = (struct clvm_header *) outbuf; int len; unsigned off; int buflen; int err; /* Send it to CLVMD */ rewrite: if ( (err = write(_clvmd_sock, inbuf, inlen)) != inlen) { if (err == -1 && errno == EINTR) goto rewrite; fprintf(stderr, "Error writing data to clvmd: %s", strerror(errno)); return 0; } if (no_response) return 1; /* Get the response */ reread: if ((len = read(_clvmd_sock, outbuf, sizeof(struct clvm_header))) < 0) { if (errno == EINTR) goto reread; fprintf(stderr, "Error reading data from clvmd: %s", strerror(errno)); return 0; } if (len == 0) { fprintf(stderr, "EOF reading CLVMD"); errno = ENOTCONN; return 0; } /* Allocate buffer */ buflen = len + outheader->arglen; *retbuf = dm_malloc(buflen); if (!*retbuf) { errno = ENOMEM; return 0; } /* Copy the header */ memcpy(*retbuf, outbuf, len); outheader = (struct clvm_header *) *retbuf; /* Read the returned values */ off = 1; /* we've already read the first byte */ while (off <= outheader->arglen && len > 0) { len = read(_clvmd_sock, outheader->args + off, buflen - off - offsetof(struct clvm_header, args)); if (len > 0) off += len; } /* Was it an error ? */ if (outheader->status != 0) { errno = outheader->status; /* Only return an error here if there are no node-specific errors present in the message that might have more detail */ if (!(outheader->flags & CLVMD_FLAG_NODEERRS)) { fprintf(stderr, "cluster request failed: %s\n", strerror(errno)); return 0; } } return 1; } /* Build the structure header and parse-out wildcard node names */ static void _build_header(struct clvm_header *head, int cmd, const char *node, unsigned int len) { head->cmd = cmd; head->status = 0; head->flags = 0; head->xid = 0; head->clientid = 0; if (len) /* 1 byte is used from struct clvm_header.args[1], so -> len - 1 */ head->arglen = len - 1; else { head->arglen = 0; *head->args = '\0'; } /* * Translate special node names. 
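The mapping implemented below is: NODE_ALL ("*") or a missing name becomes an empty node field, which makes the daemon forward the request to every cluster node; NODE_LOCAL (".") also becomes an empty node field but with CLVMD_FLAG_LOCAL set; any other value is copied through verbatim. For example, refresh_clvmd(0) ends up calling _build_header(head, CLVMD_CMD_REFRESH, NODE_LOCAL, 0), which produces an empty node name, CLVMD_FLAG_LOCAL and a zero arglen.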
*/ if (!node || !strcmp(node, NODE_ALL)) head->node[0] = '\0'; else if (!strcmp(node, NODE_LOCAL)) { head->node[0] = '\0'; head->flags = CLVMD_FLAG_LOCAL; } else strcpy(head->node, node); } /* * Send a message to a(or all) node(s) in the cluster and wait for replies */ static int _cluster_request(char cmd, const char *node, void *data, int len, lvm_response_t ** response, int *num, int no_response) { char outbuf[sizeof(struct clvm_header) + len + strlen(node) + 1]; char *inptr; char *retbuf = NULL; int status; int i; int num_responses = 0; struct clvm_header *head = (struct clvm_header *) outbuf; lvm_response_t *rarray; *num = 0; if (_clvmd_sock == -1) _clvmd_sock = _open_local_sock(); if (_clvmd_sock == -1) return 0; _build_header(head, cmd, node, len); if (len) memcpy(head->node + strlen(head->node) + 1, data, len); status = _send_request(outbuf, sizeof(struct clvm_header) + strlen(head->node) + len, &retbuf, no_response); if (!status || no_response) goto out; /* Count the number of responses we got */ head = (struct clvm_header *) retbuf; inptr = head->args; while (inptr[0]) { num_responses++; inptr += strlen(inptr) + 1; inptr += sizeof(int); inptr += strlen(inptr) + 1; } /* * Allocate response array. * With an extra pair of INTs on the front to sanity * check the pointer when we are given it back to free */ *response = NULL; if (!(rarray = dm_malloc(sizeof(lvm_response_t) * num_responses + sizeof(int) * 2))) { errno = ENOMEM; status = 0; goto out; } /* Unpack the response into an lvm_response_t array */ inptr = head->args; i = 0; while (inptr[0]) { strcpy(rarray[i].node, inptr); inptr += strlen(inptr) + 1; memcpy(&rarray[i].status, inptr, sizeof(int)); inptr += sizeof(int); rarray[i].response = dm_malloc(strlen(inptr) + 1); if (rarray[i].response == NULL) { /* Free up everything else and return error */ int j; for (j = 0; j < i; j++) dm_free(rarray[i].response); dm_free(rarray); errno = ENOMEM; status = 0; goto out; } strcpy(rarray[i].response, inptr); rarray[i].len = strlen(inptr); inptr += strlen(inptr) + 1; i++; } *num = num_responses; *response = rarray; out: dm_free(retbuf); return status; } /* Free reply array */ static int _cluster_free_request(lvm_response_t * response, int num) { int i; for (i = 0; i < num; i++) { dm_free(response[i].response); } dm_free(response); return 1; } int refresh_clvmd(int all_nodes) { int num_responses; char args[1]; // No args really. lvm_response_t *response = NULL; int saved_errno; int status; int i; status = _cluster_request(CLVMD_CMD_REFRESH, all_nodes ? NODE_ALL : NODE_LOCAL, args, 0, &response, &num_responses, 0); /* If any nodes were down then display them and return an error */ for (i = 0; i < num_responses; i++) { if (response[i].status == EHOSTDOWN) { fprintf(stderr, "clvmd not running on node %s", response[i].node); status = 0; errno = response[i].status; } else if (response[i].status) { fprintf(stderr, "Error resetting node %s: %s", response[i].node, response[i].response[0] ? response[i].response : strerror(response[i].status)); status = 0; errno = response[i].status; } } saved_errno = errno; _cluster_free_request(response, num_responses); errno = saved_errno; return status; } int restart_clvmd(int all_nodes) { int dummy, status; status = _cluster_request(CLVMD_CMD_RESTART, all_nodes ? NODE_ALL : NODE_LOCAL, NULL, 0, NULL, &dummy, 1); /* * FIXME: we cannot receive response, clvmd re-exec before it. * but also should not close socket too early (the whole rq is dropped then). 
* FIXME: This should be handled this way: * - client waits for RESTART ack (and socket close) * - server restarts * - client checks that server is ready again (VERSION command?) */ usleep(500000); return status; } int debug_clvmd(int level, int clusterwide) { int num_responses; char args[1]; const char *nodes; lvm_response_t *response = NULL; int saved_errno; int status; int i; args[0] = level; if (clusterwide) nodes = NODE_ALL; else nodes = NODE_LOCAL; status = _cluster_request(CLVMD_CMD_SET_DEBUG, nodes, args, 1, &response, &num_responses, 0); /* If any nodes were down then display them and return an error */ for (i = 0; i < num_responses; i++) { if (response[i].status == EHOSTDOWN) { fprintf(stderr, "clvmd not running on node %s", response[i].node); status = 0; errno = response[i].status; } else if (response[i].status) { fprintf(stderr, "Error setting debug on node %s: %s", response[i].node, response[i].response[0] ? response[i].response : strerror(response[i].status)); status = 0; errno = response[i].status; } } saved_errno = errno; _cluster_free_request(response, num_responses); errno = saved_errno; return status; } LVM2.2.02.176/daemons/clvmd/clvm.h0000644000000000000120000000554113176752421015176 0ustar rootwheel/* * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved. * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* Definitions for CLVMD server and clients */ /* * The protocol spoken over the cluster and across the local socket. */ #ifndef _CLVM_H #define _CLVM_H #include "configure.h" #include struct clvm_header { uint8_t cmd; /* See below */ uint8_t flags; /* See below */ uint16_t xid; /* Transaction ID */ uint32_t clientid; /* Only used in Daemon->Daemon comms */ int32_t status; /* For replies, whether request succeeded */ uint32_t arglen; /* Length of argument below. If >1500 then it will be passed around the cluster in the system LV */ char node[1]; /* Actually a NUL-terminated string, node name. If this is empty then the command is forwarded to all cluster nodes unless FLAG_LOCAL or FLAG_REMOTE is also set. 
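As an editor's illustrative note, a request travels as the packed fixed-size fields above immediately followed by the NUL-terminated node name and then the argument bytes:

    cmd | flags | xid | clientid | status | arglen | node name \0 | args ...

which is why senders such as _cluster_request() in refresh_clvmd.c build a message with memcpy(head->node + strlen(head->node) + 1, data, len).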
*/ char args[1]; /* Arguments for the command follow the node name, This member is only valid if the node name is empty */ } __attribute__ ((packed)); /* Flags */ #define CLVMD_FLAG_LOCAL 1 /* Only do this on the local node */ #define CLVMD_FLAG_SYSTEMLV 2 /* Data in system LV under my node name */ #define CLVMD_FLAG_NODEERRS 4 /* Reply has errors in node-specific portion */ #define CLVMD_FLAG_REMOTE 8 /* Do this on all nodes except for the local node */ /* Name of the local socket to communicate between lvm and clvmd */ #define CLVMD_SOCKNAME DEFAULT_RUN_DIR "/clvmd.sock" /* Internal commands & replies */ #define CLVMD_CMD_REPLY 1 #define CLVMD_CMD_VERSION 2 /* Send version around cluster when we start */ #define CLVMD_CMD_GOAWAY 3 /* Die if received this - we are running an incompatible version */ #define CLVMD_CMD_TEST 4 /* Just for mucking about */ #define CLVMD_CMD_LOCK 30 #define CLVMD_CMD_UNLOCK 31 /* Lock/Unlock commands */ #define CLVMD_CMD_LOCK_LV 50 #define CLVMD_CMD_LOCK_VG 51 #define CLVMD_CMD_LOCK_QUERY 52 /* Misc functions */ #define CLVMD_CMD_REFRESH 40 #define CLVMD_CMD_GET_CLUSTERNAME 41 #define CLVMD_CMD_SET_DEBUG 42 #define CLVMD_CMD_VG_BACKUP 43 #define CLVMD_CMD_RESTART 44 #define CLVMD_CMD_SYNC_NAMES 45 /* Used internally by some callers, but not part of the protocol.*/ #ifndef NODE_ALL # define NODE_ALL "*" # define NODE_LOCAL "." # define NODE_REMOTE "^" #endif #endif LVM2.2.02.176/daemons/clvmd/clvmd-corosync.c0000644000000000000120000003672213176752421017177 0ustar rootwheel/* * Copyright (C) 2009-2012 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* * This provides the interface between clvmd and corosync/DLM as the cluster * and lock manager. */ #include "clvmd-common.h" #include #include "clvm.h" #include "clvmd-comms.h" #include "clvmd.h" #include "lvm-functions.h" #include "locking.h" #include #include #ifdef HAVE_COROSYNC_CONFDB_H # include #elif defined HAVE_COROSYNC_CMAP_H # include #else # error "Either HAVE_COROSYNC_CONFDB_H or HAVE_COROSYNC_CMAP_H must be defined." 
#endif #include #include /* Timeout value for several corosync calls */ #define LOCKSPACE_NAME "clvmd" static void corosync_cpg_deliver_callback (cpg_handle_t handle, const struct cpg_name *groupName, uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len); static void corosync_cpg_confchg_callback(cpg_handle_t handle, const struct cpg_name *groupName, const struct cpg_address *member_list, size_t member_list_entries, const struct cpg_address *left_list, size_t left_list_entries, const struct cpg_address *joined_list, size_t joined_list_entries); static void _cluster_closedown(void); /* Hash list of nodes in the cluster */ static struct dm_hash_table *node_hash; /* Number of active nodes */ static int num_nodes; static unsigned int our_nodeid; static struct local_client *cluster_client; /* Corosync handles */ static cpg_handle_t cpg_handle; static quorum_handle_t quorum_handle; /* DLM Handle */ static dlm_lshandle_t *lockspace; static struct cpg_name cpg_group_name; /* Corosync callback structs */ cpg_callbacks_t corosync_cpg_callbacks = { .cpg_deliver_fn = corosync_cpg_deliver_callback, .cpg_confchg_fn = corosync_cpg_confchg_callback, }; quorum_callbacks_t quorum_callbacks = { .quorum_notify_fn = NULL, }; struct node_info { enum {NODE_DOWN, NODE_CLVMD} state; int nodeid; }; /* Set errno to something approximating the right value and return 0 or -1 */ static int cs_to_errno(cs_error_t err) { switch(err) { case CS_OK: return 0; case CS_ERR_LIBRARY: errno = EINVAL; break; case CS_ERR_VERSION: errno = EINVAL; break; case CS_ERR_INIT: errno = EINVAL; break; case CS_ERR_TIMEOUT: errno = ETIME; break; case CS_ERR_TRY_AGAIN: errno = EAGAIN; break; case CS_ERR_INVALID_PARAM: errno = EINVAL; break; case CS_ERR_NO_MEMORY: errno = ENOMEM; break; case CS_ERR_BAD_HANDLE: errno = EINVAL; break; case CS_ERR_BUSY: errno = EBUSY; break; case CS_ERR_ACCESS: errno = EPERM; break; case CS_ERR_NOT_EXIST: errno = ENOENT; break; case CS_ERR_NAME_TOO_LONG: errno = ENAMETOOLONG; break; case CS_ERR_EXIST: errno = EEXIST; break; case CS_ERR_NO_SPACE: errno = ENOSPC; break; case CS_ERR_INTERRUPT: errno = EINTR; break; case CS_ERR_NAME_NOT_FOUND: errno = ENOENT; break; case CS_ERR_NO_RESOURCES: errno = ENOMEM; break; case CS_ERR_NOT_SUPPORTED: errno = EOPNOTSUPP; break; case CS_ERR_BAD_OPERATION: errno = EINVAL; break; case CS_ERR_FAILED_OPERATION: errno = EIO; break; case CS_ERR_MESSAGE_ERROR: errno = EIO; break; case CS_ERR_QUEUE_FULL: errno = EXFULL; break; case CS_ERR_QUEUE_NOT_AVAILABLE: errno = EINVAL; break; case CS_ERR_BAD_FLAGS: errno = EINVAL; break; case CS_ERR_TOO_BIG: errno = E2BIG; break; case CS_ERR_NO_SECTIONS: errno = ENOMEM; break; default: errno = EINVAL; break; } return -1; } static char *print_corosync_csid(const char *csid) { static char buf[128]; int id; memcpy(&id, csid, sizeof(int)); sprintf(buf, "%d", id); return buf; } static void corosync_cpg_deliver_callback (cpg_handle_t handle, const struct cpg_name *groupName, uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len) { int target_nodeid; memcpy(&target_nodeid, msg, COROSYNC_CSID_LEN); DEBUGLOG("%u got message from nodeid %d for %d. 
len %zd\n", our_nodeid, nodeid, target_nodeid, msg_len-4); if (nodeid != our_nodeid) if (target_nodeid == our_nodeid || target_nodeid == 0) process_message(cluster_client, (char *)msg+COROSYNC_CSID_LEN, msg_len-COROSYNC_CSID_LEN, (char*)&nodeid); } static void corosync_cpg_confchg_callback(cpg_handle_t handle, const struct cpg_name *groupName, const struct cpg_address *member_list, size_t member_list_entries, const struct cpg_address *left_list, size_t left_list_entries, const struct cpg_address *joined_list, size_t joined_list_entries) { int i; struct node_info *ninfo; DEBUGLOG("confchg callback. %zd joined, %zd left, %zd members\n", joined_list_entries, left_list_entries, member_list_entries); for (i=0; inodeid = joined_list[i].nodeid; dm_hash_insert_binary(node_hash, (char *)&ninfo->nodeid, COROSYNC_CSID_LEN, ninfo); } } ninfo->state = NODE_CLVMD; } for (i=0; istate = NODE_DOWN; } num_nodes = member_list_entries; } static int _init_cluster(void) { cs_error_t err; #ifdef QUORUM_SET /* corosync/quorum.h */ uint32_t quorum_type; #endif node_hash = dm_hash_create(100); err = cpg_initialize(&cpg_handle, &corosync_cpg_callbacks); if (err != CS_OK) { syslog(LOG_ERR, "Cannot initialise Corosync CPG service: %d", err); DEBUGLOG("Cannot initialise Corosync CPG service: %d", err); return cs_to_errno(err); } #ifdef QUORUM_SET err = quorum_initialize(&quorum_handle, &quorum_callbacks, &quorum_type); if (quorum_type != QUORUM_SET) { syslog(LOG_ERR, "Corosync quorum service is not configured"); DEBUGLOG("Corosync quorum service is not configured"); return EINVAL; } #else err = quorum_initialize(&quorum_handle, &quorum_callbacks); #endif if (err != CS_OK) { syslog(LOG_ERR, "Cannot initialise Corosync quorum service: %d", err); DEBUGLOG("Cannot initialise Corosync quorum service: %d", err); return cs_to_errno(err); } /* Create a lockspace for LV & VG locks to live in */ lockspace = dlm_open_lockspace(LOCKSPACE_NAME); if (!lockspace) { lockspace = dlm_create_lockspace(LOCKSPACE_NAME, 0600); if (!lockspace) { syslog(LOG_ERR, "Unable to create DLM lockspace for CLVM: %m"); return -1; } DEBUGLOG("Created DLM lockspace for CLVMD.\n"); } else DEBUGLOG("Opened existing DLM lockspace for CLVMD.\n"); dlm_ls_pthread_init(lockspace); DEBUGLOG("DLM initialisation complete\n"); /* Connect to the clvmd group */ strcpy((char *)cpg_group_name.value, "clvmd"); cpg_group_name.length = strlen((char *)cpg_group_name.value); err = cpg_join(cpg_handle, &cpg_group_name); if (err != CS_OK) { cpg_finalize(cpg_handle); quorum_finalize(quorum_handle); dlm_release_lockspace(LOCKSPACE_NAME, lockspace, 1); syslog(LOG_ERR, "Cannot join clvmd process group"); DEBUGLOG("Cannot join clvmd process group: %d\n", err); return cs_to_errno(err); } err = cpg_local_get(cpg_handle, &our_nodeid); if (err != CS_OK) { cpg_finalize(cpg_handle); quorum_finalize(quorum_handle); dlm_release_lockspace(LOCKSPACE_NAME, lockspace, 1); syslog(LOG_ERR, "Cannot get local node id\n"); return cs_to_errno(err); } DEBUGLOG("Our local node id is %d\n", our_nodeid); DEBUGLOG("Connected to Corosync\n"); return 0; } static void _cluster_closedown(void) { dlm_release_lockspace(LOCKSPACE_NAME, lockspace, 1); cpg_finalize(cpg_handle); quorum_finalize(quorum_handle); } static void _get_our_csid(char *csid) { memcpy(csid, &our_nodeid, sizeof(int)); } /* Corosync doesn't really have nmode names so we just use the node ID in hex instead */ static int _csid_from_name(char *csid, const char *name) { int nodeid; struct node_info *ninfo; if (sscanf(name, "%x", &nodeid) == 1) { 
ninfo = dm_hash_lookup_binary(node_hash, csid, COROSYNC_CSID_LEN); if (ninfo) return nodeid; } return -1; } static int _name_from_csid(const char *csid, char *name) { struct node_info *ninfo; ninfo = dm_hash_lookup_binary(node_hash, csid, COROSYNC_CSID_LEN); if (!ninfo) { sprintf(name, "UNKNOWN %s", print_corosync_csid(csid)); return -1; } sprintf(name, "%x", ninfo->nodeid); return 0; } static int _get_num_nodes(void) { DEBUGLOG("num_nodes = %d\n", num_nodes); return num_nodes; } /* Node is now known to be running a clvmd */ static void _add_up_node(const char *csid) { struct node_info *ninfo; ninfo = dm_hash_lookup_binary(node_hash, csid, COROSYNC_CSID_LEN); if (!ninfo) { DEBUGLOG("corosync_add_up_node no node_hash entry for csid %s\n", print_corosync_csid(csid)); return; } DEBUGLOG("corosync_add_up_node %d\n", ninfo->nodeid); ninfo->state = NODE_CLVMD; return; } /* Call a callback for each node, so the caller knows whether it's up or down */ static int _cluster_do_node_callback(struct local_client *master_client, void (*callback)(struct local_client *, const char *csid, int node_up)) { struct dm_hash_node *hn; struct node_info *ninfo; dm_hash_iterate(hn, node_hash) { char csid[COROSYNC_CSID_LEN]; ninfo = dm_hash_get_data(node_hash, hn); memcpy(csid, dm_hash_get_key(node_hash, hn), COROSYNC_CSID_LEN); DEBUGLOG("down_callback. node %d, state = %d\n", ninfo->nodeid, ninfo->state); if (ninfo->state == NODE_CLVMD) callback(master_client, csid, 1); } return 0; } /* Real locking */ static int _lock_resource(const char *resource, int mode, int flags, int *lockid) { struct dlm_lksb lksb; int err; DEBUGLOG("lock_resource '%s', flags=%d, mode=%d\n", resource, flags, mode); if (flags & LKF_CONVERT) lksb.sb_lkid = *lockid; err = dlm_ls_lock_wait(lockspace, mode, &lksb, flags, resource, strlen(resource), 0, NULL, NULL, NULL); if (err != 0) { DEBUGLOG("dlm_ls_lock returned %d\n", errno); return err; } if (lksb.sb_status != 0) { DEBUGLOG("dlm_ls_lock returns lksb.sb_status %d\n", lksb.sb_status); errno = lksb.sb_status; return -1; } DEBUGLOG("lock_resource returning %d, lock_id=%x\n", err, lksb.sb_lkid); *lockid = lksb.sb_lkid; return 0; } static int _unlock_resource(const char *resource, int lockid) { struct dlm_lksb lksb; int err; DEBUGLOG("unlock_resource: %s lockid: %x\n", resource, lockid); lksb.sb_lkid = lockid; err = dlm_ls_unlock_wait(lockspace, lockid, 0, &lksb); if (err != 0) { DEBUGLOG("Unlock returned %d\n", err); return err; } if (lksb.sb_status != EUNLOCK) { DEBUGLOG("dlm_ls_unlock_wait returns lksb.sb_status: %d\n", lksb.sb_status); errno = lksb.sb_status; return -1; } return 0; } static int _is_quorate(void) { int quorate; if (quorum_getquorate(quorum_handle, &quorate) == CS_OK) return quorate; else return 0; } static int _get_main_cluster_fd(void) { int select_fd; cpg_fd_get(cpg_handle, &select_fd); return select_fd; } static int _cluster_fd_callback(struct local_client *fd, char *buf, int len, const char *csid, struct local_client **new_client) { cluster_client = fd; *new_client = NULL; cpg_dispatch(cpg_handle, CS_DISPATCH_ONE); return 1; } static int _cluster_send_message(const void *buf, int msglen, const char *csid, const char *errtext) { static pthread_mutex_t _mutex = PTHREAD_MUTEX_INITIALIZER; struct iovec iov[2]; cs_error_t err; int target_node; if (csid) memcpy(&target_node, csid, COROSYNC_CSID_LEN); else target_node = 0; iov[0].iov_base = &target_node; iov[0].iov_len = sizeof(int); iov[1].iov_base = (char *)buf; iov[1].iov_len = msglen; pthread_mutex_lock(&_mutex); err = 
cpg_mcast_joined(cpg_handle, CPG_TYPE_AGREED, iov, 2); pthread_mutex_unlock(&_mutex); return cs_to_errno(err); } #ifdef HAVE_COROSYNC_CONFDB_H /* * We are not necessarily connected to a Red Hat Cluster system, * but if we are, this returns the cluster name from cluster.conf. * I've used confdb rather than ccs to reduce the inter-package * dependancies as well as to allow people to set a cluster name * for themselves even if they are not running on RH cluster. */ static int _get_cluster_name(char *buf, int buflen) { confdb_handle_t handle; int result; size_t namelen = buflen; hdb_handle_t cluster_handle; confdb_callbacks_t callbacks = { .confdb_key_change_notify_fn = NULL, .confdb_object_create_change_notify_fn = NULL, .confdb_object_delete_change_notify_fn = NULL }; /* This is a default in case everything else fails */ strncpy(buf, "Corosync", buflen); /* Look for a cluster name in confdb */ result = confdb_initialize (&handle, &callbacks); if (result != CS_OK) return 0; result = confdb_object_find_start(handle, OBJECT_PARENT_HANDLE); if (result != CS_OK) goto out; result = confdb_object_find(handle, OBJECT_PARENT_HANDLE, (void *)"cluster", strlen("cluster"), &cluster_handle); if (result != CS_OK) goto out; result = confdb_key_get(handle, cluster_handle, (void *)"name", strlen("name"), buf, &namelen); if (result != CS_OK) goto out; buf[namelen] = '\0'; out: confdb_finalize(handle); return 0; } #elif defined HAVE_COROSYNC_CMAP_H static int _get_cluster_name(char *buf, int buflen) { cmap_handle_t cmap_handle = 0; int result; char *name = NULL; /* This is a default in case everything else fails */ strncpy(buf, "Corosync", buflen); /* Look for a cluster name in cmap */ result = cmap_initialize(&cmap_handle); if (result != CS_OK) return 0; result = cmap_get_string(cmap_handle, "totem.cluster_name", &name); if (result != CS_OK) goto out; memset(buf, 0, buflen); strncpy(buf, name, buflen - 1); out: if (name) free(name); cmap_finalize(cmap_handle); return 0; } #endif static struct cluster_ops _cluster_corosync_ops = { .name = "corosync", .cluster_init_completed = NULL, .cluster_send_message = _cluster_send_message, .name_from_csid = _name_from_csid, .csid_from_name = _csid_from_name, .get_num_nodes = _get_num_nodes, .cluster_fd_callback = _cluster_fd_callback, .get_main_cluster_fd = _get_main_cluster_fd, .cluster_do_node_callback = _cluster_do_node_callback, .is_quorate = _is_quorate, .get_our_csid = _get_our_csid, .add_up_node = _add_up_node, .reread_config = NULL, .cluster_closedown = _cluster_closedown, .get_cluster_name = _get_cluster_name, .sync_lock = _lock_resource, .sync_unlock = _unlock_resource, }; struct cluster_ops *init_corosync_cluster(void) { if (!_init_cluster()) return &_cluster_corosync_ops; else return NULL; } LVM2.2.02.176/daemons/lvmpolld/0000755000000000000120000000000013176752421014603 5ustar rootwheelLVM2.2.02.176/daemons/lvmpolld/Makefile.in0000644000000000000120000000246513176752421016657 0ustar rootwheel# # Copyright (C) 2014-2015 Red Hat, Inc. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU Lesser General Public License v.2.1. 
# # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ SOURCES = lvmpolld-core.c lvmpolld-data-utils.c lvmpolld-cmd-utils.c TARGETS = lvmpolld .PHONY: install_lvmpolld CFLOW_LIST = $(SOURCES) CFLOW_LIST_TARGET = $(LIB_NAME).cflow CFLOW_TARGET = lvmpolld include $(top_builddir)/make.tmpl CFLAGS += $(EXTRA_EXEC_CFLAGS) INCLUDES += -I$(top_srcdir)/libdaemon/server LDFLAGS += -L$(top_builddir)/libdaemon/server $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS) LIBS += $(DAEMON_LIBS) -ldaemonserver -ldevmapper $(PTHREAD_LIBS) lvmpolld: $(OBJECTS) $(top_builddir)/libdaemon/client/libdaemonclient.a \ $(top_builddir)/libdaemon/server/libdaemonserver.a $(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJECTS) $(LIBS) install_lvmpolld: lvmpolld $(INSTALL_PROGRAM) -D $< $(sbindir)/$( #include #endif /* _LVM_LVMPOLLD_COMMON_H */ LVM2.2.02.176/daemons/lvmpolld/polling_ops.h0000644000000000000120000000144213176752421017302 0ustar rootwheel/* * Copyright (C) 2014-2015 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _LVM_TOOL_POLLING_OPS_H #define _LVM_TOOL_POLLING_OPS_H /* this file is also part of lvmpolld protocol */ #define PVMOVE_POLL "pvmove" #define CONVERT_POLL "convert" #define MERGE_POLL "merge" #define MERGE_THIN_POLL "merge_thin" #endif /* _LVM_TOOL_POLLING_OPS_H */ LVM2.2.02.176/daemons/lvmpolld/lvmpolld-cmd-utils.c0000644000000000000120000000704313176752421020503 0ustar rootwheel/* * Copyright (C) 2015 Red Hat, Inc. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "lvmpolld-common.h" /* extract this info from autoconf/automake files */ #define LVPOLL_CMD "lvpoll" #define MIN_ARGV_SIZE 8 static const char *const polling_ops[] = { [PVMOVE] = LVMPD_REQ_PVMOVE, [CONVERT] = LVMPD_REQ_CONVERT, [MERGE] = LVMPD_REQ_MERGE, [MERGE_THIN] = LVMPD_REQ_MERGE_THIN }; const char *polling_op(enum poll_type type) { return type < POLL_TYPE_MAX ? 
polling_ops[type] : ""; } static int add_to_cmd_arr(const char ***cmdargv, const char *str, unsigned *ind) { const char **newargv; if (*ind && !(*ind % MIN_ARGV_SIZE)) { newargv = dm_realloc(*cmdargv, (*ind / MIN_ARGV_SIZE + 1) * MIN_ARGV_SIZE * sizeof(char *)); if (!newargv) return 0; *cmdargv = newargv; } *(*cmdargv + (*ind)++) = str; return 1; } const char **cmdargv_ctr(const struct lvmpolld_lv *pdlv, const char *lvm_binary, unsigned abort_polling, unsigned handle_missing_pvs) { unsigned i = 0; const char **cmd_argv = dm_malloc(MIN_ARGV_SIZE * sizeof(char *)); if (!cmd_argv) return NULL; /* path to lvm2 binary */ if (!add_to_cmd_arr(&cmd_argv, lvm_binary, &i)) goto err; /* cmd to execute */ if (!add_to_cmd_arr(&cmd_argv, LVPOLL_CMD, &i)) goto err; /* transfer internal polling interval */ if (pdlv->sinterval && (!add_to_cmd_arr(&cmd_argv, "--interval", &i) || !add_to_cmd_arr(&cmd_argv, pdlv->sinterval, &i))) goto err; /* pass abort param */ if (abort_polling && !add_to_cmd_arr(&cmd_argv, "--abort", &i)) goto err; /* pass handle-missing-pvs. used by mirror polling operation */ if (handle_missing_pvs && !add_to_cmd_arr(&cmd_argv, "--handlemissingpvs", &i)) goto err; /* one of: "convert", "pvmove", "merge", "merge_thin" */ if (!add_to_cmd_arr(&cmd_argv, "--polloperation", &i) || !add_to_cmd_arr(&cmd_argv, polling_ops[pdlv->type], &i)) goto err; /* vg/lv name */ if (!add_to_cmd_arr(&cmd_argv, pdlv->lvname, &i)) goto err; /* disable metadata backup */ if (!add_to_cmd_arr(&cmd_argv, "-An", &i)) goto err; /* terminating NULL */ if (!add_to_cmd_arr(&cmd_argv, NULL, &i)) goto err; return cmd_argv; err: dm_free(cmd_argv); return NULL; } /* FIXME: in fact exclude should be va list */ static int copy_env(const char ***cmd_envp, unsigned *i, const char *exclude) { const char * const* tmp = (const char * const*) environ; if (!tmp) return 0; while (*tmp) { if (strncmp(*tmp, exclude, strlen(exclude)) && !add_to_cmd_arr(cmd_envp, *tmp, i)) return 0; tmp++; } return 1; } const char **cmdenvp_ctr(const struct lvmpolld_lv *pdlv) { unsigned i = 0; const char **cmd_envp = dm_malloc(MIN_ARGV_SIZE * sizeof(char *)); if (!cmd_envp) return NULL; /* copy whole environment from lvmpolld, exclude LVM_SYSTEM_DIR if set */ if (!copy_env(&cmd_envp, &i, "LVM_SYSTEM_DIR=")) goto err; /* Add per client LVM_SYSTEM_DIR variable if set */ if (*pdlv->lvm_system_dir_env && !add_to_cmd_arr(&cmd_envp, pdlv->lvm_system_dir_env, &i)) goto err; /* terminating NULL */ if (!add_to_cmd_arr(&cmd_envp, NULL, &i)) goto err; return cmd_envp; err: dm_free(cmd_envp); return NULL; } LVM2.2.02.176/daemons/lvmpolld/lvmpolld-core.c0000644000000000000120000006451213176752421017536 0ustar rootwheel/* * Copyright (C) 2014-2015 Red Hat, Inc. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. 
* * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "lvmpolld-common.h" #include "lvm-version.h" #include "daemon-server.h" #include "daemon-log.h" #include #include #include #define LVMPOLLD_SOCKET DEFAULT_RUN_DIR "/lvmpolld.socket" #define PD_LOG_PREFIX "LVMPOLLD" #define LVM2_LOG_PREFIX "\tLVPOLL" /* predefined reason for response = "failed" case */ #define REASON_REQ_NOT_IMPLEMENTED "request not implemented" #define REASON_MISSING_LVID "request requires lvid set" #define REASON_MISSING_LVNAME "request requires lvname set" #define REASON_MISSING_VGNAME "request requires vgname set" #define REASON_POLLING_FAILED "polling of lvm command failed" #define REASON_ILLEGAL_ABORT_REQUEST "abort only supported with PVMOVE polling operation" #define REASON_DIFFERENT_OPERATION_IN_PROGRESS "Different operation on LV already in progress" #define REASON_INVALID_INTERVAL "request requires interval set" #define REASON_ENOMEM "not enough memory" struct lvmpolld_state { daemon_idle *idle; log_state *log; const char *log_config; const char *lvm_binary; struct lvmpolld_store *id_to_pdlv_abort; struct lvmpolld_store *id_to_pdlv_poll; }; static pthread_key_t key; static const char *_strerror_r(int errnum, struct lvmpolld_thread_data *data) { #ifdef _GNU_SOURCE return strerror_r(errnum, data->buf, sizeof(data->buf)); /* never returns NULL */ #elif (_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) return strerror_r(errnum, data->buf, sizeof(data->buf)) ? "" : data->buf; #else # warning "Can't decide proper strerror_r implementation. lvmpolld will not issue specific system error messages" return ""; #endif } static void _usage(const char *prog, FILE *file) { fprintf(file, "Usage:\n" "%s [-V] [-h] [-f] [-l {all|wire|debug}] [-s path] [-B path] [-p path] [-t secs]\n" "%s --dump [-s path]\n" " -V|--version Show version info\n" " -h|--help Show this help information\n" " -f|--foreground Don't fork, run in the foreground\n" " --dump Dump full lvmpolld state\n" " -l|--log Logging message level (-l {all|wire|debug})\n" " -p|--pidfile Set path to the pidfile\n" " -s|--socket Set path to the communication socket\n" " -B|--binary Path to lvm2 binary\n" " -t|--timeout Time to wait in seconds before shutdown on idle (missing or 0 = inifinite)\n\n", prog, prog); } static int _init(struct daemon_state *s) { struct lvmpolld_state *ls = s->private; ls->log = s->log; /* * log warnings to stderr by default. 
Otherwise we would miss any lvpoll * error messages in default configuration */ daemon_log_enable(ls->log, DAEMON_LOG_OUTLET_STDERR, DAEMON_LOG_WARN, 1); if (!daemon_log_parse(ls->log, DAEMON_LOG_OUTLET_STDERR, ls->log_config, 1)) return 0; if (pthread_key_create(&key, lvmpolld_thread_data_destroy)) { FATAL(ls, "%s: %s", PD_LOG_PREFIX, "Failed to create pthread key"); return 0; } ls->id_to_pdlv_poll = pdst_init("polling"); ls->id_to_pdlv_abort = pdst_init("abort"); if (!ls->id_to_pdlv_poll || !ls->id_to_pdlv_abort) { FATAL(ls, "%s: %s", PD_LOG_PREFIX, "Failed to allocate internal data structures"); return 0; } ls->lvm_binary = ls->lvm_binary ?: LVM_PATH; if (access(ls->lvm_binary, X_OK)) { FATAL(ls, "%s: %s %s", PD_LOG_PREFIX, "Execute access rights denied on", ls->lvm_binary); return 0; } if (ls->idle) ls->idle->is_idle = 1; return 1; } static void _lvmpolld_stores_lock(struct lvmpolld_state *ls) { pdst_lock(ls->id_to_pdlv_poll); pdst_lock(ls->id_to_pdlv_abort); } static void _lvmpolld_stores_unlock(struct lvmpolld_state *ls) { pdst_unlock(ls->id_to_pdlv_abort); pdst_unlock(ls->id_to_pdlv_poll); } static void _lvmpolld_global_lock(struct lvmpolld_state *ls) { _lvmpolld_stores_lock(ls); pdst_locked_lock_all_pdlvs(ls->id_to_pdlv_poll); pdst_locked_lock_all_pdlvs(ls->id_to_pdlv_abort); } static void _lvmpolld_global_unlock(struct lvmpolld_state *ls) { pdst_locked_unlock_all_pdlvs(ls->id_to_pdlv_abort); pdst_locked_unlock_all_pdlvs(ls->id_to_pdlv_poll); _lvmpolld_stores_unlock(ls); } static int _fini(struct daemon_state *s) { int done; const struct timespec t = { .tv_nsec = 250000000 }; /* .25 sec */ struct lvmpolld_state *ls = s->private; DEBUGLOG(s, "fini"); DEBUGLOG(s, "sending cancel requests"); _lvmpolld_global_lock(ls); pdst_locked_send_cancel(ls->id_to_pdlv_poll); pdst_locked_send_cancel(ls->id_to_pdlv_abort); _lvmpolld_global_unlock(ls); DEBUGLOG(s, "waiting for background threads to finish"); while(1) { _lvmpolld_stores_lock(ls); done = !pdst_locked_get_active_count(ls->id_to_pdlv_poll) && !pdst_locked_get_active_count(ls->id_to_pdlv_abort); _lvmpolld_stores_unlock(ls); if (done) break; nanosleep(&t, NULL); } DEBUGLOG(s, "destroying internal data structures"); _lvmpolld_stores_lock(ls); pdst_locked_destroy_all_pdlvs(ls->id_to_pdlv_poll); pdst_locked_destroy_all_pdlvs(ls->id_to_pdlv_abort); _lvmpolld_stores_unlock(ls); pdst_destroy(ls->id_to_pdlv_poll); pdst_destroy(ls->id_to_pdlv_abort); pthread_key_delete(key); return 1; } static response reply(const char *res, const char *reason) { return daemon_reply_simple(res, "reason = %s", reason, NULL); } static int read_single_line(struct lvmpolld_thread_data *data, int err) { ssize_t r = getline(&data->line, &data->line_size, err ? data->ferr : data->fout); if (r > 0 && *(data->line + r - 1) == '\n') *(data->line + r - 1) = '\0'; return (r > 0); } static void update_idle_state(struct lvmpolld_state *ls) { if (!ls->idle) return; _lvmpolld_stores_lock(ls); ls->idle->is_idle = !pdst_locked_get_active_count(ls->id_to_pdlv_poll) && !pdst_locked_get_active_count(ls->id_to_pdlv_abort); _lvmpolld_stores_unlock(ls); DEBUGLOG(ls, "%s: %s %s%s", PD_LOG_PREFIX, "daemon is", ls->idle->is_idle ? 
"" : "not ", "idle"); } /* make this configurable */ #define MAX_TIMEOUT 2 static int poll_for_output(struct lvmpolld_lv *pdlv, struct lvmpolld_thread_data *data) { int ch_stat, r, err = 1, fds_count = 2, timeout = 0; pid_t pid; struct lvmpolld_cmd_stat cmd_state = { .retcode = -1, .signal = 0 }; struct pollfd fds[] = { { .fd = data->outpipe[0], .events = POLLIN }, { .fd = data->errpipe[0], .events = POLLIN } }; if (!(data->fout = fdopen(data->outpipe[0], "r")) || !(data->ferr = fdopen(data->errpipe[0], "r"))) { ERROR(pdlv->ls, "%s: %s: (%d) %s", PD_LOG_PREFIX, "failed to open file stream", errno, _strerror_r(errno, data)); goto out; } while (1) { do { r = poll(fds, 2, pdlv_get_timeout(pdlv) * 1000); } while (r < 0 && errno == EINTR); DEBUGLOG(pdlv->ls, "%s: %s %d", PD_LOG_PREFIX, "poll() returned", r); if (r < 0) { ERROR(pdlv->ls, "%s: %s (PID %d) failed: (%d) %s", PD_LOG_PREFIX, "poll() for LVM2 cmd", pdlv->cmd_pid, errno, _strerror_r(errno, data)); goto out; } else if (!r) { timeout++; WARN(pdlv->ls, "%s: %s (PID %d) %s", PD_LOG_PREFIX, "polling for output of the lvm cmd", pdlv->cmd_pid, "has timed out"); if (timeout > MAX_TIMEOUT) { ERROR(pdlv->ls, "%s: %s (PID %d) (no output for %d seconds)", PD_LOG_PREFIX, "LVM2 cmd is unresponsive too long", pdlv->cmd_pid, timeout * pdlv_get_timeout(pdlv)); goto out; } continue; /* while(1) */ } timeout = 0; /* handle the command's STDOUT */ if (fds[0].revents & POLLIN) { DEBUGLOG(pdlv->ls, "%s: %s", PD_LOG_PREFIX, "caught input data in STDOUT"); assert(read_single_line(data, 0)); /* may block indef. anyway */ INFO(pdlv->ls, "%s: PID %d: %s: '%s'", LVM2_LOG_PREFIX, pdlv->cmd_pid, "STDOUT", data->line); } else if (fds[0].revents) { if (fds[0].revents & POLLHUP) DEBUGLOG(pdlv->ls, "%s: %s", PD_LOG_PREFIX, "caught POLLHUP"); else WARN(pdlv->ls, "%s: %s", PD_LOG_PREFIX, "poll for command's STDOUT failed"); fds[0].fd = -1; fds_count--; } /* handle the command's STDERR */ if (fds[1].revents & POLLIN) { DEBUGLOG(pdlv->ls, "%s: %s", PD_LOG_PREFIX, "caught input data in STDERR"); assert(read_single_line(data, 1)); /* may block indef. anyway */ WARN(pdlv->ls, "%s: PID %d: %s: '%s'", LVM2_LOG_PREFIX, pdlv->cmd_pid, "STDERR", data->line); } else if (fds[1].revents) { if (fds[1].revents & POLLHUP) DEBUGLOG(pdlv->ls, "%s: %s", PD_LOG_PREFIX, "caught err POLLHUP"); else WARN(pdlv->ls, "%s: %s", PD_LOG_PREFIX, "poll for command's STDOUT failed"); fds[1].fd = -1; fds_count--; } do { /* * fds_count == 0 means polling reached EOF * or received error on both descriptors. * In such case, just wait for command to finish */ pid = waitpid(pdlv->cmd_pid, &ch_stat, fds_count ? 
WNOHANG : 0); } while (pid < 0 && errno == EINTR); if (pid) { if (pid < 0) { ERROR(pdlv->ls, "%s: %s (PID %d) failed: (%d) %s", PD_LOG_PREFIX, "waitpid() for lvm2 cmd", pdlv->cmd_pid, errno, _strerror_r(errno, data)); goto out; } DEBUGLOG(pdlv->ls, "%s: %s", PD_LOG_PREFIX, "child exited"); break; } } /* while(1) */ DEBUGLOG(pdlv->ls, "%s: %s", PD_LOG_PREFIX, "about to collect remaining lines"); if (fds[0].fd >= 0) while (read_single_line(data, 0)) { assert(r > 0); INFO(pdlv->ls, "%s: PID %d: %s: %s", LVM2_LOG_PREFIX, pdlv->cmd_pid, "STDOUT", data->line); } if (fds[1].fd >= 0) while (read_single_line(data, 1)) { assert(r > 0); WARN(pdlv->ls, "%s: PID %d: %s: %s", LVM2_LOG_PREFIX, pdlv->cmd_pid, "STDERR", data->line); } if (WIFEXITED(ch_stat)) { cmd_state.retcode = WEXITSTATUS(ch_stat); if (cmd_state.retcode) ERROR(pdlv->ls, "%s: %s (PID %d) %s (retcode: %d)", PD_LOG_PREFIX, "lvm2 cmd", pdlv->cmd_pid, "failed", cmd_state.retcode); else INFO(pdlv->ls, "%s: %s (PID %d) %s", PD_LOG_PREFIX, "lvm2 cmd", pdlv->cmd_pid, "finished successfully"); } else if (WIFSIGNALED(ch_stat)) { ERROR(pdlv->ls, "%s: %s (PID %d) %s (%d)", PD_LOG_PREFIX, "lvm2 cmd", pdlv->cmd_pid, "got terminated by signal", WTERMSIG(ch_stat)); cmd_state.signal = WTERMSIG(ch_stat); } err = 0; out: if (!err) pdlv_set_cmd_state(pdlv, &cmd_state); return err; } static void debug_print(struct lvmpolld_state *ls, const char * const* ptr) { const char * const* tmp = ptr; if (!tmp) return; while (*tmp) { DEBUGLOG(ls, "%s: %s", PD_LOG_PREFIX, *tmp); tmp++; } } static void *fork_and_poll(void *args) { int outfd, errfd, state; struct lvmpolld_thread_data *data; pid_t r; int error = 1; struct lvmpolld_lv *pdlv = (struct lvmpolld_lv *) args; struct lvmpolld_state *ls = pdlv->ls; pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &state); data = lvmpolld_thread_data_constructor(pdlv); pthread_setspecific(key, data); pthread_setcancelstate(state, &state); if (!data) { ERROR(ls, "%s: %s", PD_LOG_PREFIX, "Failed to initialize per-thread data"); goto err; } DEBUGLOG(ls, "%s: %s", PD_LOG_PREFIX, "cmd line arguments:"); debug_print(ls, pdlv->cmdargv); DEBUGLOG(ls, "%s: %s", PD_LOG_PREFIX, "---end---"); DEBUGLOG(ls, "%s: %s", PD_LOG_PREFIX, "cmd environment variables:"); debug_print(ls, pdlv->cmdenvp); DEBUGLOG(ls, "%s: %s", PD_LOG_PREFIX, "---end---"); outfd = data->outpipe[1]; errfd = data->errpipe[1]; r = fork(); if (!r) { /* child */ /* !!! Do not touch any posix thread primitives !!! 
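(After fork() in a multi-threaded process, only async-signal-safe functions may safely be called before exec; mutexes and thread keys inherited from the parent may be left in an inconsistent state.)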
*/ if ((dup2(outfd, STDOUT_FILENO ) != STDOUT_FILENO) || (dup2(errfd, STDERR_FILENO ) != STDERR_FILENO)) _exit(LVMPD_RET_DUP_FAILED); execve(*(pdlv->cmdargv), (char *const *)pdlv->cmdargv, (char *const *)pdlv->cmdenvp); _exit(LVMPD_RET_EXC_FAILED); } else { /* parent */ if (r == -1) { ERROR(ls, "%s: %s: (%d) %s", PD_LOG_PREFIX, "fork failed", errno, _strerror_r(errno, data)); goto err; } INFO(ls, "%s: LVM2 cmd \"%s\" (PID: %d)", PD_LOG_PREFIX, *(pdlv->cmdargv), r); pdlv->cmd_pid = r; /* failure to close write end of any pipe will result in broken polling */ if (close(data->outpipe[1])) { ERROR(ls, "%s: %s: (%d) %s", PD_LOG_PREFIX, "failed to close write end of pipe", errno, _strerror_r(errno, data)); goto err; } data->outpipe[1] = -1; if (close(data->errpipe[1])) { ERROR(ls, "%s: %s: (%d) %s", PD_LOG_PREFIX, "failed to close write end of err pipe", errno, _strerror_r(errno, data)); goto err; } data->errpipe[1] = -1; error = poll_for_output(pdlv, data); DEBUGLOG(ls, "%s: %s", PD_LOG_PREFIX, "polling for lvpoll output has finished"); } err: r = 0; pdst_lock(pdlv->pdst); if (error) { /* last reader is responsible for pdlv cleanup */ r = pdlv->cmd_pid; pdlv_set_error(pdlv, 1); } pdlv_set_polling_finished(pdlv, 1); if (data) data->pdlv = NULL; pdst_locked_dec(pdlv->pdst); pdst_unlock(pdlv->pdst); pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &state); lvmpolld_thread_data_destroy(data); pthread_setspecific(key, NULL); pthread_setcancelstate(state, &state); update_idle_state(ls); /* * This is unfortunate case where we * know nothing about state of lvm cmd and * (eventually) ongoing progress. * * harvest zombies */ if (r) while(waitpid(r, NULL, 0) < 0 && errno == EINTR); return NULL; } static response progress_info(client_handle h, struct lvmpolld_state *ls, request req) { char *id; struct lvmpolld_lv *pdlv; struct lvmpolld_store *pdst; struct lvmpolld_lv_state st; response r; const char *lvid = daemon_request_str(req, LVMPD_PARM_LVID, NULL); const char *sysdir = daemon_request_str(req, LVMPD_PARM_SYSDIR, NULL); unsigned abort_polling = daemon_request_int(req, LVMPD_PARM_ABORT, 0); if (!lvid) return reply(LVMPD_RESP_FAILED, REASON_MISSING_LVID); id = construct_id(sysdir, lvid); if (!id) { ERROR(ls, "%s: %s", PD_LOG_PREFIX, "progress_info request failed to construct ID."); return reply(LVMPD_RESP_FAILED, REASON_ENOMEM); } DEBUGLOG(ls, "%s: %s: %s", PD_LOG_PREFIX, "ID", id); pdst = abort_polling ? ls->id_to_pdlv_abort : ls->id_to_pdlv_poll; pdst_lock(pdst); pdlv = pdst_locked_lookup(pdst, id); if (pdlv) { /* * with store lock held, I'm the only reader accessing the pdlv */ st = pdlv_get_status(pdlv); if (st.error || st.polling_finished) { INFO(ls, "%s: %s %s", PD_LOG_PREFIX, "Polling finished. Removing related data structure for LV", lvid); pdst_locked_remove(pdst, id); pdlv_destroy(pdlv); } } /* pdlv must not be dereferenced from now on */ pdst_unlock(pdst); dm_free(id); if (pdlv) { if (st.error) return reply(LVMPD_RESP_FAILED, REASON_POLLING_FAILED); if (st.polling_finished) r = daemon_reply_simple(LVMPD_RESP_FINISHED, "reason = %s", st.cmd_state.signal ? 
LVMPD_REAS_SIGNAL : LVMPD_REAS_RETCODE, LVMPD_PARM_VALUE " = " FMTd64, (int64_t)(st.cmd_state.signal ?: st.cmd_state.retcode), NULL); else r = daemon_reply_simple(LVMPD_RESP_IN_PROGRESS, NULL); } else r = daemon_reply_simple(LVMPD_RESP_NOT_FOUND, NULL); return r; } static struct lvmpolld_lv *construct_pdlv(request req, struct lvmpolld_state *ls, struct lvmpolld_store *pdst, const char *interval, const char *id, const char *vgname, const char *lvname, const char *sysdir, enum poll_type type, unsigned abort_polling, unsigned uinterval) { const char **cmdargv, **cmdenvp; struct lvmpolld_lv *pdlv; unsigned handle_missing_pvs = daemon_request_int(req, LVMPD_PARM_HANDLE_MISSING_PVS, 0); pdlv = pdlv_create(ls, id, vgname, lvname, sysdir, type, interval, uinterval, pdst); if (!pdlv) { ERROR(ls, "%s: %s", PD_LOG_PREFIX, "failed to create internal LV data structure."); return NULL; } cmdargv = cmdargv_ctr(pdlv, pdlv->ls->lvm_binary, abort_polling, handle_missing_pvs); if (!cmdargv) { pdlv_destroy(pdlv); ERROR(ls, "%s: %s", PD_LOG_PREFIX, "failed to construct cmd arguments for lvpoll command"); return NULL; } pdlv->cmdargv = cmdargv; cmdenvp = cmdenvp_ctr(pdlv); if (!cmdenvp) { pdlv_destroy(pdlv); ERROR(ls, "%s: %s", PD_LOG_PREFIX, "failed to construct cmd environment for lvpoll command"); return NULL; } pdlv->cmdenvp = cmdenvp; return pdlv; } static int spawn_detached_thread(struct lvmpolld_lv *pdlv) { int r; pthread_attr_t attr; if (pthread_attr_init(&attr) != 0) return 0; if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0) return 0; r = pthread_create(&pdlv->tid, &attr, fork_and_poll, (void *)pdlv); if (pthread_attr_destroy(&attr) != 0) return 0; return !r; } static response poll_init(client_handle h, struct lvmpolld_state *ls, request req, enum poll_type type) { char *id; struct lvmpolld_lv *pdlv; struct lvmpolld_store *pdst; unsigned uinterval; const char *interval = daemon_request_str(req, LVMPD_PARM_INTERVAL, NULL); const char *lvid = daemon_request_str(req, LVMPD_PARM_LVID, NULL); const char *lvname = daemon_request_str(req, LVMPD_PARM_LVNAME, NULL); const char *vgname = daemon_request_str(req, LVMPD_PARM_VGNAME, NULL); const char *sysdir = daemon_request_str(req, LVMPD_PARM_SYSDIR, NULL); unsigned abort_polling = daemon_request_int(req, LVMPD_PARM_ABORT, 0); assert(type < POLL_TYPE_MAX); if (abort_polling && type != PVMOVE) return reply(LVMPD_RESP_EINVAL, REASON_ILLEGAL_ABORT_REQUEST); if (!interval || strpbrk(interval, "-") || sscanf(interval, "%u", &uinterval) != 1) return reply(LVMPD_RESP_EINVAL, REASON_INVALID_INTERVAL); if (!lvname) return reply(LVMPD_RESP_FAILED, REASON_MISSING_LVNAME); if (!lvid) return reply(LVMPD_RESP_FAILED, REASON_MISSING_LVID); if (!vgname) return reply(LVMPD_RESP_FAILED, REASON_MISSING_VGNAME); id = construct_id(sysdir, lvid); if (!id) { ERROR(ls, "%s: %s", PD_LOG_PREFIX, "poll_init request failed to construct ID."); return reply(LVMPD_RESP_FAILED, REASON_ENOMEM); } DEBUGLOG(ls, "%s: %s=%s", PD_LOG_PREFIX, "ID", id); pdst = abort_polling ? ls->id_to_pdlv_abort : ls->id_to_pdlv_poll; pdst_lock(pdst); pdlv = pdst_locked_lookup(pdst, id); if (pdlv && pdlv_get_polling_finished(pdlv)) { WARN(ls, "%s: %s %s", PD_LOG_PREFIX, "Force removal of uncollected info for LV", lvid); /* * lvmpolld has to remove uncollected results in this case. * otherwise it would have to refuse request for new polling * lv with same id. 
*/ pdst_locked_remove(pdst, id); pdlv_destroy(pdlv); pdlv = NULL; } if (pdlv) { if (!pdlv_is_type(pdlv, type)) { pdst_unlock(pdst); ERROR(ls, "%s: %s '%s': expected: %s, requested: %s", PD_LOG_PREFIX, "poll operation type mismatch on LV identified by", id, polling_op(pdlv_get_type(pdlv)), polling_op(type)); dm_free(id); return reply(LVMPD_RESP_EINVAL, REASON_DIFFERENT_OPERATION_IN_PROGRESS); } pdlv->init_rq_count++; /* safe. protected by store lock */ } else { pdlv = construct_pdlv(req, ls, pdst, interval, id, vgname, lvname, sysdir, type, abort_polling, 2 * uinterval); if (!pdlv) { pdst_unlock(pdst); dm_free(id); return reply(LVMPD_RESP_FAILED, REASON_ENOMEM); } if (!pdst_locked_insert(pdst, id, pdlv)) { pdlv_destroy(pdlv); pdst_unlock(pdst); ERROR(ls, "%s: %s", PD_LOG_PREFIX, "couldn't store internal LV data structure"); dm_free(id); return reply(LVMPD_RESP_FAILED, REASON_ENOMEM); } if (!spawn_detached_thread(pdlv)) { ERROR(ls, "%s: %s", PD_LOG_PREFIX, "failed to spawn detached monitoring thread"); pdst_locked_remove(pdst, id); pdlv_destroy(pdlv); pdst_unlock(pdst); dm_free(id); return reply(LVMPD_RESP_FAILED, REASON_ENOMEM); } pdst_locked_inc(pdst); if (ls->idle) ls->idle->is_idle = 0; } pdst_unlock(pdst); dm_free(id); return daemon_reply_simple(LVMPD_RESP_OK, NULL); } static response dump_state(client_handle h, struct lvmpolld_state *ls, request r) { response res = { 0 }; struct buffer *b = &res.buffer; buffer_init(b); _lvmpolld_global_lock(ls); buffer_append(b, "# Registered polling operations\n\n"); buffer_append(b, "poll {\n"); pdst_locked_dump(ls->id_to_pdlv_poll, b); buffer_append(b, "}\n\n"); buffer_append(b, "# Registered abort operations\n\n"); buffer_append(b, "abort {\n"); pdst_locked_dump(ls->id_to_pdlv_abort, b); buffer_append(b, "}"); _lvmpolld_global_unlock(ls); return res; } static response _handler(struct daemon_state s, client_handle h, request r) { struct lvmpolld_state *ls = s.private; const char *rq = daemon_request_str(r, "request", "NONE"); if (!strcmp(rq, LVMPD_REQ_PVMOVE)) return poll_init(h, ls, r, PVMOVE); else if (!strcmp(rq, LVMPD_REQ_CONVERT)) return poll_init(h, ls, r, CONVERT); else if (!strcmp(rq, LVMPD_REQ_MERGE)) return poll_init(h, ls, r, MERGE); else if (!strcmp(rq, LVMPD_REQ_MERGE_THIN)) return poll_init(h, ls, r, MERGE_THIN); else if (!strcmp(rq, LVMPD_REQ_PROGRESS)) return progress_info(h, ls, r); else if (!strcmp(rq, LVMPD_REQ_DUMP)) return dump_state(h, ls, r); else return reply(LVMPD_RESP_EINVAL, REASON_REQ_NOT_IMPLEMENTED); } static int process_timeout_arg(const char *str, unsigned *max_timeouts) { char *endptr; unsigned long l; errno = 0; l = strtoul(str, &endptr, 10); if (errno || *endptr || l >= UINT_MAX) return 0; *max_timeouts = (unsigned) l; return 1; } /* Client functionality */ typedef int (*action_fn_t) (void *args); struct log_line_baton { const char *prefix; }; daemon_handle _lvmpolld = { .error = 0 }; static daemon_handle _lvmpolld_open(const char *socket) { daemon_info lvmpolld_info = { .path = "lvmpolld", .socket = socket ?: DEFAULT_RUN_DIR "/lvmpolld.socket", .protocol = LVMPOLLD_PROTOCOL, .protocol_version = LVMPOLLD_PROTOCOL_VERSION }; return daemon_open(lvmpolld_info); } static void _log_line(const char *line, void *baton) { struct log_line_baton *b = baton; fprintf(stdout, "%s%s\n", b->prefix, line); } static int printout_raw_response(const char *prefix, const char *msg) { struct log_line_baton b = { .prefix = prefix }; char *buf; char *pos; buf = dm_strdup(msg); pos = buf; if (!buf) return 0; while (pos) { char *next = 
strchr(pos, '\n'); if (next) *next = 0; _log_line(pos, &b); pos = next ? next + 1 : 0; } dm_free(buf); return 1; } /* place all action implementations below */ static int action_dump(void *args __attribute__((unused))) { daemon_request req; daemon_reply repl; int r = 0; req = daemon_request_make(LVMPD_REQ_DUMP); if (!req.cft) { fprintf(stderr, "Failed to create lvmpolld " LVMPD_REQ_DUMP " request.\n"); goto out_req; } repl = daemon_send(_lvmpolld, req); if (repl.error) { fprintf(stderr, "Failed to send a request or receive response.\n"); goto out_rep; } /* * This is dumb copy & paste from libdaemon log routines. */ if (!printout_raw_response(" ", repl.buffer.mem)) { fprintf(stderr, "Failed to print out the response.\n"); goto out_rep; } r = 1; out_rep: daemon_reply_destroy(repl); out_req: daemon_request_destroy(req); return r; } enum action_index { ACTION_DUMP = 0, ACTION_MAX /* keep at the end */ }; static const action_fn_t actions[ACTION_MAX] = { [ACTION_DUMP] = action_dump }; static int _make_action(enum action_index idx, void *args) { return idx < ACTION_MAX ? actions[idx](args) : 0; } static int _lvmpolld_client(const char *socket, unsigned action) { int r; _lvmpolld = _lvmpolld_open(socket); if (_lvmpolld.error || _lvmpolld.socket_fd < 0) { fprintf(stderr, "Failed to establish connection with lvmpolld.\n"); return 0; } r = _make_action(action, NULL); daemon_close(_lvmpolld); return r ? EXIT_SUCCESS : EXIT_FAILURE; } static int action_idx = ACTION_MAX; static struct option long_options[] = { /* Have actions always at the beginning of the array. */ {"dump", no_argument, &action_idx, ACTION_DUMP }, /* or an option_index ? */ /* other options */ {"binary", required_argument, 0, 'B' }, {"foreground", no_argument, 0, 'f' }, {"help", no_argument, 0, 'h' }, {"log", required_argument, 0, 'l' }, {"pidfile", required_argument, 0, 'p' }, {"socket", required_argument, 0, 's' }, {"timeout", required_argument, 0, 't' }, {"version", no_argument, 0, 'V' }, {0, 0, 0, 0 } }; int main(int argc, char *argv[]) { int opt; int option_index = 0; int client = 0, server = 0; unsigned action = ACTION_MAX; struct timeval timeout; daemon_idle di = { .ptimeout = &timeout }; struct lvmpolld_state ls = { .log_config = "" }; daemon_state s = { .daemon_fini = _fini, .daemon_init = _init, .handler = _handler, .name = "lvmpolld", .pidfile = getenv("LVM_LVMPOLLD_PIDFILE") ?: LVMPOLLD_PIDFILE, .private = &ls, .protocol = LVMPOLLD_PROTOCOL, .protocol_version = LVMPOLLD_PROTOCOL_VERSION, .socket_path = getenv("LVM_LVMPOLLD_SOCKET") ?: LVMPOLLD_SOCKET, }; while ((opt = getopt_long(argc, argv, "fhVl:p:s:B:t:", long_options, &option_index)) != -1) { switch (opt) { case 0 : if (action < ACTION_MAX) { fprintf(stderr, "Can't perform more actions. 
Action already requested: %s\n", long_options[action].name); _usage(argv[0], stderr); exit(EXIT_FAILURE); } action = action_idx; client = 1; break; case '?': _usage(argv[0], stderr); exit(EXIT_FAILURE); case 'B': /* --binary */ ls.lvm_binary = optarg; server = 1; break; case 'V': /* --version */ printf("lvmpolld version: " LVM_VERSION "\n"); exit(EXIT_SUCCESS); case 'f': /* --foreground */ s.foreground = 1; server = 1; break; case 'h': /* --help */ _usage(argv[0], stdout); exit(EXIT_SUCCESS); case 'l': /* --log */ ls.log_config = optarg; server = 1; break; case 'p': /* --pidfile */ s.pidfile = optarg; server = 1; break; case 's': /* --socket */ s.socket_path = optarg; break; case 't': /* --timeout in seconds */ if (!process_timeout_arg(optarg, &di.max_timeouts)) { fprintf(stderr, "Invalid value of timeout parameter.\n"); exit(EXIT_FAILURE); } /* 0 equals to wait indefinitely */ if (di.max_timeouts) s.idle = ls.idle = &di; server = 1; break; } } if (client && server) { fprintf(stderr, "Invalid combination of client and server parameters.\n\n"); _usage(argv[0], stdout); exit(EXIT_FAILURE); } if (client) return _lvmpolld_client(s.socket_path, action); /* Server */ daemon_start(s); return EXIT_SUCCESS; } LVM2.2.02.176/daemons/lvmpolld/lvmpolld-data-utils.c0000644000000000000120000002365713176752421020662 0ustar rootwheel/* * Copyright (C) 2014-2015 Red Hat, Inc. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "lvmpolld-common.h" #include "config-util.h" #include <fcntl.h> /* fcntl()/FD_CLOEXEC used below */ #include <signal.h> /* kill()/SIGTERM used below */ static const char LVM_SYSTEM_DIR[] = "LVM_SYSTEM_DIR="; static char *_construct_full_lvname(const char *vgname, const char *lvname) { char *name; size_t l; l = strlen(vgname) + strlen(lvname) + 2; /* vg/lv and \0 */ name = (char *) dm_malloc(l * sizeof(char)); if (!name) return NULL; if (dm_snprintf(name, l, "%s/%s", vgname, lvname) < 0) { dm_free(name); name = NULL; } return name; } static char *_construct_lvm_system_dir_env(const char *sysdir) { /* * Store either "LVM_SYSTEM_DIR=/path/to..." * - or - * just single char to store NULL byte */ size_t l = sysdir ? strlen(sysdir) + 16 : 1; char *env = (char *) dm_malloc(l * sizeof(char)); if (!env) return NULL; *env = '\0'; if (sysdir && dm_snprintf(env, l, "%s%s", LVM_SYSTEM_DIR, sysdir) < 0) { dm_free(env); env = NULL; } return env; } static const char *_get_lvid(const char *lvmpolld_id, const char *sysdir) { return lvmpolld_id ? (lvmpolld_id + (sysdir ? strlen(sysdir) : 0)) : NULL; } char *construct_id(const char *sysdir, const char *uuid) { char *id; int r; size_t l; l = strlen(uuid) + (sysdir ? strlen(sysdir) : 0) + 1; id = (char *) dm_malloc(l * sizeof(char)); if (!id) return NULL; r = sysdir ?
dm_snprintf(id, l, "%s%s", sysdir, uuid) : dm_snprintf(id, l, "%s", uuid); if (r < 0) { dm_free(id); id = NULL; } return id; } struct lvmpolld_lv *pdlv_create(struct lvmpolld_state *ls, const char *id, const char *vgname, const char *lvname, const char *sysdir, enum poll_type type, const char *sinterval, unsigned pdtimeout, struct lvmpolld_store *pdst) { char *lvmpolld_id = dm_strdup(id), /* copy */ *full_lvname = _construct_full_lvname(vgname, lvname), /* copy */ *lvm_system_dir_env = _construct_lvm_system_dir_env(sysdir); /* copy */ struct lvmpolld_lv tmp = { .ls = ls, .type = type, .lvmpolld_id = lvmpolld_id, .lvid = _get_lvid(lvmpolld_id, sysdir), .lvname = full_lvname, .lvm_system_dir_env = lvm_system_dir_env, .sinterval = dm_strdup(sinterval), /* copy */ .pdtimeout = pdtimeout < MIN_POLLING_TIMEOUT ? MIN_POLLING_TIMEOUT : pdtimeout, .cmd_state = { .retcode = -1, .signal = 0 }, .pdst = pdst, .init_rq_count = 1 }, *pdlv = (struct lvmpolld_lv *) dm_malloc(sizeof(struct lvmpolld_lv)); if (!pdlv || !tmp.lvid || !tmp.lvname || !tmp.lvm_system_dir_env || !tmp.sinterval) goto err; memcpy(pdlv, &tmp, sizeof(*pdlv)); if (pthread_mutex_init(&pdlv->lock, NULL)) goto err; return pdlv; err: dm_free((void *)full_lvname); dm_free((void *)lvmpolld_id); dm_free((void *)lvm_system_dir_env); dm_free((void *)tmp.sinterval); dm_free((void *)pdlv); return NULL; } void pdlv_destroy(struct lvmpolld_lv *pdlv) { dm_free((void *)pdlv->lvmpolld_id); dm_free((void *)pdlv->lvname); dm_free((void *)pdlv->sinterval); dm_free((void *)pdlv->lvm_system_dir_env); dm_free((void *)pdlv->cmdargv); dm_free((void *)pdlv->cmdenvp); pthread_mutex_destroy(&pdlv->lock); dm_free((void *)pdlv); } unsigned pdlv_get_polling_finished(struct lvmpolld_lv *pdlv) { unsigned ret; pdlv_lock(pdlv); ret = pdlv->polling_finished; pdlv_unlock(pdlv); return ret; } struct lvmpolld_lv_state pdlv_get_status(struct lvmpolld_lv *pdlv) { struct lvmpolld_lv_state r; pdlv_lock(pdlv); r.error = pdlv_locked_error(pdlv); r.polling_finished = pdlv_locked_polling_finished(pdlv); r.cmd_state = pdlv_locked_cmd_state(pdlv); pdlv_unlock(pdlv); return r; } void pdlv_set_cmd_state(struct lvmpolld_lv *pdlv, const struct lvmpolld_cmd_stat *cmd_state) { pdlv_lock(pdlv); pdlv->cmd_state = *cmd_state; pdlv_unlock(pdlv); } void pdlv_set_error(struct lvmpolld_lv *pdlv, unsigned error) { pdlv_lock(pdlv); pdlv->error = error; pdlv_unlock(pdlv); } void pdlv_set_polling_finished(struct lvmpolld_lv *pdlv, unsigned finished) { pdlv_lock(pdlv); pdlv->polling_finished = finished; pdlv_unlock(pdlv); } struct lvmpolld_store *pdst_init(const char *name) { struct lvmpolld_store *pdst = (struct lvmpolld_store *) dm_malloc(sizeof(struct lvmpolld_store)); if (!pdst) return NULL; pdst->store = dm_hash_create(32); if (!pdst->store) goto err_hash; if (pthread_mutex_init(&pdst->lock, NULL)) goto err_mutex; pdst->name = name; pdst->active_polling_count = 0; return pdst; err_mutex: dm_hash_destroy(pdst->store); err_hash: dm_free(pdst); return NULL; } void pdst_destroy(struct lvmpolld_store *pdst) { if (!pdst) return; dm_hash_destroy(pdst->store); pthread_mutex_destroy(&pdst->lock); dm_free(pdst); } void pdst_locked_lock_all_pdlvs(const struct lvmpolld_store *pdst) { struct dm_hash_node *n; dm_hash_iterate(n, pdst->store) pdlv_lock(dm_hash_get_data(pdst->store, n)); } void pdst_locked_unlock_all_pdlvs(const struct lvmpolld_store *pdst) { struct dm_hash_node *n; dm_hash_iterate(n, pdst->store) pdlv_unlock(dm_hash_get_data(pdst->store, n)); } static void _pdlv_locked_dump(struct buffer *buff, 
const struct lvmpolld_lv *pdlv) { char tmp[1024]; const struct lvmpolld_cmd_stat *cmd_state = &pdlv->cmd_state; /* pdlv-section { */ if (dm_snprintf(tmp, sizeof(tmp), "\t%s {\n", pdlv->lvmpolld_id) > 0) buffer_append(buff, tmp); if (dm_snprintf(tmp, sizeof(tmp), "\t\tlvid=\"%s\"\n", pdlv->lvid) > 0) buffer_append(buff, tmp); if (dm_snprintf(tmp, sizeof(tmp), "\t\ttype=\"%s\"\n", polling_op(pdlv->type)) > 0) buffer_append(buff, tmp); if (dm_snprintf(tmp, sizeof(tmp), "\t\tlvname=\"%s\"\n", pdlv->lvname) > 0) buffer_append(buff, tmp); if (dm_snprintf(tmp, sizeof(tmp), "\t\tlvmpolld_internal_timeout=%d\n", pdlv->pdtimeout) > 0) buffer_append(buff, tmp); if (dm_snprintf(tmp, sizeof(tmp), "\t\tlvm_command_interval=\"%s\"\n", pdlv->sinterval ?: "") > 0) buffer_append(buff, tmp); if (dm_snprintf(tmp, sizeof(tmp), "\t\t%s\"%s\"\n", LVM_SYSTEM_DIR, (*pdlv->lvm_system_dir_env ? (pdlv->lvm_system_dir_env + (sizeof(LVM_SYSTEM_DIR) - 1)) : "")) > 0) buffer_append(buff, tmp); if (dm_snprintf(tmp, sizeof(tmp), "\t\tlvm_command_pid=%d\n", pdlv->cmd_pid) > 0) buffer_append(buff, tmp); if (dm_snprintf(tmp, sizeof(tmp), "\t\tpolling_finished=%d\n", pdlv->polling_finished) > 0) buffer_append(buff, tmp); if (dm_snprintf(tmp, sizeof(tmp), "\t\terror_occured=%d\n", pdlv->error) > 0) buffer_append(buff, tmp); if (dm_snprintf(tmp, sizeof(tmp), "\t\tinit_requests_count=%d\n", pdlv->init_rq_count) > 0) buffer_append(buff, tmp); /* lvm_commmand-section { */ buffer_append(buff, "\t\tlvm_command {\n"); if (cmd_state->retcode == -1 && !cmd_state->signal) buffer_append(buff, "\t\t\tstate=\"" LVMPD_RESP_IN_PROGRESS "\"\n"); else { buffer_append(buff, "\t\t\tstate=\"" LVMPD_RESP_FINISHED "\"\n"); if (dm_snprintf(tmp, sizeof(tmp), "\t\t\treason=\"%s\"\n\t\t\tvalue=%d\n", (cmd_state->signal ? 
LVMPD_REAS_SIGNAL : LVMPD_REAS_RETCODE), (cmd_state->signal ?: cmd_state->retcode)) > 0) buffer_append(buff, tmp); } buffer_append(buff, "\t\t}\n"); /* } lvm_commmand-section */ buffer_append(buff, "\t}\n"); /* } pdlv-section */ } void pdst_locked_dump(const struct lvmpolld_store *pdst, struct buffer *buff) { struct dm_hash_node *n; dm_hash_iterate(n, pdst->store) _pdlv_locked_dump(buff, dm_hash_get_data(pdst->store, n)); } void pdst_locked_send_cancel(const struct lvmpolld_store *pdst) { struct lvmpolld_lv *pdlv; struct dm_hash_node *n; dm_hash_iterate(n, pdst->store) { pdlv = dm_hash_get_data(pdst->store, n); if (!pdlv_locked_polling_finished(pdlv)) pthread_cancel(pdlv->tid); } } void pdst_locked_destroy_all_pdlvs(const struct lvmpolld_store *pdst) { struct dm_hash_node *n; dm_hash_iterate(n, pdst->store) pdlv_destroy(dm_hash_get_data(pdst->store, n)); } struct lvmpolld_thread_data *lvmpolld_thread_data_constructor(struct lvmpolld_lv *pdlv) { struct lvmpolld_thread_data *data = (struct lvmpolld_thread_data *) dm_malloc(sizeof(struct lvmpolld_thread_data)); if (!data) return NULL; data->pdlv = NULL; data->line = NULL; data->line_size = 0; data->fout = data->ferr = NULL; data->outpipe[0] = data->outpipe[1] = data->errpipe[0] = data->errpipe[1] = -1; if (pipe(data->outpipe) || pipe(data->errpipe)) { lvmpolld_thread_data_destroy(data); return NULL; } if (fcntl(data->outpipe[0], F_SETFD, FD_CLOEXEC) || fcntl(data->outpipe[1], F_SETFD, FD_CLOEXEC) || fcntl(data->errpipe[0], F_SETFD, FD_CLOEXEC) || fcntl(data->errpipe[1], F_SETFD, FD_CLOEXEC)) { lvmpolld_thread_data_destroy(data); return NULL; } data->pdlv = pdlv; return data; } void lvmpolld_thread_data_destroy(void *thread_private) { struct lvmpolld_thread_data *data = (struct lvmpolld_thread_data *) thread_private; if (!data) return; if (data->pdlv) { pdst_lock(data->pdlv->pdst); /* * FIXME: skip this step if lvmpolld is activated * by systemd. */ if (!pdlv_get_polling_finished(data->pdlv)) kill(data->pdlv->cmd_pid, SIGTERM); pdlv_set_polling_finished(data->pdlv, 1); pdst_locked_dec(data->pdlv->pdst); pdst_unlock(data->pdlv->pdst); } /* may get reallocated in getline(). dm_free must not be used */ free(data->line); if (data->fout && !fclose(data->fout)) data->outpipe[0] = -1; if (data->ferr && !fclose(data->ferr)) data->errpipe[0] = -1; if (data->outpipe[0] >= 0) (void) close(data->outpipe[0]); if (data->outpipe[1] >= 0) (void) close(data->outpipe[1]); if (data->errpipe[0] >= 0) (void) close(data->errpipe[0]); if (data->errpipe[1] >= 0) (void) close(data->errpipe[1]); dm_free(data); } LVM2.2.02.176/daemons/lvmpolld/lvmpolld-data-utils.h0000644000000000000120000001303513176752421020654 0ustar rootwheel/* * Copyright (C) 2014-2015 Red Hat, Inc. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. 
* * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _LVM_LVMPOLLD_DATA_UTILS_H #define _LVM_LVMPOLLD_DATA_UTILS_H #include <pthread.h> /* pthread_mutex_t / pthread_t members below */ struct buffer; struct lvmpolld_state; enum poll_type { PVMOVE = 0, CONVERT, MERGE, MERGE_THIN, POLL_TYPE_MAX }; struct lvmpolld_cmd_stat { int retcode; int signal; }; struct lvmpolld_store { pthread_mutex_t lock; void *store; const char *name; unsigned active_polling_count; }; struct lvmpolld_lv { /* * accessing following vars doesn't * require struct lvmpolld_lv lock */ struct lvmpolld_state *const ls; const enum poll_type type; const char *const lvid; const char *const lvmpolld_id; const char *const lvname; /* full vg/lv name */ const unsigned pdtimeout; /* in seconds */ const char *const sinterval; const char *const lvm_system_dir_env; struct lvmpolld_store *const pdst; const char *const *cmdargv; const char *const *cmdenvp; /* only used by write */ pid_t cmd_pid; pthread_t tid; pthread_mutex_t lock; /* block of shared variables protected by lock */ struct lvmpolld_cmd_stat cmd_state; unsigned init_rq_count; /* for debugging purposes only */ unsigned polling_finished:1; /* no more updates */ unsigned error:1; /* unrecoverable error occurred in lvmpolld */ }; typedef void (*lvmpolld_parse_output_fn_t) (struct lvmpolld_lv *pdlv, const char *line); /* TODO: replace with configuration option */ #define MIN_POLLING_TIMEOUT 60 struct lvmpolld_lv_state { unsigned error:1; unsigned polling_finished:1; struct lvmpolld_cmd_stat cmd_state; }; struct lvmpolld_thread_data { char *line; size_t line_size; int outpipe[2]; int errpipe[2]; FILE *fout; FILE *ferr; char buf[1024]; struct lvmpolld_lv *pdlv; }; char *construct_id(const char *sysdir, const char *lvid); /* LVMPOLLD_LV_T section */ /* only call with appropriate struct lvmpolld_store lock held */ struct lvmpolld_lv *pdlv_create(struct lvmpolld_state *ls, const char *id, const char *vgname, const char *lvname, const char *sysdir, enum poll_type type, const char *sinterval, unsigned pdtimeout, struct lvmpolld_store *pdst); /* only call with appropriate struct lvmpolld_store lock held */ void pdlv_destroy(struct lvmpolld_lv *pdlv); static inline void pdlv_lock(struct lvmpolld_lv *pdlv) { pthread_mutex_lock(&pdlv->lock); } static inline void pdlv_unlock(struct lvmpolld_lv *pdlv) { pthread_mutex_unlock(&pdlv->lock); } /* * no struct lvmpolld_lv lock required section */ static inline int pdlv_is_type(const struct lvmpolld_lv *pdlv, enum poll_type type) { return pdlv->type == type; } static inline unsigned pdlv_get_timeout(const struct lvmpolld_lv *pdlv) { return pdlv->pdtimeout; } static inline enum poll_type pdlv_get_type(const struct lvmpolld_lv *pdlv) { return pdlv->type; } unsigned pdlv_get_polling_finished(struct lvmpolld_lv *pdlv); struct lvmpolld_lv_state pdlv_get_status(struct lvmpolld_lv *pdlv); void pdlv_set_cmd_state(struct lvmpolld_lv *pdlv, const struct lvmpolld_cmd_stat *cmd_state); void pdlv_set_error(struct lvmpolld_lv *pdlv, unsigned error); void pdlv_set_polling_finished(struct lvmpolld_lv *pdlv, unsigned finished); /* * struct lvmpolld_lv lock required section */ static inline struct lvmpolld_cmd_stat pdlv_locked_cmd_state(const struct lvmpolld_lv *pdlv) { return pdlv->cmd_state; } static inline int pdlv_locked_polling_finished(const struct lvmpolld_lv *pdlv) { return pdlv->polling_finished; } static inline unsigned pdlv_locked_error(const
struct lvmpolld_lv *pdlv) { return pdlv->error; } /* struct lvmpolld_store manipulation routines */ struct lvmpolld_store *pdst_init(const char *name); void pdst_destroy(struct lvmpolld_store *pdst); void pdst_locked_dump(const struct lvmpolld_store *pdst, struct buffer *buff); void pdst_locked_lock_all_pdlvs(const struct lvmpolld_store *pdst); void pdst_locked_unlock_all_pdlvs(const struct lvmpolld_store *pdst); void pdst_locked_destroy_all_pdlvs(const struct lvmpolld_store *pdst); void pdst_locked_send_cancel(const struct lvmpolld_store *pdst); static inline void pdst_lock(struct lvmpolld_store *pdst) { pthread_mutex_lock(&pdst->lock); } static inline void pdst_unlock(struct lvmpolld_store *pdst) { pthread_mutex_unlock(&pdst->lock); } static inline void pdst_locked_inc(struct lvmpolld_store *pdst) { pdst->active_polling_count++; } static inline void pdst_locked_dec(struct lvmpolld_store *pdst) { pdst->active_polling_count--; } static inline unsigned pdst_locked_get_active_count(const struct lvmpolld_store *pdst) { return pdst->active_polling_count; } static inline int pdst_locked_insert(struct lvmpolld_store *pdst, const char *key, struct lvmpolld_lv *pdlv) { return dm_hash_insert(pdst->store, key, pdlv); } static inline struct lvmpolld_lv *pdst_locked_lookup(struct lvmpolld_store *pdst, const char *key) { return dm_hash_lookup(pdst->store, key); } static inline void pdst_locked_remove(struct lvmpolld_store *pdst, const char *key) { dm_hash_remove(pdst->store, key); } struct lvmpolld_thread_data *lvmpolld_thread_data_constructor(struct lvmpolld_lv *pdlv); void lvmpolld_thread_data_destroy(void *thread_private); #endif /* _LVM_LVMPOLLD_DATA_UTILS_H */ LVM2.2.02.176/daemons/lvmetad/0000755000000000000120000000000013176752421014406 5ustar rootwheelLVM2.2.02.176/daemons/lvmetad/Makefile.in0000644000000000000120000000375513176752421016465 0ustar rootwheel# # Copyright (C) 2011-2012 Red Hat, Inc. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU Lesser General Public License v.2.1. # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ SOURCES = lvmetad-core.c SOURCES2 = lvmetactl.c TARGETS = lvmetad lvmetactl .PHONY: install_lvmetad CFLOW_LIST = $(SOURCES) CFLOW_LIST_TARGET = $(LIB_NAME).cflow CFLOW_TARGET = lvmetad include $(top_builddir)/make.tmpl CFLAGS_lvmetactl.o += $(EXTRA_EXEC_CFLAGS) CFLAGS_lvmetad-core.o += $(EXTRA_EXEC_CFLAGS) INCLUDES += -I$(top_srcdir)/libdaemon/server LDFLAGS += -L$(top_builddir)/libdaemon/server $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS) LIBS += $(RT_LIBS) $(DAEMON_LIBS) -ldevmapper $(PTHREAD_LIBS) lvmetad: $(OBJECTS) $(top_builddir)/libdaemon/client/libdaemonclient.a \ $(top_builddir)/libdaemon/server/libdaemonserver.a $(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJECTS) -ldaemonserver $(LIBS) lvmetactl: lvmetactl.o $(top_builddir)/libdaemon/client/libdaemonclient.a \ $(top_builddir)/libdaemon/server/libdaemonserver.a $(CC) $(CFLAGS) $(LDFLAGS) -o $@ lvmetactl.o $(LIBS) CLEAN_TARGETS += lvmetactl.o # TODO: No idea. No idea how to test either. 
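# (For a rough manual check, the test.sh script in this directory starts
# lvmetad under valgrind in the foreground and then runs ./testclient
# against it.)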
#ifneq ("$(CFLOW_CMD)", "") #CFLOW_SOURCES = $(addprefix $(srcdir)/, $(SOURCES)) #-include $(top_builddir)/libdm/libdevmapper.cflow #-include $(top_builddir)/lib/liblvm-internal.cflow #-include $(top_builddir)/lib/liblvm2cmd.cflow #-include $(top_builddir)/daemons/dmeventd/$(LIB_NAME).cflow #-include $(top_builddir)/daemons/dmeventd/plugins/mirror/$(LIB_NAME)-lvm2mirror.cflow #endif install_lvmetad: lvmetad $(INSTALL_PROGRAM) -D $< $(sbindir)/$(\n"); printf("lvmetactl vg_lookup_uuid \n"); printf("lvmetactl pv_lookup_uuid \n"); printf("lvmetactl set_global_invalid 0|1\n"); printf("lvmetactl set_global_disable 0|1\n"); printf("lvmetactl set_vg_version \n"); printf("lvmetactl vg_lock_type \n"); return -1; } cmd = argv[1]; h = lvmetad_open(NULL); if (!strcmp(cmd, "dump")) { reply = daemon_send_simple(h, "dump", "token = %s", "skip", "pid = " FMTd64, (int64_t)getpid(), "cmd = %s", "lvmetactl", NULL); printf("%s\n", reply.buffer.mem); } else if (!strcmp(cmd, "pv_list")) { reply = daemon_send_simple(h, "pv_list", "token = %s", "skip", "pid = " FMTd64, (int64_t)getpid(), "cmd = %s", "lvmetactl", NULL); printf("%s\n", reply.buffer.mem); } else if (!strcmp(cmd, "vg_list")) { reply = daemon_send_simple(h, "vg_list", "token = %s", "skip", "pid = " FMTd64, (int64_t)getpid(), "cmd = %s", "lvmetactl", NULL); printf("%s\n", reply.buffer.mem); } else if (!strcmp(cmd, "get_global_info")) { reply = daemon_send_simple(h, "get_global_info", "token = %s", "skip", "pid = " FMTd64, (int64_t)getpid(), "cmd = %s", "lvmetactl", NULL); printf("%s\n", reply.buffer.mem); } else if (!strcmp(cmd, "set_global_invalid")) { if (argc < 3) { printf("set_global_invalid 0|1\n"); return -1; } val = atoi(argv[2]); reply = daemon_send_simple(h, "set_global_info", "global_invalid = " FMTd64, (int64_t) val, "token = %s", "skip", "pid = " FMTd64, (int64_t)getpid(), "cmd = %s", "lvmetactl", NULL); print_reply(reply); } else if (!strcmp(cmd, "set_global_disable")) { if (argc < 3) { printf("set_global_disable 0|1\n"); return -1; } val = atoi(argv[2]); reply = daemon_send_simple(h, "set_global_info", "global_disable = " FMTd64, (int64_t) val, "disable_reason = %s", LVMETAD_DISABLE_REASON_DIRECT, "token = %s", "skip", "pid = " FMTd64, (int64_t)getpid(), "cmd = %s", "lvmetactl", NULL); print_reply(reply); } else if (!strcmp(cmd, "set_vg_version")) { if (argc < 5) { printf("set_vg_version \n"); return -1; } uuid = argv[2]; name = argv[3]; ver = atoi(argv[4]); if ((strlen(uuid) == 1) && (uuid[0] == '-')) uuid = NULL; if ((strlen(name) == 1) && (name[0] == '-')) name = NULL; if (uuid && name) { reply = daemon_send_simple(h, "set_vg_info", "uuid = %s", uuid, "name = %s", name, "version = " FMTd64, (int64_t) ver, "token = %s", "skip", "pid = " FMTd64, (int64_t)getpid(), "cmd = %s", "lvmetactl", NULL); } else if (uuid) { reply = daemon_send_simple(h, "set_vg_info", "uuid = %s", uuid, "version = " FMTd64, (int64_t) ver, "token = %s", "skip", "pid = " FMTd64, (int64_t)getpid(), "cmd = %s", "lvmetactl", NULL); } else if (name) { reply = daemon_send_simple(h, "set_vg_info", "name = %s", name, "version = " FMTd64, (int64_t) ver, "token = %s", "skip", "pid = " FMTd64, (int64_t)getpid(), "cmd = %s", "lvmetactl", NULL); } else { printf("name or uuid required\n"); return -1; } print_reply(reply); } else if (!strcmp(cmd, "vg_lookup_name")) { if (argc < 3) { printf("vg_lookup_name \n"); return -1; } name = argv[2]; reply = daemon_send_simple(h, "vg_lookup", "name = %s", name, "token = %s", "skip", "pid = " FMTd64, (int64_t)getpid(), "cmd = %s", 
"lvmetactl", NULL); printf("%s\n", reply.buffer.mem); } else if (!strcmp(cmd, "vg_lookup_uuid")) { if (argc < 3) { printf("vg_lookup_uuid \n"); return -1; } uuid = argv[2]; reply = daemon_send_simple(h, "vg_lookup", "uuid = %s", uuid, "token = %s", "skip", "pid = " FMTd64, (int64_t)getpid(), "cmd = %s", "lvmetactl", NULL); printf("%s\n", reply.buffer.mem); } else if (!strcmp(cmd, "vg_lock_type")) { struct dm_config_node *metadata; const char *lock_type; if (argc < 3) { printf("vg_lock_type \n"); return -1; } uuid = argv[2]; reply = daemon_send_simple(h, "vg_lookup", "uuid = %s", uuid, "token = %s", "skip", "pid = " FMTd64, (int64_t)getpid(), "cmd = %s", "lvmetactl", NULL); /* printf("%s\n", reply.buffer.mem); */ metadata = dm_config_find_node(reply.cft->root, "metadata"); if (!metadata) { printf("no metadata\n"); goto out; } lock_type = dm_config_find_str(metadata, "metadata/lock_type", NULL); if (!lock_type) { printf("no lock_type\n"); goto out; } printf("lock_type %s\n", lock_type); } else if (!strcmp(cmd, "pv_lookup_uuid")) { if (argc < 3) { printf("pv_lookup_uuid \n"); return -1; } uuid = argv[2]; reply = daemon_send_simple(h, "pv_lookup", "uuid = %s", uuid, "token = %s", "skip", "pid = " FMTd64, (int64_t)getpid(), "cmd = %s", "lvmetactl", NULL); printf("%s\n", reply.buffer.mem); } else { printf("unknown command\n"); goto out_close; } out: daemon_reply_destroy(reply); out_close: daemon_close(h); return 0; } LVM2.2.02.176/daemons/lvmetad/testclient.c0000644000000000000120000001013213176752421016725 0ustar rootwheel/* * Copyright (C) 2011-2014 Red Hat, Inc. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "tool.h" #include "lvmetad-client.h" #include "label.h" #include "lvmcache.h" #include "metadata.h" const char *uuid1 = "abcd-efgh"; const char *uuid2 = "bbcd-efgh"; const char *vgid = "yada-yada"; const char *uuid3 = "cbcd-efgh"; const char *metadata2 = "{\n" "id = \"yada-yada\"\n" "seqno = 15\n" "status = [\"READ\", \"WRITE\"]\n" "flags = []\n" "extent_size = 8192\n" "physical_volumes {\n" " pv0 {\n" " id = \"abcd-efgh\"\n" " }\n" " pv1 {\n" " id = \"bbcd-efgh\"\n" " }\n" " pv2 {\n" " id = \"cbcd-efgh\"\n" " }\n" "}\n" "}\n"; void _handle_reply(daemon_reply reply) { const char *repl = daemon_reply_str(reply, "response", NULL); const char *status = daemon_reply_str(reply, "status", NULL); const char *vgid = daemon_reply_str(reply, "vgid", NULL); fprintf(stderr, "[C] REPLY: %s\n", repl); if (!strcmp(repl, "failed")) fprintf(stderr, "[C] REASON: %s\n", daemon_reply_str(reply, "reason", "unknown")); if (vgid) fprintf(stderr, "[C] VGID: %s\n", vgid); if (status) fprintf(stderr, "[C] STATUS: %s\n", status); daemon_reply_destroy(reply); } void _pv_add(daemon_handle h, const char *uuid, const char *metadata) { daemon_reply reply = daemon_send_simple(h, "pv_add", "uuid = %s", uuid, "metadata = %b", metadata, NULL); _handle_reply(reply); } int scan(daemon_handle h, char *fn) { struct device *dev = dev_cache_get(fn, NULL); struct label *label; if (!label_read(dev, &label, 0)) { fprintf(stderr, "[C] no label found on %s\n", fn); return; } char uuid[64]; if (!id_write_format(dev->pvid, uuid, 64)) { fprintf(stderr, "[C] Failed to format PV UUID for %s", dev_name(dev)); return; } fprintf(stderr, "[C] found PV: %s\n", uuid); struct lvmcache_info *info = (struct lvmcache_info *) label->info; struct physical_volume pv = { 0, }; if (!(info->fmt->ops->pv_read(info->fmt, dev_name(dev), &pv, 0))) { fprintf(stderr, "[C] Failed to read PV %s", dev_name(dev)); return; } struct format_instance_ctx fic; struct format_instance *fid = info->fmt->ops->create_instance(info->fmt, &fic); struct metadata_area *mda; struct volume_group *vg = NULL; dm_list_iterate_items(mda, &info->mdas) { struct volume_group *this = mda->ops->vg_read(fid, "", mda); if (this && !vg || this->seqno > vg->seqno) vg = this; } if (vg) { char *buf = NULL; /* TODO. This is not entirely correct, since export_vg_to_buffer * adds trailing garbage to the buffer. We may need to use * export_vg_to_config_tree and format the buffer ourselves. It * does, however, work for now, since the garbage is well * formatted and has no conflicting keys with the rest of the * request. 
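 * (This appears to be why the pv_add request just below passes
 * strchr(buf, '{') rather than buf itself: the metadata string handed
 * to lvmetad then starts at the first '{' of the exported text.)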
*/ export_vg_to_buffer(vg, &buf); daemon_reply reply = daemon_send_simple(h, "pv_add", "uuid = %s", uuid, "metadata = %b", strchr(buf, '{'), NULL); _handle_reply(reply); } } void _dump_vg(daemon_handle h, const char *uuid) { daemon_reply reply = daemon_send_simple(h, "vg_by_uuid", "uuid = %s", uuid, NULL); fprintf(stderr, "[C] reply buffer: %s\n", reply.buffer); daemon_reply_destroy(reply); } int main(int argc, char **argv) { daemon_handle h = lvmetad_open(); /* FIXME Missing error path */ if (argc > 1) { int i; struct cmd_context *cmd = create_toolcontext(0, NULL, 0, 0, 1, 1); for (i = 1; i < argc; ++i) { const char *uuid = NULL; scan(h, argv[i]); } destroy_toolcontext(cmd); /* FIXME Missing lvmetad_close() */ return 0; } _pv_add(h, uuid1, NULL); _pv_add(h, uuid2, metadata2); _dump_vg(h, vgid); _pv_add(h, uuid3, NULL); daemon_close(h); /* FIXME lvmetad_close? */ return 0; } LVM2.2.02.176/daemons/lvmetad/lvmetad-client.h0000644000000000000120000000534513176752421017476 0ustar rootwheel/* * Copyright (C) 2011-2012 Red Hat, Inc. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _LVM_LVMETAD_CLIENT_H #define _LVM_LVMETAD_CLIENT_H #include "daemon-client.h" #define LVMETAD_SOCKET DEFAULT_RUN_DIR "/lvmetad.socket" #define LVMETAD_TOKEN_UPDATE_IN_PROGRESS "update in progress" #define LVMETAD_DISABLE_REASON_DIRECT "DIRECT" #define LVMETAD_DISABLE_REASON_LVM1 "LVM1" #define LVMETAD_DISABLE_REASON_DUPLICATES "DUPLICATES" #define LVMETAD_DISABLE_REASON_VGRESTORE "VGRESTORE" #define LVMETAD_DISABLE_REASON_REPAIR "REPAIR" struct volume_group; /* Different types of replies we may get from lvmetad. */ typedef struct { daemon_reply r; const char **uuids; /* NULL terminated array */ } lvmetad_uuidlist; typedef struct { daemon_reply r; struct dm_config_tree *cft; } lvmetad_vg; /* Get a list of VG UUIDs that match a given VG name. */ lvmetad_uuidlist lvmetad_lookup_vgname(daemon_handle h, const char *name); /* Get the metadata of a single VG, identified by UUID. */ lvmetad_vg lvmetad_get_vg(daemon_handle h, const char *uuid); /* * Add and remove PVs on demand. Udev-driven systems will use this interface * instead of scanning. */ daemon_reply lvmetad_add_pv(daemon_handle h, const char *pv_uuid, const char *mda_content); daemon_reply lvmetad_remove_pv(daemon_handle h, const char *pv_uuid); /* Trigger a full disk scan, throwing away all caches. XXX do we eventually want * this? Probably not yet, anyway. * daemon_reply lvmetad_rescan(daemon_handle h); */ /* * Update the version of metadata of a volume group. The VG has to be locked for * writing for this, and the VG metadata here has to match whatever has been * written to the disk (under this lock). This initially avoids the requirement * for lvmetad to write to disk (in later revisions, lvmetad_supersede_vg may * also do the writing, or we probably add another function to do that). 
*/ daemon_reply lvmetad_supersede_vg(daemon_handle h, struct volume_group *vg); /* Wrappers to open/close connection */ static inline daemon_handle lvmetad_open(const char *socket) { daemon_info lvmetad_info = { .path = "lvmetad", .socket = socket ?: LVMETAD_SOCKET, .protocol = "lvmetad", .protocol_version = 1, .autostart = 0 }; return daemon_open(lvmetad_info); } static inline void lvmetad_close(daemon_handle h) { return daemon_close(h); } #endif LVM2.2.02.176/daemons/lvmetad/test.sh0000755000000000000120000000036013176752421015723 0ustar rootwheel#!/bin/bash export LD_LIBRARY_PATH="$1" test -n "$2" && { rm -f /var/run/lvmetad.{socket,pid} chmod +rx lvmetad valgrind ./lvmetad -f & PID=$! sleep 1 ./testclient kill $PID exit 0 } sudo ./test.sh "$1" . LVM2.2.02.176/daemons/lvmetad/lvmetad-core.c0000644000000000000120000026355013176752421017147 0ustar rootwheel/* * Copyright (C) 2012-2015 Red Hat, Inc. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #define _XOPEN_SOURCE 500 /* pthread */ #define _REENTRANT #include "tool.h" #include "daemon-io.h" #include "daemon-server.h" #include "daemon-log.h" #include "lvm-version.h" #include "lvmetad-client.h" #include #include #include #define LVMETAD_SOCKET DEFAULT_RUN_DIR "/lvmetad.socket" /* * cache states: * . Empty: no devices visible to the system have been added to lvmetad * . Scanning: some devices visible to the system have been added to lvmetad * . Initialized: all devices visible to the system have been added to lvmetad * . Outdated: event on system or storage is not yet processed by lvmetad * Outdated variations: * - MissingDev: device added to system, not yet added to lvmetad * - RemovedDev: device removed from system, not yet removed from lvmetad * - MissingVG: new vg is written on disk, not yet added to lvmetad * - RemovedVG: vg is removed on disk, not yet removed in lvmetad * - ChangedVG: vg metadata is changed on disk, not yet updated in lvmetad * - MissingPV: new pv is written on disk, not yet added to in lvmetad * - RemovedPV: pv is removed on disk, not yet removed in lvmetad * - ChangedPV: pv metadata is changed on disk, not yet updated in lvmetad * . Updated: events have been processed by lvmetad * * state transitions: * . Empty -> Scanning * . Scanning -> Initialized * . Initialized -> Scanning * . Initialized -> Outdated * . Outdated -> Updated * . Updated -> Outdated * . Updated -> Scanning * . Outdated -> Scanning * * state transitions caused by: * . Empty is caused by: * - starting/restarting lvmetad * . Scanning is caused by: * - running pvscan --cache * - running any command with different global_filter (token mismatch) * - running any command while lvmetad is Empty * - running a report/display command with --foreign * - running a report/display command with --shared * - running a command using lvmlockd global lock where global state is changed * . Initialized is caused by: * - completion of Scanning * . Outdated is caused by: * - device being added or removed on the system * - creating/removing/changing a VG * - creating/removing/changing a PV * . 
Updated is caused by: * - receiving and processing all events * * request handling: * . Empty: short period during startup, token error returned * . Scanning: should be very short, lvmetad responds to requests with * the token error "updating" * . Initialized: lvmetad responds to requests * . Updated: lvmetad responds to requests * . Outdated: should be very short, lvmetad responds to requests * * In general, the cache state before and after the transition * "Updated -> Scanning -> Initialized" should match, unless * events occur during that transition. * * The Scanning state includes: * . receive a request to set the token to "updating" (Scanning state begins.) * . receive a pv_clear_all request to clear current cache * . receive a number of pv_found events to repopulate cache * . receive a request to set the token to a hash value (Initialized state begins.) * * The transition from Outdated to Updated depends on lvm commands * sending events to lvmetad, i.e. pv_found, pv_gone, vg_update, * vg_remove. Prior to receiving these events, lvmetad is not aware * that it is in the Outdated state. * * When using a shared VG with lvmlockd, the Outdated state can last a * longer time, but it won't be used in that state. lvmlockd forces a * transition "Outdated -> Scanning -> Initialized" before the cache * is used. */ /* * valid/invalid state of cached metadata * * Normally when using lvmetad, the state is kept up-to-date through a * combination of notifications from clients and updates triggered by uevents. * When using lvmlockd, the lvmetad state is expected to become out of * date (invalid/stale) when other hosts make changes to the metadata on disk. * * To deal with this, the metadata cached in lvmetad can be flagged as invalid. * This invalid flag is returned along with the metadata when read by a * command. The command can check for the invalid flag and decide that it * should either use the stale metadata (uncommon), or read the latest metadata * from disk rather than using the invalid metadata that was returned. If the * command reads the latest metadata from disk, it can choose to send it to * lvmetad to update the cached copy and clear the invalid flag in lvmetad. * Otherwise, the next command to read the metadata from lvmetad will also * receive the invalid metadata with the invalid flag (and like the previous * command, it too may choose to read the latest metadata from disk and can * then also choose to update the lvmetad copy.) * * For purposes of tracking the invalid state, LVM metadata is considered * to be either VG-specific or global. VG-specific metadata is metadata * that is isolated to a VG, such as the LVs it contains. Global * metadata is metadata that is not isolated to a single VG. Global * metdata includes: * . the VG namespace (which VG names are used) * . the set of orphan PVs (which PVs are in VGs and which are not) * . properties of orphan PVs (the size of an orphan PV) * * If the metadata for a single VG becomes invalid, the VGFL_INVALID * flag can be set in the vg_info struct for that VG. If the global * metdata becomes invalid, the GLFL_INVALID flag can be set in the * lvmetad daemon state. * * If a command reads VG metadata and VGFL_INVALID is set, an * extra config node called "vg_invalid" is added to the config * data returned to the command. * * If a command reads global metdata and GLFL_INVALID is set, an * extra config node called "global_invalid" is added to the * config data returned to the command. 
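 *
 * Illustration only (the exact layout is assumed, not copied from a
 * daemon trace): a vg_lookup reply for a stale VG, while the global
 * state is also invalid, would end with the extra empty nodes appended
 * by add_last_node(), roughly:
 *
 *   response = "OK"
 *   name = "<vgname>"
 *   metadata { ... }
 *   global_invalid
 *   vg_invalid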
* * If a command sees vg_invalid, and wants the latest VG metadata, * it only needs to scan disks of the PVs in that VG. * It can then use vg_update to send the latest metadata to lvmetad * which clears the VGFL_INVALID flag. * * If a command sees global_invalid, and wants the latest metadata, * it should scan all devices to update lvmetad, and then send * lvmetad the "set_global_info global_invalid=0" message to clear * GLFL_INVALID. * * (When rescanning devices to update lvmetad, the command must use * the global filter cmd->lvmetad_filter so that it processes the same * devices that are seen by lvmetad.) * * The lvmetad INVALID flags can be set by sending lvmetad the messages: * * . set_vg_info with the latest VG seqno. If the VG seqno is larger * than the cached VG seqno, VGFL_INVALID is set for the VG. * * . set_global_info with global_invalid=1 sets GLFL_INVALID. * * Different entities could use these functions to invalidate metadata * if/when they detected that the cache is stale. How they detect that * the cache is stale depends on the details of the specific entity. * * In the case of lvmlockd, it embeds values into its locks to keep track * of when other nodes have changed metadata on disk related to those locks. * When acquring locks it can look at these values and detect that * the metadata associated with the lock has been changed. * When the values change, it uses set_vg_info/set_global_info to * invalidate the lvmetad cache. * * The values that lvmlockd distributes through its locks are the * latest VG seqno in VG locks and a global counter in the global lock. * When a host acquires a VG lock and sees that the embedded seqno is * larger than it was previously, it knows that it should invalidate the * lvmetad cache for the VG. If the host acquires the global lock * and sees that the counter is larger than previously, it knows that * it should invalidate the global info in lvmetad. This invalidation * is done before the lock is returned to the command. This way the * invalid flag will be set on the metadata before the command reads * it from lvmetad. 
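 *
 * A minimal sketch of the sender side, modelled on the lvmetactl calls
 * earlier in this tree (h, vgid and seqno are placeholders, not values
 * defined here): an entity that has observed a newer VG seqno could send
 *
 *   reply = daemon_send_simple(h, "set_vg_info",
 *                              "uuid = %s", vgid,
 *                              "version = " FMTd64, (int64_t) seqno,
 *                              "token = %s", "skip", NULL);
 *
 * and, after a full rescan has repopulated the cache, clear the global
 * flag again with a "set_global_info" request carrying global_invalid = 0.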
*/ struct vg_info { int64_t external_version; uint32_t flags; /* VGFL_ */ }; #define GLFL_INVALID 0x00000001 #define GLFL_DISABLE 0x00000002 #define GLFL_DISABLE_REASON_DIRECT 0x00000004 #define GLFL_DISABLE_REASON_LVM1 0x00000008 #define GLFL_DISABLE_REASON_DUPLICATES 0x00000010 #define GLFL_DISABLE_REASON_VGRESTORE 0x00000020 #define GLFL_DISABLE_REASON_REPAIR 0x00000040 #define GLFL_DISABLE_REASON_ALL (GLFL_DISABLE_REASON_DIRECT | GLFL_DISABLE_REASON_REPAIR | GLFL_DISABLE_REASON_LVM1 | GLFL_DISABLE_REASON_DUPLICATES | GLFL_DISABLE_REASON_VGRESTORE) #define VGFL_INVALID 0x00000001 #define CMD_NAME_SIZE 32 typedef struct { daemon_idle *idle; log_state *log; /* convenience */ const char *log_config; struct dm_hash_table *pvid_to_pvmeta; struct dm_hash_table *device_to_pvid; /* shares locks with above */ struct dm_hash_table *vgid_to_metadata; struct dm_hash_table *vgid_to_vgname; struct dm_hash_table *vgid_to_outdated_pvs; struct dm_hash_table *vgid_to_info; struct dm_hash_table *vgname_to_vgid; struct dm_hash_table *pvid_to_vgid; char token[128]; char update_cmd[CMD_NAME_SIZE]; int update_pid; int update_timeout; uint64_t update_begin; uint32_t flags; /* GLFL_ */ pthread_mutex_t token_lock; pthread_mutex_t info_lock; pthread_rwlock_t cache_lock; } lvmetad_state; static uint64_t _monotonic_seconds(void) { struct timespec ts; if (clock_gettime(CLOCK_MONOTONIC, &ts) < 0) return 0; return ts.tv_sec; } static void destroy_metadata_hashes(lvmetad_state *s) { struct dm_hash_node *n = NULL; dm_hash_iterate(n, s->vgid_to_metadata) dm_config_destroy(dm_hash_get_data(s->vgid_to_metadata, n)); dm_hash_iterate(n, s->vgid_to_outdated_pvs) dm_config_destroy(dm_hash_get_data(s->vgid_to_outdated_pvs, n)); dm_hash_iterate(n, s->pvid_to_pvmeta) dm_config_destroy(dm_hash_get_data(s->pvid_to_pvmeta, n)); dm_hash_iterate(n, s->vgid_to_vgname) dm_free(dm_hash_get_data(s->vgid_to_vgname, n)); dm_hash_iterate(n, s->vgname_to_vgid) dm_free(dm_hash_get_data(s->vgname_to_vgid, n)); dm_hash_iterate(n, s->vgid_to_info) dm_free(dm_hash_get_data(s->vgid_to_info, n)); dm_hash_iterate(n, s->device_to_pvid) dm_free(dm_hash_get_data(s->device_to_pvid, n)); dm_hash_iterate(n, s->pvid_to_vgid) dm_free(dm_hash_get_data(s->pvid_to_vgid, n)); dm_hash_destroy(s->pvid_to_pvmeta); dm_hash_destroy(s->vgid_to_metadata); dm_hash_destroy(s->vgid_to_vgname); dm_hash_destroy(s->vgid_to_outdated_pvs); dm_hash_destroy(s->vgid_to_info); dm_hash_destroy(s->vgname_to_vgid); dm_hash_destroy(s->device_to_pvid); dm_hash_destroy(s->pvid_to_vgid); } static void create_metadata_hashes(lvmetad_state *s) { s->pvid_to_pvmeta = dm_hash_create(32); s->device_to_pvid = dm_hash_create(32); s->vgid_to_metadata = dm_hash_create(32); s->vgid_to_vgname = dm_hash_create(32); s->vgid_to_outdated_pvs = dm_hash_create(32); s->vgid_to_info = dm_hash_create(32); s->pvid_to_vgid = dm_hash_create(32); s->vgname_to_vgid = dm_hash_create(32); } static response reply_fail(const char *reason) { return daemon_reply_simple("failed", "reason = %s", reason, NULL); } static response reply_unknown(const char *reason) { return daemon_reply_simple("unknown", "reason = %s", reason, NULL); } static struct dm_config_node *pvs(struct dm_config_node *vg) { struct dm_config_node *pv = dm_config_find_node(vg, "metadata/physical_volumes"); if (pv) pv = pv->child; return pv; } static void filter_metadata(struct dm_config_node *vg) { struct dm_config_node *pv = pvs(vg); while (pv) { struct dm_config_node *item = pv->child; while (item) { /* Remove the advisory device nodes. 
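 * (Concretely, the check just below unlinks any node keyed "device"
 * from each PV section, so the cached device hint is dropped from the
 * VG metadata tree.)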
*/ if (item->sib && !strcmp(item->sib->key, "device")) item->sib = item->sib->sib; item = item->sib; } pv = pv->sib; } vg->sib = NULL; /* Drop any trailing garbage. */ } static void merge_pvmeta(struct dm_config_node *pv, struct dm_config_node *pvmeta) { struct dm_config_node *tmp; if (!pvmeta) return; tmp = pvmeta; while (tmp->sib) { /* drop the redundant ID and dev_size nodes */ if (!strcmp(tmp->sib->key, "id") || !strcmp(tmp->sib->key, "dev_size")) tmp->sib = tmp->sib->sib; if (!tmp->sib) break; tmp = tmp->sib; tmp->parent = pv; } tmp->sib = pv->child; pv->child = pvmeta; pvmeta->parent = pv; } /* * Either the "big" vgs lock, or a per-vg lock needs to be held before entering * this function. * * cft and vg is data being sent to the caller. */ static int update_pv_status(lvmetad_state *s, struct dm_config_tree *cft, struct dm_config_node *vg) { struct dm_config_node *pv; const char *uuid; struct dm_config_tree *pvmeta; struct dm_config_node *pvmeta_cn; int ret = 1; for (pv = pvs(vg); pv; pv = pv->sib) { if (!(uuid = dm_config_find_str(pv->child, "id", NULL))) { ERROR(s, "update_pv_status found no uuid for PV"); continue; } pvmeta = dm_hash_lookup(s->pvid_to_pvmeta, uuid); set_flag(cft, pv, "status", "MISSING", !pvmeta); if (pvmeta) { if (!(pvmeta_cn = dm_config_clone_node(cft, pvmeta->root->child, 1))) { ERROR(s, "update_pv_status out of memory"); ret = 0; goto out; } merge_pvmeta(pv, pvmeta_cn); } } out: return ret; } static struct dm_config_node *add_last_node(struct dm_config_tree *cft, const char *node_name) { struct dm_config_node *cn, *last; cn = cft->root; last = cn; while (cn->sib) { last = cn->sib; cn = last; } cn = dm_config_create_node(cft, node_name); if (!cn) return NULL; cn->v = NULL; cn->sib = NULL; cn->parent = cft->root; last->sib = cn; return cn; } static struct dm_config_node *make_pv_node(lvmetad_state *s, const char *pvid, struct dm_config_tree *cft, struct dm_config_node *parent, struct dm_config_node *pre_sib) { struct dm_config_tree *pvmeta = dm_hash_lookup(s->pvid_to_pvmeta, pvid); const char *vgid = dm_hash_lookup(s->pvid_to_vgid, pvid), *vgname = NULL; struct dm_config_node *pv; struct dm_config_node *cn = NULL; if (!pvmeta) return NULL; if (vgid) { vgname = dm_hash_lookup(s->vgid_to_vgname, vgid); } /* Nick the pvmeta config tree. */ if (!(pv = dm_config_clone_node(cft, pvmeta->root, 0))) return 0; if (pre_sib) pre_sib->sib = pv; if (parent && !parent->child) parent->child = pv; pv->parent = parent; pv->key = pvid; /* Add the "variable" bits to it. 
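 * (Namely the vgid and vgname text nodes added below; unlike the cloned
 * pvmeta content, they change whenever the PV moves between a VG and
 * the orphans.)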
*/ if (vgid && strcmp(vgid, "#orphan")) cn = make_text_node(cft, "vgid", vgid, pv, cn); if (vgname) cn = make_text_node(cft, "vgname", vgname, pv, cn); return pv; } static response pv_list(lvmetad_state *s, request r) { struct dm_config_node *cn = NULL, *cn_pvs; struct dm_hash_node *n; const char *id; response res = { 0 }; DEBUGLOG(s, "pv_list"); buffer_init( &res.buffer ); if (!(res.cft = dm_config_create())) return res; /* FIXME error reporting */ /* The response field */ if (!(res.cft->root = make_text_node(res.cft, "response", "OK", NULL, NULL))) return res; /* FIXME doomed */ cn_pvs = make_config_node(res.cft, "physical_volumes", NULL, res.cft->root); dm_hash_iterate(n, s->pvid_to_pvmeta) { id = dm_hash_get_key(s->pvid_to_pvmeta, n); cn = make_pv_node(s, id, res.cft, cn_pvs, cn); } if (s->flags & GLFL_INVALID) add_last_node(res.cft, "global_invalid"); return res; } static response pv_lookup(lvmetad_state *s, request r) { const char *pvid = daemon_request_str(r, "uuid", NULL); int64_t devt = daemon_request_int(r, "device", 0); response res = { 0 }; struct dm_config_node *pv; DEBUGLOG(s, "pv_lookup pvid %s", pvid); buffer_init( &res.buffer ); if (!pvid && !devt) return reply_fail("need PVID or device"); if (!(res.cft = dm_config_create())) return reply_fail("out of memory"); if (!(res.cft->root = make_text_node(res.cft, "response", "OK", NULL, NULL))) return reply_fail("out of memory"); if (!pvid && devt) pvid = dm_hash_lookup_binary(s->device_to_pvid, &devt, sizeof(devt)); if (!pvid) { WARN(s, "pv_lookup: could not find device %" PRIu64, devt); dm_config_destroy(res.cft); return reply_unknown("device not found"); } pv = make_pv_node(s, pvid, res.cft, NULL, res.cft->root); if (!pv) { dm_config_destroy(res.cft); return reply_unknown("PV not found"); } pv->key = "physical_volume"; if (s->flags & GLFL_INVALID) add_last_node(res.cft, "global_invalid"); return res; } static response vg_list(lvmetad_state *s, request r) { struct dm_config_node *cn, *cn_vgs, *cn_last = NULL; struct dm_hash_node *n; const char *id; const char *name; response res = { 0 }; DEBUGLOG(s, "vg_list"); buffer_init( &res.buffer ); if (!(res.cft = dm_config_create())) goto bad; /* FIXME: better error reporting */ /* The response field */ res.cft->root = cn = dm_config_create_node(res.cft, "response"); if (!cn) goto bad; /* FIXME */ cn->parent = res.cft->root; if (!(cn->v = dm_config_create_value(res.cft))) goto bad; /* FIXME */ cn->v->type = DM_CFG_STRING; cn->v->v.str = "OK"; cn_vgs = cn = cn->sib = dm_config_create_node(res.cft, "volume_groups"); if (!cn_vgs) goto bad; /* FIXME */ cn->parent = res.cft->root; cn->v = NULL; cn->child = NULL; dm_hash_iterate(n, s->vgid_to_vgname) { id = dm_hash_get_key(s->vgid_to_vgname, n), name = dm_hash_get_data(s->vgid_to_vgname, n); if (!(cn = dm_config_create_node(res.cft, id))) goto bad; /* FIXME */ if (cn_last) cn_last->sib = cn; cn->parent = cn_vgs; cn->sib = NULL; cn->v = NULL; if (!(cn->child = dm_config_create_node(res.cft, "name"))) goto bad; /* FIXME */ cn->child->parent = cn; cn->child->sib = 0; if (!(cn->child->v = dm_config_create_value(res.cft))) goto bad; /* FIXME */ cn->child->v->type = DM_CFG_STRING; cn->child->v->v.str = name; if (!cn_vgs->child) cn_vgs->child = cn; cn_last = cn; } if (s->flags & GLFL_INVALID) add_last_node(res.cft, "global_invalid"); bad: return res; } static void mark_outdated_pv(lvmetad_state *s, const char *vgid, const char *pvid) { struct dm_config_tree *pvmeta, *outdated_pvs; struct dm_config_node *list, *cft_vgid; struct dm_config_value *v; 
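	/*
	 * Rough sketch of the per-VG tree this function maintains in
	 * s->vgid_to_outdated_pvs (shape inferred from the code below; the
	 * pvid/vgid values are placeholders):
	 *
	 *   outdated_pvs {
	 *       pv_list = ["<pvid-2>", "<pvid-1>"]
	 *       vgid = "<vgid>"
	 *   }
	 *
	 * Newly marked pvids are prepended to pv_list, and chain_outdated_pvs()
	 * later grafts the matching pvmeta sections into vg_lookup replies.
	 */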
pvmeta = dm_hash_lookup(s->pvid_to_pvmeta, pvid); /* if the MDA exists and is used, it will have ignore=0 set */ if (!pvmeta || (dm_config_find_int64(pvmeta->root, "pvmeta/mda0/ignore", 1) && dm_config_find_int64(pvmeta->root, "pvmeta/mda1/ignore", 1))) return; ERROR(s, "PV %s has outdated metadata for VG %s", pvid, vgid); outdated_pvs = dm_hash_lookup(s->vgid_to_outdated_pvs, vgid); if (!outdated_pvs) { if (!(outdated_pvs = config_tree_from_string_without_dup_node_check("outdated_pvs/pv_list = []")) || !(cft_vgid = make_text_node(outdated_pvs, "vgid", dm_pool_strdup(outdated_pvs->mem, vgid), outdated_pvs->root, NULL))) abort(); if (!dm_hash_insert(s->vgid_to_outdated_pvs, cft_vgid->v->v.str, outdated_pvs)) abort(); DEBUGLOG(s, "created outdated_pvs list for VG %s", vgid); } list = dm_config_find_node(outdated_pvs->root, "outdated_pvs/pv_list"); v = list->v; while (v) { if (v->type != DM_CFG_EMPTY_ARRAY && !strcmp(v->v.str, pvid)) return; v = v->next; } if (!(v = dm_config_create_value(outdated_pvs))) abort(); v->type = DM_CFG_STRING; v->v.str = dm_pool_strdup(outdated_pvs->mem, pvid); v->next = list->v; list->v = v; } static void chain_outdated_pvs(lvmetad_state *s, const char *vgid, struct dm_config_tree *metadata_cft, struct dm_config_node *metadata) { struct dm_config_tree *cft = dm_hash_lookup(s->vgid_to_outdated_pvs, vgid), *pvmeta; struct dm_config_node *pv, *res, *out_pvs = cft ? dm_config_find_node(cft->root, "outdated_pvs/pv_list") : NULL; struct dm_config_value *pvs_v = out_pvs ? out_pvs->v : NULL; if (!pvs_v) return; if (!(res = make_config_node(metadata_cft, "outdated_pvs", metadata_cft->root, 0))) return; /* oops */ res->sib = metadata->child; metadata->child = res; for (; pvs_v && pvs_v->type != DM_CFG_EMPTY_ARRAY; pvs_v = pvs_v->next) { pvmeta = dm_hash_lookup(s->pvid_to_pvmeta, pvs_v->v.str); if (!pvmeta) { WARN(s, "metadata for PV %s not found", pvs_v->v.str); continue; } if (!(pv = dm_config_clone_node(metadata_cft, pvmeta->root, 0))) continue; pv->key = dm_config_find_str(pv, "pvmeta/id", NULL); pv->sib = res->child; res->child = pv; } } static response vg_lookup(lvmetad_state *s, request r) { struct dm_config_tree *cft; struct dm_config_node *metadata, *n; struct vg_info *info; response res = { 0 }; const char *uuid = daemon_request_str(r, "uuid", NULL); const char *name = daemon_request_str(r, "name", NULL); int count = 0; buffer_init( &res.buffer ); if (!uuid && !name) { ERROR(s, "vg_lookup with no uuid or name"); return reply_unknown("VG not found"); } else if (!uuid || !name) { DEBUGLOG(s, "vg_lookup vgid %s name %s needs lookup", uuid ?: "none", name ?: "none"); if (name && !uuid) uuid = dm_hash_lookup_with_count(s->vgname_to_vgid, name, &count); else if (uuid && !name) name = dm_hash_lookup(s->vgid_to_vgname, uuid); if (name && uuid && (count > 1)) { DEBUGLOG(s, "vg_lookup name %s vgid %s found %d vgids", name, uuid, count); return daemon_reply_simple("multiple", "reason = %s", "Multiple VGs found with same name", NULL); } if (!uuid || !name) return reply_unknown("VG not found"); } else { char *name_lookup = dm_hash_lookup(s->vgid_to_vgname, uuid); char *uuid_lookup = dm_hash_lookup_with_val(s->vgname_to_vgid, name, uuid, strlen(uuid) + 1); /* FIXME: comment out these sanity checks when not testing */ if (!name_lookup || !uuid_lookup) { ERROR(s, "vg_lookup vgid %s name %s found incomplete mapping uuid %s name %s", uuid, name, uuid_lookup ?: "none", name_lookup ?: "none"); return reply_unknown("VG mapping incomplete"); } else if (strcmp(name_lookup, name) || 
strcmp(uuid_lookup, uuid)) { ERROR(s, "vg_lookup vgid %s name %s found inconsistent mapping uuid %s name %s", uuid, name, uuid_lookup, name_lookup); return reply_unknown("VG mapping inconsistent"); } } DEBUGLOG(s, "vg_lookup vgid %s name %s", uuid ?: "none", name ?: "none"); cft = dm_hash_lookup(s->vgid_to_metadata, uuid); if (!cft || !cft->root) { return reply_unknown("UUID not found"); } metadata = cft->root; if (!(res.cft = dm_config_create())) goto nomem_un; /* The response field */ if (!(res.cft->root = n = dm_config_create_node(res.cft, "response"))) goto nomem_un; if (!(n->v = dm_config_create_value(cft))) goto nomem_un; n->parent = res.cft->root; n->v->type = DM_CFG_STRING; n->v->v.str = "OK"; if (!(n = n->sib = dm_config_create_node(res.cft, "name"))) goto nomem_un; if (!(n->v = dm_config_create_value(res.cft))) goto nomem_un; n->parent = res.cft->root; n->v->type = DM_CFG_STRING; n->v->v.str = name; /* The metadata section */ if (!(n = n->sib = dm_config_clone_node(res.cft, metadata, 1))) goto nomem_un; n->parent = res.cft->root; if (!update_pv_status(s, res.cft, n)) goto nomem; chain_outdated_pvs(s, uuid, res.cft, n); if (s->flags & GLFL_INVALID) add_last_node(res.cft, "global_invalid"); info = dm_hash_lookup(s->vgid_to_info, uuid); if (info && (info->flags & VGFL_INVALID)) { if (!add_last_node(res.cft, "vg_invalid")) goto nomem; } return res; nomem_un: nomem: reply_fail("out of memory"); ERROR(s, "vg_lookup vgid %s name %s out of memory.", uuid ?: "none", name ?: "none"); ERROR(s, "lvmetad could not be updated and is aborting."); exit(EXIT_FAILURE); } static int vg_remove_if_missing(lvmetad_state *s, const char *vgid, int update_pvids); enum update_pvid_mode { UPDATE_ONLY, REMOVE_EMPTY, MARK_OUTDATED }; /* You need to be holding the pvid_to_vgid lock already to call this. */ static int _update_pvid_to_vgid(lvmetad_state *s, struct dm_config_tree *vg, const char *vgid, int mode) { struct dm_config_node *pv; struct dm_hash_table *to_check; struct dm_hash_node *n; const char *pvid; char *vgid_old; char *vgid_dup; const char *check_vgid; int r = 0; if (!vgid) return 0; if (!(to_check = dm_hash_create(32))) goto abort_daemon; for (pv = pvs(vg->root); pv; pv = pv->sib) { if (!(pvid = dm_config_find_str(pv->child, "id", NULL))) { ERROR(s, "PV has no id for update_pvid_to_vgid"); continue; } vgid_old = dm_hash_lookup(s->pvid_to_vgid, pvid); if ((mode == REMOVE_EMPTY) && vgid_old) { /* This copies the vgid_old string, doesn't reference it. */ if ((dm_hash_lookup(to_check, vgid_old) != (void*) 1) && !dm_hash_insert(to_check, vgid_old, (void*) 1)) { ERROR(s, "update_pvid_to_vgid out of memory for hash insert vgid_old %s", vgid_old); goto abort_daemon; } } if (mode == MARK_OUTDATED) mark_outdated_pv(s, vgid, pvid); if (!(vgid_dup = dm_strdup(vgid))) { ERROR(s, "update_pvid_to_vgid out of memory for vgid %s", vgid); goto abort_daemon; } if (!dm_hash_insert(s->pvid_to_vgid, pvid, vgid_dup)) { ERROR(s, "update_pvid_to_vgid out of memory for hash insert vgid %s", vgid_dup); dm_free(vgid_dup); goto abort_daemon; } /* pvid_to_vgid no longer references vgid_old */ dm_free(vgid_old); DEBUGLOG(s, "moving PV %s to VG %s", pvid, vgid); } dm_hash_iterate(n, to_check) { check_vgid = dm_hash_get_key(to_check, n); vg_remove_if_missing(s, check_vgid, 0); } r = 1; dm_hash_destroy(to_check); return r; abort_daemon: ERROR(s, "lvmetad could not be updated and is aborting."); if (to_check) dm_hash_destroy(to_check); exit(EXIT_FAILURE); } /* A pvid map lock needs to be held if update_pvids = 1. 
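 *
 * A rough usage sketch (illustrative, based on handler() further below):
 * callers reach this through request handlers that already hold the cache
 * write lock, e.g.
 *
 *   pthread_rwlock_wrlock(&state->cache_lock);
 *   res = vg_remove(state, r);      calls remove_metadata(s, vgid, 1)
 *   pthread_rwlock_unlock(&state->cache_lock);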
*/ static int remove_metadata(lvmetad_state *s, const char *vgid, int update_pvids) { struct dm_config_tree *meta_lookup; struct dm_config_tree *outdated_pvs_lookup; struct vg_info *info_lookup; char *name_lookup = NULL; char *vgid_lookup = NULL; /* get data pointers from hash table so they can be freed */ info_lookup = dm_hash_lookup(s->vgid_to_info, vgid); meta_lookup = dm_hash_lookup(s->vgid_to_metadata, vgid); name_lookup = dm_hash_lookup(s->vgid_to_vgname, vgid); outdated_pvs_lookup = dm_hash_lookup(s->vgid_to_outdated_pvs, vgid); if (name_lookup) vgid_lookup = dm_hash_lookup_with_val(s->vgname_to_vgid, name_lookup, vgid, strlen(vgid) + 1); /* remove hash table mappings */ dm_hash_remove(s->vgid_to_info, vgid); dm_hash_remove(s->vgid_to_metadata, vgid); dm_hash_remove(s->vgid_to_vgname, vgid); dm_hash_remove(s->vgid_to_outdated_pvs, vgid); if (name_lookup) dm_hash_remove_with_val(s->vgname_to_vgid, name_lookup, vgid, strlen(vgid) + 1); /* update_pvid_to_vgid will clear/free the pvid_to_vgid hash */ if (update_pvids && meta_lookup) (void) _update_pvid_to_vgid(s, meta_lookup, "#orphan", 0); /* free the unmapped data */ if (meta_lookup) dm_config_destroy(meta_lookup); if (outdated_pvs_lookup) dm_config_destroy(outdated_pvs_lookup); dm_free(info_lookup); dm_free(name_lookup); dm_free(vgid_lookup); return 1; } /* The VG must be locked. */ static int vg_remove_if_missing(lvmetad_state *s, const char *vgid, int update_pvids) { struct dm_config_tree *vg; struct dm_config_node *pv; const char *vgid_check; const char *pvid; int missing = 1; if (!vgid) return 0; if (!(vg = dm_hash_lookup(s->vgid_to_metadata, vgid))) return 1; for (pv = pvs(vg->root); pv; pv = pv->sib) { if (!(pvid = dm_config_find_str(pv->child, "id", NULL))) continue; if ((vgid_check = dm_hash_lookup(s->pvid_to_vgid, pvid)) && dm_hash_lookup(s->pvid_to_pvmeta, pvid) && !strcmp(vgid, vgid_check)) missing = 0; /* at least one PV is around */ } if (missing) { DEBUGLOG(s, "removing empty VG %s", vgid); remove_metadata(s, vgid, update_pvids); } return 1; } /* * Remove all hash table references to arg_name and arg_vgid * so that new metadata using this name and/or vgid can be added * without interference previous data. * * This is used if a command updates metadata in the cache, * but update_metadata finds that what's in the cache is not * consistent with a normal transition between old and new * metadata. If this happens, it assumes that the command * is providing the correct metadata, so it first calls this * function to purge all records of the old metadata so the * new metadata can be added. */ static void _purge_metadata(lvmetad_state *s, const char *arg_name, const char *arg_vgid) { char *rem_vgid; remove_metadata(s, arg_vgid, 1); if ((rem_vgid = dm_hash_lookup_with_val(s->vgname_to_vgid, arg_name, arg_vgid, strlen(arg_vgid) + 1))) { dm_hash_remove_with_val(s->vgname_to_vgid, arg_name, arg_vgid, strlen(arg_vgid) + 1); dm_free(rem_vgid); } } /* * Updates for new vgid and new metadata. * * Remove any existing vg_info struct since it will be * recreated by lvmlockd if/when needed. * * Remove any existing outdated pvs since their metadata * will no longer be associated with this VG. 
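 *
 * Rough order of operations below (summary of the code, added for clarity):
 *
 *   _update_pvid_to_vgid(s, old_meta, "#orphan", 0)    temporarily orphan PVs
 *   drop old_vgid from vgid_to_info, vgid_to_outdated_pvs,
 *       vgid_to_metadata, vgid_to_vgname and vgname_to_vgid
 *   insert new_vgid into vgid_to_metadata, vgid_to_vgname, vgname_to_vgid
 *   _update_pvid_to_vgid(s, new_meta, new_vgid, 1)     re-map PVs to new_vgid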
*/ static int _update_metadata_new_vgid(lvmetad_state *s, const char *arg_name, const char *old_vgid, const char *new_vgid, struct dm_config_tree *old_meta, struct dm_config_tree *new_meta) { struct vg_info *rem_info; struct dm_config_tree *rem_outdated; char *new_vgid_dup = NULL; char *arg_name_dup = NULL; int abort_daemon = 0; int retval = 0; if (!(new_vgid_dup = dm_strdup(new_vgid))) goto ret; if (!(arg_name_dup = dm_strdup(arg_name))) goto ret; /* * Temporarily orphan the PVs in the old metadata. */ if (!_update_pvid_to_vgid(s, old_meta, "#orphan", 0)) { ERROR(s, "update_metadata_new_vgid failed to move PVs for %s old_vgid %s", arg_name, old_vgid); abort_daemon = 1; goto ret; } /* * Remove things related to the old vgid. (like remove_metadata) */ if ((rem_info = dm_hash_lookup(s->vgid_to_info, old_vgid))) { dm_hash_remove(s->vgid_to_info, old_vgid); dm_free(rem_info); } if ((rem_outdated = dm_hash_lookup(s->vgid_to_outdated_pvs, old_vgid))) { dm_hash_remove(s->vgid_to_outdated_pvs, old_vgid); dm_config_destroy(rem_outdated); } dm_hash_remove(s->vgid_to_metadata, old_vgid); dm_config_destroy(old_meta); old_meta = NULL; dm_hash_remove_with_val(s->vgname_to_vgid, arg_name, old_vgid, strlen(old_vgid) + 1); dm_hash_remove(s->vgid_to_vgname, old_vgid); dm_free((char *)old_vgid); old_vgid = NULL; /* * Insert things with the new vgid. */ if (!dm_hash_insert(s->vgid_to_metadata, new_vgid, new_meta)) { ERROR(s, "update_metadata_new_vgid out of memory for meta hash insert for %s %s", arg_name, new_vgid); abort_daemon = 1; goto out; } if (!dm_hash_insert(s->vgid_to_vgname, new_vgid, arg_name_dup)) { ERROR(s, "update_metadata_new_vgid out of memory for name hash insert for %s %s", arg_name, new_vgid); abort_daemon = 1; goto out; } if (!dm_hash_insert_allow_multiple(s->vgname_to_vgid, arg_name, new_vgid_dup, strlen(new_vgid_dup) + 1)) { ERROR(s, "update_metadata_new_vgid out of memory for vgid hash insert for %s %s", arg_name, new_vgid); abort_daemon = 1; goto out; } /* * Reassign PVs based on the new metadata. */ if (!_update_pvid_to_vgid(s, new_meta, new_vgid, 1)) { ERROR(s, "update_metadata_new_name failed to update PVs for %s %s", arg_name, new_vgid); abort_daemon = 1; goto out; } DEBUGLOG(s, "update_metadata_new_vgid is done for %s %s", arg_name, new_vgid); retval = 1; out: ret: if (!new_vgid_dup || !arg_name_dup || abort_daemon) { ERROR(s, "lvmetad could not be updated and is aborting."); exit(EXIT_FAILURE); } if (!retval && new_meta) dm_config_destroy(new_meta); return retval; } /* * Updates for new name and new metadata. * * Remove any existing vg_info struct since it will be * recreated by lvmlockd if/when needed. * * Remove any existing outdated pvs since their metadata * will no longer be associated with this VG. */ static int _update_metadata_new_name(lvmetad_state *s, const char *arg_vgid, const char *old_name, const char *new_name, struct dm_config_tree *old_meta, struct dm_config_tree *new_meta) { struct vg_info *rem_info; struct dm_config_tree *rem_outdated; char *new_name_dup = NULL; char *arg_vgid_dup = NULL; int abort_daemon = 0; int retval = 0; if (!(new_name_dup = dm_strdup(new_name))) goto ret; if (!(arg_vgid_dup = dm_strdup(arg_vgid))) goto ret; /* * Temporarily orphan the PVs in the old metadata. */ if (!_update_pvid_to_vgid(s, old_meta, "#orphan", 0)) { ERROR(s, "update_metadata_new_name failed to move PVs for old_name %s %s", old_name, arg_vgid); abort_daemon = 1; goto ret; } /* * Remove things related to the old name. 
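 * That means (see the statements below): the vg_info struct, any
 * outdated_pvs tree, the cached metadata tree for arg_vgid, both
 * directions of the name/vgid mapping for old_name, and the old_name
 * string itself.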
*/ if ((rem_info = dm_hash_lookup(s->vgid_to_info, arg_vgid))) { dm_hash_remove(s->vgid_to_info, arg_vgid); dm_free(rem_info); } if ((rem_outdated = dm_hash_lookup(s->vgid_to_outdated_pvs, arg_vgid))) { dm_hash_remove(s->vgid_to_outdated_pvs, arg_vgid); dm_config_destroy(rem_outdated); } dm_hash_remove(s->vgid_to_metadata, arg_vgid); dm_config_destroy(old_meta); old_meta = NULL; dm_hash_remove(s->vgid_to_vgname, arg_vgid); dm_hash_remove_with_val(s->vgname_to_vgid, old_name, arg_vgid, strlen(arg_vgid) + 1); dm_free((char *)old_name); old_name = NULL; /* * Insert things with the new name. */ if (!dm_hash_insert(s->vgid_to_metadata, arg_vgid, new_meta)) { ERROR(s, "update_metadata_new_name out of memory for meta hash insert for %s %s", new_name, arg_vgid); abort_daemon = 1; goto out; } if (!dm_hash_insert(s->vgid_to_vgname, arg_vgid, new_name_dup)) { ERROR(s, "update_metadata_new_name out of memory for name hash insert for %s %s", new_name, arg_vgid); abort_daemon = 1; goto out; } if (!dm_hash_insert_allow_multiple(s->vgname_to_vgid, new_name, arg_vgid_dup, strlen(arg_vgid_dup) + 1)) { ERROR(s, "update_metadata_new_name out of memory for vgid hash insert for %s %s", new_name, arg_vgid); abort_daemon = 1; goto out; } /* * Reassign PVs based on the new metadata. */ if (!_update_pvid_to_vgid(s, new_meta, arg_vgid, 1)) { ERROR(s, "update_metadata_new_name failed to update PVs for %s %s", new_name, arg_vgid); abort_daemon = 1; goto out; } DEBUGLOG(s, "update_metadata_new_name is done for %s %s", new_name, arg_vgid); retval = 1; out: ret: if (!new_name_dup || !arg_vgid_dup || abort_daemon) { ERROR(s, "lvmetad could not be updated and is aborting."); exit(EXIT_FAILURE); } if (!retval && new_meta) dm_config_destroy(new_meta); return retval; } /* * Add new entries to all hash tables. */ static int _update_metadata_add_new(lvmetad_state *s, const char *new_name, const char *new_vgid, struct dm_config_tree *new_meta) { char *new_name_dup = NULL; char *new_vgid_dup = NULL; int abort_daemon = 0; int retval = 0; DEBUGLOG(s, "update_metadata_add_new for %s %s", new_name, new_vgid); if (!(new_name_dup = dm_strdup(new_name))) goto out_free; if (!(new_vgid_dup = dm_strdup(new_vgid))) goto out_free; if (!dm_hash_insert(s->vgid_to_metadata, new_vgid, new_meta)) { ERROR(s, "update_metadata_add_new out of memory for meta hash insert for %s %s", new_name, new_vgid); abort_daemon = 1; goto out; } if (!dm_hash_insert(s->vgid_to_vgname, new_vgid, new_name_dup)) { ERROR(s, "update_metadata_add_new out of memory for name hash insert for %s %s", new_name, new_vgid); abort_daemon = 1; goto out; } if (!dm_hash_insert_allow_multiple(s->vgname_to_vgid, new_name, new_vgid_dup, strlen(new_vgid_dup) + 1)) { ERROR(s, "update_metadata_add_new out of memory for vgid hash insert for %s %s", new_name, new_vgid); abort_daemon = 1; goto out; } if (!_update_pvid_to_vgid(s, new_meta, new_vgid, 1)) { ERROR(s, "update_metadata_add_new failed to update PVs for %s %s", new_name, new_vgid); abort_daemon = 1; goto out; } DEBUGLOG(s, "update_metadata_add_new is done for %s %s", new_name, new_vgid); retval = 1; out: out_free: if (!new_name_dup || !new_vgid_dup || abort_daemon) { dm_free(new_name_dup); dm_free(new_vgid_dup); ERROR(s, "lvmetad could not be updated and is aborting."); exit(EXIT_FAILURE); } if (!retval && new_meta) dm_config_destroy(new_meta); return retval; } /* * No locks need to be held. 
The pointers are never used outside of the scope of * this function, so they can be safely destroyed after update_metadata returns * (anything that might have been retained is copied). * * When this is called from pv_found, the metadata was read from a single * PV specified by the pvid arg and ret_old_seq is not NULL. The metadata * should match the existing metadata (matching seqno). If the metadata * from pv_found has a smaller seqno, it means that the PV is outdated * (was previously used in the VG and now reappeared after changes to the VG). * The next command to access the VG will erase the outdated PV and then clear * the outdated pv record here. If the metadata from pv_found has a larger * seqno than the existing metadata, it means ... (existing pvs are outdated?) * * When this is caleld from vg_update, the metadata is from a command that * has new metadata that should replace the existing metadata. * pvid and ret_old_seq are both NULL. */ static int _update_metadata(lvmetad_state *s, const char *arg_name, const char *arg_vgid, struct dm_config_node *new_metadata, int *ret_old_seq, const char *pvid) { struct dm_config_tree *old_meta = NULL; struct dm_config_tree *new_meta = NULL; const char *arg_name_lookup; /* name lookup result from arg_vgid */ const char *arg_vgid_lookup; /* vgid lookup result from arg_name */ const char *old_name = NULL; const char *new_name = NULL; const char *old_vgid = NULL; const char *new_vgid = NULL; const char *new_metadata_vgid; int new_seq; int old_seq = -1; int needs_repair = 0; int abort_daemon = 0; int retval = 0; int count = 0; if (!arg_vgid || !arg_name) { ERROR(s, "update_metadata missing args arg_vgid %s arg_name %s pvid %s", arg_vgid ?: "none", arg_name ?: "none", pvid ?: "none"); return 0; } DEBUGLOG(s, "update_metadata begin arg_vgid %s arg_name %s pvid %s", arg_vgid, arg_name, pvid ?: "none"); /* * Begin by figuring out what has changed: * . the VG could be new - found no existing record of the vgid or name. * . the VG could have a new vgid - found an existing record of the name. * . the VG could have a new name - found an existing record of the vgid. * . the VG could have unchanged vgid and name - found existing record of both. */ arg_name_lookup = dm_hash_lookup(s->vgid_to_vgname, arg_vgid); arg_vgid_lookup = dm_hash_lookup_with_val(s->vgname_to_vgid, arg_name, arg_vgid, strlen(arg_vgid) + 1); /* * A new VG when there is no existing record of the name or vgid args. */ if (!arg_name_lookup && !arg_vgid_lookup) { new_vgid = arg_vgid; new_name = arg_name; DEBUGLOG(s, "update_metadata new name %s and new vgid %s", new_name, new_vgid); goto update; } /* * An existing name has a new vgid (new_vgid = arg_vgid). * A lookup of the name arg was successful in finding arg_vgid_lookup, * but that resulting vgid doesn't match the arg_vgid. */ if (arg_vgid_lookup && strcmp(arg_vgid_lookup, arg_vgid)) { if (arg_name_lookup) { /* * This shouldn't happen. * arg_vgid should be new and should not map to any name. */ ERROR(s, "update_metadata arg_vgid %s arg_name %s unexpected arg_name_lookup %s", arg_vgid, arg_name, arg_name_lookup); needs_repair = 1; goto update; } new_vgid = arg_vgid; old_vgid = dm_hash_lookup_with_count(s->vgname_to_vgid, arg_name, &count); /* * FIXME: this ensures that arg_name maps to only one existing * VG (old_vgid), because if it maps to multiple vgids, then we * don't know which one should get the new vgid (arg_vgid). 
If * this function was given both the existing name and existing * vgid to identify the VG, then this wouldn't be a problem. * But as it is now, the vgid arg to this function is the new * vgid and the existing VG is specified only by name. */ if (old_vgid && (count > 1)) { ERROR(s, "update_metadata arg_vgid %s arg_name %s found %d vgids for name", arg_vgid, arg_name, count); old_vgid = NULL; } if (!old_vgid) { /* This shouldn't happen. */ ERROR(s, "update_metadata arg_vgid %s arg_name %s no old_vgid", arg_vgid, arg_name); needs_repair = 1; goto update; } if (!(old_meta = dm_hash_lookup(s->vgid_to_metadata, old_vgid))) { /* This shouldn't happen. */ ERROR(s, "update_metadata arg_vgid %s arg_name %s old_vgid %s no old_meta", arg_vgid, arg_name, old_vgid); needs_repair = 1; goto update; } DEBUGLOG(s, "update_metadata existing name %s has new vgid %s old vgid %s", arg_name, new_vgid, old_vgid); goto update; } /* * An existing vgid has a new name (new_name = arg_name). * A lookup of the vgid arg was successful in finding arg_name_lookup, * but that resulting name doesn't match the arg_name. */ if (arg_name_lookup && strcmp(arg_name_lookup, arg_name)) { if (arg_vgid_lookup) { /* * This shouldn't happen. * arg_name should be new and should not map to any vgid. */ ERROR(s, "update_metadata arg_vgid %s arg_name %s unexpected arg_vgid_lookup %s", arg_vgid, arg_name, arg_vgid_lookup); needs_repair = 1; goto update; } new_name = arg_name; old_name = dm_hash_lookup(s->vgid_to_vgname, arg_vgid); if (!old_name) { /* This shouldn't happen. */ ERROR(s, "update_metadata arg_vgid %s arg_name %s no old_name", arg_vgid, arg_name); needs_repair = 1; goto update; } if (!(old_meta = dm_hash_lookup(s->vgid_to_metadata, arg_vgid))) { /* This shouldn't happen. */ ERROR(s, "update_metadata arg_vgid %s arg_name %s old_name %s no old_meta", arg_vgid, arg_name, old_name); needs_repair = 1; goto update; } DEBUGLOG(s, "update_metadata existing vgid %s has new name %s old name %s", arg_vgid, new_name, old_name); goto update; } /* * An existing VG has unchanged name and vgid. */ if (!new_vgid && !new_name) { if (!arg_vgid_lookup || !arg_name_lookup) { /* This shouldn't happen. */ ERROR(s, "update_metadata arg_vgid %s arg_name %s missing lookups vgid %s name %s", arg_vgid ?: "none", arg_name ?: "none", arg_vgid_lookup ?: "none", arg_name_lookup ?: "none"); needs_repair = 1; goto update; } if (strcmp(arg_name_lookup, arg_name)) { /* This shouldn't happen. */ ERROR(s, "update_metadata arg_vgid %s arg_name %s mismatch arg_name_lookup %s", arg_vgid, arg_name, arg_name_lookup); needs_repair = 1; goto update; } if (strcmp(arg_vgid_lookup, arg_vgid)) { /* This shouldn't happen. Two VGs with the same name is handled above. */ ERROR(s, "update_metadata arg_vgid %s arg_name %s mismatch arg_vgid_lookup %s", arg_vgid, arg_name, arg_vgid_lookup); needs_repair = 1; goto update; } /* old_vgid == arg_vgid, and old_name == arg_name */ if (!(old_meta = dm_hash_lookup(s->vgid_to_metadata, arg_vgid))) { /* This shouldn't happen. */ ERROR(s, "update_metadata arg_vgid %s arg_name %s no old_meta", arg_vgid, arg_name); needs_repair = 1; goto update; } DEBUGLOG(s, "update_metadata existing vgid %s and existing name %s", arg_vgid, arg_name); goto update; } update: filter_metadata(new_metadata); /* sanitize */ /* * FIXME: verify that there's at least one PV in common between * the old and new metadata? 
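 *
 * The caller owns new_metadata (it lives in the request tree), so the
 * cache takes its own copy first; the next statements are roughly:
 *
 *   new_meta = dm_config_create();
 *   new_meta->root = dm_config_clone_node(new_meta, new_metadata, 0);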
*/ if (!(new_meta = dm_config_create()) || !(new_meta->root = dm_config_clone_node(new_meta, new_metadata, 0))) { ERROR(s, "update_metadata out of memory for new metadata for %s %s", arg_name, arg_vgid); /* FIXME: should we purge the old metadata here? */ retval = 0; goto out; } /* * Get the seqno from existing (old) and new metadata and perform * sanity checks for transitions that generally shouldn't happen. * Sometimes ignore the new metadata and leave the existing metadata * alone, and sometimes purge the existing metadata and add the new. * This often depends on whether the new metadata comes from a single * PV (via pv_found) that's been scanned, or a vg_update sent from a * command. */ new_seq = dm_config_find_int(new_metadata, "metadata/seqno", -1); if (old_meta) old_seq = dm_config_find_int(old_meta->root, "metadata/seqno", -1); if (ret_old_seq) *ret_old_seq = old_meta ? old_seq : new_seq; /* * The new metadata has an invalid seqno. * This shouldn't happen, but if it does, ignore the new metadata. */ if (new_seq <= 0) { ERROR(s, "update_metadata ignore new metadata because of invalid seqno for %s %s", arg_vgid, arg_name); DEBUGLOG_cft(s, "NEW: ", new_metadata); retval = 0; goto out; } /* * The new metadata is missing an internal vgid. * This shouldn't happen, but if it does, ignore the new metadata. */ if (!(new_metadata_vgid = dm_config_find_str(new_meta->root, "metadata/id", NULL))) { ERROR(s, "update_metadata has no internal vgid for %s %s", arg_name, arg_vgid); DEBUGLOG_cft(s, "NEW: ", new_metadata); retval = 0; goto out; } /* * The new metadata internal vgid doesn't match the arg vgid. * This shouldn't happen, but if it does, ignore the new metadata. */ if (strcmp(new_metadata_vgid, arg_vgid)) { ERROR(s, "update_metadata has bad internal vgid %s for %s %s", new_metadata_vgid, arg_name, arg_vgid); DEBUGLOG_cft(s, "NEW: ", new_metadata); retval = 0; goto out; } /* * A single PV appears with metadata that's inconsistent with * existing, ignore the PV. FIXME: make it outdated? */ if (pvid && needs_repair) { ERROR(s, "update_metadata ignore inconsistent metadata on PV %s seqno %d for %s %s seqno %d", pvid, new_seq, arg_vgid, arg_name, old_seq); if (old_meta) DEBUGLOG_cft(s, "OLD: ", old_meta->root); DEBUGLOG_cft(s, "NEW: ", new_metadata); retval = 0; goto out; } /* * A VG update with metadata that's inconsistent with existing. */ if (!pvid && needs_repair) { ERROR(s, "update_metadata inconsistent with cache for vgid %s and name %s", arg_vgid, arg_name); if (old_meta) DEBUGLOG_cft(s, "OLD: ", old_meta->root); DEBUGLOG_cft(s, "NEW: ", new_metadata); abort_daemon = 1; retval = 0; goto out; } /* * A single PV appears with metadata that's older than the existing, * e.g. an PV that had been in the VG has reappeared after the VG changed. * old PV: the PV that lvmetad was told about first * new PV: the PV that lvmetad is being told about here, second * old_seq: the larger seqno on the old PV, for the newer version of the VG * new_seq: the smaller seqno on the new PV, for the older version of the VG * * So, the new PV (by notification order) is "older" (in terms of * VG seqno) than the old PV. * * Make the new PV outdated so it'll be cleared and keep the existing * metadata from the old PV. 
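 *
 * Illustration with hypothetical seqnos: if the cache already holds
 * seqno 8 for this VG and the rescanned PV reports seqno 5, the check
 * below (new_seq 5 < old_seq 8) triggers, mark_outdated_pv() records the
 * PV, and the cached seqno-8 metadata is left untouched.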
*/ if (pvid && (old_seq > 0) && (new_seq < old_seq)) { ERROR(s, "update_metadata ignoring outdated metadata on PV %s seqno %d for %s %s seqno %d", pvid, new_seq, arg_vgid, arg_name, old_seq); DEBUGLOG_cft(s, "OLD: ", old_meta->root); DEBUGLOG_cft(s, "NEW: ", new_metadata); mark_outdated_pv(s, arg_vgid, pvid); retval = 0; goto out; } /* * A single PV appears with metadata that's newer than the existing, * e.g. a PV has been found with VG metadata that is newer than the * VG metdata we know about. This can happen when scanning PVs after * an outdated PV (with an older version of the VG metadata) has * reappeared. The rescanning may initially scan the outdated PV * and notify lvmetad about it, and then scan a current PV from * the VG and notify lvmetad about it. * old PV: the PV that lvmetad was told about first * new PV: the PV that lvmetad is being told about here, second * old_seq: the smaller seqno on the old PV, for the older version of the VG * new_seq: the larger seqno on the new PV, for the newer version of the VG * * Make the existing PVs outdated, and use the new metadata. */ if (pvid && (old_seq > 0) && (new_seq > old_seq)) { ERROR(s, "update_metadata found newer metadata on PV %s seqno %d for %s %s seqno %d", pvid, new_seq, arg_vgid, arg_name, old_seq); DEBUGLOG_cft(s, "OLD: ", old_meta->root); DEBUGLOG_cft(s, "NEW: ", new_metadata); _update_pvid_to_vgid(s, old_meta, arg_vgid, MARK_OUTDATED); } /* * The existing/old metadata has an invalid seqno. * This shouldn't happen, but if it does, purge old and add the new. */ if (old_meta && (old_seq <= 0)) { ERROR(s, "update_metadata bad old seqno %d for %s %s", old_seq, arg_name, arg_vgid); DEBUGLOG_cft(s, "OLD: ", old_meta->root); _purge_metadata(s, arg_name, arg_vgid); new_name = arg_name; new_vgid = arg_vgid; old_name = NULL; old_vgid = NULL; old_meta = NULL; old_seq = -1; } /* * A single PV appears with a seqno matching existing metadata, * but unmatching metadata content. This shouldn't happen, * but if it does, ignore the PV. FIXME: make it outdated? */ if (pvid && (new_seq == old_seq) && compare_config(new_metadata, old_meta->root)) { ERROR(s, "update_metadata from pv %s same seqno %d with unmatching data for %s %s", pvid, new_seq, arg_name, arg_vgid); DEBUGLOG_cft(s, "OLD: ", old_meta->root); DEBUGLOG_cft(s, "NEW: ", new_metadata); retval = 0; goto out; } /* * A VG update with metadata matching existing seqno but unmatching content. * This shouldn't happen, but if it does, purge existing and add the new. */ if (!pvid && (new_seq == old_seq) && compare_config(new_metadata, old_meta->root)) { ERROR(s, "update_metadata same seqno %d with unmatching data for %s %s", new_seq, arg_name, arg_vgid); DEBUGLOG_cft(s, "OLD: ", old_meta->root); DEBUGLOG_cft(s, "NEW: ", new_metadata); _purge_metadata(s, arg_name, arg_vgid); new_name = arg_name; new_vgid = arg_vgid; old_name = NULL; old_vgid = NULL; old_meta = NULL; old_seq = -1; } /* * A VG update with metadata older than existing. VG updates should * have increasing seqno. This shouldn't happen, but if it does, * purge existing and add the new. 
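 *
 * Illustration with hypothetical seqnos: a vg_update carrying seqno 6
 * while the cache holds seqno 9 hits the branch below; _purge_metadata()
 * drops the cached copy and the update then proceeds as if the VG were
 * newly added (new_name/new_vgid are set from the args).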
*/ if (!pvid && (new_seq < old_seq)) { ERROR(s, "update_metadata new seqno %d less than old seqno %d for %s %s", new_seq, old_seq, arg_name, arg_vgid); DEBUGLOG_cft(s, "OLD: ", old_meta->root); DEBUGLOG_cft(s, "NEW: ", new_metadata); _purge_metadata(s, arg_name, arg_vgid); new_name = arg_name; new_vgid = arg_vgid; old_name = NULL; old_vgid = NULL; old_meta = NULL; old_seq = -1; } /* * All the checks are done, do one of the four possible updates * outlined above: */ /* * Add metadata for a new VG to the cache. */ if (new_name && new_vgid) return _update_metadata_add_new(s, new_name, new_vgid, new_meta); /* * Update cached metadata for a VG with a new vgid. */ if (new_vgid) return _update_metadata_new_vgid(s, arg_name, old_vgid, new_vgid, old_meta, new_meta); /* * Update cached metadata for a renamed VG. */ if (new_name) return _update_metadata_new_name(s, arg_vgid, old_name, new_name, old_meta, new_meta); /* * If the old and new seqnos are the same, we've already compared the * old/new metadata and verified it's the same, so there's no reason * to replace old meta with new meta. */ if (old_seq == new_seq) { DEBUGLOG(s, "update_metadata skipped for %s %s seqno %d is unchanged", arg_name, arg_vgid, old_seq); dm_config_destroy(new_meta); new_meta = NULL; retval = 1; goto out; } /* * Update cached metdata for a VG with unchanged name and vgid. * Replace the old metadata with the new metadata. * old_meta is the old copy of the metadata from the cache. * new_meta is the new copy of the metadata from the command. */ DEBUGLOG(s, "update_metadata for %s %s from %d to %d", arg_name, arg_vgid, old_seq, new_seq); /* * The PVs in the VG may have changed in the new metadata, so * temporarily orphan all of the PVs in the existing VG. * The PVs that are still in the VG will be reassigned to this * VG below by the next call to _update_pvid_to_vgid(). */ if (!_update_pvid_to_vgid(s, old_meta, "#orphan", 0)) { ERROR(s, "update_metadata failed to move PVs for %s %s", arg_name, arg_vgid); abort_daemon = 1; retval = 0; goto out; } /* * The only hash table update that is needed is the actual * metadata config tree in vgid_to_metadata. The VG name * and vgid are unchanged. */ dm_hash_remove(s->vgid_to_metadata, arg_vgid); dm_config_destroy(old_meta); old_meta = NULL; if (!dm_hash_insert(s->vgid_to_metadata, arg_vgid, new_meta)) { ERROR(s, "update_metadata out of memory for hash insert for %s %s", arg_name, arg_vgid); abort_daemon = 1; retval = 0; goto out; } /* * Map the PVs in the new metadata to the vgid. * All pre-existing PVs were temporarily orphaned above. * Previous PVs that were removed from the VG will not * be remapped. New PVs that were added to the VG will * be newly mapped to this vgid, and previous PVs that * remain in the VG will be remapped to the VG again. 
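 *
 * Illustration with hypothetical PVs: if the old metadata listed pv0 and
 * pv1 and the new metadata lists pv1 and pv2, the orphaning step above
 * left pv0 and pv1 mapped to "#orphan"; the call below maps pv1 and pv2
 * to arg_vgid, so only pv0 remains orphaned.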
*/ if (!_update_pvid_to_vgid(s, new_meta, arg_vgid, 1)) { ERROR(s, "update_metadata failed to update PVs for %s %s", arg_name, arg_vgid); abort_daemon = 1; retval = 0; } else { DEBUGLOG(s, "update_metadata is done for %s %s", arg_name, arg_vgid); retval = 1; } out: if (abort_daemon) { ERROR(s, "lvmetad could not be updated is aborting."); exit(EXIT_FAILURE); } if (!retval && new_meta) dm_config_destroy(new_meta); return retval; } static response pv_gone(lvmetad_state *s, request r) { const char *arg_pvid = NULL; char *old_pvid = NULL; const char *pvid; int64_t device; struct dm_config_tree *pvmeta; char *vgid; arg_pvid = daemon_request_str(r, "uuid", NULL); device = daemon_request_int(r, "device", 0); if (!arg_pvid && device > 0) old_pvid = dm_hash_lookup_binary(s->device_to_pvid, &device, sizeof(device)); if (!arg_pvid && !old_pvid) { DEBUGLOG(s, "pv_gone device %" PRIu64 " not found", device); return reply_unknown("device not in cache"); } pvid = arg_pvid ? arg_pvid : old_pvid; DEBUGLOG(s, "pv_gone %s device %" PRIu64, pvid ?: "none", device); if (!(pvmeta = dm_hash_lookup(s->pvid_to_pvmeta, pvid))) { DEBUGLOG(s, "pv_gone %s device %" PRIu64 " has no PV metadata", pvid ?: "none", device); return reply_unknown("PVID does not exist"); } vgid = dm_hash_lookup(s->pvid_to_vgid, pvid); dm_hash_remove_binary(s->device_to_pvid, &device, sizeof(device)); dm_hash_remove(s->pvid_to_pvmeta, pvid); if (vgid) { char *vgid_dup; /* * vg_remove_if_missing will clear and free the pvid_to_vgid * mappings for this vg, which will free the "vgid" string that * was returned above from the pvid_to_vgid lookup. */ if (!(vgid_dup = dm_strdup(vgid))) return reply_fail("out of memory"); vg_remove_if_missing(s, vgid_dup, 1); dm_free(vgid_dup); vgid_dup = NULL; vgid = NULL; } dm_config_destroy(pvmeta); dm_free(old_pvid); return daemon_reply_simple("OK", NULL ); } static response pv_clear_all(lvmetad_state *s, request r) { DEBUGLOG(s, "pv_clear_all"); destroy_metadata_hashes(s); create_metadata_hashes(s); return daemon_reply_simple("OK", NULL); } /* * Returns 1 if PV metadata exists for all PVs in a VG. */ static int _vg_is_complete(lvmetad_state *s, struct dm_config_tree *vgmeta) { struct dm_config_node *vg = vgmeta->root; struct dm_config_node *pv; int complete = 1; const char *pvid; for (pv = pvs(vg); pv; pv = pv->sib) { if (!(pvid = dm_config_find_str(pv->child, "id", NULL))) continue; if (!dm_hash_lookup(s->pvid_to_pvmeta, pvid)) { complete = 0; break; } } return complete; } /* * pv_found: a PV has appeared and been scanned * It contains PV metadata, and optionally VG metadata. * Both kinds of metadata should be added to the cache * and hash table mappings related to the PV and device * should be updated. * * Input values from request: * . arg_pvmeta: PV metadata from the found pv * . arg_pvid: pvid from arg_pvmeta (pvmeta/id) * . arg_device: device from arg_pvmeta (pvmeta/device) * . arg_vgmeta: VG metadata from the found pv (optional) * . arg_name: VG name from found pv (optional) * . arg_vgid: VG vgid from arg_vgmeta (optional) * * Search for existing mappings in hash tables: * . pvid_to_pvmeta (which produces pvid to device) * . device_to_pvid * . pvid_to_vgid * * Existing data from cache: * . old_pvmeta: result of pvid_to_pvmeta(arg_pvid) * . arg_device_lookup: result of old_pvmeta:pvmeta/device using arg_pvid * . arg_pvid_lookup: result of device_to_pvid(arg_device) * . arg_vgid_lookup: result of pvid_to_vgid(arg_pvid) * * When arg_pvid doesn't match arg_pvid_lookup: * . 
a new PV replaces a previous PV on arg_device * . prev_pvid_on_dev: set to arg_pvid_lookup, pvid of the prev PV * . prev_pvmeta_on_dev: result pvid_to_pvmeta(prev_pvid_on_dev) * . prev_vgid_on_dev: result of pvid_to_vgid(prev_pvid_on_dev) * * Old PV on old device * . no PV/device mappings have changed * . arg_pvid_lookup == arg_pvid && arg_device_lookup == arg_device * . arg_device was used to look up a PV and found a PV with * the same pvid as arg_pvid * . arg_pvid was used to look up a PV and found a PV on the * same device as arg_device * . new_pvmeta may be more recent than old_pvmeta * * New PV on new device * . add new mappings in hash tables * . !arg_pvid_lookup && !arg_device_lookup * . arg_device was used to look up a PV and found nothing * . arg_pvid was used to look up a PV and found nothing * * New PV on old device * . a new PV replaces a previous PV on a device * . arg_pvid_lookup != arg_pvid * . arg_device was used to look up a PV and found a PV with * a different pvid than arg_pvid * . replace existing mappings for arg_device and arg_pvid * . replace existing old_pvmeta with new_pvmeta * . remove arg_device association with prev PV (prev_pvid_on_dev) * . possibly remove prev PV (if arg_device was previously a duplicate) * * Old PV on new device * . a duplicate PV * . arg_device_lookup != arg_device * . arg_pvid was used to look up a PV, and found that the PV * has a different device than arg_device. */ static response pv_found(lvmetad_state *s, request r) { struct dm_config_node *arg_vgmeta = NULL; struct dm_config_node *arg_pvmeta = NULL; struct dm_config_tree *old_pvmeta = NULL; struct dm_config_tree *new_pvmeta = NULL; struct dm_config_tree *prev_pvmeta_on_dev = NULL; struct dm_config_tree *vgmeta = NULL; const char *arg_pvid = NULL; const char *arg_pvid_lookup = NULL; const char *new_pvid = NULL; char *new_pvid_dup = NULL; const char *arg_name = NULL; const char *arg_vgid = NULL; const char *arg_vgid_lookup = NULL; const char *prev_pvid_on_dev = NULL; const char *prev_vgid_on_dev = NULL; const char *vg_status = NULL; uint64_t arg_device = 0; uint64_t arg_device_lookup = 0; uint64_t new_device = 0; uint64_t old_device = 0; int arg_seqno = -1; int old_seqno = -1; int vg_status_seqno = -1; int changed = 0; /* * New input values. */ if (!(arg_pvmeta = dm_config_find_node(r.cft->root, "pvmeta"))) { ERROR(s, "Ignore PV without PV metadata"); return reply_fail("Ignore PV without PV metadata"); } if (!(arg_pvid = daemon_request_str(r, "pvmeta/id", NULL))) { ERROR(s, "Ignore PV without PV UUID"); return reply_fail("Ignore PV without PV UUID"); } if (!dm_config_get_uint64(arg_pvmeta, "pvmeta/device", &arg_device)) { ERROR(s, "Ignore PV without device pvid %s", arg_pvid); return reply_fail("Ignore PV without device"); } if ((arg_vgmeta = dm_config_find_node(r.cft->root, "metadata"))) { arg_name = daemon_request_str(r, "vgname", NULL); arg_vgid = daemon_request_str(r, "metadata/id", NULL); arg_seqno = daemon_request_int(r, "metadata/seqno", -1); if (!arg_name || !arg_vgid || (arg_seqno < 0)) ERROR(s, "Ignore VG metadata from PV %s", arg_pvid); if (!arg_name) return reply_fail("Ignore VG metadata from PV without VG name"); if (!arg_vgid) return reply_fail("Ignore VG metadata from PV without VG vgid"); if (arg_seqno < 0) return reply_fail("Ignore VG metadata from PV without VG seqno"); } /* Make a copy of the new pvmeta that can be inserted into cache. 
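 *
 * arg_pvmeta still belongs to the request tree r.cft, so it is cloned
 * into new_pvmeta below; the clone is what ends up in pvid_to_pvmeta,
 * and any previously cached old_pvmeta for this pvid is destroyed further
 * below after the hash updates.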
*/ if (!(new_pvmeta = dm_config_create()) || !(new_pvmeta->root = dm_config_clone_node(new_pvmeta, arg_pvmeta, 0))) { ERROR(s, "pv_found out of memory for new pvmeta %s", arg_pvid); goto nomem; } /* * Existing (old) cache values. */ old_pvmeta = dm_hash_lookup(s->pvid_to_pvmeta, arg_pvid); if (old_pvmeta) dm_config_get_uint64(old_pvmeta->root, "pvmeta/device", &arg_device_lookup); arg_pvid_lookup = dm_hash_lookup_binary(s->device_to_pvid, &arg_device, sizeof(arg_device)); /* * Determine which of the four possible changes is happening * by comparing the existing/old and new values: * old PV, old device * new PV, new device * new PV, old device * old PV, new device */ if (arg_pvid_lookup && arg_device_lookup && (arg_device == arg_device_lookup) && !strcmp(arg_pvid_lookup, arg_pvid)) { /* * Old PV on old device (existing values unchanged) */ new_pvid = NULL; new_device = 0; DEBUGLOG(s, "pv_found pvid %s on device %" PRIu64 " matches existing", arg_pvid, arg_device); } else if (!arg_pvid_lookup && !arg_device_lookup) { /* * New PV on new device (no existing values) */ new_pvid = arg_pvid; new_device = arg_device; DEBUGLOG(s, "pv_found pvid %s on device %" PRIu64 " is new", arg_pvid, arg_device); } else if (arg_pvid_lookup && strcmp(arg_pvid_lookup, arg_pvid)) { /* * New PV on old device (existing device reused for new PV) */ new_pvid = arg_pvid; new_device = 0; prev_pvid_on_dev = arg_pvid_lookup; prev_pvmeta_on_dev = dm_hash_lookup(s->pvid_to_pvmeta, arg_pvid_lookup); prev_vgid_on_dev = dm_hash_lookup(s->pvid_to_vgid, arg_pvid_lookup); DEBUGLOG(s, "pv_found pvid %s vgid %s on device %" PRIu64 " previous pvid %s vgid %s", arg_pvid, arg_vgid ?: "none", arg_device, prev_pvid_on_dev, prev_vgid_on_dev ?: "none"); } else if (arg_device_lookup && (arg_device_lookup != arg_device)) { /* * Old PV on new device (existing PV on a new device, i.e. duplicate) */ new_device = arg_device; new_pvid = NULL; old_device = arg_device_lookup; DEBUGLOG(s, "pv_found pvid %s vgid %s on device %" PRIu64 " duplicate %" PRIu64, arg_pvid, arg_vgid ?: "none", arg_device, arg_device_lookup); } else { ERROR(s, "pv_found pvid %s vgid %s on device %" PRIu64 " unknown lookup %s %s %" PRIu64, arg_pvid, arg_vgid ?: "none", arg_device, arg_pvid_lookup ?: "none", arg_vgid_lookup ?: "none", arg_device_lookup); return reply_fail("Ignore PV for unknown state"); } /* * Make changes to hashes device_to_pvid and pvid_to_pvmeta for each case. */ if (!new_pvid && !new_device) { /* * Old PV on old device (unchanged) * . add new_pvmeta, replacing old_pvmeta */ if (compare_config(old_pvmeta->root, new_pvmeta->root)) changed |= 1; if (!dm_hash_insert(s->pvid_to_pvmeta, arg_pvid, new_pvmeta)) goto nomem_free1; } else if (new_pvid && new_device) { /* * New PV on new device (new entry) * . add new_device/new_pvid mapping * . add new_pvmeta */ changed |= 1; DEBUGLOG(s, "pv_found new entry device_to_pvid %" PRIu64 " to %s", new_device, new_pvid); if (!(new_pvid_dup = dm_strdup(new_pvid))) goto nomem_free1; if (!dm_hash_insert_binary(s->device_to_pvid, &new_device, sizeof(new_device), new_pvid_dup)) goto nomem_free2; if (!dm_hash_insert(s->pvid_to_pvmeta, new_pvid, new_pvmeta)) goto nomem_free1; } else if (new_pvid && !new_device) { /* * New PV on old device (existing device reused for new PV). * The previous PV on arg_device is replaced by the new one. * * Don't free prev_pvid or prev_vgid strings because they are * used at the end to check the VG metadata. 
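 *
 * Rough order of the branch below: drop the previous PV's pvmeta and the
 * device_to_pvid entry for arg_device; if new_pvid already has a cached
 * PV the device is treated as a duplicate and rejected; otherwise insert
 * the new device_to_pvid and pvid_to_pvmeta entries.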
*/ changed |= 1; if (prev_pvmeta_on_dev) { DEBUGLOG(s, "pv_found new pvid device_to_pvid %" PRIu64 " to %s removes prev pvid %s", arg_device, new_pvid, prev_pvid_on_dev); dm_hash_remove(s->pvid_to_pvmeta, prev_pvid_on_dev); dm_config_destroy(prev_pvmeta_on_dev); prev_pvmeta_on_dev = NULL; /* removes arg_device/prev_pvid_on_dev mapping */ dm_hash_remove_binary(s->device_to_pvid, &arg_device, sizeof(arg_device)); /* * The new PV replacing the prev PV was copied from * another existing PV, creating a duplicate PV which * we ignore. */ if (dm_hash_lookup(s->pvid_to_pvmeta, new_pvid)) { DEBUGLOG(s, "pv_found ignore duplicate device %" PRIu64 " of existing PV for pvid %s", arg_device, arg_pvid); dm_config_destroy(new_pvmeta); /* device_to_pvid no longer references prev_pvid_lookup */ dm_free((void*)prev_pvid_on_dev); s->flags |= GLFL_DISABLE; s->flags |= GLFL_DISABLE_REASON_DUPLICATES; return reply_fail("Ignore duplicate PV"); } } if (!(new_pvid_dup = dm_strdup(new_pvid))) goto nomem_free1; if (!dm_hash_insert_binary(s->device_to_pvid, &arg_device, sizeof(arg_device), new_pvid_dup)) goto nomem_free2; if (!dm_hash_insert(s->pvid_to_pvmeta, new_pvid, new_pvmeta)) goto nomem_free1; } else if (new_device && !new_pvid) { /* * Old PV on new device (duplicate) * Ignore it. */ DEBUGLOG(s, "pv_found ignore duplicate device %" PRIu64 " of existing device %" PRIu64 " for pvid %s", new_device, old_device, arg_pvid); dm_config_destroy(new_pvmeta); s->flags |= GLFL_DISABLE; s->flags |= GLFL_DISABLE_REASON_DUPLICATES; return reply_fail("Ignore duplicate PV"); } if (old_pvmeta) dm_config_destroy(old_pvmeta); /* * Update VG metadata cache with arg_vgmeta from the PV, or * if the PV holds no VG metadata, then look up the vgid and * name of the VG so we can check if the VG is complete. */ if (arg_vgmeta) { DEBUGLOG(s, "pv_found pvid %s has VG %s %s seqno %d", arg_pvid, arg_name, arg_vgid, arg_seqno); if (!_update_metadata(s, arg_name, arg_vgid, arg_vgmeta, &old_seqno, arg_pvid)) { ERROR(s, "Cannot use VG metadata for %s %s from PV %s on %" PRIu64, arg_name, arg_vgid, arg_pvid, arg_device); } changed |= (old_seqno != arg_seqno); } else { arg_vgid = dm_hash_lookup(s->pvid_to_vgid, arg_pvid); if (arg_vgid) { arg_name = dm_hash_lookup(s->vgid_to_vgname, arg_vgid); } } /* * Check if the VG is complete (all PVs have been found) because * the reply indicates if the the VG is complete or partial. * The "vgmeta" from dm_hash_lookup will be a copy of arg_vgmeta that * was cloned and added to the cache by update_metadata. */ if (!arg_vgid || !strcmp(arg_vgid, "#orphan")) { DEBUGLOG(s, "pv_found pvid %s on %" PRIu64 " not in VG %s", arg_pvid, arg_device, arg_vgid ?: ""); vg_status = "orphan"; goto prev_vals; } if (!(vgmeta = dm_hash_lookup(s->vgid_to_metadata, arg_vgid))) { ERROR(s, "pv_found %s on %" PRIu64 " vgid %s no VG metadata found", arg_pvid, arg_device, arg_vgid); } else { vg_status = _vg_is_complete(s, vgmeta) ? "complete" : "partial"; vg_status_seqno = dm_config_find_int(vgmeta->root, "metadata/seqno", -1); } prev_vals: /* * If the device previously held a different VG (prev_vgid_on_dev), * then that VG should be removed if no devices are left for it. * * The mapping from the device's previous pvid to the previous vgid * is removed. 
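 *
 * A rough sketch of the cleanup below:
 *
 *   vg_remove_if_missing(s, prev_vgid_on_dev, 1)       drop the VG if it
 *                                                      lost its last device
 *   dm_hash_remove(s->pvid_to_vgid, prev_pvid_on_dev)  drop the stale mapping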
*/ if (prev_pvid_on_dev || prev_vgid_on_dev) { DEBUGLOG(s, "pv_found pvid %s on %" PRIu64 " had prev pvid %s prev vgid %s", arg_pvid, arg_device, prev_pvid_on_dev ?: "none", prev_vgid_on_dev ?: "none"); } if (prev_vgid_on_dev) { char *tmp_vgid; if (!arg_vgid || strcmp(arg_vgid, prev_vgid_on_dev)) { tmp_vgid = dm_strdup(prev_vgid_on_dev); /* vg_remove_if_missing will clear and free the string pointed to by prev_vgid_on_dev. */ vg_remove_if_missing(s, tmp_vgid, 1); dm_free(tmp_vgid); } /* vg_remove_if_missing may have remapped prev_pvid_on_dev to orphan */ if ((tmp_vgid = dm_hash_lookup(s->pvid_to_vgid, prev_pvid_on_dev))) { dm_hash_remove(s->pvid_to_vgid, prev_pvid_on_dev); dm_free(tmp_vgid); } } /* This was unhashed from device_to_pvid above. */ dm_free((void *)prev_pvid_on_dev); return daemon_reply_simple("OK", "status = %s", vg_status, "changed = " FMTd64, (int64_t) changed, "vgid = %s", arg_vgid ? arg_vgid : "#orphan", "vgname = %s", arg_name ? arg_name : "#orphan", "seqno_before = " FMTd64, (int64_t) old_seqno, "seqno_after = " FMTd64, (int64_t) vg_status_seqno, NULL); nomem_free2: dm_free(new_pvid_dup); nomem_free1: dm_config_destroy(new_pvmeta); nomem: ERROR(s, "pv_found %s is out of memory.", arg_pvid); ERROR(s, "lvmetad could not be updated is aborting."); reply_fail("out of memory"); exit(EXIT_FAILURE); } static response vg_clear_outdated_pvs(lvmetad_state *s, request r) { struct dm_config_tree *outdated_pvs; const char *vgid = daemon_request_str(r, "vgid", NULL); if (!vgid) return reply_fail("need VG UUID"); DEBUGLOG(s, "vg_clear_outdated_pvs vgid %s", vgid); if ((outdated_pvs = dm_hash_lookup(s->vgid_to_outdated_pvs, vgid))) { dm_config_destroy(outdated_pvs); dm_hash_remove(s->vgid_to_outdated_pvs, vgid); } return daemon_reply_simple("OK", NULL); } static void vg_info_update(lvmetad_state *s, const char *uuid, struct dm_config_node *metadata) { struct vg_info *info; int64_t cache_version; cache_version = dm_config_find_int64(metadata, "metadata/seqno", -1); if (cache_version == -1) return; info = (struct vg_info *) dm_hash_lookup(s->vgid_to_info, uuid); if (!info) return; if (cache_version >= info->external_version) info->flags &= ~VGFL_INVALID; } static response vg_update(lvmetad_state *s, request r) { struct dm_config_node *metadata = dm_config_find_node(r.cft->root, "metadata"); const char *vgid = daemon_request_str(r, "metadata/id", NULL); const char *vgname = daemon_request_str(r, "vgname", NULL); DEBUGLOG(s, "vg_update vgid %s name %s", vgid ?: "none", vgname ?: "none"); if (metadata) { if (!vgid) { ERROR(s, "vg_update failed: need VG UUID"); reply_fail("vg_update: need VG UUID"); goto fail; } if (!vgname) { ERROR(s, "vg_update failed: need VG name"); reply_fail("vg_update: need VG name"); goto fail; } if (daemon_request_int(r, "metadata/seqno", -1) < 0) { ERROR(s, "vg_update failed: need VG seqno"); reply_fail("vg_update: need VG seqno"); goto fail; } /* TODO defer metadata update here; add a separate vg_commit * call; if client does not commit, die */ if (!_update_metadata(s, vgname, vgid, metadata, NULL, NULL)) { ERROR(s, "vg_update failed: metadata update failed"); reply_fail("vg_update: failed metadata update"); goto fail; } vg_info_update(s, vgid, metadata); } return daemon_reply_simple("OK", NULL); fail: ERROR(s, "lvmetad could not be updated is aborting."); exit(EXIT_FAILURE); } static response vg_remove(lvmetad_state *s, request r) { const char *vgid = daemon_request_str(r, "uuid", NULL); if (!vgid) return reply_fail("need VG UUID"); DEBUGLOG(s, "vg_remove: %s", 
vgid); remove_metadata(s, vgid, 1); return daemon_reply_simple("OK", NULL); } /* * Whether lvmetad is disabled is determined only by the single * flag GLFL_DISABLE. The REASON flags are only explanatory * additions to GLFL_DISABLE, and do not control the disabled state. * The REASON flags can accumulate if multiple reasons exist for * the disabled flag. When clearing GLFL_DISABLE, all REASON flags * are cleared. The caller clearing GLFL_DISABLE should only do so * when all the reasons for it have gone. */ static response set_global_info(lvmetad_state *s, request r) { const int global_invalid = daemon_request_int(r, "global_invalid", -1); const int global_disable = daemon_request_int(r, "global_disable", -1); const char *reason; uint32_t reason_flags = 0; if ((reason = daemon_request_str(r, "disable_reason", NULL))) { if (strstr(reason, LVMETAD_DISABLE_REASON_DIRECT)) reason_flags |= GLFL_DISABLE_REASON_DIRECT; if (strstr(reason, LVMETAD_DISABLE_REASON_REPAIR)) reason_flags |= GLFL_DISABLE_REASON_REPAIR; if (strstr(reason, LVMETAD_DISABLE_REASON_LVM1)) reason_flags |= GLFL_DISABLE_REASON_LVM1; if (strstr(reason, LVMETAD_DISABLE_REASON_DUPLICATES)) reason_flags |= GLFL_DISABLE_REASON_DUPLICATES; if (strstr(reason, LVMETAD_DISABLE_REASON_VGRESTORE)) reason_flags |= GLFL_DISABLE_REASON_VGRESTORE; } if (global_invalid != -1) { DEBUGLOG(s, "set global info invalid from %d to %d", (s->flags & GLFL_INVALID) ? 1 : 0, global_invalid); } if (global_disable != -1) { DEBUGLOG(s, "set global info disable from %d to %d %s", (s->flags & GLFL_DISABLE) ? 1 : 0, global_disable, reason ? reason : ""); } if (global_invalid == 1) s->flags |= GLFL_INVALID; else if (global_invalid == 0) s->flags &= ~GLFL_INVALID; if (global_disable == 1) { s->flags |= GLFL_DISABLE; s->flags |= reason_flags; } else if (global_disable == 0) { s->flags &= ~GLFL_DISABLE; s->flags &= ~GLFL_DISABLE_REASON_ALL; } return daemon_reply_simple("OK", NULL); } #define REASON_BUF_SIZE 64 /* * Save the time when "updating" begins, and the config setting for how long * the update is allowed to take. Before returning "updating" as the token * value in get_global_info, check if the update has exceeded the max allowed * time. If so, then return "none" as the current token value (i.e. * uninitialized), so that the command will repopulate our cache. * * This automatically clears a stuck update, where a command started to update * the cache and then failed, leaving the token set to "update in progress". */ static response get_global_info(lvmetad_state *s, request r) { char reason[REASON_BUF_SIZE]; char flag_str[64]; int pid; /* This buffer should be large enough to hold all the possible reasons. */ memset(reason, 0, sizeof(reason)); pid = (int)daemon_request_int(r, "pid", 0); if (s->flags & GLFL_DISABLE) { snprintf(reason, REASON_BUF_SIZE - 1, "%s%s%s%s%s", (s->flags & GLFL_DISABLE_REASON_DIRECT) ? LVMETAD_DISABLE_REASON_DIRECT "," : "", (s->flags & GLFL_DISABLE_REASON_REPAIR) ? LVMETAD_DISABLE_REASON_REPAIR "," : "", (s->flags & GLFL_DISABLE_REASON_LVM1) ? LVMETAD_DISABLE_REASON_LVM1 "," : "", (s->flags & GLFL_DISABLE_REASON_DUPLICATES) ? LVMETAD_DISABLE_REASON_DUPLICATES "," : "", (s->flags & GLFL_DISABLE_REASON_VGRESTORE) ? LVMETAD_DISABLE_REASON_VGRESTORE "," : ""); } if (!reason[0]) strcpy(reason, "none"); /* * If the current update has timed out, then return * token of "none" which means "uninitialized" so that * the caller will repopulate lvmetad. 
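 *
 * Illustration with hypothetical numbers: with update_timeout 60 and
 * update_begin taken 90 monotonic seconds ago, the check below clears
 * the token and the update_* fields, so this reply reports token "none"
 * and the client repopulates the cache instead of waiting on a stalled
 * update.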
*/ if (s->update_begin && s->update_timeout) { if (_monotonic_seconds() - s->update_begin >= s->update_timeout) { DEBUGLOG(s, "global info cancel update after timeout %d len %d begin %llu pid %d cmd %s", s->update_timeout, (int)(_monotonic_seconds() - s->update_begin), (unsigned long long)s->update_begin, s->update_pid, s->update_cmd); memset(s->token, 0, sizeof(s->token)); s->update_begin = 0; s->update_timeout = 0; s->update_pid = 0; memset(s->update_cmd, 0, CMD_NAME_SIZE); } } memset(flag_str, 0, sizeof(flag_str)); if (s->flags & GLFL_INVALID) strcat(flag_str, "Invalid"); if (s->flags & GLFL_DISABLE) strcat(flag_str, "Disable"); if (!flag_str[0]) strcat(flag_str, "none"); DEBUGLOG(s, "%d global info flags %s reason %s token %s update_pid %d", pid, flag_str, reason, s->token[0] ? s->token : "none", s->update_pid); return daemon_reply_simple("OK", "global_invalid = " FMTd64, (int64_t)((s->flags & GLFL_INVALID) ? 1 : 0), "global_disable = " FMTd64, (int64_t)((s->flags & GLFL_DISABLE) ? 1 : 0), "disable_reason = %s", reason, "daemon_pid = " FMTd64, (int64_t)getpid(), "token = %s", s->token[0] ? s->token : "none", "update_cmd = %s", s->update_cmd, "update_pid = " FMTd64, (int64_t)s->update_pid, "update_begin = " FMTd64, (int64_t)s->update_begin, "update_timeout = " FMTd64, (int64_t)s->update_timeout, NULL); } static response set_vg_info(lvmetad_state *s, request r) { struct dm_config_tree *vg; struct vg_info *info; const char *name = NULL; const char *uuid = NULL; const int64_t new_version = daemon_request_int(r, "version", -1); int64_t cache_version = -1; if (new_version == -1) goto out; if (!(uuid = daemon_request_str(r, "uuid", NULL))) goto use_name; if ((vg = dm_hash_lookup(s->vgid_to_metadata, uuid))) goto vers; use_name: if (!(name = daemon_request_str(r, "name", NULL))) goto out; if (!(uuid = dm_hash_lookup(s->vgname_to_vgid, name))) goto out; /* * FIXME: if we only have the name and multiple VGs have that name, * then invalidate each of them. 
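 * As the code stands, dm_hash_lookup() on vgname_to_vgid returns a single
 * vgid even though duplicate names can be stored there (the update paths
 * insert with dm_hash_insert_allow_multiple), so only that one VG gets
 * invalidated when the lookup is by name.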
*/ if (!(vg = dm_hash_lookup(s->vgid_to_metadata, uuid))) goto out; vers: if (!new_version) goto inval; cache_version = dm_config_find_int64(vg->root, "metadata/seqno", -1); if (cache_version != -1 && new_version != -1 && cache_version >= new_version) goto out; inval: DEBUGLOG(s, "set info VG name %s uuid %s cache_version %d new_version %d", name ?: "none", uuid ?: "none", (int)cache_version, (int)new_version); info = dm_hash_lookup(s->vgid_to_info, uuid); if (!info) { info = malloc(sizeof(struct vg_info)); if (!info) goto bad; memset(info, 0, sizeof(struct vg_info)); if (!dm_hash_insert(s->vgid_to_info, uuid, (void*)info)) goto bad; } info->external_version = new_version; info->flags |= VGFL_INVALID; out: return daemon_reply_simple("OK", NULL); bad: return reply_fail("out of memory"); } static void _dump_cft(struct buffer *buf, struct dm_hash_table *ht, const char *key_addr) { struct dm_hash_node *n; dm_hash_iterate(n, ht) { struct dm_config_tree *cft = dm_hash_get_data(ht, n); const char *key_backup = cft->root->key; cft->root->key = dm_config_find_str(cft->root, key_addr, "unknown"); (void) dm_config_write_node(cft->root, buffer_line, buf); cft->root->key = key_backup; } } static void _dump_pairs(struct buffer *buf, struct dm_hash_table *ht, const char *name, int int_key) { char *append; struct dm_hash_node *n; buffer_append(buf, name); buffer_append(buf, " {\n"); dm_hash_iterate(n, ht) { const char *key = dm_hash_get_key(ht, n), *val = dm_hash_get_data(ht, n); if (int_key) (void) dm_asprintf(&append, " %d = \"%s\"\n", *(const int*)key, val); else (void) dm_asprintf(&append, " %s = \"%s\"\n", key, val); if (append) buffer_append(buf, append); dm_free(append); } buffer_append(buf, "}\n"); } static void _dump_info_version(struct buffer *buf, struct dm_hash_table *ht, const char *name, int int_key) { char *append; struct dm_hash_node *n = dm_hash_get_first(ht); struct vg_info *info; buffer_append(buf, name); buffer_append(buf, " {\n"); while (n) { const char *key = dm_hash_get_key(ht, n); info = dm_hash_get_data(ht, n); (void) dm_asprintf(&append, " %s = %lld\n", key, (long long)info->external_version); if (append) buffer_append(buf, append); dm_free(append); n = dm_hash_get_next(ht, n); } buffer_append(buf, "}\n"); } static void _dump_info_flags(struct buffer *buf, struct dm_hash_table *ht, const char *name, int int_key) { char *append; struct dm_hash_node *n = dm_hash_get_first(ht); struct vg_info *info; buffer_append(buf, name); buffer_append(buf, " {\n"); while (n) { const char *key = dm_hash_get_key(ht, n); info = dm_hash_get_data(ht, n); (void) dm_asprintf(&append, " %s = %llx\n", key, (long long)info->flags); if (append) buffer_append(buf, append); dm_free(append); n = dm_hash_get_next(ht, n); } buffer_append(buf, "}\n"); } static response dump(lvmetad_state *s) { response res = { 0 }; struct buffer *b = &res.buffer; buffer_init(b); /* Lock everything so that we get a consistent dump. 
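 *
 * The lock is taken by handler() below, which acquires the cache read
 * lock before dispatching a "dump" request.  The output is plain
 * config-style text with one section per hash table: VG metadata, PV
 * metadata, the vgid/vgname maps, pvid_to_vgid, device_to_pvid, and the
 * vgid_to_info version and flags maps.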
*/ buffer_append(b, "# VG METADATA\n\n"); _dump_cft(b, s->vgid_to_metadata, "metadata/id"); buffer_append(b, "\n# PV METADATA\n\n"); _dump_cft(b, s->pvid_to_pvmeta, "pvmeta/id"); buffer_append(b, "\n# VGID to VGNAME mapping\n\n"); _dump_pairs(b, s->vgid_to_vgname, "vgid_to_vgname", 0); buffer_append(b, "\n# VGID to outdated PVs mapping\n\n"); _dump_cft(b, s->vgid_to_outdated_pvs, "outdated_pvs/vgid"); buffer_append(b, "\n# VGNAME to VGID mapping\n\n"); _dump_pairs(b, s->vgname_to_vgid, "vgname_to_vgid", 0); buffer_append(b, "\n# PVID to VGID mapping\n\n"); _dump_pairs(b, s->pvid_to_vgid, "pvid_to_vgid", 0); buffer_append(b, "\n# DEVICE to PVID mapping\n\n"); _dump_pairs(b, s->device_to_pvid, "device_to_pvid", 1); buffer_append(b, "\n# VGID to INFO version mapping\n\n"); _dump_info_version(b, s->vgid_to_info, "vgid_to_info", 0); buffer_append(b, "\n# VGID to INFO flags mapping\n\n"); _dump_info_flags(b, s->vgid_to_info, "vgid_to_info", 0); return res; } static response handler(daemon_state s, client_handle h, request r) { response res; lvmetad_state *state = s.private; char prev_token[128] = { 0 }; const char *rq; const char *token; const char *cmd; int prev_in_progress, this_in_progress; int update_timeout; int pid; int cache_lock = 0; int info_lock = 0; rq = daemon_request_str(r, "request", "NONE"); token = daemon_request_str(r, "token", "NONE"); pid = (int)daemon_request_int(r, "pid", 0); cmd = daemon_request_str(r, "cmd", "NONE"); update_timeout = (int)daemon_request_int(r, "update_timeout", 0); pthread_mutex_lock(&state->token_lock); /* * token_update: start populating the cache, i.e. a full update. * To populate the lvmetad cache, a command does: * * - token_update, setting token to "update in progress" * (further requests during the update continue using * this same "update in progress" token) * - pv_clear_all, to clear the current cache * - pv_gone, for each PV * - pv_found, for each PV to populate the cache * - token_update, setting token to filter hash */ if (!strcmp(rq, "token_update")) { prev_in_progress = !strcmp(state->token, LVMETAD_TOKEN_UPDATE_IN_PROGRESS); this_in_progress = !strcmp(token, LVMETAD_TOKEN_UPDATE_IN_PROGRESS); if (!prev_in_progress && this_in_progress) { /* New update is starting (filter token is replaced by update token) */ memcpy(prev_token, state->token, 128); strncpy(state->token, token, 128); state->token[127] = 0; state->update_begin = _monotonic_seconds(); state->update_timeout = update_timeout; state->update_pid = pid; strncpy(state->update_cmd, cmd, CMD_NAME_SIZE - 1); DEBUGLOG(state, "token_update begin %llu timeout %d pid %d cmd %s", (unsigned long long)state->update_begin, state->update_timeout, state->update_pid, state->update_cmd); } else if (prev_in_progress && this_in_progress) { /* Current update is cancelled and replaced by a new update */ DEBUGLOG(state, "token_update replacing pid %d begin %llu len %d cmd %s", state->update_pid, (unsigned long long)state->update_begin, (int)(_monotonic_seconds() - state->update_begin), state->update_cmd); memcpy(prev_token, state->token, 128); strncpy(state->token, token, 128); state->token[127] = 0; state->update_begin = _monotonic_seconds(); state->update_timeout = update_timeout; state->update_pid = pid; strncpy(state->update_cmd, cmd, CMD_NAME_SIZE - 1); DEBUGLOG(state, "token_update begin %llu timeout %d pid %d cmd %s", (unsigned long long)state->update_begin, state->update_timeout, state->update_pid, state->update_cmd); } else if (prev_in_progress && !this_in_progress) { /* Update is finished, update 
token is replaced by filter token */ if (state->update_pid != pid) { /* If a pid doing update was cancelled, ignore its token update at the end. */ DEBUGLOG(state, "token_update ignored from cancelled update pid %d", pid); pthread_mutex_unlock(&state->token_lock); return daemon_reply_simple("token_mismatch", "expected = %s", state->token, "received = %s", token, "update_pid = " FMTd64, (int64_t)state->update_pid, "reason = %s", "another command has populated the cache", NULL); } DEBUGLOG(state, "token_update end len %d pid %d new token %s", (int)(_monotonic_seconds() - state->update_begin), state->update_pid, token); memcpy(prev_token, state->token, 128); strncpy(state->token, token, 128); state->token[127] = 0; state->update_begin = 0; state->update_timeout = 0; state->update_pid = 0; memset(state->update_cmd, 0, CMD_NAME_SIZE); } pthread_mutex_unlock(&state->token_lock); return daemon_reply_simple("OK", "prev_token = %s", prev_token, "update_pid = " FMTd64, (int64_t)state->update_pid, NULL); } if (strcmp(token, state->token) && strcmp(rq, "dump") && strcmp(token, "skip")) { pthread_mutex_unlock(&state->token_lock); DEBUGLOG(state, "token_mismatch current \"%s\" got \"%s\" from pid %d cmd %s", state->token, token, pid, cmd ?: "none"); return daemon_reply_simple("token_mismatch", "expected = %s", state->token, "received = %s", token, "update_pid = " FMTd64, (int64_t)state->update_pid, "reason = %s", "another command has populated the cache", NULL); } /* If a pid doing update was cancelled, ignore its update messages. */ if (!strcmp(token, LVMETAD_TOKEN_UPDATE_IN_PROGRESS) && state->update_pid && pid && (state->update_pid != pid)) { pthread_mutex_unlock(&state->token_lock); DEBUGLOG(state, "token_mismatch ignore update from pid %d current update pid %d", pid, state->update_pid); return daemon_reply_simple("token_mismatch", "expected = %s", state->token, "received = %s", token, "update_pid = " FMTd64, (int64_t)state->update_pid, "reason = %s", "another command has populated the lvmetad cache", NULL); } pthread_mutex_unlock(&state->token_lock); if (!strcmp(rq, "pv_found") || !strcmp(rq, "pv_gone") || !strcmp(rq, "vg_update") || !strcmp(rq, "vg_remove") || !strcmp(rq, "set_vg_info") || !strcmp(rq, "pv_clear_all") || !strcmp(rq, "vg_clear_outdated_pvs")) { pthread_rwlock_wrlock(&state->cache_lock); cache_lock = 1; goto do_rq; } if (!strcmp(rq, "pv_lookup") || !strcmp(rq, "vg_lookup") || !strcmp(rq, "pv_list") || !strcmp(rq, "vg_list") || !strcmp(rq, "dump")) { pthread_rwlock_rdlock(&state->cache_lock); cache_lock = 1; goto do_rq; } if (!strcmp(rq, "set_global_info") || !strcmp(rq, "get_global_info")) { pthread_mutex_lock(&state->info_lock); info_lock = 1; goto do_rq; } do_rq: if (!strcmp(rq, "pv_found")) res = pv_found(state, r); else if (!strcmp(rq, "pv_gone")) res = pv_gone(state, r); else if (!strcmp(rq, "pv_clear_all")) res = pv_clear_all(state, r); else if (!strcmp(rq, "pv_lookup")) res = pv_lookup(state, r); else if (!strcmp(rq, "vg_update")) res = vg_update(state, r); else if (!strcmp(rq, "vg_clear_outdated_pvs")) res = vg_clear_outdated_pvs(state, r); else if (!strcmp(rq, "vg_remove")) res = vg_remove(state, r); else if (!strcmp(rq, "vg_lookup")) res = vg_lookup(state, r); else if (!strcmp(rq, "pv_list")) res = pv_list(state, r); else if (!strcmp(rq, "vg_list")) res = vg_list(state, r); else if (!strcmp(rq, "set_global_info")) res = set_global_info(state, r); else if (!strcmp(rq, "get_global_info")) res = get_global_info(state, r); else if (!strcmp(rq, "set_vg_info")) res = 
set_vg_info(state, r); else if (!strcmp(rq, "dump")) res = dump(state); else res = reply_fail("request not implemented"); if (cache_lock) pthread_rwlock_unlock(&state->cache_lock); if (info_lock) pthread_mutex_unlock(&state->info_lock); return res; } static int init(daemon_state *s) { lvmetad_state *ls = s->private; ls->log = s->log; pthread_mutex_init(&ls->token_lock, NULL); pthread_mutex_init(&ls->info_lock, NULL); pthread_rwlock_init(&ls->cache_lock, NULL); create_metadata_hashes(ls); ls->token[0] = 0; /* Set up stderr logging depending on the -l option. */ if (!daemon_log_parse(ls->log, DAEMON_LOG_OUTLET_STDERR, ls->log_config, 1)) return 0; DEBUGLOG(s, "initialised state: vgid_to_metadata = %p", ls->vgid_to_metadata); if (!ls->pvid_to_vgid || !ls->vgid_to_metadata) return 0; /* if (ls->initial_registrations) _process_initial_registrations(ds->initial_registrations); */ if (ls->idle) ls->idle->is_idle = 1; return 1; } static int fini(daemon_state *s) { lvmetad_state *ls = s->private; DEBUGLOG(s, "fini"); destroy_metadata_hashes(ls); return 1; } static int process_timeout_arg(const char *str, unsigned *max_timeouts) { char *endptr; unsigned long l; errno = 0; l = strtoul(str, &endptr, 10); if (errno || *endptr || l >= UINT_MAX) return 0; *max_timeouts = (unsigned) l; return 1; } static void usage(const char *prog, FILE *file) { fprintf(file, "Usage:\n" "%s [-V] [-h] [-f] [-l level[,level ...]] [-s path] [-t secs]\n\n" " -V Show version of lvmetad\n" " -h Show this help information\n" " -f Don't fork, run in the foreground\n" " -l Logging message levels (all,fatal,error,warn,info,wire,debug)\n" " -p Set path to the pidfile\n" " -s Set path to the socket to listen on\n" " -t Time to wait in seconds before shutdown on idle (missing or 0 = inifinite)\n\n", prog); } int main(int argc, char *argv[]) { signed char opt; struct timeval timeout; daemon_idle di = { .ptimeout = &timeout }; lvmetad_state ls = { .log_config = "" }; daemon_state s = { .daemon_fini = fini, .daemon_init = init, .handler = handler, .name = "lvmetad", .pidfile = getenv("LVM_LVMETAD_PIDFILE") ? : LVMETAD_PIDFILE, .private = &ls, .protocol = "lvmetad", .protocol_version = 1, .socket_path = getenv("LVM_LVMETAD_SOCKET") ? : LVMETAD_SOCKET, }; // use getopt_long while ((opt = getopt(argc, argv, "?fhVl:p:s:t:")) != EOF) { switch (opt) { case 'h': usage(argv[0], stdout); exit(0); case '?': usage(argv[0], stderr); exit(0); case 'f': s.foreground = 1; break; case 'l': ls.log_config = optarg; break; case 'p': s.pidfile = optarg; break; case 's': // --socket s.socket_path = optarg; break; case 't': if (!process_timeout_arg(optarg, &di.max_timeouts)) { fprintf(stderr, "Invalid value of timeout parameter.\n"); exit(EXIT_FAILURE); } /* 0 equals to wait indefinitely */ if (di.max_timeouts) s.idle = ls.idle = &di; break; case 'V': printf("lvmetad version: " LVM_VERSION "\n"); exit(1); } } daemon_start(s); return 0; } LVM2.2.02.176/daemons/lvmdbusd/0000755000000000000120000000000013176752421014572 5ustar rootwheelLVM2.2.02.176/daemons/lvmdbusd/Makefile.in0000644000000000000120000000316413176752421016643 0ustar rootwheel# # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ lvmdbusdir = $(python3dir)/lvmdbusd LVMDBUS_SRCDIR_FILES = \ automatedproperties.py \ background.py \ cfg.py \ cmdhandler.py \ fetch.py \ __init__.py \ job.py \ loader.py \ lvmdb.py \ main.py \ lvm_shell_proxy.py \ lv.py \ manager.py \ objectmanager.py \ pv.py \ request.py \ state.py \ udevwatch.py \ utils.py \ vg.py LVMDBUS_BUILDDIR_FILES = \ path.py LVMDBUSD = $(srcdir)/lvmdbusd include $(top_builddir)/make.tmpl .PHONY: install_lvmdbusd install_lvmdbusd: $(INSTALL_DIR) $(sbindir) $(INSTALL_SCRIPT) $(LVMDBUSD) $(sbindir) $(INSTALL_DIR) $(DESTDIR)$(lvmdbusdir) (cd $(srcdir); $(INSTALL_DATA) $(LVMDBUS_SRCDIR_FILES) $(DESTDIR)$(lvmdbusdir)) $(INSTALL_DATA) $(LVMDBUS_BUILDDIR_FILES) $(DESTDIR)$(lvmdbusdir) PYTHON=$(PYTHON3) $(PYCOMPILE) --destdir "$(DESTDIR)" --basedir "$(lvmdbusdir)" $(LVMDBUS_SRCDIR_FILES) $(LVMDBUS_BUILDDIR_FILES) $(CHMOD) 755 $(DESTDIR)$(lvmdbusdir)/__pycache__ $(CHMOD) 444 $(DESTDIR)$(lvmdbusdir)/__pycache__/*.py[co] install_lvm2: install_lvmdbusd install: install_lvm2 DISTCLEAN_TARGETS+= \ $(LVMDBUS_BUILDDIR_FILES) LVM2.2.02.176/daemons/lvmdbusd/lv.py0000644000000000000120000006121313176752421015570 0ustar rootwheel# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from .automatedproperties import AutomatedProperties from . import utils from .utils import vg_obj_path_generate import dbus from . import cmdhandler from . import cfg from .cfg import LV_INTERFACE, THIN_POOL_INTERFACE, SNAPSHOT_INTERFACE, \ LV_COMMON_INTERFACE, CACHE_POOL_INTERFACE, LV_CACHED from .request import RequestEntry from .utils import n, n32 from .loader import common from .state import State from . import background from .utils import round_size, mt_remove_dbus_objects from .job import JobState # Try and build a key for a LV, so that we sort the LVs with least dependencies # first. This may be error prone because of the flexibility LVM # provides and what you can stack. def get_key(i): name = i['lv_name'] parent = i['lv_parent'] pool = i['pool_lv'] a1 = "" a2 = "" if name[0] == '[': a1 = '#' # We have a parent if parent: # Check if parent is hidden if parent[0] == '[': a2 = '##' else: a2 = '#' # If a LV has a pool, then it should be sorted/loaded after the pool # lv, unless it's a hidden too, then after other hidden, but before visible if pool: if pool[0] != '[': a2 += '~' else: a1 = '$' + a1 return "%s%s%s" % (a1, a2, name) # noinspection PyUnusedLocal def lvs_state_retrieve(selection, cache_refresh=True): rc = [] if cache_refresh: cfg.db.refresh() # When building up the model, it's best to process LVs with the least # dependencies to those that are dependant upon other LVs. Otherwise, when # we are trying to gather information we could be in a position where we # don't have information available yet. 
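# A rough illustration with hypothetical rows (not from the original source):
#   get_key({'lv_name': '[pool0_tdata]', 'lv_parent': 'pool0', 'pool_lv': ''})      -> '##[pool0_tdata]'
#   get_key({'lv_name': 'home',          'lv_parent': '',      'pool_lv': ''})      -> 'home'
#   get_key({'lv_name': 'thin1',         'lv_parent': '',      'pool_lv': 'pool0'}) -> '~thin1'
# Because '#' sorts before letters and '~' after them, hidden component LVs are
# loaded first and pool users last, which is the ordering the sort below relies on.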
lvs = sorted(cfg.db.fetch_lvs(selection), key=get_key) for l in lvs: rc.append(LvState( l['lv_uuid'], l['lv_name'], l['lv_path'], n(l['lv_size']), l['vg_name'], l['vg_uuid'], l['pool_lv_uuid'], l['pool_lv'], l['origin_uuid'], l['origin'], n32(l['data_percent']), l['lv_attr'], l['lv_tags'], l['lv_active'], l['data_lv'], l['metadata_lv'], l['segtype'], l['lv_role'], l['lv_layout'], n32(l['snap_percent']), n32(l['metadata_percent']), n32(l['copy_percent']), n32(l['sync_percent']), n(l['lv_metadata_size']), l['move_pv'], l['move_pv_uuid'])) return rc def load_lvs(lv_name=None, object_path=None, refresh=False, emit_signal=False, cache_refresh=True): # noinspection PyUnresolvedReferences return common( lvs_state_retrieve, (LvCommon, Lv, LvThinPool, LvSnapShot), lv_name, object_path, refresh, emit_signal, cache_refresh) # noinspection PyPep8Naming,PyUnresolvedReferences,PyUnusedLocal class LvState(State): @staticmethod def _pv_devices(uuid): rc = [] for pv in sorted(cfg.db.lv_contained_pv(uuid)): (pv_uuid, pv_name, pv_segs) = pv pv_obj = cfg.om.get_object_path_by_uuid_lvm_id(pv_uuid, pv_name) segs_decorate = [] for i in pv_segs: segs_decorate.append((dbus.UInt64(i[0]), dbus.UInt64(i[1]), dbus.String(i[2]))) rc.append((dbus.ObjectPath(pv_obj), segs_decorate)) return dbus.Array(rc, signature="(oa(tts))") def vg_name_lookup(self): return cfg.om.get_object_by_path(self.Vg).Name @property def lvm_id(self): return "%s/%s" % (self.vg_name_lookup(), self.Name) def identifiers(self): return (self.Uuid, self.lvm_id) def _get_hidden_lv(self): rc = dbus.Array([], "o") vg_name = self.vg_name_lookup() for l in cfg.db.hidden_lvs(self.Uuid): full_name = "%s/%s" % (vg_name, l[1]) op = cfg.om.get_object_path_by_uuid_lvm_id(l[0], full_name) assert op rc.append(dbus.ObjectPath(op)) return rc def __init__(self, Uuid, Name, Path, SizeBytes, vg_name, vg_uuid, pool_lv_uuid, PoolLv, origin_uuid, OriginLv, DataPercent, Attr, Tags, active, data_lv, metadata_lv, segtypes, role, layout, SnapPercent, MetaDataPercent, CopyPercent, SyncPercent, MetaDataSizeBytes, move_pv, move_pv_uuid): utils.init_class_from_arguments(self) # The segtypes is possibly an array with potentially dupes or a single # value self._segs = dbus.Array([], signature='s') if not isinstance(segtypes, list): self._segs.append(dbus.String(segtypes)) else: self._segs.extend([dbus.String(x) for x in set(segtypes)]) self.Vg = cfg.om.get_object_path_by_uuid_lvm_id( vg_uuid, vg_name, vg_obj_path_generate) self.Devices = LvState._pv_devices(self.Uuid) if PoolLv: gen = utils.lv_object_path_method(Name, (Attr, layout, role)) self.PoolLv = cfg.om.get_object_path_by_uuid_lvm_id( pool_lv_uuid, '%s/%s' % (vg_name, PoolLv), gen) else: self.PoolLv = '/' if OriginLv: self.OriginLv = \ cfg.om.get_object_path_by_uuid_lvm_id( origin_uuid, '%s/%s' % (vg_name, OriginLv), vg_obj_path_generate) else: self.OriginLv = '/' self.HiddenLvs = self._get_hidden_lv() @property def SegType(self): return self._segs def _object_path_create(self): return utils.lv_object_path_method( self.Name, (self.Attr, self.layout, self.role)) def _object_type_create(self): if self.Attr[0] == 't': return LvThinPool elif self.Attr[0] == 'C': if 'pool' in self.layout: return LvCachePool else: return LvCacheLv elif self.Name[0] == '[': return LvCommon elif self.OriginLv != '/': return LvSnapShot else: return Lv def create_dbus_object(self, path): if not path: path = cfg.om.get_object_path_by_uuid_lvm_id( self.Uuid, self.lvm_id, self._object_path_create()) obj_ctor = self._object_type_create() return 
obj_ctor(path, self) def creation_signature(self): klass = self._object_type_create() path_method = self._object_path_create() return (klass, path_method) # noinspection PyPep8Naming @utils.dbus_property(LV_COMMON_INTERFACE, 'Uuid', 's') @utils.dbus_property(LV_COMMON_INTERFACE, 'Name', 's') @utils.dbus_property(LV_COMMON_INTERFACE, 'Path', 's') @utils.dbus_property(LV_COMMON_INTERFACE, 'SizeBytes', 't') @utils.dbus_property(LV_COMMON_INTERFACE, 'SegType', 'as') @utils.dbus_property(LV_COMMON_INTERFACE, 'Vg', 'o') @utils.dbus_property(LV_COMMON_INTERFACE, 'OriginLv', 'o') @utils.dbus_property(LV_COMMON_INTERFACE, 'PoolLv', 'o') @utils.dbus_property(LV_COMMON_INTERFACE, 'Devices', "a(oa(tts))") @utils.dbus_property(LV_COMMON_INTERFACE, 'HiddenLvs', "ao") @utils.dbus_property(LV_COMMON_INTERFACE, 'Attr', 's') @utils.dbus_property(LV_COMMON_INTERFACE, 'DataPercent', 'u') @utils.dbus_property(LV_COMMON_INTERFACE, 'SnapPercent', 'u') @utils.dbus_property(LV_COMMON_INTERFACE, 'DataPercent', 'u') @utils.dbus_property(LV_COMMON_INTERFACE, 'MetaDataPercent', 'u') @utils.dbus_property(LV_COMMON_INTERFACE, 'CopyPercent', 'u') @utils.dbus_property(LV_COMMON_INTERFACE, 'SyncPercent', 'u') @utils.dbus_property(LV_COMMON_INTERFACE, 'MetaDataSizeBytes', 't') class LvCommon(AutomatedProperties): _Tags_meta = ("as", LV_COMMON_INTERFACE) _Roles_meta = ("as", LV_COMMON_INTERFACE) _IsThinVolume_meta = ("b", LV_COMMON_INTERFACE) _IsThinPool_meta = ("b", LV_COMMON_INTERFACE) _Active_meta = ("b", LV_COMMON_INTERFACE) _VolumeType_meta = ("(ss)", LV_COMMON_INTERFACE) _Permissions_meta = ("(ss)", LV_COMMON_INTERFACE) _AllocationPolicy_meta = ("(ss)", LV_COMMON_INTERFACE) _State_meta = ("(ss)", LV_COMMON_INTERFACE) _TargetType_meta = ("(ss)", LV_COMMON_INTERFACE) _Health_meta = ("(ss)", LV_COMMON_INTERFACE) _FixedMinor_meta = ('b', LV_COMMON_INTERFACE) _ZeroBlocks_meta = ('b', LV_COMMON_INTERFACE) _SkipActivation_meta = ('b', LV_COMMON_INTERFACE) _MovePv_meta = ('o', LV_COMMON_INTERFACE) def _get_move_pv(self): path = None # It's likely that the move_pv is empty if self.state.move_pv_uuid and self.state.move_pv: path = cfg.om.get_object_path_by_uuid_lvm_id( self.state.move_pv_uuid, self.state.move_pv) if not path: path = '/' return path # noinspection PyUnusedLocal,PyPep8Naming def __init__(self, object_path, object_state): super(LvCommon, self).__init__(object_path, lvs_state_retrieve) self.set_interface(LV_COMMON_INTERFACE) self.state = object_state self._move_pv = self._get_move_pv() @staticmethod def handle_execute(rc, out, err): if rc == 0: cfg.load() else: # Need to work on error handling, need consistent raise dbus.exceptions.DBusException( LV_INTERFACE, 'Exit code %s, stderr = %s' % (str(rc), err)) @staticmethod def validate_dbus_object(lv_uuid, lv_name): dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name) if not dbo: raise dbus.exceptions.DBusException( LV_INTERFACE, 'LV with uuid %s and name %s not present!' 
% (lv_uuid, lv_name)) return dbo @property def VolumeType(self): type_map = {'C': 'Cache', 'm': 'mirrored', 'M': 'Mirrored without initial sync', 'o': 'origin', 'O': 'Origin with merging snapshot', 'r': 'raid', 'R': 'Raid without initial sync', 's': 'snapshot', 'S': 'merging Snapshot', 'p': 'pvmove', 'v': 'virtual', 'i': 'mirror or raid image', 'I': 'mirror or raid Image out-of-sync', 'l': 'mirror log device', 'c': 'under conversion', 'V': 'thin Volume', 't': 'thin pool', 'T': 'Thin pool data', 'e': 'raid or pool metadata or pool metadata spare', '-': 'Unspecified'} return dbus.Struct((self.state.Attr[0], type_map[self.state.Attr[0]]), signature="as") @property def Permissions(self): type_map = {'w': 'writable', 'r': 'read-only', 'R': 'Read-only activation of non-read-only volume', '-': 'Unspecified'} return dbus.Struct((self.state.Attr[1], type_map[self.state.Attr[1]]), signature="(ss)") @property def AllocationPolicy(self): type_map = {'a': 'anywhere', 'A': 'anywhere locked', 'c': 'contiguous', 'C': 'contiguous locked', 'i': 'inherited', 'I': 'inherited locked', 'l': 'cling', 'L': 'cling locked', 'n': 'normal', 'N': 'normal locked', '-': 'Unspecified'} return dbus.Struct((self.state.Attr[2], type_map[self.state.Attr[2]]), signature="(ss)") @property def FixedMinor(self): return dbus.Boolean(self.state.Attr[3] == 'm') @property def State(self): type_map = {'a': 'active', 's': 'suspended', 'I': 'Invalid snapshot', 'S': 'invalid Suspended snapshot', 'm': 'snapshot merge failed', 'M': 'suspended snapshot (M)erge failed', 'd': 'mapped device present without tables', 'i': 'mapped device present with inactive table', 'X': 'unknown', '-': 'Unspecified'} return dbus.Struct((self.state.Attr[4], type_map[self.state.Attr[4]]), signature="(ss)") @property def TargetType(self): type_map = {'C': 'Cache', 'm': 'mirror', 'r': 'raid', 's': 'snapshot', 't': 'thin', 'u': 'unknown', 'v': 'virtual', '-': 'Unspecified'} return dbus.Struct((self.state.Attr[6], type_map[self.state.Attr[6]]), signature="(ss)") @property def ZeroBlocks(self): return dbus.Boolean(self.state.Attr[7] == 'z') @property def Health(self): type_map = {'p': 'partial', 'r': 'refresh', 'm': 'mismatches', 'w': 'writemostly', 'X': 'X unknown', '-': 'Unspecified'} return dbus.Struct((self.state.Attr[8], type_map[self.state.Attr[8]]), signature="(ss)") @property def SkipActivation(self): return dbus.Boolean(self.state.Attr[9] == 'k') def vg_name_lookup(self): return self.state.vg_name_lookup() def lv_full_name(self): return "%s/%s" % (self.state.vg_name_lookup(), self.state.Name) @property def identifiers(self): return self.state.identifiers @property def Tags(self): return utils.parse_tags(self.state.Tags) @property def Roles(self): return utils.parse_tags(self.state.role) @property def lvm_id(self): return self.state.lvm_id @property def IsThinVolume(self): return dbus.Boolean(self.state.Attr[0] == 'V') @property def IsThinPool(self): return dbus.Boolean(self.state.Attr[0] == 't') @property def Active(self): return dbus.Boolean(self.state.active == "active") @property def MovePv(self): return dbus.ObjectPath(self._move_pv) # noinspection PyPep8Naming class Lv(LvCommon): def _fetch_hidden(self, name): # The name is vg/name full_name = "%s/%s" % (self.vg_name_lookup(), name) return cfg.om.get_object_path_by_lvm_id(full_name) def _get_data_meta(self): # Get the data return (self._fetch_hidden(self.state.data_lv), self._fetch_hidden(self.state.metadata_lv)) # noinspection PyUnusedLocal,PyPep8Naming def __init__(self, object_path, object_state): 
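# Lv layers the mutating LV_INTERFACE methods (Remove, Rename, Move, Snapshot,
# Resize, Activate/Deactivate, TagsAdd/TagsDel) on top of LvCommon's read-only
# properties; each one queues a RequestEntry on cfg.worker_q, except Move,
# which is handed to background.cmd_runner as a long-running job.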
super(Lv, self).__init__(object_path, object_state) self.set_interface(LV_INTERFACE) self.state = object_state @staticmethod def _remove(lv_uuid, lv_name, remove_options): # Make sure we have a dbus object representing it LvCommon.validate_dbus_object(lv_uuid, lv_name) # Remove the LV, if successful then remove from the model rc, out, err = cmdhandler.lv_remove(lv_name, remove_options) LvCommon.handle_execute(rc, out, err) return '/' @dbus.service.method( dbus_interface=LV_INTERFACE, in_signature='ia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def Remove(self, tmo, remove_options, cb, cbe): r = RequestEntry( tmo, Lv._remove, (self.Uuid, self.lvm_id, remove_options), cb, cbe, False) cfg.worker_q.put(r) @staticmethod def _rename(lv_uuid, lv_name, new_name, rename_options): # Make sure we have a dbus object representing it LvCommon.validate_dbus_object(lv_uuid, lv_name) # Rename the logical volume rc, out, err = cmdhandler.lv_rename(lv_name, new_name, rename_options) LvCommon.handle_execute(rc, out, err) return '/' @dbus.service.method( dbus_interface=LV_INTERFACE, in_signature='sia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def Rename(self, name, tmo, rename_options, cb, cbe): utils.validate_lv_name(LV_INTERFACE, self.vg_name_lookup(), name) r = RequestEntry( tmo, Lv._rename, (self.Uuid, self.lvm_id, name, rename_options), cb, cbe, False) cfg.worker_q.put(r) @dbus.service.method( dbus_interface=LV_INTERFACE, in_signature='o(tt)a(ott)ia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def Move(self, pv_src_obj, pv_source_range, pv_dests_and_ranges, tmo, move_options, cb, cbe): job_state = JobState() r = RequestEntry( tmo, background.move, (LV_INTERFACE, self.lvm_id, pv_src_obj, pv_source_range, pv_dests_and_ranges, move_options, job_state), cb, cbe, False, job_state) background.cmd_runner(r) @staticmethod def _snap_shot(lv_uuid, lv_name, name, optional_size, snapshot_options): # Make sure we have a dbus object representing it dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name) # If you specify a size you get a 'thick' snapshot even if # it is a thin lv if not dbo.IsThinVolume: if optional_size == 0: space = dbo.SizeBytes / 80 remainder = space % 512 optional_size = space + 512 - remainder rc, out, err = cmdhandler.vg_lv_snapshot( lv_name, snapshot_options, name, optional_size) LvCommon.handle_execute(rc, out, err) full_name = "%s/%s" % (dbo.vg_name_lookup(), name) return cfg.om.get_object_path_by_lvm_id(full_name) @dbus.service.method( dbus_interface=LV_INTERFACE, in_signature='stia{sv}', out_signature='(oo)', async_callbacks=('cb', 'cbe')) def Snapshot(self, name, optional_size, tmo, snapshot_options, cb, cbe): utils.validate_lv_name(LV_INTERFACE, self.vg_name_lookup(), name) r = RequestEntry( tmo, Lv._snap_shot, (self.Uuid, self.lvm_id, name, optional_size, snapshot_options), cb, cbe) cfg.worker_q.put(r) @staticmethod def _resize(lv_uuid, lv_name, new_size_bytes, pv_dests_and_ranges, resize_options): # Make sure we have a dbus object representing it pv_dests = [] dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name) # If we have PVs, verify them if len(pv_dests_and_ranges): for pr in pv_dests_and_ranges: pv_dbus_obj = cfg.om.get_object_by_path(pr[0]) if not pv_dbus_obj: raise dbus.exceptions.DBusException( LV_INTERFACE, 'PV Destination (%s) not found' % pr[0]) pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2])) size_change = new_size_bytes - dbo.SizeBytes rc, out, err = cmdhandler.lv_resize(dbo.lvm_id, size_change, pv_dests, resize_options) 
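# handle_execute() (defined on LvCommon above) reloads the cached model via
# cfg.load() when rc == 0 and raises a dbus.exceptions.DBusException otherwise,
# so a failed lvresize surfaces to the D-Bus caller as an error reply.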
LvCommon.handle_execute(rc, out, err) return "/" @dbus.service.method( dbus_interface=LV_INTERFACE, in_signature='ta(ott)ia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def Resize(self, new_size_bytes, pv_dests_and_ranges, tmo, resize_options, cb, cbe): """ Resize a LV :param new_size_bytes: The requested final size in bytes :param pv_dests_and_ranges: An array of pv object paths and src & dst. segment ranges :param tmo: -1 to wait forever, 0 to return job immediately, else number of seconds to wait for operation to complete before getting a job :param resize_options: key/value hash of options :param cb: Used by framework not client facing API :param cbe: Used by framework not client facing API :return: '/' if complete, else job object path """ r = RequestEntry( tmo, Lv._resize, (self.Uuid, self.lvm_id, round_size(new_size_bytes), pv_dests_and_ranges, resize_options), cb, cbe, return_tuple=False) cfg.worker_q.put(r) @staticmethod def _lv_activate_deactivate(uuid, lv_name, activate, control_flags, options): # Make sure we have a dbus object representing it LvCommon.validate_dbus_object(uuid, lv_name) rc, out, err = cmdhandler.activate_deactivate( 'lvchange', lv_name, activate, control_flags, options) LvCommon.handle_execute(rc, out, err) return '/' @dbus.service.method( dbus_interface=LV_INTERFACE, in_signature='tia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def Activate(self, control_flags, tmo, activate_options, cb, cbe): r = RequestEntry( tmo, Lv._lv_activate_deactivate, (self.state.Uuid, self.state.lvm_id, True, control_flags, activate_options), cb, cbe, return_tuple=False) cfg.worker_q.put(r) # noinspection PyProtectedMember @dbus.service.method( dbus_interface=LV_INTERFACE, in_signature='tia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def Deactivate(self, control_flags, tmo, activate_options, cb, cbe): r = RequestEntry( tmo, Lv._lv_activate_deactivate, (self.state.Uuid, self.state.lvm_id, False, control_flags, activate_options), cb, cbe, return_tuple=False) cfg.worker_q.put(r) @staticmethod def _add_rm_tags(uuid, lv_name, tags_add, tags_del, tag_options): # Make sure we have a dbus object representing it LvCommon.validate_dbus_object(uuid, lv_name) rc, out, err = cmdhandler.lv_tag( lv_name, tags_add, tags_del, tag_options) LvCommon.handle_execute(rc, out, err) return '/' @dbus.service.method( dbus_interface=LV_INTERFACE, in_signature='asia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def TagsAdd(self, tags, tmo, tag_options, cb, cbe): for t in tags: utils.validate_tag(LV_INTERFACE, t) r = RequestEntry( tmo, Lv._add_rm_tags, (self.state.Uuid, self.state.lvm_id, tags, None, tag_options), cb, cbe, return_tuple=False) cfg.worker_q.put(r) @dbus.service.method( dbus_interface=LV_INTERFACE, in_signature='asia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def TagsDel(self, tags, tmo, tag_options, cb, cbe): for t in tags: utils.validate_tag(LV_INTERFACE, t) r = RequestEntry( tmo, Lv._add_rm_tags, (self.state.Uuid, self.state.lvm_id, None, tags, tag_options), cb, cbe, return_tuple=False) cfg.worker_q.put(r) # noinspection PyPep8Naming class LvThinPool(Lv): _DataLv_meta = ("o", THIN_POOL_INTERFACE) _MetaDataLv_meta = ("o", THIN_POOL_INTERFACE) def __init__(self, object_path, object_state): super(LvThinPool, self).__init__(object_path, object_state) self.set_interface(THIN_POOL_INTERFACE) self._data_lv, self._metadata_lv = self._get_data_meta() @property def DataLv(self): return dbus.ObjectPath(self._data_lv) @property def MetaDataLv(self): 
return dbus.ObjectPath(self._metadata_lv) @staticmethod def _lv_create(lv_uuid, lv_name, name, size_bytes, create_options): # Make sure we have a dbus object representing it dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name) rc, out, err = cmdhandler.lv_lv_create( lv_name, create_options, name, size_bytes) LvCommon.handle_execute(rc, out, err) full_name = "%s/%s" % (dbo.vg_name_lookup(), name) return cfg.om.get_object_path_by_lvm_id(full_name) @dbus.service.method( dbus_interface=THIN_POOL_INTERFACE, in_signature='stia{sv}', out_signature='(oo)', async_callbacks=('cb', 'cbe')) def LvCreate(self, name, size_bytes, tmo, create_options, cb, cbe): utils.validate_lv_name(THIN_POOL_INTERFACE, self.vg_name_lookup(), name) r = RequestEntry( tmo, LvThinPool._lv_create, (self.Uuid, self.lvm_id, name, round_size(size_bytes), create_options), cb, cbe) cfg.worker_q.put(r) # noinspection PyPep8Naming class LvCachePool(Lv): _DataLv_meta = ("o", CACHE_POOL_INTERFACE) _MetaDataLv_meta = ("o", CACHE_POOL_INTERFACE) def __init__(self, object_path, object_state): super(LvCachePool, self).__init__(object_path, object_state) self.set_interface(CACHE_POOL_INTERFACE) self._data_lv, self._metadata_lv = self._get_data_meta() @property def DataLv(self): return dbus.ObjectPath(self._data_lv) @property def MetaDataLv(self): return dbus.ObjectPath(self._metadata_lv) @staticmethod def _cache_lv(lv_uuid, lv_name, lv_object_path, cache_options): # Make sure we have a dbus object representing cache pool dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name) # Make sure we have dbus object representing lv to cache lv_to_cache = cfg.om.get_object_by_path(lv_object_path) if lv_to_cache: fcn = lv_to_cache.lv_full_name() rc, out, err = cmdhandler.lv_cache_lv( dbo.lv_full_name(), fcn, cache_options) if rc == 0: # When we cache an LV, the cache pool and the lv that is getting # cached need to be removed from the object manager and # re-created as their interfaces have changed! mt_remove_dbus_objects((dbo, lv_to_cache)) cfg.load() lv_converted = cfg.om.get_object_path_by_lvm_id(fcn) else: raise dbus.exceptions.DBusException( LV_INTERFACE, 'Exit code %s, stderr = %s' % (str(rc), err)) else: raise dbus.exceptions.DBusException( LV_INTERFACE, 'LV to cache with object path %s not present!' 
% lv_object_path) return lv_converted @dbus.service.method( dbus_interface=CACHE_POOL_INTERFACE, in_signature='oia{sv}', out_signature='(oo)', async_callbacks=('cb', 'cbe')) def CacheLv(self, lv_object, tmo, cache_options, cb, cbe): r = RequestEntry( tmo, LvCachePool._cache_lv, (self.Uuid, self.lvm_id, lv_object, cache_options), cb, cbe) cfg.worker_q.put(r) # noinspection PyPep8Naming class LvCacheLv(Lv): _CachePool_meta = ("o", LV_CACHED) def __init__(self, object_path, object_state): super(LvCacheLv, self).__init__(object_path, object_state) self.set_interface(LV_CACHED) @property def CachePool(self): return dbus.ObjectPath(self.state.PoolLv) @staticmethod def _detach_lv(lv_uuid, lv_name, detach_options, destroy_cache): # Make sure we have a dbus object representing cache pool dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name) # Get current cache name cache_pool = cfg.om.get_object_by_path(dbo.CachePool) rc, out, err = cmdhandler.lv_detach_cache( dbo.lv_full_name(), detach_options, destroy_cache) if rc == 0: # The cache pool gets removed as hidden and put back to # visible, so lets delete mt_remove_dbus_objects((cache_pool, dbo)) cfg.load() uncached_lv_path = cfg.om.get_object_path_by_lvm_id(lv_name) else: raise dbus.exceptions.DBusException( LV_INTERFACE, 'Exit code %s, stderr = %s' % (str(rc), err)) return uncached_lv_path @dbus.service.method( dbus_interface=LV_CACHED, in_signature='bia{sv}', out_signature='(oo)', async_callbacks=('cb', 'cbe')) def DetachCachePool(self, destroy_cache, tmo, detach_options, cb, cbe): r = RequestEntry( tmo, LvCacheLv._detach_lv, (self.Uuid, self.lvm_id, detach_options, destroy_cache), cb, cbe) cfg.worker_q.put(r) # noinspection PyPep8Naming class LvSnapShot(Lv): def __init__(self, object_path, object_state): super(LvSnapShot, self).__init__(object_path, object_state) self.set_interface(SNAPSHOT_INTERFACE) @dbus.service.method( dbus_interface=SNAPSHOT_INTERFACE, in_signature='ia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def Merge(self, tmo, merge_options, cb, cbe): job_state = JobState() r = RequestEntry(tmo, background.merge, (SNAPSHOT_INTERFACE, self.Uuid, self.lvm_id, merge_options, job_state), cb, cbe, False, job_state) background.cmd_runner(r) LVM2.2.02.176/daemons/lvmdbusd/cfg.py0000644000000000000120000000460413176752421015707 0ustar rootwheel# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
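# For orientation, derived from the constants defined below: BASE_INTERFACE is
# 'com.redhat.lvmdbus1', so e.g. LV_INTERFACE resolves to 'com.redhat.lvmdbus1.Lv'
# and LV_OBJ_PATH to '/com/redhat/lvmdbus1/Lv'; the bus name (BUS_NAME) can be
# overridden through the LVM_DBUS_NAME environment variable, while the interface
# strings themselves are fixed.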
import os import multiprocessing import queue import itertools from lvmdbusd import path LVM_CMD = os.getenv('LVM_BINARY', path.LVM_BINARY) # This is the global object manager om = None # This is the global bus connection bus = None # Command line args args = None # Set to true if we are depending on external events for updates got_external_event = False # Shared state variable across all processes run = multiprocessing.Value('i', 1) # If this is set to true, the current setup support lvm shell and we are # running in that mode of operation SHELL_IN_USE = None # Lock used by pprint stdout_lock = multiprocessing.Lock() worker_q = queue.Queue() # Main event loop loop = None BUS_NAME = os.getenv('LVM_DBUS_NAME', 'com.redhat.lvmdbus1') BASE_INTERFACE = 'com.redhat.lvmdbus1' PV_INTERFACE = BASE_INTERFACE + '.Pv' VG_INTERFACE = BASE_INTERFACE + '.Vg' LV_INTERFACE = BASE_INTERFACE + '.Lv' LV_COMMON_INTERFACE = BASE_INTERFACE + '.LvCommon' THIN_POOL_INTERFACE = BASE_INTERFACE + '.ThinPool' CACHE_POOL_INTERFACE = BASE_INTERFACE + '.CachePool' LV_CACHED = BASE_INTERFACE + '.CachedLv' SNAPSHOT_INTERFACE = BASE_INTERFACE + '.Snapshot' MANAGER_INTERFACE = BASE_INTERFACE + '.Manager' JOB_INTERFACE = BASE_INTERFACE + '.Job' BASE_OBJ_PATH = '/' + BASE_INTERFACE.replace('.', '/') PV_OBJ_PATH = BASE_OBJ_PATH + '/Pv' VG_OBJ_PATH = BASE_OBJ_PATH + '/Vg' LV_OBJ_PATH = BASE_OBJ_PATH + '/Lv' THIN_POOL_PATH = BASE_OBJ_PATH + "/ThinPool" CACHE_POOL_PATH = BASE_OBJ_PATH + "/CachePool" HIDDEN_LV_PATH = BASE_OBJ_PATH + "/HiddenLv" MANAGER_OBJ_PATH = BASE_OBJ_PATH + '/Manager' JOB_OBJ_PATH = BASE_OBJ_PATH + '/Job' # Counters for object path generation pv_id = itertools.count() vg_id = itertools.count() lv_id = itertools.count() thin_id = itertools.count() cache_pool_id = itertools.count() job_id = itertools.count() hidden_lv = itertools.count() # Used to prevent circular imports... load = None event = None # Global cached state db = None # lvm flight recorder blackbox = None # RequestEntry ctor create_request_entry = None LVM2.2.02.176/daemons/lvmdbusd/cmdhandler.py0000644000000000000120000004460213176752421017253 0ustar rootwheel# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from subprocess import Popen, PIPE import time import threading from itertools import chain import collections import traceback import os from lvmdbusd import cfg from lvmdbusd.utils import pv_dest_ranges, log_debug, log_error, add_no_notify from lvmdbusd.lvm_shell_proxy import LVMShellProxy try: import simplejson as json except ImportError: import json SEP = '{|}' total_time = 0.0 total_count = 0 # We need to prevent different threads from using the same lvm shell # at the same time. 
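# The same lock also serializes the fork-and-exec path: time_wrapper() below
# takes cmd_lock around every _t_call invocation, and set_execution() holds it
# while switching between call_lvm and the LVMShellProxy.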
cmd_lock = threading.RLock() class LvmExecutionMeta(object): def __init__(self, start, ended, cmd, ec, stdout_txt, stderr_txt): self.lock = threading.RLock() self.start = start self.ended = ended self.cmd = cmd self.ec = ec self.stdout_txt = stdout_txt self.stderr_txt = stderr_txt def __str__(self): with self.lock: return "EC= %d for %s\n" \ "STARTED: %f, ENDED: %f\n" \ "STDOUT=%s\n" \ "STDERR=%s\n" % \ (self.ec, str(self.cmd), self.start, self.ended, self.stdout_txt, self.stderr_txt) class LvmFlightRecorder(object): def __init__(self, size=16): self.queue = collections.deque(maxlen=size) def add(self, lvm_exec_meta): self.queue.append(lvm_exec_meta) def dump(self): with cmd_lock: if len(self.queue): log_error("LVM dbus flight recorder START") for c in self.queue: log_error(str(c)) log_error("LVM dbus flight recorder END") cfg.blackbox = LvmFlightRecorder() def _debug_c(cmd, exit_code, out): log_error('CMD= %s' % ' '.join(cmd)) log_error(("EC= %d" % exit_code)) log_error(("STDOUT=\n %s\n" % out[0])) log_error(("STDERR=\n %s\n" % out[1])) def call_lvm(command, debug=False): """ Call an executable and return a tuple of exitcode, stdout, stderr :param command: Command to execute :param debug: Dump debug to stdout """ # print 'STACK:' # for line in traceback.format_stack(): # print line.strip() # Prepend the full lvm executable so that we can run different versions # in different locations on the same box command.insert(0, cfg.LVM_CMD) command = add_no_notify(command) process = Popen(command, stdout=PIPE, stderr=PIPE, close_fds=True, env=os.environ) out = process.communicate() stdout_text = bytes(out[0]).decode("utf-8") stderr_text = bytes(out[1]).decode("utf-8") if debug or process.returncode != 0: _debug_c(command, process.returncode, (stdout_text, stderr_text)) return process.returncode, stdout_text, stderr_text # The actual method which gets called to invoke the lvm command, can vary # from forking a new process to using lvm shell _t_call = call_lvm def _shell_cfg(): global _t_call # noinspection PyBroadException try: lvm_shell = LVMShellProxy() _t_call = lvm_shell.call_lvm cfg.SHELL_IN_USE = lvm_shell return True except Exception: _t_call = call_lvm cfg.SHELL_IN_USE = None log_error(traceback.format_exc()) log_error("Unable to utilize lvm shell, dropping back to fork & exec") return False def set_execution(shell): global _t_call with cmd_lock: # If the user requested lvm shell and we are currently setup that # way, just return if cfg.SHELL_IN_USE and shell: return True else: if not shell and cfg.SHELL_IN_USE: cfg.SHELL_IN_USE.exit_shell() cfg.SHELL_IN_USE = None _t_call = call_lvm if shell: if cfg.args.use_json: return _shell_cfg() else: return False return True def time_wrapper(command, debug=False): global total_time global total_count with cmd_lock: start = time.time() results = _t_call(command, debug) ended = time.time() total_time += (ended - start) total_count += 1 cfg.blackbox.add(LvmExecutionMeta(start, ended, command, *results)) return results call = time_wrapper # Default cmd # Place default arguments for every command here. 
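# A rough doctest-style example of the defaults _dc() adds (not from the
# original source):
#   _dc('pvs', ['-o', 'pv_name'])
#   -> ['pvs', '--noheading', '--separator', '{|}', '--nosuffix',
#       '--unbuffered', '--units', 'b', '-o', 'pv_name']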
def _dc(cmd, args): c = [cmd, '--noheading', '--separator', '%s' % SEP, '--nosuffix', '--unbuffered', '--units', 'b'] c.extend(args) return c def parse(out): rc = [] for line in out.split('\n'): # This line includes separators, so process them if SEP in line: elem = line.split(SEP) cleaned_elem = [] for e in elem: e = e.strip() cleaned_elem.append(e) if len(cleaned_elem) > 1: rc.append(cleaned_elem) else: t = line.strip() if len(t) > 0: rc.append(t) return rc def parse_column_names(out, column_names): lines = parse(out) rc = [] for i in range(0, len(lines)): d = dict(list(zip(column_names, lines[i]))) rc.append(d) return rc def options_to_cli_args(options): rc = [] for k, v in list(dict(options).items()): if k.startswith("-"): rc.append(k) else: rc.append("--%s" % k) if v != "": rc.append(str(v)) return rc def pv_remove(device, remove_options): cmd = ['pvremove'] cmd.extend(options_to_cli_args(remove_options)) cmd.append(device) return call(cmd) def _qt(tag_name): return '@%s' % tag_name def _tag(operation, what, add, rm, tag_options): cmd = [operation] cmd.extend(options_to_cli_args(tag_options)) if isinstance(what, list): cmd.extend(what) else: cmd.append(what) if add: cmd.extend(list(chain.from_iterable( ('--addtag', _qt(x)) for x in add))) if rm: cmd.extend(list(chain.from_iterable( ('--deltag', _qt(x)) for x in rm))) return call(cmd, False) def pv_tag(pv_devices, add, rm, tag_options): return _tag('pvchange', pv_devices, add, rm, tag_options) def vg_tag(vg_name, add, rm, tag_options): return _tag('vgchange', vg_name, add, rm, tag_options) def lv_tag(lv_name, add, rm, tag_options): return _tag('lvchange', lv_name, add, rm, tag_options) def vg_rename(vg, new_name, rename_options): cmd = ['vgrename'] cmd.extend(options_to_cli_args(rename_options)) cmd.extend([vg, new_name]) return call(cmd) def vg_remove(vg_name, remove_options): cmd = ['vgremove'] cmd.extend(options_to_cli_args(remove_options)) cmd.extend(['-f', vg_name]) return call(cmd) def vg_lv_create(vg_name, create_options, name, size_bytes, pv_dests): cmd = ['lvcreate'] cmd.extend(options_to_cli_args(create_options)) cmd.extend(['--size', str(size_bytes) + 'B']) cmd.extend(['--name', name, vg_name, '--yes']) pv_dest_ranges(cmd, pv_dests) return call(cmd) def vg_lv_snapshot(vg_name, snapshot_options, name, size_bytes): cmd = ['lvcreate'] cmd.extend(options_to_cli_args(snapshot_options)) cmd.extend(["-s"]) if size_bytes != 0: cmd.extend(['--size', str(size_bytes) + 'B']) cmd.extend(['--name', name, vg_name]) return call(cmd) def _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool): cmd = ['lvcreate'] cmd.extend(options_to_cli_args(create_options)) if not thin_pool: cmd.extend(['--size', str(size_bytes) + 'B']) else: cmd.extend(['--thin', '--size', str(size_bytes) + 'B']) cmd.extend(['--yes']) return cmd def vg_lv_create_linear(vg_name, create_options, name, size_bytes, thin_pool): cmd = _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool) cmd.extend(['--name', name, vg_name]) return call(cmd) def vg_lv_create_striped(vg_name, create_options, name, size_bytes, num_stripes, stripe_size_kb, thin_pool): cmd = _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool) cmd.extend(['--stripes', str(num_stripes)]) if stripe_size_kb != 0: cmd.extend(['--stripesize', str(stripe_size_kb)]) cmd.extend(['--name', name, vg_name]) return call(cmd) def _vg_lv_create_raid(vg_name, create_options, name, raid_type, size_bytes, num_stripes, stripe_size_kb): cmd = ['lvcreate'] cmd.extend(options_to_cli_args(create_options)) 
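# With empty create_options, the extends below assemble something like this
# (hypothetical values): ['lvcreate', '--type', 'raid5', '--size', '1073741824B',
# '--stripes', '3', '--stripesize', '64', '--name', 'lv0', 'vg0', '--yes']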
cmd.extend(['--type', raid_type]) cmd.extend(['--size', str(size_bytes) + 'B']) if num_stripes != 0: cmd.extend(['--stripes', str(num_stripes)]) if stripe_size_kb != 0: cmd.extend(['--stripesize', str(stripe_size_kb)]) cmd.extend(['--name', name, vg_name, '--yes']) return call(cmd) def vg_lv_create_raid(vg_name, create_options, name, raid_type, size_bytes, num_stripes, stripe_size_kb): cmd = ['lvcreate'] cmd.extend(options_to_cli_args(create_options)) return _vg_lv_create_raid(vg_name, create_options, name, raid_type, size_bytes, num_stripes, stripe_size_kb) def vg_lv_create_mirror( vg_name, create_options, name, size_bytes, num_copies): cmd = ['lvcreate'] cmd.extend(options_to_cli_args(create_options)) cmd.extend(['--type', 'mirror']) cmd.extend(['--mirrors', str(num_copies)]) cmd.extend(['--size', str(size_bytes) + 'B']) cmd.extend(['--name', name, vg_name, '--yes']) return call(cmd) def vg_create_cache_pool(md_full_name, data_full_name, create_options): cmd = ['lvconvert'] cmd.extend(options_to_cli_args(create_options)) cmd.extend(['--type', 'cache-pool', '--force', '-y', '--poolmetadata', md_full_name, data_full_name]) return call(cmd) def vg_create_thin_pool(md_full_name, data_full_name, create_options): cmd = ['lvconvert'] cmd.extend(options_to_cli_args(create_options)) cmd.extend(['--type', 'thin-pool', '--force', '-y', '--poolmetadata', md_full_name, data_full_name]) return call(cmd) def lv_remove(lv_path, remove_options): cmd = ['lvremove'] cmd.extend(options_to_cli_args(remove_options)) cmd.extend(['-f', lv_path]) return call(cmd) def lv_rename(lv_path, new_name, rename_options): cmd = ['lvrename'] cmd.extend(options_to_cli_args(rename_options)) cmd.extend([lv_path, new_name]) return call(cmd) def lv_resize(lv_full_name, size_change, pv_dests, resize_options): cmd = ['lvresize', '--force'] cmd.extend(options_to_cli_args(resize_options)) if size_change < 0: cmd.append("-L-%dB" % (-size_change)) else: cmd.append("-L+%dB" % (size_change)) cmd.append(lv_full_name) pv_dest_ranges(cmd, pv_dests) return call(cmd) def lv_lv_create(lv_full_name, create_options, name, size_bytes): cmd = ['lvcreate'] cmd.extend(options_to_cli_args(create_options)) cmd.extend(['--virtualsize', str(size_bytes) + 'B', '-T']) cmd.extend(['--name', name, lv_full_name, '--yes']) return call(cmd) def lv_cache_lv(cache_pool_full_name, lv_full_name, cache_options): # lvconvert --type cache --cachepool VG/CachePoolLV VG/OriginLV cmd = ['lvconvert'] cmd.extend(options_to_cli_args(cache_options)) cmd.extend(['-y', '--type', 'cache', '--cachepool', cache_pool_full_name, lv_full_name]) return call(cmd) def lv_detach_cache(lv_full_name, detach_options, destroy_cache): cmd = ['lvconvert'] if destroy_cache: option = '--uncache' else: # Currently fairly dangerous # see: https://bugzilla.redhat.com/show_bug.cgi?id=1248972 option = '--splitcache' cmd.extend(options_to_cli_args(detach_options)) # needed to prevent interactive questions cmd.extend(["--yes", "--force"]) cmd.extend([option, lv_full_name]) return call(cmd) def supports_json(): cmd = ['help'] rc, out, err = call(cmd) if rc == 0: if cfg.SHELL_IN_USE: return True else: if 'fullreport' in err: return True return False def lvm_full_report_json(): pv_columns = ['pv_name', 'pv_uuid', 'pv_fmt', 'pv_size', 'pv_free', 'pv_used', 'dev_size', 'pv_mda_size', 'pv_mda_free', 'pv_ba_start', 'pv_ba_size', 'pe_start', 'pv_pe_count', 'pv_pe_alloc_count', 'pv_attr', 'pv_tags', 'vg_name', 'vg_uuid', 'pv_missing'] pv_seg_columns = ['pvseg_start', 'pvseg_size', 'segtype', 'pv_uuid', 
'lv_uuid', 'pv_name'] vg_columns = ['vg_name', 'vg_uuid', 'vg_fmt', 'vg_size', 'vg_free', 'vg_sysid', 'vg_extent_size', 'vg_extent_count', 'vg_free_count', 'vg_profile', 'max_lv', 'max_pv', 'pv_count', 'lv_count', 'snap_count', 'vg_seqno', 'vg_mda_count', 'vg_mda_free', 'vg_mda_size', 'vg_mda_used_count', 'vg_attr', 'vg_tags'] lv_columns = ['lv_uuid', 'lv_name', 'lv_path', 'lv_size', 'vg_name', 'pool_lv_uuid', 'pool_lv', 'origin_uuid', 'origin', 'data_percent', 'lv_attr', 'lv_tags', 'vg_uuid', 'lv_active', 'data_lv', 'metadata_lv', 'lv_parent', 'lv_role', 'lv_layout', 'snap_percent', 'metadata_percent', 'copy_percent', 'sync_percent', 'lv_metadata_size', 'move_pv', 'move_pv_uuid'] lv_seg_columns = ['seg_pe_ranges', 'segtype', 'lv_uuid'] cmd = _dc('fullreport', [ '-a', # Need hidden too '--configreport', 'pv', '-o', ','.join(pv_columns), '--configreport', 'vg', '-o', ','.join(vg_columns), '--configreport', 'lv', '-o', ','.join(lv_columns), '--configreport', 'seg', '-o', ','.join(lv_seg_columns), '--configreport', 'pvseg', '-o', ','.join(pv_seg_columns), '--reportformat', 'json' ]) rc, out, err = call(cmd) if rc == 0: # With the current implementation, if we are using the shell then we # are using JSON and JSON is returned back to us as it was parsed to # figure out if we completed OK or not if cfg.SHELL_IN_USE: assert(type(out) == dict) return out else: return json.loads(out) return None def pv_retrieve_with_segs(device=None): d = [] err = "" out = "" rc = 0 columns = ['pv_name', 'pv_uuid', 'pv_fmt', 'pv_size', 'pv_free', 'pv_used', 'dev_size', 'pv_mda_size', 'pv_mda_free', 'pv_ba_start', 'pv_ba_size', 'pe_start', 'pv_pe_count', 'pv_pe_alloc_count', 'pv_attr', 'pv_tags', 'vg_name', 'vg_uuid', 'pvseg_start', 'pvseg_size', 'segtype', 'pv_missing'] # Lvm has some issues where it returns failure when querying pvs when other # operations are in process, see: # https://bugzilla.redhat.com/show_bug.cgi?id=1274085 for i in range(0, 10): cmd = _dc('pvs', ['-o', ','.join(columns)]) if device: cmd.extend(device) rc, out, err = call(cmd) if rc == 0: d = parse_column_names(out, columns) break else: time.sleep(0.2) log_debug("LVM Bug workaround, retrying pvs command...") if rc != 0: msg = "We were unable to get pvs to return without error after " \ "trying 10 times, RC=%d, STDERR=(%s), STDOUT=(%s)" % \ (rc, err, out) log_error(msg) raise RuntimeError(msg) return d def pv_resize(device, size_bytes, create_options): cmd = ['pvresize'] cmd.extend(options_to_cli_args(create_options)) if size_bytes != 0: cmd.extend(['--yes', '--setphysicalvolumesize', str(size_bytes) + 'B']) cmd.extend([device]) return call(cmd) def pv_create(create_options, devices): cmd = ['pvcreate', '-ff'] cmd.extend(options_to_cli_args(create_options)) cmd.extend(devices) return call(cmd) def pv_allocatable(device, yes, allocation_options): yn = 'n' if yes: yn = 'y' cmd = ['pvchange'] cmd.extend(options_to_cli_args(allocation_options)) cmd.extend(['-x', yn, device]) return call(cmd) def pv_scan(activate, cache, device_paths, major_minors, scan_options): cmd = ['pvscan'] cmd.extend(options_to_cli_args(scan_options)) if activate: cmd.extend(['--activate', "ay"]) if cache: cmd.append('--cache') if len(device_paths) > 0: for d in device_paths: cmd.append(d) if len(major_minors) > 0: for mm in major_minors: cmd.append("%s:%s" % (mm)) return call(cmd) def vg_create(create_options, pv_devices, name): cmd = ['vgcreate'] cmd.extend(options_to_cli_args(create_options)) cmd.append(name) cmd.extend(pv_devices) return call(cmd) def 
vg_change(change_options, name): cmd = ['vgchange'] cmd.extend(options_to_cli_args(change_options)) cmd.append(name) return call(cmd) def vg_reduce(vg_name, missing, pv_devices, reduce_options): cmd = ['vgreduce'] cmd.extend(options_to_cli_args(reduce_options)) if missing: cmd.append('--removemissing') elif len(pv_devices) == 0: cmd.append('--all') cmd.append(vg_name) cmd.extend(pv_devices) return call(cmd) def vg_extend(vg_name, extend_devices, extend_options): cmd = ['vgextend'] cmd.extend(options_to_cli_args(extend_options)) cmd.append(vg_name) cmd.extend(extend_devices) return call(cmd) def _vg_value_set(name, arguments, options): cmd = ['vgchange'] cmd.extend(options_to_cli_args(options)) cmd.append(name) cmd.extend(arguments) return call(cmd) def vg_allocation_policy(vg_name, policy, policy_options): return _vg_value_set(vg_name, ['--alloc', policy], policy_options) def vg_max_pv(vg_name, number, max_options): return _vg_value_set(vg_name, ['--maxphysicalvolumes', str(number)], max_options) def vg_max_lv(vg_name, number, max_options): return _vg_value_set(vg_name, ['-l', str(number)], max_options) def vg_uuid_gen(vg_name, ignore, options): assert ignore is None return _vg_value_set(vg_name, ['--uuid'], options) def activate_deactivate(op, name, activate, control_flags, options): cmd = [op] cmd.extend(options_to_cli_args(options)) op = '-a' if control_flags: # Autoactivation if (1 << 0) & control_flags: op += 'a' # Exclusive locking (Cluster) if (1 << 1) & control_flags: op += 'e' # Local node activation if (1 << 2) & control_flags: op += 'l' # Activation modes if (1 << 3) & control_flags: cmd.extend(['--activationmode', 'complete']) elif (1 << 4) & control_flags: cmd.extend(['--activationmode', 'partial']) # Ignore activation skip if (1 << 5) & control_flags: cmd.append('--ignoreactivationskip') if activate: op += 'y' else: op += 'n' cmd.append(op) cmd.append(name) return call(cmd) def vg_retrieve(vg_specific): if vg_specific: assert isinstance(vg_specific, list) columns = ['vg_name', 'vg_uuid', 'vg_fmt', 'vg_size', 'vg_free', 'vg_sysid', 'vg_extent_size', 'vg_extent_count', 'vg_free_count', 'vg_profile', 'max_lv', 'max_pv', 'pv_count', 'lv_count', 'snap_count', 'vg_seqno', 'vg_mda_count', 'vg_mda_free', 'vg_mda_size', 'vg_mda_used_count', 'vg_attr', 'vg_tags'] cmd = _dc('vgs', ['-o', ','.join(columns)]) if vg_specific: cmd.extend(vg_specific) d = [] rc, out, err = call(cmd) if rc == 0: d = parse_column_names(out, columns) return d def lv_retrieve_with_segments(): columns = ['lv_uuid', 'lv_name', 'lv_path', 'lv_size', 'vg_name', 'pool_lv_uuid', 'pool_lv', 'origin_uuid', 'origin', 'data_percent', 'lv_attr', 'lv_tags', 'vg_uuid', 'lv_active', 'data_lv', 'metadata_lv', 'seg_pe_ranges', 'segtype', 'lv_parent', 'lv_role', 'lv_layout', 'snap_percent', 'metadata_percent', 'copy_percent', 'sync_percent', 'lv_metadata_size', 'move_pv', 'move_pv_uuid'] cmd = _dc('lvs', ['-a', '-o', ','.join(columns)]) rc, out, err = call(cmd) d = [] if rc == 0: d = parse_column_names(out, columns) return d if __name__ == '__main__': pv_data = pv_retrieve_with_segs() for p in pv_data: print(str(p)) LVM2.2.02.176/daemons/lvmdbusd/background.py0000644000000000000120000001151413176752421017265 0ustar rootwheel# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program. If not, see . import subprocess from . import cfg from .cmdhandler import options_to_cli_args, LvmExecutionMeta import dbus from .utils import pv_range_append, pv_dest_ranges, log_error, log_debug,\ add_no_notify import os import threading import time def pv_move_lv_cmd(move_options, lv_full_name, pv_source, pv_source_range, pv_dest_range_list): cmd = ['pvmove', '-i', '1'] cmd.extend(options_to_cli_args(move_options)) if lv_full_name: cmd.extend(['-n', lv_full_name]) pv_range_append(cmd, pv_source, *pv_source_range) pv_dest_ranges(cmd, pv_dest_range_list) return cmd def lv_merge_cmd(merge_options, lv_full_name): cmd = ['lvconvert', '--merge', '-i', '1'] cmd.extend(options_to_cli_args(merge_options)) cmd.append(lv_full_name) return cmd def _move_merge(interface_name, command, job_state): # We need to execute these command stand alone by forking & exec'ing # the command always as we will be getting periodic output from them on # the status of the long running operation. command.insert(0, cfg.LVM_CMD) # Instruct lvm to not register an event with us command = add_no_notify(command) #(self, start, ended, cmd, ec, stdout_txt, stderr_txt) meta = LvmExecutionMeta(time.time(), 0, command, -1000, None, None) cfg.blackbox.add(meta) process = subprocess.Popen(command, stdout=subprocess.PIPE, env=os.environ, stderr=subprocess.PIPE, close_fds=True) log_debug("Background process for %s is %d" % (str(command), process.pid)) lines_iterator = iter(process.stdout.readline, b"") for line in lines_iterator: line_str = line.decode("utf-8") # Check to see if the line has the correct number of separators try: if line_str.count(':') == 2: (device, ignore, percentage) = line_str.split(':') job_state.Percent = round( float(percentage.strip()[:-1]), 1) # While the move is in progress we need to periodically update # the state to reflect where everything is at. cfg.load() except ValueError: log_error("Trying to parse percentage which failed for %s" % line_str) out = process.communicate() with meta.lock: meta.ended = time.time() meta.ec = process.returncode meta.stderr_txt = out[1] if process.returncode == 0: job_state.Percent = 100 else: raise dbus.exceptions.DBusException( interface_name, 'Exit code %s, stderr = %s' % (str(process.returncode), out[1])) cfg.load() return '/' def move(interface_name, lv_name, pv_src_obj, pv_source_range, pv_dests_and_ranges, move_options, job_state): """ Common code for the pvmove handling. 
:param interface_name: What dbus interface we are providing for :param lv_name: Optional (None or name of LV to move) :param pv_src_obj: dbus object patch for source PV :param pv_source_range: (0,0 to ignore, else start, end segments) :param pv_dests_and_ranges: Array of PV object paths and start/end segs :param move_options: Hash with optional arguments :param job_state: Used to convey information about jobs between processes :return: '/' When complete, the empty object path """ pv_dests = [] pv_src = cfg.om.get_object_by_path(pv_src_obj) if pv_src: # Check to see if we are handling a move to a specific # destination(s) if len(pv_dests_and_ranges): for pr in pv_dests_and_ranges: pv_dbus_obj = cfg.om.get_object_by_path(pr[0]) if not pv_dbus_obj: raise dbus.exceptions.DBusException( interface_name, 'PV Destination (%s) not found' % pr[0]) pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2])) cmd = pv_move_lv_cmd(move_options, lv_name, pv_src.lvm_id, pv_source_range, pv_dests) return _move_merge(interface_name, cmd, job_state) else: raise dbus.exceptions.DBusException( interface_name, 'pv_src_obj (%s) not found' % pv_src_obj) def merge(interface_name, lv_uuid, lv_name, merge_options, job_state): # Make sure we have a dbus object representing it dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name) if dbo: cmd = lv_merge_cmd(merge_options, dbo.lvm_id) return _move_merge(interface_name, cmd, job_state) else: raise dbus.exceptions.DBusException( interface_name, 'LV with uuid %s and name %s not present!' % (lv_uuid, lv_name)) def _run_cmd(req): log_debug( "_run_cmd: Running method: %s with args %s" % (str(req.method), str(req.arguments))) req.run_cmd() log_debug("_run_cmd: complete!") def cmd_runner(request): t = threading.Thread(target=_run_cmd, args=(request,), name="cmd_runner %s" % str(request.method)) t.start() LVM2.2.02.176/daemons/lvmdbusd/path.py.in0000644000000000000120000000063713176752421016513 0ustar rootwheel# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . LVM_BINARY = "@LVM_PATH@" LVM2.2.02.176/daemons/lvmdbusd/lvmdb.py0000755000000000000120000003303713176752421016261 0ustar rootwheel#!/usr/bin/env python3 # Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
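# DataStore (below) caches the parsed output of pvs/vgs/lvs -- or of a single
# 'fullreport' when cmdhandler.supports_json() reports JSON support -- and keeps
# lookup tables keyed by UUID, by name or device path, and by containing VG.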
from collections import OrderedDict import pprint as prettyprint import os import sys from lvmdbusd import cmdhandler from lvmdbusd.utils import log_debug, log_error class DataStore(object): def __init__(self, usejson=True): self.pvs = {} self.vgs = {} self.lvs = {} self.pv_lvs = {} self.lv_pvs = {} self.lvs_hidden = {} self.pv_path_to_uuid = {} self.vg_name_to_uuid = {} self.lv_full_name_to_uuid = {} self.lvs_in_vgs = {} self.pvs_in_vgs = {} # self.refresh() self.num_refreshes = 0 if usejson: self.json = cmdhandler.supports_json() else: self.json = usejson @staticmethod def _insert_record(table, key, record, allowed_multiple): if key in table: existing = table[key] for rec_k, rec_v in record.items(): if rec_k in allowed_multiple: # This column name allows us to store multiple value for # each type if not isinstance(existing[rec_k], list): existing_value = existing[rec_k] existing[rec_k] = [existing_value, rec_v] else: existing[rec_k].append(rec_v) else: # If something is not expected to have changing values # lets ensure that if existing[rec_k] != rec_v: raise RuntimeError( "existing[%s]=%s != %s" % (rec_k, str(existing[rec_k]), str(rec_v))) else: table[key] = record @staticmethod def _pvs_parse_common(c_pvs, c_pvs_in_vgs, c_lookup): for p in c_pvs.values(): # Capture which PVs are associated with which VG if p['vg_uuid'] not in c_pvs_in_vgs: c_pvs_in_vgs[p['vg_uuid']] = [] if p['vg_name']: c_pvs_in_vgs[p['vg_uuid']].append( (p['pv_name'], p['pv_uuid'])) # Lookup for translating between /dev/ and pv uuid c_lookup[p['pv_name']] = p['pv_uuid'] @staticmethod def _parse_pvs(_pvs): pvs = sorted(_pvs, key=lambda pk: pk['pv_name']) c_pvs = OrderedDict() c_lookup = {} c_pvs_in_vgs = {} for p in pvs: DataStore._insert_record( c_pvs, p['pv_uuid'], p, ['pvseg_start', 'pvseg_size', 'segtype']) DataStore._pvs_parse_common(c_pvs, c_pvs_in_vgs, c_lookup) return c_pvs, c_lookup, c_pvs_in_vgs @staticmethod def _parse_pvs_json(_all): c_pvs = OrderedDict() c_lookup = {} c_pvs_in_vgs = {} # Each item item in the report is a collection of information pertaining # to the vg for r in _all['report']: tmp_pv = [] # Get the pv data for this VG. if 'pv' in r: tmp_pv.extend(r['pv']) # Sort them sorted_tmp_pv = sorted(tmp_pv, key=lambda pk: pk['pv_name']) # Add them to result set for p in sorted_tmp_pv: c_pvs[p['pv_uuid']] = p if 'pvseg' in r: for s in r['pvseg']: r = c_pvs[s['pv_uuid']] r.setdefault('pvseg_start', []).append(s['pvseg_start']) r.setdefault('pvseg_size', []).append(s['pvseg_size']) r.setdefault('segtype', []).append(s['segtype']) # TODO: Remove this bug work around when we have orphan segs. for i in c_pvs.values(): if 'pvseg_start' not in i: i['pvseg_start'] = '0' i['pvseg_size'] = i['pv_pe_count'] i['segtype'] = 'free' DataStore._pvs_parse_common(c_pvs, c_pvs_in_vgs, c_lookup) return c_pvs, c_lookup, c_pvs_in_vgs @staticmethod def _parse_vgs(_vgs): vgs = sorted(_vgs, key=lambda vk: vk['vg_name']) c_vgs = OrderedDict() c_lookup = {} for i in vgs: c_lookup[i['vg_name']] = i['vg_uuid'] DataStore._insert_record(c_vgs, i['vg_uuid'], i, []) return c_vgs, c_lookup @staticmethod def _parse_vgs_json(_all): tmp_vg = [] for r in _all['report']: # Get the pv data for this VG. 
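# --- Editor's sketch (not part of the LVM2 sources) --------------------------
# DataStore._insert_record() above merges multiple 'pvs' report rows that share
# a pv_uuid: columns listed in allowed_multiple (pvseg_start, pvseg_size,
# segtype) are collected into lists, while every other column must stay
# identical across rows.  A simplified, standalone illustration of that merge
# behaviour with hypothetical report rows:

def merge_rows(rows, key_name, allowed_multiple):
	table = {}
	for row in rows:
		key = row[key_name]
		if key not in table:
			table[key] = dict(row)
			continue
		existing = table[key]
		for col, value in row.items():
			if col in allowed_multiple:
				# Per-segment columns accumulate into a list.
				if not isinstance(existing[col], list):
					existing[col] = [existing[col]]
				existing[col].append(value)
			elif existing[col] != value:
				raise RuntimeError("column %r changed between rows" % col)
	return table

# Hypothetical rows describing one PV with two segments:
rows = [
	{'pv_uuid': 'UUID-1', 'pv_name': '/dev/sda1', 'pvseg_start': '0', 'segtype': 'linear'},
	{'pv_uuid': 'UUID-1', 'pv_name': '/dev/sda1', 'pvseg_start': '100', 'segtype': 'free'},
]
merged = merge_rows(rows, 'pv_uuid', ('pvseg_start', 'segtype'))
assert merged['UUID-1']['pvseg_start'] == ['0', '100']
assert merged['UUID-1']['pv_name'] == '/dev/sda1'
# -----------------------------------------------------------------------------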
if 'vg' in r: tmp_vg.extend(r['vg']) # Sort for consistent output, however this is optional vgs = sorted(tmp_vg, key=lambda vk: vk['vg_name']) c_vgs = OrderedDict() c_lookup = {} for i in vgs: c_lookup[i['vg_name']] = i['vg_uuid'] c_vgs[i['vg_uuid']] = i return c_vgs, c_lookup @staticmethod def _parse_lvs_common(c_lvs, c_lv_full_lookup): c_lvs_in_vgs = OrderedDict() c_lvs_hidden = OrderedDict() for i in c_lvs.values(): if i['vg_uuid'] not in c_lvs_in_vgs: c_lvs_in_vgs[i['vg_uuid']] = [] c_lvs_in_vgs[ i['vg_uuid']].append( (i['lv_name'], (i['lv_attr'], i['lv_layout'], i['lv_role']), i['lv_uuid'])) if i['lv_parent']: # Lookup what the parent refers too parent_name = i['lv_parent'] full_parent_name = "%s/%s" % (i['vg_name'], parent_name) if full_parent_name not in c_lv_full_lookup: parent_name = '[%s]' % (parent_name) full_parent_name = "%s/%s" % (i['vg_name'], parent_name) parent_uuid = c_lv_full_lookup[full_parent_name] if parent_uuid not in c_lvs_hidden: c_lvs_hidden[parent_uuid] = [] c_lvs_hidden[parent_uuid].append( (i['lv_uuid'], i['lv_name'])) return c_lvs, c_lvs_in_vgs, c_lvs_hidden, c_lv_full_lookup @staticmethod def _parse_lvs(_lvs): lvs = sorted(_lvs, key=lambda vk: vk['lv_name']) c_lvs = OrderedDict() c_lv_full_lookup = OrderedDict() for i in lvs: full_name = "%s/%s" % (i['vg_name'], i['lv_name']) c_lv_full_lookup[full_name] = i['lv_uuid'] DataStore._insert_record( c_lvs, i['lv_uuid'], i, ['seg_pe_ranges', 'segtype']) return DataStore._parse_lvs_common(c_lvs, c_lv_full_lookup) @staticmethod def _parse_lvs_json(_all): c_lvs = OrderedDict() c_lv_full_lookup = {} # Each item item in the report is a collection of information pertaining # to the vg for r in _all['report']: # Get the lv data for this VG. if 'lv' in r: # Add them to result set for i in r['lv']: full_name = "%s/%s" % (i['vg_name'], i['lv_name']) c_lv_full_lookup[full_name] = i['lv_uuid'] c_lvs[i['lv_uuid']] = i # Add in the segment data if 'seg' in r: for s in r['seg']: r = c_lvs[s['lv_uuid']] r.setdefault('seg_pe_ranges', []).append(s['seg_pe_ranges']) r.setdefault('segtype', []).append(s['segtype']) return DataStore._parse_lvs_common(c_lvs, c_lv_full_lookup) @staticmethod def _make_list(l): if not isinstance(l, list): l = [l] return l @staticmethod def _parse_seg_entry(se, segtype): if se: # print("_parse_seg_entry %s %s" % (str(se), str(segtype))) device, segs = se.split(":") start, end = segs.split('-') return (device, (start, end), segtype) else: return ("", (), segtype) @staticmethod def _build_segments(l, seg_types): rc = [] l = DataStore._make_list(l) s = DataStore._make_list(seg_types) assert len(l) == len(s) ls = list(zip(l, s)) for i in ls: if ' ' in i[0]: tmp = i[0].split(' ') for t in tmp: rc.append(DataStore._parse_seg_entry(t, i[1])) else: rc.append(DataStore._parse_seg_entry(*i)) return rc @staticmethod def _pv_device_lv_entry(table, pv_device, lv_uuid, meta, lv_attr, segment_info): if pv_device not in table: table[pv_device] = {} if lv_uuid not in table[pv_device]: table[pv_device][lv_uuid] = {} table[pv_device][lv_uuid]['segs'] = [segment_info] table[pv_device][lv_uuid]['name'] = meta table[pv_device][lv_uuid]['meta'] = lv_attr else: table[pv_device][lv_uuid]['segs'].append(segment_info) @staticmethod def _pv_device_lv_format(pv_device_lvs): rc = {} for pv_device, pd in pv_device_lvs.items(): lvs = [] for lv_uuid, ld in sorted(pd.items()): lvs.append((lv_uuid, ld['name'], ld['meta'], ld['segs'])) rc[pv_device] = lvs return rc @staticmethod def _lvs_device_pv_entry(table, lv_uuid, pv_device, pv_uuid, 
segment_info): if lv_uuid not in table: table[lv_uuid] = {} if pv_device not in table[lv_uuid]: table[lv_uuid][pv_device] = {} table[lv_uuid][pv_device]['segs'] = [segment_info] table[lv_uuid][pv_device]['pv_uuid'] = pv_uuid else: table[lv_uuid][pv_device]['segs'].append(segment_info) @staticmethod def _lvs_device_pv_format(lvs_device_pvs): rc = {} for lv_uuid, ld in lvs_device_pvs.items(): pvs = [] for pv_device, pd in sorted(ld.items()): pvs.append((pd['pv_uuid'], pv_device, pd['segs'])) rc[lv_uuid] = pvs return rc def _parse_pv_in_lvs(self): pv_device_lvs = {} # What LVs are stored on a PV lvs_device_pv = {} # Where LV data is stored for i in self.lvs.values(): segs = self._build_segments(i['seg_pe_ranges'], i['segtype']) for s in segs: # We are referring to physical device if '/dev/' in s[0]: device, r, seg_type = s DataStore._pv_device_lv_entry( pv_device_lvs, device, i['lv_uuid'], i['lv_name'], (i['lv_attr'], i['lv_layout'], i['lv_role']), (r[0], r[1], seg_type)) # (pv_name, pv_segs, pv_uuid) DataStore._lvs_device_pv_entry( lvs_device_pv, i['lv_uuid'], device, self.pv_path_to_uuid[device], (r[0], r[1], seg_type)) else: # TODO Handle the case where the segments refer to a LV # and not a PV pass # print("Handle this %s %s %s" % (s[0], s[1], s[2])) # Convert form to needed result for consumption pv_device_lvs_result = DataStore._pv_device_lv_format(pv_device_lvs) lvs_device_pv_result = DataStore._lvs_device_pv_format(lvs_device_pv) return pv_device_lvs_result, lvs_device_pv_result def refresh(self, log=True): """ Go out and query lvm for the latest data in as few trips as possible :param log Add debug log entry/exit messages :return: None """ self.num_refreshes += 1 if log: log_debug("lvmdb - refresh entry") # Grab everything first then parse it if self.json: # Do a single lvm retrieve for everything in json a = cmdhandler.lvm_full_report_json() _pvs, _pvs_lookup, _pvs_in_vgs = self._parse_pvs_json(a) _vgs, _vgs_lookup = self._parse_vgs_json(a) _lvs, _lvs_in_vgs, _lvs_hidden, _lvs_lookup = self._parse_lvs_json(a) else: _raw_pvs = cmdhandler.pv_retrieve_with_segs() _raw_vgs = cmdhandler.vg_retrieve(None) _raw_lvs = cmdhandler.lv_retrieve_with_segments() _pvs, _pvs_lookup, _pvs_in_vgs = self._parse_pvs(_raw_pvs) _vgs, _vgs_lookup = self._parse_vgs(_raw_vgs) _lvs, _lvs_in_vgs, _lvs_hidden, _lvs_lookup = self._parse_lvs(_raw_lvs) # Set all self.pvs = _pvs self.pv_path_to_uuid = _pvs_lookup self.vg_name_to_uuid = _vgs_lookup self.lv_full_name_to_uuid = _lvs_lookup self.vgs = _vgs self.lvs = _lvs self.lvs_in_vgs = _lvs_in_vgs self.pvs_in_vgs = _pvs_in_vgs self.lvs_hidden = _lvs_hidden # Create lookup table for which LV and segments are on each PV self.pv_lvs, self.lv_pvs = self._parse_pv_in_lvs() if log: log_debug("lvmdb - refresh exit") def fetch_pvs(self, pv_name): if not pv_name: return self.pvs.values() else: rc = [] for s in pv_name: # Ths user could be using a symlink instead of the actual # block device, make sure we are using actual block device file # if the pv name isn't in the lookup if s not in self.pv_path_to_uuid: s = os.path.realpath(s) rc.append(self.pvs[self.pv_path_to_uuid[s]]) return rc def pv_missing(self, pv_uuid): if pv_uuid in self.pvs: if self.pvs[pv_uuid]['pv_missing'] == '': return False return True def fetch_vgs(self, vg_name): if not vg_name: return self.vgs.values() else: rc = [] for s in vg_name: rc.append(self.vgs[self.vg_name_to_uuid[s]]) return rc def fetch_lvs(self, lv_names): try: if not lv_names: return self.lvs.values() else: rc = [] for s in lv_names: 
rc.append(self.lvs[self.lv_full_name_to_uuid[s]]) return rc except KeyError as ke: log_error("Key %s not found!" % (str(lv_names))) log_error("lv name to uuid lookup") for keys in sorted(self.lv_full_name_to_uuid.keys()): log_error("%s" % (keys)) log_error("lvs entries by uuid") for keys in sorted(self.lvs.keys()): log_error("%s" % (keys)) raise ke def pv_pe_segments(self, pv_uuid): pv = self.pvs[pv_uuid] return list(zip(pv['pvseg_start'], pv['pvseg_size'])) def pv_contained_lv(self, pv_device): rc = [] if pv_device in self.pv_lvs: rc = self.pv_lvs[pv_device] return rc def lv_contained_pv(self, lv_uuid): rc = [] if lv_uuid in self.lv_pvs: rc = self.lv_pvs[lv_uuid] return rc def lvs_in_vg(self, vg_uuid): # Return an array of # (lv_name, (lv_attr, lv_layout, lv_role), lv_uuid) rc = [] if vg_uuid in self.lvs_in_vgs: rc = self.lvs_in_vgs[vg_uuid] return rc def pvs_in_vg(self, vg_uuid): # Returns an array of (pv_name, pv_uuid) rc = [] if vg_uuid in self.pvs_in_vgs: rc = self.pvs_in_vgs[vg_uuid] return rc def hidden_lvs(self, lv_uuid): # For a specified LV, return a list of hidden lv_uuid, lv_name # for it rc = [] if lv_uuid in self.lvs_hidden: rc = self.lvs_hidden[lv_uuid] return rc if __name__ == "__main__": pp = prettyprint.PrettyPrinter(indent=4) use_json = False if len(sys.argv) != 1: print(len(sys.argv)) use_json = True ds = DataStore(use_json) ds.refresh() print("PVS") for v in ds.pvs.values(): pp.pprint(v) print('PV missing is %s' % ds.pv_missing(v['pv_uuid'])) print("VGS") for v in ds.vgs.values(): pp.pprint(v) print("LVS") for v in ds.lvs.values(): pp.pprint(v) print("LVS in VG") for k, v in ds.lvs_in_vgs.items(): print("VG uuid = %s" % (k)) pp.pprint(v) print("pv_in_lvs") for k, v in ds.pv_lvs.items(): print("PV %s contains LVS:" % (k)) pp.pprint(v) for k, v in ds.lv_pvs.items(): print("LV device = %s" % (k)) pp.pprint(v) LVM2.2.02.176/daemons/lvmdbusd/main.py0000644000000000000120000001313613176752421016074 0ustar rootwheel# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from . import cfg from . import objectmanager from . import utils from .cfg import BUS_NAME, BASE_INTERFACE, BASE_OBJ_PATH, MANAGER_OBJ_PATH import threading from . import cmdhandler import time import signal import dbus import dbus.mainloop.glib from . import lvmdb # noinspection PyUnresolvedReferences from gi.repository import GLib from .fetch import StateUpdate from .manager import Manager import traceback import queue from . 
import udevwatch from .utils import log_debug, log_error import argparse import os import sys from .cmdhandler import LvmFlightRecorder from .request import RequestEntry class Lvm(objectmanager.ObjectManager): def __init__(self, object_path): super(Lvm, self).__init__(object_path, BASE_INTERFACE) def process_request(): while cfg.run.value != 0: # noinspection PyBroadException try: req = cfg.worker_q.get(True, 5) log_debug( "Running method: %s with args %s" % (str(req.method), str(req.arguments))) req.run_cmd() log_debug("Method complete ") except queue.Empty: pass except Exception: st = traceback.format_exc() utils.log_error("process_request exception: \n%s" % st) def check_bb_size(value): v = int(value) if v < 0: raise argparse.ArgumentTypeError( "positive integers only ('%s' invalid)" % value) return v def install_signal_handlers(): # Because of the glib main loop stuff the python signal handler code is # apparently not usable and we need to use the glib calls instead signal_add = None if hasattr(GLib, 'unix_signal_add'): signal_add = GLib.unix_signal_add elif hasattr(GLib, 'unix_signal_add_full'): signal_add = GLib.unix_signal_add_full if signal_add: signal_add(GLib.PRIORITY_HIGH, signal.SIGHUP, utils.handler, signal.SIGHUP) signal_add(GLib.PRIORITY_HIGH, signal.SIGINT, utils.handler, signal.SIGINT) signal_add(GLib.PRIORITY_HIGH, signal.SIGUSR1, utils.handler, signal.SIGUSR1) else: log_error("GLib.unix_signal_[add|add_full] are NOT available!") def main(): start = time.time() # Add simple command line handling parser = argparse.ArgumentParser() parser.add_argument( "--udev", action='store_true', help="Use udev for updating state", default=False, dest='use_udev') parser.add_argument( "--debug", action='store_true', help="Dump debug messages", default=False, dest='debug') parser.add_argument( "--nojson", action='store_false', help="Do not use LVM JSON output (disables lvmshell)", default=True, dest='use_json') parser.add_argument( "--lvmshell", action='store_true', help="Use the lvm shell, not fork & exec lvm", default=False, dest='use_lvm_shell') parser.add_argument( "--blackboxsize", help="Size of the black box flight recorder, 0 to disable", default=10, type=check_bb_size, dest='bb_size') use_session = os.getenv('LVMDBUSD_USE_SESSION', False) # Ensure that we get consistent output for parsing stdout/stderr os.environ["LC_ALL"] = "C" cfg.args = parser.parse_args() cfg.create_request_entry = RequestEntry # We create a flight recorder in cmdhandler too, but we replace it here # as the user may be specifying a different size. The default one in # cmdhandler is for when we are running other code with a different main. cfg.blackbox = LvmFlightRecorder(cfg.args.bb_size) if cfg.args.use_lvm_shell and not cfg.args.use_json: log_error("You cannot specify --lvmshell and --nojson") sys.exit(1) # List of threads that we start up thread_list = [] install_signal_handlers() dbus.mainloop.glib.DBusGMainLoop(set_as_default=True) dbus.mainloop.glib.threads_init() cmdhandler.set_execution(cfg.args.use_lvm_shell) if use_session: cfg.bus = dbus.SessionBus() else: cfg.bus = dbus.SystemBus() # The base name variable needs to exist for things to work. 
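# --- Editor's sketch (not part of the LVM2 sources) --------------------------
# Once main() claims BUS_NAME on the bus and registers the Lvm object manager
# below, a dbus client can enumerate every PV/VG/LV object the daemon exposes
# through the standard org.freedesktop.DBus.ObjectManager interface.  The bus
# name and base object path used here are assumptions (they are defined in
# cfg.py, which is not shown in this excerpt); adjust them to match the
# running service.

import dbus  # python-dbus, the same binding the daemon itself uses

ASSUMED_BUS_NAME = 'com.redhat.lvmdbus1'     # assumption: cfg.BUS_NAME
ASSUMED_BASE_PATH = '/com/redhat/lvmdbus1'   # assumption: cfg.BASE_OBJ_PATH

bus = dbus.SystemBus()
om = dbus.Interface(
	bus.get_object(ASSUMED_BUS_NAME, ASSUMED_BASE_PATH),
	'org.freedesktop.DBus.ObjectManager')

for path, interfaces in om.GetManagedObjects().items():
	print(path, sorted(interfaces.keys()))
# -----------------------------------------------------------------------------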
# noinspection PyUnusedLocal base_name = dbus.service.BusName(BUS_NAME, cfg.bus) cfg.om = Lvm(BASE_OBJ_PATH) cfg.om.register_object(Manager(MANAGER_OBJ_PATH)) cfg.db = lvmdb.DataStore(cfg.args.use_json) # Using a thread to process requests, we cannot hang the dbus library # thread that is handling the dbus interface thread_list.append(threading.Thread(target=process_request, name='process_request')) # Have a single thread handling updating lvm and the dbus model so we # don't have multiple threads doing this as the same time updater = StateUpdate() thread_list.append(updater.thread) cfg.load = updater.load cfg.loop = GLib.MainLoop() for thread in thread_list: thread.damon = True thread.start() # Add udev watching if cfg.args.use_udev: log_debug('Utilizing udev to trigger updates') # In all cases we are going to monitor for udev until we get an # ExternalEvent. In the case where we get an external event and the user # didn't specify --udev we will stop monitoring udev udevwatch.add() end = time.time() log_debug( 'Service ready! total time= %.4f, lvm time= %.4f count= %d' % (end - start, cmdhandler.total_time, cmdhandler.total_count), 'bg_black', 'fg_light_green') try: if cfg.run.value != 0: cfg.loop.run() udevwatch.remove() for thread in thread_list: thread.join() except KeyboardInterrupt: # If we are unable to register signal handler, we will end up here when # the service gets a ^C or a kill -2 utils.handler(signal.SIGINT) return 0 LVM2.2.02.176/daemons/lvmdbusd/vg.py0000644000000000000120000005710113176752421015564 0ustar rootwheel# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from .automatedproperties import AutomatedProperties from . import utils from .utils import pv_obj_path_generate, vg_obj_path_generate, n import dbus from . import cfg from .cfg import VG_INTERFACE from . import cmdhandler from .request import RequestEntry from .loader import common from .state import State from . 
import background from .utils import round_size, mt_remove_dbus_objects from .job import JobState # noinspection PyUnusedLocal def vgs_state_retrieve(selection, cache_refresh=True): rc = [] if cache_refresh: cfg.db.refresh() for v in cfg.db.fetch_vgs(selection): rc.append( VgState( v['vg_uuid'], v['vg_name'], v['vg_fmt'], n(v['vg_size']), n(v['vg_free']), v['vg_sysid'], n(v['vg_extent_size']), n(v['vg_extent_count']), n(v['vg_free_count']), v['vg_profile'], n(v['max_lv']), n(v['max_pv']), n(v['pv_count']), n(v['lv_count']), n(v['snap_count']), n(v['vg_seqno']), n(v['vg_mda_count']), n(v['vg_mda_free']), n(v['vg_mda_size']), n(v['vg_mda_used_count']), v['vg_attr'], v['vg_tags'])) return rc def load_vgs(vg_specific=None, object_path=None, refresh=False, emit_signal=False, cache_refresh=True): return common(vgs_state_retrieve, (Vg,), vg_specific, object_path, refresh, emit_signal, cache_refresh) # noinspection PyPep8Naming,PyUnresolvedReferences,PyUnusedLocal class VgState(State): @property def lvm_id(self): return self.Name def identifiers(self): return (self.Uuid, self.Name) def _lv_paths_build(self): rc = [] for lv in cfg.db.lvs_in_vg(self.Uuid): (lv_name, meta, lv_uuid) = lv full_name = "%s/%s" % (self.Name, lv_name) gen = utils.lv_object_path_method(lv_name, meta) lv_path = cfg.om.get_object_path_by_uuid_lvm_id( lv_uuid, full_name, gen) rc.append(lv_path) return dbus.Array(rc, signature='o') def _pv_paths_build(self): rc = [] for p in cfg.db.pvs_in_vg(self.Uuid): (pv_name, pv_uuid) = p rc.append(cfg.om.get_object_path_by_uuid_lvm_id( pv_uuid, pv_name, pv_obj_path_generate)) return rc def __init__(self, Uuid, Name, Fmt, SizeBytes, FreeBytes, SysId, ExtentSizeBytes, ExtentCount, FreeCount, Profile, MaxLv, MaxPv, PvCount, LvCount, SnapCount, Seqno, MdaCount, MdaFree, MdaSizeBytes, MdaUsedCount, attr, tags): utils.init_class_from_arguments(self) self.Pvs = self._pv_paths_build() self.Lvs = self._lv_paths_build() def create_dbus_object(self, path): if not path: path = cfg.om.get_object_path_by_uuid_lvm_id( self.Uuid, self.Name, vg_obj_path_generate) return Vg(path, self) # noinspection PyMethodMayBeStatic def creation_signature(self): return (Vg, vg_obj_path_generate) # noinspection PyPep8Naming @utils.dbus_property(VG_INTERFACE, 'Uuid', 's') @utils.dbus_property(VG_INTERFACE, 'Name', 's') @utils.dbus_property(VG_INTERFACE, 'Fmt', 's') @utils.dbus_property(VG_INTERFACE, 'SizeBytes', 't', 0) @utils.dbus_property(VG_INTERFACE, 'FreeBytes', 't', 0) @utils.dbus_property(VG_INTERFACE, 'SysId', 's') @utils.dbus_property(VG_INTERFACE, 'ExtentSizeBytes', 't') @utils.dbus_property(VG_INTERFACE, 'ExtentCount', 't') @utils.dbus_property(VG_INTERFACE, 'FreeCount', 't') @utils.dbus_property(VG_INTERFACE, 'Profile', 's') @utils.dbus_property(VG_INTERFACE, 'MaxLv', 't') @utils.dbus_property(VG_INTERFACE, 'MaxPv', 't') @utils.dbus_property(VG_INTERFACE, 'PvCount', 't') @utils.dbus_property(VG_INTERFACE, 'LvCount', 't') @utils.dbus_property(VG_INTERFACE, 'SnapCount', 't') @utils.dbus_property(VG_INTERFACE, 'Seqno', 't') @utils.dbus_property(VG_INTERFACE, 'MdaCount', 't') @utils.dbus_property(VG_INTERFACE, 'MdaFree', 't') @utils.dbus_property(VG_INTERFACE, 'MdaSizeBytes', 't') @utils.dbus_property(VG_INTERFACE, 'MdaUsedCount', 't') class Vg(AutomatedProperties): _Tags_meta = ("as", VG_INTERFACE) _Pvs_meta = ("ao", VG_INTERFACE) _Lvs_meta = ("ao", VG_INTERFACE) _Writeable_meta = ("b", VG_INTERFACE) _Readable_meta = ("b", VG_INTERFACE) _Resizeable_meta = ("b", VG_INTERFACE) _Exportable_meta = ('b', VG_INTERFACE) 
_Partial_meta = ('b', VG_INTERFACE) _AllocContiguous_meta = ('b', VG_INTERFACE) _AllocCling_meta = ('b', VG_INTERFACE) _AllocNormal_meta = ('b', VG_INTERFACE) _AllocAnywhere_meta = ('b', VG_INTERFACE) _Clustered_meta = ('b', VG_INTERFACE) # noinspection PyUnusedLocal,PyPep8Naming def __init__(self, object_path, object_state): super(Vg, self).__init__(object_path, vgs_state_retrieve) self.set_interface(VG_INTERFACE) self._object_path = object_path self.state = object_state @staticmethod def fetch_new_lv(vg_name, lv_name): return cfg.om.get_object_path_by_lvm_id("%s/%s" % (vg_name, lv_name)) @staticmethod def handle_execute(rc, out, err): if rc == 0: cfg.load() else: # Need to work on error handling, need consistent raise dbus.exceptions.DBusException( VG_INTERFACE, 'Exit code %s, stderr = %s' % (str(rc), err)) @staticmethod def validate_dbus_object(vg_uuid, vg_name): dbo = cfg.om.get_object_by_uuid_lvm_id(vg_uuid, vg_name) if not dbo: raise dbus.exceptions.DBusException( VG_INTERFACE, 'VG with uuid %s and name %s not present!' % (vg_uuid, vg_name)) return dbo @staticmethod def _rename(uuid, vg_name, new_name, rename_options): # Make sure we have a dbus object representing it Vg.validate_dbus_object(uuid, vg_name) rc, out, err = cmdhandler.vg_rename( vg_name, new_name, rename_options) Vg.handle_execute(rc, out, err) return '/' @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='sia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def Rename(self, name, tmo, rename_options, cb, cbe): utils.validate_vg_name(VG_INTERFACE, name) r = RequestEntry(tmo, Vg._rename, (self.state.Uuid, self.state.lvm_id, name, rename_options), cb, cbe, False) cfg.worker_q.put(r) @staticmethod def _remove(uuid, vg_name, remove_options): # Make sure we have a dbus object representing it Vg.validate_dbus_object(uuid, vg_name) # Remove the VG, if successful then remove from the model rc, out, err = cmdhandler.vg_remove(vg_name, remove_options) Vg.handle_execute(rc, out, err) return '/' @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='ia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def Remove(self, tmo, remove_options, cb, cbe): r = RequestEntry(tmo, Vg._remove, (self.state.Uuid, self.state.lvm_id, remove_options), cb, cbe, False) cfg.worker_q.put(r) @staticmethod def _change(uuid, vg_name, change_options): Vg.validate_dbus_object(uuid, vg_name) rc, out, err = cmdhandler.vg_change(change_options, vg_name) Vg.handle_execute(rc, out, err) return '/' # TODO: This should be broken into a number of different methods # instead of having one method that takes a hash for parameters. Some of # the changes that vgchange does works on entire system, not just a # specfic vg, thus that should be in the Manager interface. 
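# --- Editor's sketch (not part of the LVM2 sources) --------------------------
# Every Vg method above follows the same pattern: validate the input, wrap the
# real work in a RequestEntry, queue it on cfg.worker_q, and let the caller's
# 'tmo' argument decide whether the call blocks (-1), returns a Job object path
# immediately (0), or waits up to N seconds before a Job is handed back.  A
# client-side sketch of Rename() follows; the interface and bus names are
# assumptions (VG_INTERFACE and BUS_NAME live in cfg.py, not shown here).

import dbus

ASSUMED_VG_IFACE = 'com.redhat.lvmdbus1.Vg'   # assumption: cfg.VG_INTERFACE

def rename_vg(vg_object_path, new_name, bus_name='com.redhat.lvmdbus1'):
	bus = dbus.SystemBus()
	vg = dbus.Interface(bus.get_object(bus_name, vg_object_path),
		ASSUMED_VG_IFACE)
	# tmo = -1: block until the rename finishes; rename_options left empty.
	# For very long operations a larger dbus call timeout may be needed.
	return vg.Rename(new_name, dbus.Int32(-1),
		dbus.Dictionary({}, signature='sv'))
# -----------------------------------------------------------------------------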
@dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='ia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def Change(self, tmo, change_options, cb, cbe): r = RequestEntry(tmo, Vg._change, (self.state.Uuid, self.state.lvm_id, change_options), cb, cbe, False) cfg.worker_q.put(r) @staticmethod def _reduce(uuid, vg_name, missing, pv_object_paths, reduce_options): # Make sure we have a dbus object representing it Vg.validate_dbus_object(uuid, vg_name) pv_devices = [] # If pv_object_paths is not empty, then get the device paths if pv_object_paths and len(pv_object_paths) > 0: for pv_op in pv_object_paths: pv = cfg.om.get_object_by_path(pv_op) if pv: pv_devices.append(pv.lvm_id) else: raise dbus.exceptions.DBusException( VG_INTERFACE, 'PV Object path not found = %s!' % pv_op) rc, out, err = cmdhandler.vg_reduce(vg_name, missing, pv_devices, reduce_options) Vg.handle_execute(rc, out, err) return '/' @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='baoia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def Reduce(self, missing, pv_object_paths, tmo, reduce_options, cb, cbe): r = RequestEntry(tmo, Vg._reduce, (self.state.Uuid, self.state.lvm_id, missing, pv_object_paths, reduce_options), cb, cbe, False) cfg.worker_q.put(r) @staticmethod def _extend(uuid, vg_name, pv_object_paths, extend_options): # Make sure we have a dbus object representing it Vg.validate_dbus_object(uuid, vg_name) extend_devices = [] for i in pv_object_paths: pv = cfg.om.get_object_by_path(i) if pv: extend_devices.append(pv.lvm_id) else: raise dbus.exceptions.DBusException( VG_INTERFACE, 'PV Object path not found = %s!' % i) if len(extend_devices): rc, out, err = cmdhandler.vg_extend(vg_name, extend_devices, extend_options) Vg.handle_execute(rc, out, err) else: raise dbus.exceptions.DBusException( VG_INTERFACE, 'No pv_object_paths provided!') return '/' @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='aoia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def Extend(self, pv_object_paths, tmo, extend_options, cb, cbe): r = RequestEntry(tmo, Vg._extend, (self.state.Uuid, self.state.lvm_id, pv_object_paths, extend_options), cb, cbe, False) cfg.worker_q.put(r) @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='o(tt)a(ott)ia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def Move(self, pv_src_obj, pv_source_range, pv_dests_and_ranges, tmo, move_options, cb, cbe): job_state = JobState() r = RequestEntry( tmo, background.move, (VG_INTERFACE, None, pv_src_obj, pv_source_range, pv_dests_and_ranges, move_options, job_state), cb, cbe, False, job_state) cfg.worker_q.put(r) @staticmethod def _lv_create(uuid, vg_name, name, size_bytes, pv_dests_and_ranges, create_options): # Make sure we have a dbus object representing it pv_dests = [] Vg.validate_dbus_object(uuid, vg_name) if len(pv_dests_and_ranges): for pr in pv_dests_and_ranges: pv_dbus_obj = cfg.om.get_object_by_path(pr[0]) if not pv_dbus_obj: raise dbus.exceptions.DBusException( VG_INTERFACE, 'PV Destination (%s) not found' % pr[0]) pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2])) rc, out, err = cmdhandler.vg_lv_create( vg_name, create_options, name, size_bytes, pv_dests) Vg.handle_execute(rc, out, err) return Vg.fetch_new_lv(vg_name, name) @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='sta(ott)ia{sv}', out_signature='(oo)', async_callbacks=('cb', 'cbe')) def LvCreate(self, name, size_bytes, pv_dests_and_ranges, tmo, create_options, cb, cbe): """ This one it for the advanced users 
that want to roll their own :param name: Name of the LV :param size_bytes: Size of LV in bytes :param pv_dests_and_ranges: Optional array of PV object paths and ranges :param tmo: -1 == Wait forever, 0 == return job immediately, > 0 == willing to wait that number of seconds before getting a job :param create_options: hash of key/value pairs :param cb: Internal, not accessible by dbus API user :param cbe: Internal, not accessible by dbus API user :return: (oo) First object path is newly created object, second is job object path if created. Each == '/' when it doesn't apply. """ utils.validate_lv_name(VG_INTERFACE, self.Name, name) r = RequestEntry(tmo, Vg._lv_create, (self.state.Uuid, self.state.lvm_id, name, round_size(size_bytes), pv_dests_and_ranges, create_options), cb, cbe) cfg.worker_q.put(r) @staticmethod def _lv_create_linear(uuid, vg_name, name, size_bytes, thin_pool, create_options): # Make sure we have a dbus object representing it Vg.validate_dbus_object(uuid, vg_name) rc, out, err = cmdhandler.vg_lv_create_linear( vg_name, create_options, name, size_bytes, thin_pool) Vg.handle_execute(rc, out, err) return Vg.fetch_new_lv(vg_name, name) @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='stbia{sv}', out_signature='(oo)', async_callbacks=('cb', 'cbe')) def LvCreateLinear(self, name, size_bytes, thin_pool, tmo, create_options, cb, cbe): utils.validate_lv_name(VG_INTERFACE, self.Name, name) r = RequestEntry(tmo, Vg._lv_create_linear, (self.state.Uuid, self.state.lvm_id, name, round_size(size_bytes), thin_pool, create_options), cb, cbe) cfg.worker_q.put(r) @staticmethod def _lv_create_striped(uuid, vg_name, name, size_bytes, num_stripes, stripe_size_kb, thin_pool, create_options): # Make sure we have a dbus object representing it Vg.validate_dbus_object(uuid, vg_name) rc, out, err = cmdhandler.vg_lv_create_striped( vg_name, create_options, name, size_bytes, num_stripes, stripe_size_kb, thin_pool) Vg.handle_execute(rc, out, err) return Vg.fetch_new_lv(vg_name, name) @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='stuubia{sv}', out_signature='(oo)', async_callbacks=('cb', 'cbe')) def LvCreateStriped(self, name, size_bytes, num_stripes, stripe_size_kb, thin_pool, tmo, create_options, cb, cbe): utils.validate_lv_name(VG_INTERFACE, self.Name, name) r = RequestEntry( tmo, Vg._lv_create_striped, (self.state.Uuid, self.state.lvm_id, name, round_size(size_bytes), num_stripes, stripe_size_kb, thin_pool, create_options), cb, cbe) cfg.worker_q.put(r) @staticmethod def _lv_create_mirror(uuid, vg_name, name, size_bytes, num_copies, create_options): # Make sure we have a dbus object representing it Vg.validate_dbus_object(uuid, vg_name) rc, out, err = cmdhandler.vg_lv_create_mirror( vg_name, create_options, name, size_bytes, num_copies) Vg.handle_execute(rc, out, err) return Vg.fetch_new_lv(vg_name, name) @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='stuia{sv}', out_signature='(oo)', async_callbacks=('cb', 'cbe')) def LvCreateMirror(self, name, size_bytes, num_copies, tmo, create_options, cb, cbe): utils.validate_lv_name(VG_INTERFACE, self.Name, name) r = RequestEntry( tmo, Vg._lv_create_mirror, (self.state.Uuid, self.state.lvm_id, name, round_size(size_bytes), num_copies, create_options), cb, cbe) cfg.worker_q.put(r) @staticmethod def _lv_create_raid(uuid, vg_name, name, raid_type, size_bytes, num_stripes, stripe_size_kb, create_options): # Make sure we have a dbus object representing it Vg.validate_dbus_object(uuid, vg_name) rc, out, err = 
cmdhandler.vg_lv_create_raid( vg_name, create_options, name, raid_type, size_bytes, num_stripes, stripe_size_kb) Vg.handle_execute(rc, out, err) return Vg.fetch_new_lv(vg_name, name) @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='sstuuia{sv}', out_signature='(oo)', async_callbacks=('cb', 'cbe')) def LvCreateRaid(self, name, raid_type, size_bytes, num_stripes, stripe_size_kb, tmo, create_options, cb, cbe): utils.validate_lv_name(VG_INTERFACE, self.Name, name) r = RequestEntry(tmo, Vg._lv_create_raid, (self.state.Uuid, self.state.lvm_id, name, raid_type, round_size(size_bytes), num_stripes, stripe_size_kb, create_options), cb, cbe) cfg.worker_q.put(r) @staticmethod def _create_pool(uuid, vg_name, meta_data_lv, data_lv, create_options, create_method): # Make sure we have a dbus object representing it Vg.validate_dbus_object(uuid, vg_name) # Retrieve the full names for the metadata and data lv md = cfg.om.get_object_by_path(meta_data_lv) data = cfg.om.get_object_by_path(data_lv) if md and data: new_name = data.Name rc, out, err = create_method( md.lv_full_name(), data.lv_full_name(), create_options) if rc == 0: mt_remove_dbus_objects((md, data)) Vg.handle_execute(rc, out, err) else: msg = "" if not md: msg += 'Meta data LV with object path %s not present!' % \ (meta_data_lv) if not data_lv: msg += 'Data LV with object path %s not present!' % \ (meta_data_lv) raise dbus.exceptions.DBusException(VG_INTERFACE, msg) return Vg.fetch_new_lv(vg_name, new_name) @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='ooia{sv}', out_signature='(oo)', async_callbacks=('cb', 'cbe')) def CreateCachePool(self, meta_data_lv, data_lv, tmo, create_options, cb, cbe): r = RequestEntry( tmo, Vg._create_pool, (self.state.Uuid, self.state.lvm_id, meta_data_lv, data_lv, create_options, cmdhandler.vg_create_cache_pool), cb, cbe) cfg.worker_q.put(r) @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='ooia{sv}', out_signature='(oo)', async_callbacks=('cb', 'cbe')) def CreateThinPool(self, meta_data_lv, data_lv, tmo, create_options, cb, cbe): r = RequestEntry( tmo, Vg._create_pool, (self.state.Uuid, self.state.lvm_id, meta_data_lv, data_lv, create_options, cmdhandler.vg_create_thin_pool), cb, cbe) cfg.worker_q.put(r) @staticmethod def _pv_add_rm_tags(uuid, vg_name, pv_object_paths, tags_add, tags_del, tag_options): pv_devices = [] # Make sure we have a dbus object representing it Vg.validate_dbus_object(uuid, vg_name) # Check for existence of pv object paths for p in pv_object_paths: pv = cfg.om.get_object_by_path(p) if pv: pv_devices.append(pv.Name) else: raise dbus.exceptions.DBusException( VG_INTERFACE, 'PV object path = %s not found' % p) rc, out, err = cmdhandler.pv_tag( pv_devices, tags_add, tags_del, tag_options) Vg.handle_execute(rc, out, err) return '/' @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='aoasia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def PvTagsAdd(self, pvs, tags, tmo, tag_options, cb, cbe): for t in tags: utils.validate_tag(VG_INTERFACE, t) r = RequestEntry(tmo, Vg._pv_add_rm_tags, (self.state.Uuid, self.state.lvm_id, pvs, tags, None, tag_options), cb, cbe, return_tuple=False) cfg.worker_q.put(r) @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='aoasia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def PvTagsDel(self, pvs, tags, tmo, tag_options, cb, cbe): for t in tags: utils.validate_tag(VG_INTERFACE, t) r = RequestEntry( tmo, Vg._pv_add_rm_tags, (self.state.Uuid, self.state.lvm_id, pvs, None, tags, 
tag_options), cb, cbe, return_tuple=False) cfg.worker_q.put(r) @staticmethod def _vg_add_rm_tags(uuid, vg_name, tags_add, tags_del, tag_options): # Make sure we have a dbus object representing it Vg.validate_dbus_object(uuid, vg_name) rc, out, err = cmdhandler.vg_tag( vg_name, tags_add, tags_del, tag_options) Vg.handle_execute(rc, out, err) return '/' @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='asia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def TagsAdd(self, tags, tmo, tag_options, cb, cbe): for t in tags: utils.validate_tag(VG_INTERFACE, t) r = RequestEntry(tmo, Vg._vg_add_rm_tags, (self.state.Uuid, self.state.lvm_id, tags, None, tag_options), cb, cbe, return_tuple=False) cfg.worker_q.put(r) @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='asia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def TagsDel(self, tags, tmo, tag_options, cb, cbe): for t in tags: utils.validate_tag(VG_INTERFACE, t) r = RequestEntry(tmo, Vg._vg_add_rm_tags, (self.state.Uuid, self.state.lvm_id, None, tags, tag_options), cb, cbe, return_tuple=False) cfg.worker_q.put(r) @staticmethod def _vg_change_set(uuid, vg_name, method, value, options): # Make sure we have a dbus object representing it Vg.validate_dbus_object(uuid, vg_name) rc, out, err = method(vg_name, value, options) Vg.handle_execute(rc, out, err) return '/' @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='sia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def AllocationPolicySet(self, policy, tmo, policy_options, cb, cbe): r = RequestEntry(tmo, Vg._vg_change_set, (self.state.Uuid, self.state.lvm_id, cmdhandler.vg_allocation_policy, policy, policy_options), cb, cbe, return_tuple=False) cfg.worker_q.put(r) @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='tia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def MaxPvSet(self, number, tmo, max_options, cb, cbe): r = RequestEntry(tmo, Vg._vg_change_set, (self.state.Uuid, self.state.lvm_id, cmdhandler.vg_max_pv, number, max_options), cb, cbe, return_tuple=False) cfg.worker_q.put(r) @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='ia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def UuidGenerate(self, tmo, options, cb, cbe): r = RequestEntry(tmo, Vg._vg_change_set, (self.state.Uuid, self.state.lvm_id, cmdhandler.vg_uuid_gen, None, options), cb, cbe, return_tuple=False) cfg.worker_q.put(r) def _attribute(self, pos, ch): return dbus.Boolean(self.state.attr[pos] == ch) @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='tia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def MaxLvSet(self, number, tmo, max_options, cb, cbe): r = RequestEntry(tmo, Vg._vg_change_set, (self.state.Uuid, self.state.lvm_id, cmdhandler.vg_max_lv, number, max_options), cb, cbe, return_tuple=False) cfg.worker_q.put(r) @staticmethod def _vg_activate_deactivate(uuid, vg_name, activate, control_flags, options): # Make sure we have a dbus object representing it Vg.validate_dbus_object(uuid, vg_name) rc, out, err = cmdhandler.activate_deactivate( 'vgchange', vg_name, activate, control_flags, options) Vg.handle_execute(rc, out, err) return '/' @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='tia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def Activate(self, control_flags, tmo, activate_options, cb, cbe): r = RequestEntry(tmo, Vg._vg_activate_deactivate, (self.state.Uuid, self.state.lvm_id, True, control_flags, activate_options), cb, cbe, return_tuple=False) 
cfg.worker_q.put(r) @dbus.service.method( dbus_interface=VG_INTERFACE, in_signature='tia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def Deactivate(self, control_flags, tmo, activate_options, cb, cbe): r = RequestEntry(tmo, Vg._vg_activate_deactivate, (self.state.Uuid, self.state.lvm_id, False, control_flags, activate_options), cb, cbe, return_tuple=False) cfg.worker_q.put(r) @property def Tags(self): return utils.parse_tags(self.state.tags) @property def Pvs(self): return dbus.Array(self.state.Pvs, signature='o') @property def Lvs(self): return dbus.Array(self.state.Lvs, signature='o') @property def lvm_id(self): return self.state.lvm_id @property def Writeable(self): return self._attribute(0, 'w') @property def Readable(self): return self._attribute(0, 'r') @property def Resizeable(self): return self._attribute(1, 'z') @property def Exportable(self): return self._attribute(2, 'x') @property def Partial(self): return self._attribute(3, 'p') @property def AllocContiguous(self): return self._attribute(4, 'c') @property def AllocCling(self): return self._attribute(4, 'l') @property def AllocNormal(self): return self._attribute(4, 'n') @property def AllocAnywhere(self): return self._attribute(4, 'a') @property def Clustered(self): return self._attribute(5, 'c') LVM2.2.02.176/daemons/lvmdbusd/udevwatch.py0000644000000000000120000000372313176752421017143 0ustar rootwheel# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . import pyudev import threading from . import cfg from .request import RequestEntry from . import utils observer = None observer_lock = threading.RLock() _udev_lock = threading.RLock() _udev_count = 0 def udev_add(): global _udev_count with _udev_lock: if _udev_count == 0: _udev_count += 1 # Place this on the queue so any other operations will sequence # behind it r = RequestEntry( -1, _udev_event, (), None, None, False) cfg.worker_q.put(r) def udev_complete(): global _udev_count with _udev_lock: if _udev_count > 0: _udev_count -= 1 def _udev_event(): utils.log_debug("Processing udev event") udev_complete() cfg.load() # noinspection PyUnusedLocal def filter_event(action, device): # Filter for events of interest and add a request object to be processed # when appropriate. refresh = False if '.ID_FS_TYPE_NEW' in device: fs_type_new = device['.ID_FS_TYPE_NEW'] if 'LVM' in fs_type_new: refresh = True elif fs_type_new == '': # Check to see if the device was one we knew about if 'DEVNAME' in device: found = cfg.om.get_object_by_lvm_id(device['DEVNAME']) if found: refresh = True if 'DM_LV_NAME' in device: refresh = True if refresh: udev_add() def add(): with observer_lock: global observer context = pyudev.Context() monitor = pyudev.Monitor.from_netlink(context) monitor.filter_by('block') observer = pyudev.MonitorObserver(monitor, filter_event) observer.start() def remove(): with observer_lock: global observer if observer: observer.stop() observer = None return True return False LVM2.2.02.176/daemons/lvmdbusd/fetch.py0000644000000000000120000001073113176752421016237 0ustar rootwheel# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from .pv import load_pvs from .vg import load_vgs from .lv import load_lvs from . import cfg from .utils import MThreadRunner, log_debug, log_error import threading import queue import traceback def _main_thread_load(refresh=True, emit_signal=True): num_total_changes = 0 num_total_changes += load_pvs( refresh=refresh, emit_signal=emit_signal, cache_refresh=False)[1] num_total_changes += load_vgs( refresh=refresh, emit_signal=emit_signal, cache_refresh=False)[1] num_total_changes += load_lvs( refresh=refresh, emit_signal=emit_signal, cache_refresh=False)[1] return num_total_changes def load(refresh=True, emit_signal=True, cache_refresh=True, log=True, need_main_thread=True): # Go through and load all the PVs, VGs and LVs if cache_refresh: cfg.db.refresh(log) if need_main_thread: rc = MThreadRunner(_main_thread_load, refresh, emit_signal).done() else: rc = _main_thread_load(refresh, emit_signal) return rc # Even though lvm can handle multiple changes concurrently it really doesn't # make sense to make a 1-1 fetch of data for each change of lvm because when # we fetch the data once all previous changes are reflected. class StateUpdate(object): class UpdateRequest(object): def __init__(self, refresh, emit_signal, cache_refresh, log, need_main_thread): self.is_done = False self.refresh = refresh self.emit_signal = emit_signal self.cache_refresh = cache_refresh self.log = log self.need_main_thread = need_main_thread self.result = None self.cond = threading.Condition(threading.Lock()) def done(self): with self.cond: if not self.is_done: self.cond.wait() return self.result def set_result(self, result): with self.cond: self.result = result self.is_done = True self.cond.notify_all() @staticmethod def update_thread(obj): queued_requests = [] while cfg.run.value != 0: # noinspection PyBroadException try: refresh = True emit_signal = True cache_refresh = True log = True need_main_thread = True with obj.lock: wait = not obj.deferred obj.deferred = False if len(queued_requests) == 0 and wait: queued_requests.append(obj.queue.get(True, 2)) # Ok we have one or the deferred queue has some, # check if any others try: while True: queued_requests.append(obj.queue.get(False)) except queue.Empty: pass if len(queued_requests) > 1: log_debug("Processing %d updates!" % len(queued_requests), 'bg_black', 'fg_light_green') # We have what we can, run the update with the needed options for i in queued_requests: if not i.refresh: refresh = False if not i.emit_signal: emit_signal = False if not i.cache_refresh: cache_refresh = False if not i.log: log = False if not i.need_main_thread: need_main_thread = False num_changes = load(refresh, emit_signal, cache_refresh, log, need_main_thread) # Update is done, let everyone know! 
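# --- Editor's sketch (not part of the LVM2 sources) --------------------------
# StateUpdate.update_thread() above coalesces refresh requests: it blocks for
# the first request, drains whatever else is already queued, performs a single
# load(), and hands that one result to every waiter.  A stripped-down,
# standalone version of that drain-and-answer pattern (the real thread also
# merges per-request flags and handles the deferred/udev case):

import queue

def drain(q, first_timeout=2.0):
	"""Return a batch: one blocking get plus everything already queued."""
	batch = [q.get(True, first_timeout)]
	try:
		while True:
			batch.append(q.get(False))
	except queue.Empty:
		pass
	return batch

work_q = queue.Queue()
for n in range(3):
	work_q.put(n)

# One expensive refresh answers all three queued requests.
assert drain(work_q) == [0, 1, 2]
# -----------------------------------------------------------------------------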
for i in queued_requests: i.set_result(num_changes) # Only clear out the requests after we have given them a result # otherwise we can orphan the waiting threads and they never # wake up if we get an exception queued_requests = [] except queue.Empty: pass except Exception: st = traceback.format_exc() log_error("update_thread exception: \n%s" % st) cfg.blackbox.dump() def __init__(self): self.lock = threading.RLock() self.queue = queue.Queue() self.deferred = False # Do initial load load(refresh=False, emit_signal=False, need_main_thread=False) self.thread = threading.Thread(target=StateUpdate.update_thread, args=(self,), name="StateUpdate.update_thread") def load(self, refresh=True, emit_signal=True, cache_refresh=True, log=True, need_main_thread=True): # Place this request on the queue and wait for it to be completed req = StateUpdate.UpdateRequest(refresh, emit_signal, cache_refresh, log, need_main_thread) self.queue.put(req) return req.done() def event(self): with self.lock: self.deferred = True LVM2.2.02.176/daemons/lvmdbusd/objectmanager.py0000644000000000000120000002677313176752421017764 0ustar rootwheel# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . import sys import threading import traceback import dbus import os import copy from . import cfg from .utils import log_debug, pv_obj_path_generate, log_error from .automatedproperties import AutomatedProperties # noinspection PyPep8Naming class ObjectManager(AutomatedProperties): """ Implements the org.freedesktop.DBus.ObjectManager interface """ def __init__(self, object_path, interface): super(ObjectManager, self).__init__(object_path, interface) self.set_interface(interface) self._ap_o_path = object_path self._objects = {} self._id_to_object_path = {} self.rlock = threading.RLock() @staticmethod def _get_managed_objects(obj): with obj.rlock: rc = {} try: for k, v in list(obj._objects.items()): path, props = v[0].emit_data() rc[path] = props except Exception: traceback.print_exc(file=sys.stdout) sys.exit(1) return rc @dbus.service.method( dbus_interface="org.freedesktop.DBus.ObjectManager", out_signature='a{oa{sa{sv}}}', async_callbacks=('cb', 'cbe')) def GetManagedObjects(self, cb, cbe): r = cfg.create_request_entry(-1, ObjectManager._get_managed_objects, (self, ), cb, cbe, False) cfg.worker_q.put(r) def locked(self): """ If some external code need to run across a number of different calls into ObjectManager while blocking others they can use this method to lock others out. :return: """ return ObjectManagerLock(self.rlock) @dbus.service.signal( dbus_interface="org.freedesktop.DBus.ObjectManager", signature='oa{sa{sv}}') def InterfacesAdded(self, object_path, int_name_prop_dict): log_debug( ('SIGNAL: InterfacesAdded(%s, %s)' % (str(object_path), str(int_name_prop_dict)))) @dbus.service.signal( dbus_interface="org.freedesktop.DBus.ObjectManager", signature='oas') def InterfacesRemoved(self, object_path, interface_list): log_debug(('SIGNAL: InterfacesRemoved(%s, %s)' % (str(object_path), str(interface_list)))) def validate_lookups(self): with self.rlock: tmp_lookups = copy.deepcopy(self._id_to_object_path) # iterate over all we know, removing from the copy. 
If all is well # we will have zero items left over for path, md in self._objects.items(): obj, lvm_id, uuid = md if lvm_id: assert path == tmp_lookups[lvm_id] del tmp_lookups[lvm_id] if uuid: assert path == tmp_lookups[uuid] del tmp_lookups[uuid] rc = len(tmp_lookups) if rc: # Error condition log_error("_id_to_object_path has extraneous lookups!") for key, path in tmp_lookups.items(): log_error("Key= %s, path= %s" % (key, path)) return rc def _lookup_add(self, obj, path, lvm_id, uuid): """ Store information about what we added to the caches so that we can remove it cleanly :param obj: The dbus object we are storing :param lvm_id: The lvm id for the asset :param uuid: The uuid for the asset :return: """ # Note: Only called internally, lock implied # We could have a temp entry from the forward creation of a path self._lookup_remove(path) self._objects[path] = (obj, lvm_id, uuid) # Make sure we have one or the other assert lvm_id or uuid if lvm_id: self._id_to_object_path[lvm_id] = path if uuid: self._id_to_object_path[uuid] = path def _lookup_remove(self, obj_path): # Note: Only called internally, lock implied if obj_path in self._objects: (obj, lvm_id, uuid) = self._objects[obj_path] if lvm_id in self._id_to_object_path: del self._id_to_object_path[lvm_id] if uuid in self._id_to_object_path: del self._id_to_object_path[uuid] del self._objects[obj_path] def lookup_update(self, dbus_obj, new_uuid, new_lvm_id): with self.rlock: obj_path = dbus_obj.dbus_object_path() self._lookup_remove(obj_path) self._lookup_add( dbus_obj, obj_path, new_lvm_id, new_uuid) def object_paths_by_type(self, o_type): with self.rlock: rc = {} for k, v in list(self._objects.items()): if isinstance(v[0], o_type): rc[k] = True return rc def register_object(self, dbus_object, emit_signal=False): """ Given a dbus object add it to the collection :param dbus_object: Dbus object to register :param emit_signal: If true emit a signal for interfaces added """ with self.rlock: path, props = dbus_object.emit_data() # print('Registering object path %s for %s' % # (path, dbus_object.lvm_id)) # We want fast access to the object by a number of different ways # so we use multiple hashs with different keys self._lookup_add(dbus_object, path, dbus_object.lvm_id, dbus_object.Uuid) if emit_signal: self.InterfacesAdded(path, props) def remove_object(self, dbus_object, emit_signal=False): """ Given a dbus object, remove it from the collection and remove it from the dbus framework as well :param dbus_object: Dbus object to remove :param emit_signal: If true emit the interfaces removed signal """ with self.rlock: # Store off the object path and the interface first path = dbus_object.dbus_object_path() interfaces = dbus_object.interface() # print 'UN-Registering object path %s for %s' % \ # (path, dbus_object.lvm_id) self._lookup_remove(path) # Remove from dbus library dbus_object.remove_from_connection(cfg.bus, path) # Optionally emit a signal if emit_signal: self.InterfacesRemoved(path, interfaces) def get_object_by_path(self, path): """ Given a dbus path return the object registered for it :param path: The dbus path :return: The object """ with self.rlock: if path in self._objects: return self._objects[path][0] return None def get_object_by_uuid_lvm_id(self, uuid, lvm_id): with self.rlock: return self.get_object_by_path( self.get_object_path_by_uuid_lvm_id(uuid, lvm_id)) def get_object_by_lvm_id(self, lvm_id): """ Given an lvm identifier, return the object registered for it :param lvm_id: The lvm identifier """ with self.rlock: lookup_rc = 
self._id_lookup(lvm_id) if lookup_rc: return self.get_object_by_path(lookup_rc) return None def get_object_path_by_lvm_id(self, lvm_id): """ Given an lvm identifier, return the object path for it :param lvm_id: The lvm identifier :return: Object path or '/' if not found """ with self.rlock: lookup_rc = self._id_lookup(lvm_id) if lookup_rc: return lookup_rc return '/' def _uuid_verify(self, path, uuid, lvm_id): """ Ensure uuid is present for a successful lvm_id lookup NOTE: Internal call, assumes under object manager lock :param path: Path to object we looked up :param uuid: lvm uuid to verify :param lvm_id: lvm_id used to find object :return: None """ # This gets called when we found an object based on lvm_id, ensure # uuid is correct too, as they can change. There is no durable # non-changeable name in lvm if lvm_id != uuid: if uuid and uuid not in self._id_to_object_path: obj = self.get_object_by_path(path) self._lookup_add(obj, path, lvm_id, uuid) def _lvm_id_verify(self, path, uuid, lvm_id): """ Ensure lvm_id is present for a successful uuid lookup NOTE: Internal call, assumes under object manager lock :param path: Path to object we looked up :param uuid: uuid used to find object :param lvm_id: lvm_id to verify :return: None """ # This gets called when we found an object based on uuid, ensure # lvm_id is correct too, as they can change. There is no durable # non-changeable name in lvm if lvm_id != uuid: if lvm_id and lvm_id not in self._id_to_object_path: obj = self.get_object_by_path(path) self._lookup_add(obj, path, lvm_id, uuid) def _id_lookup(self, the_id): path = None if the_id: # The _id_to_object_path contains hash keys for everything, so # uuid and lvm_id if the_id in self._id_to_object_path: path = self._id_to_object_path[the_id] else: if "/" in the_id: if the_id.startswith('/'): # We could have a pv device path lookup that failed, # lets try canonical form and try again. canonical = os.path.realpath(the_id) if canonical in self._id_to_object_path: path = self._id_to_object_path[canonical] else: vg, lv = the_id.split("/", 1) int_lvm_id = vg + "/" + ("[%s]" % lv) if int_lvm_id in self._id_to_object_path: path = self._id_to_object_path[int_lvm_id] return path def get_object_path_by_uuid_lvm_id(self, uuid, lvm_id, path_create=None): """ For a given lvm asset return the dbus object path registered for it. This method first looks up by uuid and then by lvm_id. You can search by just one by setting uuid == lvm_id (uuid or lvm_id). If the object is not found and path_create is a not None, the path_create function will be called to create a new object path and register it with the object manager for the specified uuid & lvm_id. Note: If path create is not None, uuid and lvm_id cannot be equal :param uuid: The uuid for the lvm object we are searching for :param lvm_id: The lvm name (eg. pv device path, vg name, lv full name) :param path_create: If not None, create the path using this function if we fail to find the object by uuid or lvm_id. :returns None if lvm asset not found and path_create == None otherwise a valid dbus object path """ with self.rlock: assert lvm_id assert uuid if path_create: assert uuid != lvm_id # Check for Manager.LookUpByLvmId query, we cannot # check/verify/update the uuid and lvm_id lookups so don't! if uuid == lvm_id: path = self._id_lookup(lvm_id) else: # We have a uuid and a lvm_id we can do sanity checks to ensure # that they are consistent # If a PV is missing it's device path is '[unknown]' or some # other text derivation of unknown. 
When we find that a PV is # missing we will clear out the lvm_id as it's likely not unique # and thus not useful and potentially harmful for lookups. if path_create == pv_obj_path_generate and \ cfg.db.pv_missing(uuid): lvm_id = None # Lets check for the uuid first path = self._id_lookup(uuid) if path: # Verify the lvm_id is sane self._lvm_id_verify(path, uuid, lvm_id) else: # Unable to find by UUID, lets lookup by lvm_id path = self._id_lookup(lvm_id) if path: # Verify the uuid is sane self._uuid_verify(path, uuid, lvm_id) else: # We have exhausted all lookups, let's create if we can if path_create: path = path_create() self._lookup_add(None, path, lvm_id, uuid) # print('get_object_path_by_lvm_id(%s, %s, %s, %s: return %s' % # (uuid, lvm_id, str(path_create), str(gen_new), path)) return path class ObjectManagerLock(object): """ The sole purpose of this class is to allow other code the ability to lock the object manager using a `with` statement, eg. with cfg.om.locked(): # Do stuff with object manager This will ensure that the lock is always released (assuming this is done correctly) """ def __init__(self, recursive_lock): self._lock = recursive_lock def __enter__(self): # Acquire lock self._lock.acquire() # noinspection PyUnusedLocal def __exit__(self, e_type, e_value, e_traceback): # Release lock self._lock.release() self._lock = None LVM2.2.02.176/daemons/lvmdbusd/request.py0000644000000000000120000001064113176752421016636 0ustar rootwheel# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . import threading # noinspection PyUnresolvedReferences from gi.repository import GLib from .job import Job from . import cfg import traceback from .utils import log_error, mt_async_call class RequestEntry(object): def __init__(self, tmo, method, arguments, cb, cb_error, return_tuple=True, job_state=None): self.method = method self.arguments = arguments self.cb = cb self.cb_error = cb_error self.timer_id = -1 self.lock = threading.RLock() self.done = False self._result = None self._job = None self._rc = 0 self._rc_error = None self._return_tuple = return_tuple self._job_state = job_state if tmo < 0: # Client is willing to block forever pass elif tmo == 0: self._return_job() else: # Note: using 990 instead of 1000 for second to ms conversion to # account for overhead. Goal is to return just before the # timeout amount has expired. Better to be a little early than # late. self.timer_id = GLib.timeout_add( tmo * 990, RequestEntry._request_timeout, self) @staticmethod def _request_timeout(r): """ Method which gets called when the timer runs out! :param r: RequestEntry which timed out :return: Result of timer_expired """ return r.timer_expired() def _return_job(self): # Return job is only called when we create a request object or when # we pop a timer. In both cases we are running in the correct context # and do not need to schedule the call back in main context. 
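# --- Editor's sketch (not part of the LVM2 sources) --------------------------
# RequestEntry above maps the caller's 'tmo' onto three behaviours: tmo < 0
# blocks until the command finishes, tmo == 0 returns a Job object path right
# away, and tmo > 0 arms a GLib timer at tmo * 990 ms so the Job is handed back
# just before the caller's own timeout would expire.  A tiny standalone
# illustration of such an early-firing one-shot timer, outside the daemon:

from gi.repository import GLib

def hand_back_job():
	print("timeout reached first, a Job object path would be returned")
	loop.quit()
	return False  # one-shot timer, do not repeat

tmo_seconds = 1
loop = GLib.MainLoop()
GLib.timeout_add(tmo_seconds * 990, hand_back_job)
loop.run()
# -----------------------------------------------------------------------------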
self._job = Job(self, self._job_state) cfg.om.register_object(self._job, True) if self._return_tuple: self.cb(('/', self._job.dbus_object_path())) else: self.cb(self._job.dbus_object_path()) def run_cmd(self): try: result = self.method(*self.arguments) self.register_result(result) except Exception as e: # Use the request entry to return the result as the client may # have gotten a job by the time we hit an error # Lets get the stacktrace and set that to the error message st = traceback.format_exc() cfg.blackbox.dump() log_error("Exception returned to client: \n%s" % st) self.register_error(-1, str(e), e) def is_done(self): with self.lock: rc = self.done return rc def get_errors(self): with self.lock: return (self._rc, self._rc_error) def result(self): with self.lock: if self.done: return self._result return '/' def _reg_ending(self, result, error_rc=0, error_msg=None, error_exception=None): with self.lock: self.done = True if self.timer_id != -1: # Try to prevent the timer from firing GLib.source_remove(self.timer_id) self._result = result self._rc = error_rc self._rc_error = error_msg if not self._job: # We finished and there is no job, so return result or error # now! # Note: If we don't have a valid cb or cbe, this indicates a # request that doesn't need a response as we already returned # one before the request was processed. if error_rc == 0: if self.cb: if self._return_tuple: mt_async_call(self.cb, (result, '/')) else: mt_async_call(self.cb, result) else: if self.cb_error: if not error_exception: if not error_msg: error_exception = Exception( "An error occurred, but no reason was " "given, see service logs!") else: error_exception = Exception(error_msg) mt_async_call(self.cb_error, error_exception) else: # We have a job and it's complete, indicate that it's done. self._job.Complete = True self._job = None def register_error(self, error_rc, error_message, error_exception): self._reg_ending('/', error_rc, error_message, error_exception) def register_result(self, result): self._reg_ending(result) def timer_expired(self): with self.lock: # Set the timer back to -1 as we will get a warning if we try # to remove a timer that doesn't exist self.timer_id = -1 if not self.done: # Create dbus job object and return path to caller self._return_job() else: # The job is done, we have nothing to do pass return False LVM2.2.02.176/daemons/lvmdbusd/loader.py0000644000000000000120000000515513176752421016420 0ustar rootwheel# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from . 
import cfg def _compare_construction(o_state, new_state): # We need to check to see if the objects would get constructed # the same existing_ctor, existing_path = o_state.creation_signature() new_ctor, new_path = new_state.creation_signature() # print("%s == %s and %s == %s" % (str(existing_ctor), str(new_ctor), # str(existing_path), str(new_path))) return ((existing_ctor == new_ctor) and (existing_path == new_path)) def common(retrieve, o_type, search_keys, object_path, refresh, emit_signal, cache_refresh): num_changes = 0 existing_paths = [] rc = [] if search_keys: assert isinstance(search_keys, list) if cache_refresh: cfg.db.refresh() objects = retrieve(search_keys, cache_refresh=False) # If we are doing a refresh we need to know what we have in memory, what's # in lvm and add those that are new and remove those that are gone! if refresh: existing_paths = cfg.om.object_paths_by_type(o_type) for o in objects: # Assume we need to add this one to dbus, unless we are refreshing # and it's already present return_object = True if refresh: # We are refreshing all the PVs from LVM, if this one exists # we need to refresh our state. dbus_object = cfg.om.get_object_by_uuid_lvm_id(*o.identifiers()) if dbus_object: del existing_paths[dbus_object.dbus_object_path()] # If the old object state and new object state wouldn't be # created with the same path and same object constructor we # need to remove the old object and construct the new one # instead! if not _compare_construction(dbus_object.state, o): # Remove existing and construct new one cfg.om.remove_object(dbus_object, emit_signal) dbus_object = o.create_dbus_object(None) cfg.om.register_object(dbus_object, emit_signal) num_changes += 1 else: num_changes += dbus_object.refresh(object_state=o) return_object = False if return_object: dbus_object = o.create_dbus_object(object_path) cfg.om.register_object(dbus_object, emit_signal) rc.append(dbus_object) object_path = None if refresh: for k in list(existing_paths.keys()): cfg.om.remove_object(cfg.om.get_object_by_path(k), True) num_changes += 1 num_changes += len(rc) return rc, num_changes LVM2.2.02.176/daemons/lvmdbusd/pv.py0000644000000000000120000001756513176752421015607 0ustar rootwheel# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from .automatedproperties import AutomatedProperties from . import utils from . import cfg import dbus from .cfg import PV_INTERFACE from . 
import cmdhandler from .utils import vg_obj_path_generate, n, pv_obj_path_generate, \ lv_object_path_method from .loader import common from .request import RequestEntry from .state import State from .utils import round_size # noinspection PyUnusedLocal def pvs_state_retrieve(selection, cache_refresh=True): rc = [] if cache_refresh: cfg.db.refresh() for p in cfg.db.fetch_pvs(selection): rc.append( PvState( p["pv_name"], p["pv_uuid"], p["pv_name"], p["pv_fmt"], n(p["pv_size"]), n(p["pv_free"]), n(p["pv_used"]), n(p["dev_size"]), n(p["pv_mda_size"]), n(p["pv_mda_free"]), int(p["pv_ba_start"]), n(p["pv_ba_size"]), n(p["pe_start"]), int(p["pv_pe_count"]), int(p["pv_pe_alloc_count"]), p["pv_attr"], p["pv_tags"], p["vg_name"], p["vg_uuid"])) return rc def load_pvs(device=None, object_path=None, refresh=False, emit_signal=False, cache_refresh=True): return common( pvs_state_retrieve, (Pv,), device, object_path, refresh, emit_signal, cache_refresh) # noinspection PyUnresolvedReferences class PvState(State): @property def lvm_id(self): return self.lvm_path def _lv_object_list(self, vg_name): rc = [] if vg_name: for lv in sorted(cfg.db.pv_contained_lv(self.lvm_id)): lv_uuid, lv_name, meta, segs = lv full_name = "%s/%s" % (vg_name, lv_name) path_create = lv_object_path_method(lv_name, meta) lv_path = cfg.om.get_object_path_by_uuid_lvm_id( lv_uuid, full_name, path_create) rc.append((lv_path, segs)) return rc # noinspection PyUnusedLocal,PyPep8Naming def __init__(self, lvm_path, Uuid, Name, Fmt, SizeBytes, FreeBytes, UsedBytes, DevSizeBytes, MdaSizeBytes, MdaFreeBytes, BaStart, BaSizeBytes, PeStart, PeCount, PeAllocCount, attr, Tags, vg_name, vg_uuid): utils.init_class_from_arguments(self) self.pe_segments = cfg.db.pv_pe_segments(Uuid) self.lv = self._lv_object_list(vg_name) # It's possible to have a vg_name and no uuid with the main example # being when the vg_name == '[unknown]' if vg_uuid and vg_name: self.vg_path = cfg.om.get_object_path_by_uuid_lvm_id( vg_uuid, vg_name, vg_obj_path_generate) else: self.vg_path = '/' def identifiers(self): return (self.Uuid, self.lvm_path) def create_dbus_object(self, path): if not path: path = cfg.om.get_object_path_by_uuid_lvm_id(self.Uuid, self.Name, pv_obj_path_generate) return Pv(path, self) # noinspection PyMethodMayBeStatic def creation_signature(self): return (Pv, pv_obj_path_generate) # noinspection PyPep8Naming @utils.dbus_property(PV_INTERFACE, 'Uuid', 's') # PV UUID/pv_uuid @utils.dbus_property(PV_INTERFACE, 'Name', 's') # PV/pv_name @utils.dbus_property(PV_INTERFACE, 'Fmt', 's') # Fmt/pv_fmt @utils.dbus_property(PV_INTERFACE, 'SizeBytes', 't') # PSize/pv_size @utils.dbus_property(PV_INTERFACE, 'FreeBytes', 't') # PFree/pv_free @utils.dbus_property(PV_INTERFACE, 'UsedBytes', 't') # Used/pv_used @utils.dbus_property(PV_INTERFACE, 'DevSizeBytes', 't') # DevSize/dev_size @utils.dbus_property(PV_INTERFACE, 'MdaSizeBytes', 't') # PMdaSize/pv_mda_size @utils.dbus_property(PV_INTERFACE, 'MdaFreeBytes', 't') # PMdaFree/pv_mda_free @utils.dbus_property(PV_INTERFACE, 'BaStart', 't') # BA start/pv_ba_start @utils.dbus_property(PV_INTERFACE, 'BaSizeBytes', 't') # BA size/pv_ba_size @utils.dbus_property(PV_INTERFACE, 'PeStart', 't') # 1st PE/pe_start @utils.dbus_property(PV_INTERFACE, 'PeCount', 't') # PE/pv_pe_count @utils.dbus_property(PV_INTERFACE, 'PeAllocCount', 't') # PE Allocation count class Pv(AutomatedProperties): # For properties that we need custom handlers we need these, otherwise # we won't get our introspection data _Tags_meta = ("as", PV_INTERFACE) 
_PeSegments_meta = ("a(tt)", PV_INTERFACE) _Exportable_meta = ("b", PV_INTERFACE) _Allocatable_meta = ("b", PV_INTERFACE) _Missing_meta = ("b", PV_INTERFACE) _Lv_meta = ("a(oa(tts))", PV_INTERFACE) _Vg_meta = ("o", PV_INTERFACE) # noinspection PyUnusedLocal,PyPep8Naming def __init__(self, object_path, state_obj): super(Pv, self).__init__(object_path, pvs_state_retrieve) self.set_interface(PV_INTERFACE) self.state = state_obj @staticmethod def _remove(pv_uuid, pv_name, remove_options): # Remove the PV, if successful then remove from the model # Make sure we have a dbus object representing it Pv.validate_dbus_object(pv_uuid, pv_name) rc, out, err = cmdhandler.pv_remove(pv_name, remove_options) Pv.handle_execute(rc, out, err) return '/' @staticmethod def handle_execute(rc, out, err): if rc == 0: cfg.load() else: # Need to work on error handling, need consistent raise dbus.exceptions.DBusException( PV_INTERFACE, 'Exit code %s, stderr = %s' % (str(rc), err)) @staticmethod def validate_dbus_object(pv_uuid, pv_name): dbo = cfg.om.get_object_by_uuid_lvm_id(pv_uuid, pv_name) if not dbo: raise dbus.exceptions.DBusException( PV_INTERFACE, 'PV with uuid %s and name %s not present!' % (pv_uuid, pv_name)) return dbo @dbus.service.method( dbus_interface=PV_INTERFACE, in_signature='ia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def Remove(self, tmo, remove_options, cb, cbe): r = RequestEntry( tmo, Pv._remove, (self.Uuid, self.lvm_id, remove_options), cb, cbe, return_tuple=False) cfg.worker_q.put(r) @staticmethod def _resize(pv_uuid, pv_name, new_size_bytes, resize_options): # Make sure we have a dbus object representing it Pv.validate_dbus_object(pv_uuid, pv_name) rc, out, err = cmdhandler.pv_resize(pv_name, new_size_bytes, resize_options) Pv.handle_execute(rc, out, err) return '/' @dbus.service.method( dbus_interface=PV_INTERFACE, in_signature='tia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def ReSize(self, new_size_bytes, tmo, resize_options, cb, cbe): r = RequestEntry( tmo, Pv._resize, (self.Uuid, self.lvm_id, round_size(new_size_bytes), resize_options), cb, cbe, False) cfg.worker_q.put(r) @staticmethod def _allocation_enabled(pv_uuid, pv_name, yes_no, allocation_options): # Make sure we have a dbus object representing it Pv.validate_dbus_object(pv_uuid, pv_name) rc, out, err = cmdhandler.pv_allocatable( pv_name, yes_no, allocation_options) Pv.handle_execute(rc, out, err) return '/' @dbus.service.method( dbus_interface=PV_INTERFACE, in_signature='bia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def AllocationEnabled(self, yes, tmo, allocation_options, cb, cbe): r = RequestEntry( tmo, Pv._allocation_enabled, (self.Uuid, self.lvm_id, yes, allocation_options), cb, cbe, False) cfg.worker_q.put(r) @property def Tags(self): return utils.parse_tags(self.state.Tags) @property def PeSegments(self): if len(self.state.pe_segments): return dbus.Array(self.state.pe_segments, signature='(tt)') return dbus.Array([], '(tt)') @property def Exportable(self): return dbus.Boolean(self.state.attr[1] == 'x') @property def Allocatable(self): return dbus.Boolean(self.state.attr[0] == 'a') @property def Missing(self): return dbus.Boolean(self.state.attr[2] == 'm') def object_path(self): return self._object_path @property def lvm_id(self): return self.state.lvm_id @property def identifiers(self): return self.state.identifiers() @property def Lv(self): return dbus.Array(self.state.lv, signature="(oa(tts))") @property def Vg(self): return dbus.ObjectPath(self.state.vg_path) 
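# Each @utils.dbus_property(...) line above generates a read-only property on
# the Pv class that fetches the raw value from the underlying PvState object
# and wraps it in the matching dbus type.  A stripped-down sketch of that
# class-decorator pattern (hypothetical names, plain str/int converters in
# place of dbus types):

def typed_state_property(name, convert):
	"""Class decorator: add a read-only property `name` returning
	convert(state.<name>)."""
	def decorator(cls):
		def getter(self):
			return convert(getattr(self.state, name))
		setattr(cls, name, property(getter))
		return cls
	return decorator


@typed_state_property('SizeBytes', int)
@typed_state_property('Name', str)
class ExamplePv(object):
	def __init__(self, state):
		self.state = state


class _ExampleState(object):
	Name = 'sda'
	SizeBytes = '1048576'   # lvm reporting hands back numbers as strings


assert ExamplePv(_ExampleState()).SizeBytes == 1048576
# Keeping the conversion in one decorator avoids repeating boilerplate getters
# for every one of the many reported fields.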
LVM2.2.02.176/daemons/lvmdbusd/__init__.py0000644000000000000120000000063413176752421016706 0ustar rootwheel# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from .main import main LVM2.2.02.176/daemons/lvmdbusd/manager.py0000644000000000000120000001716013176752421016563 0ustar rootwheel# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from .automatedproperties import AutomatedProperties from . import utils from .cfg import MANAGER_INTERFACE import dbus from . import cfg from . import cmdhandler from .request import RequestEntry from . import udevwatch # noinspection PyPep8Naming class Manager(AutomatedProperties): _Version_meta = ("s", MANAGER_INTERFACE) def __init__(self, object_path): super(Manager, self).__init__(object_path) self.set_interface(MANAGER_INTERFACE) @property def Version(self): return dbus.String('1.0.0') @staticmethod def handle_execute(rc, out, err): if rc == 0: cfg.load() else: # Need to work on error handling, need consistent raise dbus.exceptions.DBusException( MANAGER_INTERFACE, 'Exit code %s, stderr = %s' % (str(rc), err)) @staticmethod def _pv_create(device, create_options): # Check to see if we are already trying to create a PV for an existing # PV pv = cfg.om.get_object_path_by_uuid_lvm_id(device, device) if pv: raise dbus.exceptions.DBusException( MANAGER_INTERFACE, "PV %s Already exists!" % device) rc, out, err = cmdhandler.pv_create(create_options, [device]) Manager.handle_execute(rc, out, err) return cfg.om.get_object_path_by_lvm_id(device) @dbus.service.method( dbus_interface=MANAGER_INTERFACE, in_signature='sia{sv}', out_signature='(oo)', async_callbacks=('cb', 'cbe')) def PvCreate(self, device, tmo, create_options, cb, cbe): utils.validate_device_path(MANAGER_INTERFACE, device) r = RequestEntry( tmo, Manager._pv_create, (device, create_options), cb, cbe) cfg.worker_q.put(r) @staticmethod def _create_vg(name, pv_object_paths, create_options): pv_devices = [] for p in pv_object_paths: pv = cfg.om.get_object_by_path(p) if pv: pv_devices.append(pv.Name) else: raise dbus.exceptions.DBusException( MANAGER_INTERFACE, 'object path = %s not found' % p) rc, out, err = cmdhandler.vg_create(create_options, pv_devices, name) Manager.handle_execute(rc, out, err) return cfg.om.get_object_path_by_lvm_id(name) @dbus.service.method( dbus_interface=MANAGER_INTERFACE, in_signature='saoia{sv}', out_signature='(oo)', async_callbacks=('cb', 'cbe')) def VgCreate(self, name, pv_object_paths, tmo, create_options, cb, cbe): utils.validate_vg_name(MANAGER_INTERFACE, name) r = RequestEntry( tmo, Manager._create_vg, (name, pv_object_paths, create_options,), cb, cbe) cfg.worker_q.put(r) @staticmethod def _refresh(): utils.log_debug('Manager.Refresh - entry') # This is a diagnostic and should not be run in normal operation, so # lets remove the log entries for refresh as it's implied. 
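# PvCreate/VgCreate above never run lvm inside the dbus handler itself: they
# validate their arguments, wrap the real work plus the dbus callbacks in a
# RequestEntry and push it onto a worker queue, returning immediately.  A
# bare-bones sketch of that hand-off (hypothetical names, a plain queue and
# thread, no dbus involved):

import queue
import threading

work_q = queue.Queue()


def worker_loop():
	# Single consumer: pop work items and deliver the result or the error
	# through the callbacks supplied by the caller.
	while True:
		func, args, cb, cbe = work_q.get()
		try:
			cb(func(*args))
		except Exception as e:
			cbe(e)
		finally:
			work_q.task_done()


threading.Thread(target=worker_loop, daemon=True).start()


def handle_request(func, args, cb, cbe):
	# Called from the request-handling thread: queue the work, return at once.
	work_q.put((func, args, cb, cbe))


# Usage sketch:
#   handle_request(lambda d: "created %s" % d, ('/dev/sda',), print, print)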
# Run an internal diagnostic on the object manager look up tables lc = cfg.om.validate_lookups() rc = cfg.load(log=False) if rc != 0: utils.log_debug('Manager.Refresh - exit %d' % (rc), 'bg_black', 'fg_light_red') else: utils.log_debug('Manager.Refresh - exit %d' % (rc)) return rc + lc @dbus.service.method( dbus_interface=MANAGER_INTERFACE, out_signature='t', async_callbacks=('cb', 'cbe')) def Refresh(self, cb, cbe): """ Take all the objects we know about and go out and grab the latest more of a test method at the moment to make sure we are handling object paths correctly. :param cb Callback for result :param cbe Callback for errors Returns the number of changes, object add/remove/properties changed """ r = RequestEntry(-1, Manager._refresh, (), cb, cbe, False) cfg.worker_q.put(r) @dbus.service.method( dbus_interface=MANAGER_INTERFACE) def FlightRecorderDump(self): """ Dump the flight recorder to syslog """ cfg.blackbox.dump() @staticmethod def _lookup_by_lvm_id(key): p = cfg.om.get_object_path_by_uuid_lvm_id(key, key) if not p: p = '/' utils.log_debug('LookUpByLvmId: key = %s, result = %s' % (key, p)) return p @dbus.service.method( dbus_interface=MANAGER_INTERFACE, in_signature='s', out_signature='o', async_callbacks=('cb', 'cbe')) def LookUpByLvmId(self, key, cb, cbe): """ Given a lvm id in one of the forms: /dev/sda some_vg some_vg/some_lv Oe1rPX-Pf0W-15E5-n41N-ZmtF-jXS0-Osg8fn return the object path in O(1) time. :param key: The lookup value :return: Return the object path. If object not found you will get '/' """ r = RequestEntry(-1, Manager._lookup_by_lvm_id, (key,), cb, cbe, False) cfg.worker_q.put(r) @staticmethod def _use_lvm_shell(yes_no): return dbus.Boolean(cmdhandler.set_execution(yes_no)) @dbus.service.method( dbus_interface=MANAGER_INTERFACE, in_signature='b', out_signature='b', async_callbacks=('cb', 'cbe')) def UseLvmShell(self, yes_no, cb, cbe): """ Allow the client to enable/disable lvm shell, used for testing :param yes_no: :param cb: dbus python call back parameter, not client visible :param cbe: dbus python error call back parameter, not client visible :return: Boolean """ r = RequestEntry(-1, Manager._use_lvm_shell, (yes_no,), cb, cbe, False) cfg.worker_q.put(r) @staticmethod def _external_event(command): utils.log_debug("Processing _external_event= %s" % command, 'bg_black', 'fg_orange') cfg.load() @dbus.service.method( dbus_interface=MANAGER_INTERFACE, in_signature='s', out_signature='i') def ExternalEvent(self, command): utils.log_debug("ExternalEvent %s" % command) # If a user didn't explicitly specify udev, we will turn it off now. if not cfg.args.use_udev: if udevwatch.remove(): utils.log_debug("ExternalEvent received, disabling " "udev monitoring") # We are dependent on external events now to stay current! 
cfg.got_external_event = True r = RequestEntry( -1, Manager._external_event, (command,), None, None, False) cfg.worker_q.put(r) return dbus.Int32(0) @staticmethod def _pv_scan(activate, cache, device_path, major_minor, scan_options): rc, out, err = cmdhandler.pv_scan( activate, cache, device_path, major_minor, scan_options) Manager.handle_execute(rc, out, err) return '/' @dbus.service.method( dbus_interface=MANAGER_INTERFACE, in_signature='bbasa(ii)ia{sv}', out_signature='o', async_callbacks=('cb', 'cbe')) def PvScan(self, activate, cache, device_paths, major_minors, tmo, scan_options, cb, cbe): """ Scan all supported LVM block devices in the system for physical volumes NOTE: major_minors & device_paths only usable when cache == True :param activate: If True, activate any newly found LVs :param cache: If True, update lvmetad :param device_paths: Array of device paths or empty :param major_minors: Array of structures (major,minor) :param tmo: Timeout for operation :param scan_options: Additional options to pvscan :param cb: Not visible in API (used for async. callback) :param cbe: Not visible in API (used for async. error callback) :return: '/' if operation done, else job path """ for d in device_paths: utils.validate_device_path(MANAGER_INTERFACE, d) r = RequestEntry( tmo, Manager._pv_scan, (activate, cache, device_paths, major_minors, scan_options), cb, cbe, False) cfg.worker_q.put(r) @property def lvm_id(self): """ Intended to be overridden by classes that inherit """ return str(id(self)) @property def Uuid(self): """ Intended to be overridden by classes that inherit """ import uuid return uuid.uuid1() LVM2.2.02.176/daemons/lvmdbusd/lvm_shell_proxy.py0000755000000000000120000001570413176752421020404 0ustar rootwheel#!/usr/bin/env python3 # Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # # Copyright 2015-2016, Vratislav Podzimek import subprocess import shlex from fcntl import fcntl, F_GETFL, F_SETFL import os import traceback import sys import tempfile import time import select import copy try: import simplejson as json except ImportError: import json from lvmdbusd.cfg import LVM_CMD from lvmdbusd.utils import log_debug, log_error, add_no_notify SHELL_PROMPT = "lvm> " def _quote_arg(arg): if len(shlex.split(arg)) > 1: return '"%s"' % arg else: return arg class LVMShellProxy(object): @staticmethod def _read(stream): tmp = stream.read() if tmp: return tmp.decode("utf-8") return '' # Read until we get prompt back and a result # @param: no_output Caller expects no output to report FD # Returns stdout, report, stderr (report is JSON!) def _read_until_prompt(self, no_output=False): stdout = "" report = "" stderr = "" keep_reading = True extra_passes = 3 report_json = {} prev_report_len = 0 # Try reading from all FDs to prevent one from filling up and causing # a hang. 
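# The read loop here drains stdout, stderr and the report stream in parallel
# via select() so that no single pipe fills up and blocks the child.  A
# compact, self-contained sketch of that "accumulate until the prompt shows
# up" pattern (hypothetical helper; a plain os.pipe() stands in for the lvm
# child's stdout):

import os
import select


def read_until(prompt, fds, timeout=2.0):
	"""Accumulate per-fd output until `prompt` terminates the first fd."""
	bufs = {fd: b'' for fd in fds}
	while not bufs[fds[0]].endswith(prompt):
		ready, _, _ = select.select(fds, [], [], timeout)
		if not ready:
			break                       # nothing arrived within the timeout
		for fd in ready:
			bufs[fd] += os.read(fd, 4096)
	return bufs


# Tiny demo: the prompt may arrive split across reads and is still detected,
# because output is accumulated per file descriptor.
_r, _w = os.pipe()
os.write(_w, b'some output\nlvm> ')
assert read_until(b'lvm> ', [_r])[_r].endswith(b'lvm> ')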
Keep reading until we get the prompt back and the report # FD does not contain valid JSON while keep_reading: try: rd_fd = [ self.lvm_shell.stdout.fileno(), self.report_stream.fileno(), self.lvm_shell.stderr.fileno()] ready = select.select(rd_fd, [], [], 2) for r in ready[0]: if r == self.lvm_shell.stdout.fileno(): stdout += LVMShellProxy._read(self.lvm_shell.stdout) elif r == self.report_stream.fileno(): report += LVMShellProxy._read(self.report_stream) elif r == self.lvm_shell.stderr.fileno(): stderr += LVMShellProxy._read(self.lvm_shell.stderr) # Check to see if the lvm process died on us if self.lvm_shell.poll(): raise Exception(self.lvm_shell.returncode, "%s" % stderr) if stdout.endswith(SHELL_PROMPT): if no_output: keep_reading = False else: cur_report_len = len(report) if cur_report_len != 0: # Only bother to parse if we have more data if prev_report_len != cur_report_len: prev_report_len = cur_report_len # Parse the JSON if it's good we are done, # if not we will try to read some more. try: report_json = json.loads(report) keep_reading = False except ValueError: pass if keep_reading: extra_passes -= 1 if extra_passes <= 0: if len(report): raise ValueError("Invalid json: %s" % report) else: raise ValueError( "lvm returned no JSON output!") except IOError as ioe: log_debug(str(ioe)) pass return stdout, report_json, stderr def _write_cmd(self, cmd): cmd_bytes = bytes(cmd, "utf-8") num_written = self.lvm_shell.stdin.write(cmd_bytes) assert (num_written == len(cmd_bytes)) self.lvm_shell.stdin.flush() @staticmethod def _make_non_block(stream): flags = fcntl(stream, F_GETFL) fcntl(stream, F_SETFL, flags | os.O_NONBLOCK) def __init__(self): # Create a temp directory tmp_dir = tempfile.mkdtemp(prefix="lvmdbus_") tmp_file = "%s/lvmdbus_report" % (tmp_dir) try: # Lets create fifo for the report output os.mkfifo(tmp_file, 0o600) except FileExistsError: pass # We have to open non-blocking as the other side isn't open until # we actually fork the process. self.report_fd = os.open(tmp_file, os.O_NONBLOCK) self.report_stream = os.fdopen(self.report_fd, 'rb', 0) # Setup the environment for using our own socket for reporting local_env = copy.deepcopy(os.environ) local_env["LVM_REPORT_FD"] = "32" local_env["LVM_COMMAND_PROFILE"] = "lvmdbusd" # Disable the abort logic if lvm logs too much, which easily happens # when utilizing the lvm shell. 
local_env["LVM_LOG_FILE_MAX_LINES"] = "0" # run the lvm shell self.lvm_shell = subprocess.Popen( [LVM_CMD + " 32>%s" % tmp_file], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=local_env, stderr=subprocess.PIPE, close_fds=True, shell=True) try: LVMShellProxy._make_non_block(self.lvm_shell.stdout) LVMShellProxy._make_non_block(self.lvm_shell.stderr) # wait for the first prompt errors = self._read_until_prompt(no_output=True)[2] if errors and len(errors): raise RuntimeError(errors) except: raise finally: # These will get deleted when the FD count goes to zero so we # can be sure to clean up correctly no matter how we finish os.unlink(tmp_file) os.rmdir(tmp_dir) def get_error_msg(self): # We got an error, lets go fetch the error message self._write_cmd('lastlog\n') # read everything from the STDOUT to the next prompt stdout, report_json, stderr = self._read_until_prompt() if 'log' in report_json: error_msg = "" # Walk the entire log array and build an error string for log_entry in report_json['log']: if log_entry['log_type'] == "error": if error_msg: error_msg += ', ' + log_entry['log_message'] else: error_msg = log_entry['log_message'] return error_msg return 'No error reason provided! (missing "log" section)' def call_lvm(self, argv, debug=False): rc = 1 error_msg = "" if self.lvm_shell.poll(): raise Exception( self.lvm_shell.returncode, "Underlying lvm shell process is not present!") argv = add_no_notify(argv) # create the command string cmd = " ".join(_quote_arg(arg) for arg in argv) cmd += "\n" # run the command by writing it to the shell's STDIN self._write_cmd(cmd) # read everything from the STDOUT to the next prompt stdout, report_json, stderr = self._read_until_prompt() # Parse the report to see what happened if 'log' in report_json: if report_json['log'][-1:][0]['log_ret_code'] == '1': rc = 0 else: error_msg = self.get_error_msg() if debug or rc != 0: log_error(('CMD: %s' % cmd)) log_error(("EC = %d" % rc)) log_error(("ERROR_MSG=\n %s\n" % error_msg)) return rc, report_json, error_msg def exit_shell(self): try: self._write_cmd('exit\n') except Exception as e: log_error(str(e)) def __del__(self): try: self.lvm_shell.terminate() except: pass if __name__ == "__main__": shell = LVMShellProxy() in_line = "start" try: while in_line: in_line = input("lvm> ") if in_line: start = time.time() ret, out, err = shell.call_lvm(in_line.split()) end = time.time() print(("RC: %d" % ret)) print(("OUT:\n%s" % out)) print(("ERR:\n%s" % err)) print("Command = %f seconds" % (end - start)) except KeyboardInterrupt: pass except EOFError: pass except Exception: traceback.print_exc(file=sys.stdout) LVM2.2.02.176/daemons/lvmdbusd/state.py0000644000000000000120000000127513176752421016271 0ustar rootwheel# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from abc import ABCMeta, abstractmethod class State(object, metaclass=ABCMeta): @abstractmethod def lvm_id(self): pass @abstractmethod def identifiers(self): pass @abstractmethod def create_dbus_object(self, path): pass def __str__(self): return '*****\n' + str(self.__dict__) + '\n******\n' LVM2.2.02.176/daemons/lvmdbusd/utils.py0000644000000000000120000004113413176752421016307 0ustar rootwheel# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . import xml.etree.ElementTree as Et import sys import inspect import ctypes import os import string import datetime import dbus from lvmdbusd import cfg # noinspection PyUnresolvedReferences from gi.repository import GLib import threading import traceback import signal STDOUT_TTY = os.isatty(sys.stdout.fileno()) def rtype(dbus_type): """ Decorator making sure that the decorated function returns a value of specified type. :param dbus_type: The specific dbus type to return value as """ def decorator(fn): def decorated(*args, **kwargs): return dbus_type(fn(*args, **kwargs)) return decorated return decorator # Field is expected to be a number, handle the corner cases when parsing @rtype(dbus.UInt64) def n(v): if not v: return 0 return int(float(v)) @rtype(dbus.UInt32) def n32(v): if not v: return 0 return int(float(v)) # noinspection PyProtectedMember def init_class_from_arguments(obj_instance): for k, v in list(sys._getframe(1).f_locals.items()): if k != 'self': nt = k # If the current attribute has a value, but the incoming does # not, don't overwrite it. Otherwise the default values on the # property decorator don't work as expected. cur = getattr(obj_instance, nt, v) # print 'Init class %s = %s' % (nt, str(v)) if not (cur and len(str(cur)) and (v is None or len(str(v))) == 0): setattr(obj_instance, nt, v) def get_properties(f): """ Walks through an object instance or it's parent class(es) and determines which attributes are properties and if they were created to be used for dbus. :param f: Object to inspect :return: A dictionary of tuples with each tuple being: 0 = An array of dicts with the keys being: p_t, p_name, p_access(type, name, access) 1 = Hash of property names and current value """ interfaces = dict() for c in inspect.getmro(f.__class__): h = vars(c) for p, value in h.items(): if isinstance(value, property): # We found a property, see if it has a metadata type key = attribute_type_name(p) if key in h: interface = h[key][1] if interface not in interfaces: interfaces[interface] = ([], {}) access = '' if getattr(f.__class__, p).fget: access += 'read' if getattr(f.__class__, p).fset: access += 'write' interfaces[interface][0].append( dict( p_t=getattr(f, key)[0], p_name=p, p_access=access)) interfaces[interface][1][p] = getattr(f, p) return interfaces def get_object_property_diff(o_prop, n_prop): """ Walk through each object properties and report what has changed and with the new values :param o_prop: Old keys/values :param n_prop: New keys/values :return: hash of properties that have changed and their new value """ rc = {} for intf_k, intf_v in o_prop.items(): for k, v in list(intf_v[1].items()): # print('Comparing %s:%s to %s:%s' % # (k, o_prop[intf_k][1][k], k, str(n_prop[intf_k][1][k]))) if o_prop[intf_k][1][k] != n_prop[intf_k][1][k]: new_value = n_prop[intf_k][1][k] if intf_k not in rc: rc[intf_k] = dict() rc[intf_k][k] = new_value return rc def add_properties(xml, interface, props): """ Given xml that describes the interface, add property values to the XML for the specified interface. 
:param xml: XML to edit :param interface: Interface to add the properties too :param props: Output from get_properties :return: updated XML string """ if props: root = Et.fromstring(xml) interface_element = None # Check to see if interface is present for c in root: if c.attrib['name'] == interface: interface_element = c break # Interface is not present, lets create it so we have something to # attach the properties too if interface_element is None: interface_element = Et.Element("interface", name=interface) root.append(interface_element) # Add the properties for p in props: temp = '\n' % \ (p['p_t'], p['p_name'], p['p_access']) interface_element.append(Et.fromstring(temp)) return Et.tostring(root, encoding='utf8') return xml def attribute_type_name(name): """ Given the property name, return string of the attribute type :param name: :return: """ return "_%s_meta" % name _type_map = dict( s=dbus.String, o=dbus.ObjectPath, t=dbus.UInt64, x=dbus.Int64, u=dbus.UInt32, i=dbus.Int32, n=dbus.Int16, q=dbus.UInt16, d=dbus.Double, y=dbus.Byte, b=dbus.Boolean) def _pass_through(v): """ If we have something which is not a simple type we return the original value un-wrapped. :param v: :return: """ return v def _dbus_type(t, value): return _type_map.get(t, _pass_through)(value) def dbus_property(interface_name, name, dbus_type, doc=None): """ Creates the get/set properties for the given name. It assumes that the actual attribute is '_' + name and the attribute metadata is stuffed in _name_type. There is probably a better way todo this. :param interface_name: Dbus interface this property is associated with :param name: Name of property :param dbus_type: dbus string type eg. s,t,i,x :param doc: Python __doc__ for the property :return: """ attribute_name = '_' + name def getter(self): t = getattr(self, attribute_name + '_meta')[0] return _dbus_type(t, getattr(self.state, attribute_name[1:])) prop = property(getter, None, None, doc) def decorator(cls): setattr(cls, attribute_name + '_meta', (dbus_type, interface_name)) setattr(cls, name, prop) return cls return decorator def parse_tags(tags): if len(tags): if ',' in tags: return tags.split(',') return dbus.Array(sorted([tags]), signature='s') return dbus.Array([], signature='s') def _common_log(msg, *attributes): cfg.stdout_lock.acquire() tid = ctypes.CDLL('libc.so.6').syscall(186) if STDOUT_TTY: msg = "%s: %d:%d - %s" % \ (datetime.datetime.now().strftime("%b %d %H:%M:%S.%f"), os.getpid(), tid, msg) else: msg = "%d:%d - %s" % (os.getpid(), tid, msg) if STDOUT_TTY and attributes: print(color(msg, *attributes)) else: print(msg) cfg.stdout_lock.release() sys.stdout.flush() # Serializes access to stdout to prevent interleaved output # @param msg Message to output to stdout # @return None def log_debug(msg, *attributes): if cfg.args and cfg.args.debug: _common_log(msg, *attributes) def log_error(msg, *attributes): _common_log(msg, *attributes) def dump_threads_stackframe(): ident_to_name = {} for thread_object in threading.enumerate(): ident_to_name[thread_object.ident] = thread_object stacks = [] for thread_ident, frame in sys._current_frames().items(): stack = traceback.format_list(traceback.extract_stack(frame)) # There is a possibility that a thread gets created after we have # enumerated all threads, so this lookup table may be incomplete, so # account for this if thread_ident in ident_to_name: thread_name = ident_to_name[thread_ident].name else: thread_name = "unknown" stacks.append("Thread: %s" % (thread_name)) stacks.append("".join(stack)) 
log_error("Dumping thread stack frames!\n" + "\n".join(stacks)) # noinspection PyUnusedLocal def handler(signum): try: if signum == signal.SIGUSR1: dump_threads_stackframe() else: cfg.run.value = 0 log_debug('Exiting daemon with signal %d' % signum) if cfg.loop is not None: cfg.loop.quit() except: st = traceback.format_exc() log_error("signal handler: exception (logged, not reported!) \n %s" % st) # It's important we report that we handled the exception for the exception # handler to continue to work, especially for signal 10 (SIGUSR1) return True def pv_obj_path_generate(): return cfg.PV_OBJ_PATH + "/%d" % next(cfg.pv_id) def vg_obj_path_generate(): return cfg.VG_OBJ_PATH + "/%d" % next(cfg.vg_id) def lv_object_path_method(name, meta): if name[0] == '[': return _hidden_lv_obj_path_generate elif meta[0][0] == 't': return _thin_pool_obj_path_generate elif meta[0][0] == 'C' and 'pool' in meta[1]: return _cache_pool_obj_path_generate return _lv_obj_path_generate # Note: None of the individual LV path generate functions should be called # directly, they should only be dispatched through lv_object_path_method def _lv_obj_path_generate(): return cfg.LV_OBJ_PATH + "/%d" % next(cfg.lv_id) def _thin_pool_obj_path_generate(): return cfg.THIN_POOL_PATH + "/%d" % next(cfg.thin_id) def _cache_pool_obj_path_generate(): return cfg.CACHE_POOL_PATH + "/%d" % next(cfg.cache_pool_id) def _hidden_lv_obj_path_generate(): return cfg.HIDDEN_LV_PATH + "/%d" % next(cfg.hidden_lv) def job_obj_path_generate(): return cfg.JOB_OBJ_PATH + "/%d" % next(cfg.job_id) def color(text, *user_styles): styles = { # styles 'reset': '\033[0m', 'bold': '\033[01m', 'disabled': '\033[02m', 'underline': '\033[04m', 'reverse': '\033[07m', 'strike_through': '\033[09m', 'invisible': '\033[08m', # text colors 'fg_black': '\033[30m', 'fg_red': '\033[31m', 'fg_green': '\033[32m', 'fg_orange': '\033[33m', 'fg_blue': '\033[34m', 'fg_purple': '\033[35m', 'fg_cyan': '\033[36m', 'fg_light_grey': '\033[37m', 'fg_dark_grey': '\033[90m', 'fg_light_red': '\033[91m', 'fg_light_green': '\033[92m', 'fg_yellow': '\033[93m', 'fg_light_blue': '\033[94m', 'fg_pink': '\033[95m', 'fg_light_cyan': '\033[96m', # background colors 'bg_black': '\033[40m', 'bg_red': '\033[41m', 'bg_green': '\033[42m', 'bg_orange': '\033[43m', 'bg_blue': '\033[44m', 'bg_purple': '\033[45m', 'bg_cyan': '\033[46m', 'bg_light_grey': '\033[47m' } color_text = '' for style in user_styles: try: color_text += styles[style] except KeyError: return 'def color: parameter {} does not exist'.format(style) color_text += text return '\033[0m{0}\033[0m'.format(color_text) def pv_range_append(cmd, device, start, end): if (start, end) == (0, 0): cmd.append(device) else: if start != 0 and end == 0: cmd.append("%s:%d-" % (device, start)) else: cmd.append( "%s:%d-%d" % (device, start, end)) def pv_dest_ranges(cmd, pv_dest_range_list): if len(pv_dest_range_list): for i in pv_dest_range_list: pv_range_append(cmd, *i) def round_size(size_bytes): bs = 512 remainder = size_bytes % bs if not remainder: return size_bytes return size_bytes + bs - remainder _ALLOWABLE_CH = string.ascii_letters + string.digits + '#+-.:=@_\/%' _ALLOWABLE_CH_SET = set(_ALLOWABLE_CH) _ALLOWABLE_VG_LV_CH = string.ascii_letters + string.digits + '.-_+' _ALLOWABLE_VG_LV_CH_SET = set(_ALLOWABLE_VG_LV_CH) _LV_NAME_RESERVED = ("_cdata", "_cmeta", "_corig", "_mimage", "_mlog", "_pmspare", "_rimage", "_rmeta", "_tdata", "_tmeta", "_vorigin") # Tags can have the characters, based on the code # a-zA-Z0-9._-+/=!:&# _ALLOWABLE_TAG_CH = 
string.ascii_letters + string.digits + "._-+/=!:&#" _ALLOWABLE_TAG_CH_SET = set(_ALLOWABLE_TAG_CH) def _allowable_tag(tag_name): # LVM should impose a length restriction return set(tag_name) <= _ALLOWABLE_TAG_CH_SET def _allowable_vg_name(vg_name): if vg_name is None: raise ValueError("VG name is None or empty") vg_len = len(vg_name) if vg_len == 0 or vg_len > 127: raise ValueError("VG name (%s) length (%d) not in the domain 1..127" % (vg_name, vg_len)) if not set(vg_name) <= _ALLOWABLE_VG_LV_CH_SET: raise ValueError("VG name (%s) contains invalid character, " "allowable set(%s)" % (vg_name, _ALLOWABLE_VG_LV_CH)) if vg_name == "." or vg_name == "..": raise ValueError('VG name (%s) cannot be "." or ".."' % (vg_name)) def _allowable_lv_name(vg_name, lv_name): if lv_name is None: raise ValueError("LV name is None or empty") lv_len = len(lv_name) # This length is derived from empirical testing if lv_len == 0 or (len(vg_name) + lv_len) > 125: raise ValueError("LV name (%s) length (%d) + VG name length " "not in the domain 1..125" % (lv_name, lv_len)) if not set(lv_name) <= _ALLOWABLE_VG_LV_CH_SET: raise ValueError("LV name (%s) contains invalid character, " "allowable (%s)" % (lv_name, _ALLOWABLE_VG_LV_CH)) if any(x in lv_name for x in _LV_NAME_RESERVED): raise ValueError("LV name (%s) contains a reserved word, " "reserved set(%s)" % (lv_name, str(_LV_NAME_RESERVED))) if lv_name.startswith("snapshot") or lv_name.startswith("pvmove"): raise ValueError("LV name (%s) starts with a reserved word, " "reserved set(%s)" % (lv_name, str(["snapshot", "pvmove"]))) if lv_name[0] == '-': raise ValueError("LV name (%s) cannot start with a '-' " "character" % lv_name) def validate_device_path(interface, device): if not set(device) <= _ALLOWABLE_CH_SET: raise dbus.exceptions.DBusException( interface, 'Device path (%s) has invalid characters, ' 'allowable (%s)' % (device, _ALLOWABLE_CH)) def validate_vg_name(interface, vg_name): try: _allowable_vg_name(vg_name) except ValueError as ve: raise dbus.exceptions.DBusException( interface, str(ve)) def validate_lv_name(interface, vg_name, lv_name): try: _allowable_lv_name(vg_name, lv_name) except ValueError as ve: raise dbus.exceptions.DBusException( interface, str(ve)) def validate_tag(interface, tag): if not _allowable_tag(tag): raise dbus.exceptions.DBusException( interface, 'tag (%s) contains invalid character, allowable set(%s)' % (tag, _ALLOWABLE_TAG_CH)) def add_no_notify(cmdline): """ Given a command line to execute we will see if `--config` is present, if it is we will add the global/notify_dbus=0 to it, otherwise we will append it to the end of the list. :param: cmdline: The command line to inspect :type: cmdline: list :return: cmdline with notify_dbus config option present :rtype: list """ # Only after we have seen an external event will be disable lvm from sending # us one when we call lvm if cfg.got_external_event: if 'help' in cmdline: return cmdline if '--config' in cmdline: for i, arg in enumerate(cmdline): if arg == '--config': if len(cmdline) <= i+1: raise dbus.exceptions.DBusException("Missing value for --config option.") cmdline[i+1] += " global/notify_dbus=0" break else: cmdline.extend(['--config', 'global/notify_dbus=0']) return cmdline # The methods below which start with mt_* are used to execute the desired code # on the the main thread of execution to alleviate any issues the dbus-python # library with regards to multi-threaded access. Essentially, we are trying to # ensure all dbus library interaction is done from the same thread! 
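# mt_async_call()/MThreadRunner below funnel work onto the GLib main loop so
# that every dbus-python interaction happens on one thread.  A minimal
# illustration of that GLib.idle_add hand-off (assumes python3-gi, which the
# daemon already requires; everything apart from GLib here is a hypothetical
# demo name):

from gi.repository import GLib
import threading


def _on_main_thread(msg):
	# Runs on the thread that iterates the main loop.
	print("main loop thread got: %s" % msg)
	loop.quit()           # stop the demo loop once the callback ran
	return False          # returning False removes the idle source


def background():
	# Any worker thread can schedule a callable onto the main loop like this.
	GLib.idle_add(_on_main_thread,
		"hello from %s" % threading.current_thread().name)


loop = GLib.MainLoop()
threading.Thread(target=background).start()
loop.run()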
def _async_handler(call_back, parameters): params_str = ", ".join(str(x) for x in parameters) log_debug('Main thread execution, callback = %s, parameters = (%s)' % (str(call_back), params_str)) try: if parameters: call_back(*parameters) else: call_back() except: st = traceback.format_exc() log_error("mt_async_call: exception (logged, not reported!) \n %s" % st) # Execute the function on the main thread with the provided parameters, do # not return *any* value or wait for the execution to complete! def mt_async_call(function_call_back, *parameters): GLib.idle_add(_async_handler, function_call_back, parameters) # Run the supplied function and arguments on the main thread and wait for them # to complete while allowing the ability to get the return value too. # # Example: # result = MThreadRunner(foo, arg1, arg2).done() # class MThreadRunner(object): @staticmethod def runner(obj): # noinspection PyProtectedMember obj._run() with obj.cond: obj.function_complete = True obj.cond.notify_all() def __init__(self, function, *args): self.f = function self.rc = None self.exception = None self.args = args self.function_complete = False self.cond = threading.Condition(threading.Lock()) def done(self): GLib.idle_add(MThreadRunner.runner, self) with self.cond: if not self.function_complete: self.cond.wait() if self.exception: raise self.exception return self.rc def _run(self): try: if self.args: self.rc = self.f(*self.args) else: self.rc = self.f() except BaseException as be: self.exception = be st = traceback.format_exc() log_error("MThreadRunner: exception \n %s" % st) log_error("Exception will be raised in calling thread!") def _remove_objects(dbus_objects_rm): for o in dbus_objects_rm: cfg.om.remove_object(o, emit_signal=True) # Remove dbus objects from main thread def mt_remove_dbus_objects(objs): MThreadRunner(_remove_objects, objs).done() LVM2.2.02.176/daemons/lvmdbusd/automatedproperties.py0000644000000000000120000001441113176752421021245 0ustar rootwheel# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . import dbus import dbus.service from . import cfg from .utils import get_properties, add_properties, get_object_property_diff, \ log_debug from .state import State # noinspection PyPep8Naming,PyUnresolvedReferences class AutomatedProperties(dbus.service.Object): """ This class implements the needed interfaces for: org.freedesktop.DBus.Properties Other classes inherit from it to get the same behavior """ def __init__(self, object_path, search_method=None): dbus.service.Object.__init__(self, cfg.bus, object_path) self._ap_interface = [] self._ap_o_path = object_path self._ap_search_method = search_method self.state = None def dbus_object_path(self): return self._ap_o_path def emit_data(self): props = {} for i in self.interface(): props[i] = AutomatedProperties._get_all_prop(self, i) return self._ap_o_path, props def set_interface(self, interface): """ With inheritance we can't easily tell what interfaces a class provides so we will have each class that implements an interface tell the base AutomatedProperties what it is they do provide. This is kind of clunky and perhaps we can figure out a better way to do this later. 
:param interface: An interface the object supports :return: """ if interface not in self._ap_interface: self._ap_interface.append(interface) # noinspection PyUnusedLocal def interface(self, all_interfaces=False): if all_interfaces: cpy = list(self._ap_interface) cpy.extend( ["org.freedesktop.DBus.Introspectable", "org.freedesktop.DBus.Properties"]) return cpy return self._ap_interface @staticmethod def _get_prop(obj, interface_name, property_name): value = getattr(obj, property_name) # Note: If we get an exception in this handler we won't know about it, # only the side effect of no returned value! log_debug('Get (%s), type (%s), value(%s)' % (property_name, str(type(value)), str(value))) return value # Properties # noinspection PyUnusedLocal @dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE, in_signature='ss', out_signature='v', async_callbacks=('cb', 'cbe')) def Get(self, interface_name, property_name, cb, cbe): # Note: If we get an exception in this handler we won't know about it, # only the side effect of no returned value! r = cfg.create_request_entry( -1, AutomatedProperties._get_prop, (self, interface_name, property_name), cb, cbe, False) cfg.worker_q.put(r) @staticmethod def _get_all_prop(obj, interface_name): if interface_name in obj.interface(True): # Using introspection, lets build this dynamically properties = get_properties(obj) if interface_name in properties: return properties[interface_name][1] return {} raise dbus.exceptions.DBusException( obj._ap_interface, 'The object %s does not implement the %s interface' % (obj.__class__, interface_name)) @dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE, in_signature='s', out_signature='a{sv}', async_callbacks=('cb', 'cbe')) def GetAll(self, interface_name, cb, cbe): r = cfg.create_request_entry( -1, AutomatedProperties._get_all_prop, (self, interface_name), cb, cbe, False) cfg.worker_q.put(r) @dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE, in_signature='ssv') def Set(self, interface_name, property_name, new_value): setattr(self, property_name, new_value) self.PropertiesChanged(interface_name, {property_name: new_value}, []) # As dbus-python does not support introspection for properties we will # get the autogenerated xml and then add our wanted properties to it. @dbus.service.method(dbus_interface=dbus.INTROSPECTABLE_IFACE, out_signature='s') def Introspect(self): r = dbus.service.Object.Introspect(self, self._ap_o_path, cfg.bus) # Look at the properties in the class props = get_properties(self) for int_f, v in props.items(): r = add_properties(r, int_f, v[0]) return r @dbus.service.signal(dbus_interface=dbus.PROPERTIES_IFACE, signature='sa{sv}as') def PropertiesChanged(self, interface_name, changed_properties, invalidated_properties): log_debug(('SIGNAL: PropertiesChanged(%s, %s, %s, %s)' % (str(self._ap_o_path), str(interface_name), str(changed_properties), str(invalidated_properties)))) def refresh(self, search_key=None, object_state=None): """ Take the values (properties) of an object and update them with what lvm currently has. You can either fetch the new ones or supply the new state to be updated with :param search_key: The value to use to search for :param object_state: Use this as the new object state """ num_changed = 0 # If we can't do a lookup, bail now, this happens if we blindly walk # through all dbus objects as some don't have a search method, like # 'Manager' object. 
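# The refresh() logic here re-reads the lvm state and emits PropertiesChanged
# only for values that actually differ.  The essence of that diff step,
# reduced to flat dictionaries (a hypothetical helper; the real
# utils.get_object_property_diff() diffs per-interface property tuples):

def changed_properties(old, new):
	"""Return {name: new_value} for every property whose value changed."""
	return {k: new[k] for k in old if k in new and old[k] != new[k]}


_old = dict(Name='lv0', SizeBytes=1024, Tags='')
_new = dict(Name='lv0', SizeBytes=2048, Tags='backup')
assert changed_properties(_old, _new) == dict(SizeBytes=2048, Tags='backup')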
if not self._ap_search_method: return search = self.lvm_id if search_key: search = search_key # Either we have the new object state or we need to go fetch it if object_state: new_state = object_state else: new_state = self._ap_search_method([search])[0] assert isinstance(new_state, State) assert new_state # When we refresh an object the object identifiers might have changed # because LVM allows the user to change them (name & uuid), thus if # they have changed we need to update the object manager so that # look-ups will happen correctly old_id = self.state.identifiers() new_id = new_state.identifiers() if old_id[0] != new_id[0] or old_id[1] != new_id[1]: cfg.om.lookup_update(self, new_id[0], new_id[1]) # Grab the properties values, then replace the state of the object # and retrieve the new values. o_prop = get_properties(self) self.state = new_state n_prop = get_properties(self) changed = get_object_property_diff(o_prop, n_prop) if changed: for int_f, v in changed.items(): self.PropertiesChanged(int_f, v, []) num_changed += 1 return num_changed LVM2.2.02.176/daemons/lvmdbusd/lvmdbusd0000755000000000000120000000076013176752421016343 0ustar rootwheel#!/usr/bin/env python3 # Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . import sys from lvmdbusd import main if __name__ == '__main__': sys.exit(main()) LVM2.2.02.176/daemons/lvmdbusd/job.py0000644000000000000120000001336313176752421015724 0ustar rootwheel# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from .automatedproperties import AutomatedProperties from .utils import job_obj_path_generate, mt_async_call from . import cfg from .cfg import JOB_INTERFACE import dbus import threading # noinspection PyUnresolvedReferences from gi.repository import GLib # Class that handles a client waiting for something to be complete. We either # get a timeout or the operation is done. 
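# WaitingClient below answers a Wait() call either when the job completes or
# when the caller's timeout pops, and never more than once; completion must
# also cope with a client that registers after the job already finished.  The
# completion side of that contract, boiled down (hypothetical names, plain
# callbacks instead of dbus):

import threading


class OneShotNotifier(object):
	def __init__(self):
		self._lock = threading.RLock()
		self._done = False
		self._waiters = []

	def add_waiter(self, cb):
		with self._lock:
			if self._done:
				cb(True)          # finished before we registered: answer now
			else:
				self._waiters.append(cb)

	def complete(self):
		with self._lock:
			self._done = True
			for cb in self._waiters:
				cb(True)
			self._waiters = []


# Demo: the waiter is notified exactly once, whichever side runs first.
_n = OneShotNotifier()
_n.add_waiter(lambda done: print("job complete:", done))
_n.complete()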
class WaitingClient(object): # A timeout occurred @staticmethod def _timeout(wc): with wc.rlock: if wc.in_use: wc.in_use = False # Remove ourselves from waiting client wc.job_state.remove_waiting_client(wc) wc.timer_id = -1 mt_async_call(wc.cb, wc.job_state.Complete) wc.job_state = None def __init__(self, job_state, tmo, cb, cbe): self.rlock = threading.RLock() self.job_state = job_state self.cb = cb self.cbe = cbe self.in_use = True # Indicates if object is in play self.timer_id = -1 if tmo > 0: self.timer_id = GLib.timeout_add_seconds( tmo, WaitingClient._timeout, self) # The job finished before the timer popped and we are being notified that # it's done def notify(self): with self.rlock: if self.in_use: self.in_use = False # Clear timer if self.timer_id != -1: GLib.source_remove(self.timer_id) self.timer_id = -1 mt_async_call(self.cb, self.job_state.Complete) self.job_state = None # noinspection PyPep8Naming class JobState(object): def __init__(self, request=None): self.rlock = threading.RLock() self._percent = 0 self._complete = False self._request = request self._ec = 0 self._stderr = '' self._waiting_clients = [] # This is an lvm command that is just taking too long and doesn't # support background operation if self._request: # Faking the percentage when we don't have one self._percent = 1 @property def Percent(self): with self.rlock: return self._percent @Percent.setter def Percent(self, value): with self.rlock: self._percent = value @property def Complete(self): with self.rlock: if self._request: self._complete = self._request.is_done() return self._complete @Complete.setter def Complete(self, value): with self.rlock: self._complete = value self._percent = 100 self.notify_waiting_clients() @property def GetError(self): with self.rlock: if self.Complete: if self._request: (rc, error) = self._request.get_errors() return (rc, str(error)) else: return (self._ec, self._stderr) else: return (-1, 'Job is not complete!') def dtor(self): with self.rlock: self._request = None @property def Result(self): with self.rlock: if self._request: return self._request.result() return '/' def add_waiting_client(self, client): with self.rlock: # Avoid race condition where it goes complete before we get added # to the list of waiting clients if self.Complete: client.notify() else: self._waiting_clients.append(client) def remove_waiting_client(self, client): # If a waiting client timer pops before the job is done we will allow # the client to remove themselves from the list. As we have a lock # here and a lock in the waiting client too, and they can be obtained # in different orders, a dead lock can occur. # As this remove is really optional, we will try to acquire the lock # and remove. 
If we are unsuccessful it's not fatal, we just delay # the time when the objects can be garbage collected by python if self.rlock.acquire(False): try: self._waiting_clients.remove(client) finally: self.rlock.release() def notify_waiting_clients(self): with self.rlock: for c in self._waiting_clients: c.notify() self._waiting_clients = [] # noinspection PyPep8Naming class Job(AutomatedProperties): _Percent_meta = ('d', JOB_INTERFACE) _Complete_meta = ('b', JOB_INTERFACE) _Result_meta = ('o', JOB_INTERFACE) _GetError_meta = ('(is)', JOB_INTERFACE) def __init__(self, request, job_state=None): super(Job, self).__init__(job_obj_path_generate()) self.set_interface(JOB_INTERFACE) if job_state: self.state = job_state else: self.state = JobState(request) @property def Percent(self): return dbus.Double(float(self.state.Percent)) @property def Complete(self): return dbus.Boolean(self.state.Complete) @staticmethod def _signal_complete(obj): obj.PropertiesChanged( JOB_INTERFACE, dict(Complete=dbus.Boolean(obj.state.Complete)), []) @Complete.setter def Complete(self, value): self.state.Complete = value mt_async_call(Job._signal_complete, self) @property def GetError(self): return dbus.Struct(self.state.GetError, signature="(is)") @dbus.service.method(dbus_interface=JOB_INTERFACE) def Remove(self): if self.state.Complete: cfg.om.remove_object(self, True) self.state.dtor() else: raise dbus.exceptions.DBusException( JOB_INTERFACE, 'Job is not complete!') @dbus.service.method(dbus_interface=JOB_INTERFACE, in_signature='i', out_signature='b', async_callbacks=('cb', 'cbe')) def Wait(self, timeout, cb, cbe): if timeout == 0 or self.state.Complete: cb(dbus.Boolean(self.state.Complete)) else: self.state.add_waiting_client( WaitingClient(self.state, timeout, cb, cbe)) @property def Result(self): return dbus.ObjectPath(self.state.Result) @property def lvm_id(self): return str(id(self)) @property def Uuid(self): import uuid return uuid.uuid1() LVM2.2.02.176/WHATS_NEW_DM0000644000000000000120000020361513176752421013235 0ustar rootwheelVersion 1.02.145 - 3rd November 2017 ==================================== Keep Install section only in dm-event.socket systemd unit. Issue a specific error with dmsetup status if device is unknown. Fix RT_LIBS reference in generated libdevmapper.pc for pkg-config Version 1.02.144 - 6th October 2017 =================================== Schedule exit when received SIGTERM in dmeventd. Also try to unmount /boot on blkdeactivate -u if on top of supported device. Use blkdeactivate -r wait in blk-availability systemd service/initscript. Add blkdeactivate -r wait option to wait for MD resync/recovery/reshape. Fix blkdeactivate regression with failing DM/MD devs deactivation (1.02.142). Fix typo in blkdeactivate's '--{dm,lvm,mpath}options' option name. Correct return value testing when get reserved values for reporting. Take -S with dmsetup suspend/resume/clear/wipe_table/remove/deps/status/table. Version 1.02.143 - 13th September 2017 ====================================== Restore umask when creation of node fails. Add --concise to dmsetup create for many devices with tables in one command. Accept minor number without major in library when it knows dm major number. Introduce single-line concise table output format: dmsetup table --concise Version 1.02.142 - 20th July 2017 ================================= Create /dev/disk/by-part{uuid,label} and gpt-auto-root symlinks with udev. 
Version 1.02.141 - 28th June 2017 ================================= Fix reusing of dm_task structure for status reading (used by dmeventd). Add dm_percent_to_round_float for adjusted percentage rounding. Reset array with dead rimage devices once raid gets in sync. Drop unneeded --config option from raid dmeventd plugin. dm_get_status_raid() handle better some incosistent md statuses. Accept truncated files in calls to dm_stats_update_regions_from_fd(). Restore Warning by 5% increment when thin-pool is over 80% (1.02.138). Version 1.02.140 - 3rd May 2017 =============================== Add missing configure --enable-dmfilemapd status message and fix --disable. Version 1.02.139 - 13th April 2017 ================================== Fix assignment in _target_version() when dm task can't run. Flush stdout on each iteration when using --count or --interval. Show detailed error message when execvp fails while starting dmfilemapd. Fix segmentation fault when dmfilemapd is run with no arguments. Numerous minor dmfilemapd fixes from coverity. Version 1.02.138 - 28th March 2017 ================================== Support additional raid5/6 configurations. Provide dm_tree_node_add_cache_target@base compatible symbol. Support DM_CACHE_FEATURE_METADATA2, new cache metadata format 2. Improve code to handle mode mask for cache nodes. Cache status check for passthrough also require trailing space. Add extra memory page when limiting pthread stack size in dmeventd. Avoids immediate resume when preloaded device is smaller. Do not suppress kernel key description in dmsetup table output for dm-crypt. Support configurable command executed from dmeventd thin plugin. Support new R|r human readable units output format. Thin dmeventd plugin reacts faster on lvextend failure path with umount. Add dm_stats_bind_from_fd() to bind a stats handle from a file descriptor. Do not try call callback when reverting activation on error path. Fix file mapping for extents with physically adjacent extents in dmstats. Validation vsnprintf result in runtime translate of dm_log (1.02.136). Separate filemap extent allocation from region table in dmstats. Fix segmentation fault when filemap region creation fails in dmstats. Fix performance of region cleanup for failed filemap creation in dmstats. Fix very slow region deletion with many regions in dmstats. Version 1.02.137 - 30th November 2016 ===================================== Document raid status values. Always exit dmsetup with success when asked to display help/version. Version 1.02.136 - 5th November 2016 ==================================== Log failure of raid device with log_error level. Use dm_log_with_errno and translate runtime to dm_log only when needed. Make log messages from dm and lvm library different from dmeventd. Notice and Info messages are again logged from dmeventd and its plugins. Dmeventd now also respects DM_ABORT_ON_INTERNAL_ERRORS as libdm based tool. Report as non default dm logging also when logging with errno was changed. Use log_level() macro to consistently decode message log level in dmeventd. Still produce output when dmsetup dependency tree building finds dev missing. Check and report pthread_sigmask() failure in dmeventd. Check mem alloc fail in _canonicalize_field_ids(). Use unsigned math when checking more then 31 legs of raid. Fix 'dmstats delete' with dmsetup older than v1.02.129 Fix stats walk segfault with dmsetup older than v1.02.129 Version 1.02.135 - 26th September 2016 ====================================== Fix man entry for dmsetup status. 
Introduce new dm_config_parse_without_dup_node_check(). Don't omit last entry in dmstats list --group. Version 1.02.134 - 7th September 2016 ===================================== Improve explanation of udev fallback in libdevmapper.h. Version 1.02.133 - 10th August 2016 =================================== Add dm_report_destroy_rows/dm_report_group_output_and_pop_all for lvm shell. Adjust group handling and json production for lvm shell. Version 1.02.132 - 28th July 2016 ================================= Fix json reporting to escape '"' character that may appear in reported string. Version 1.02.131 - 15th July 2016 ================================= Disable queueing on mpath devs in blk-availability systemd service/initscript. Add new -m|--mpathoption disablequeueing to blkdeactivate. Automatically group regions with 'create --segments' unless --nogroup. Fix resource leak when deleting the first member of a group. Allow --bounds with 'create --filemap' for dmstats. Enable creation of filemap regions with histograms. Enable histogram aggregation for regions with more than one area. Enable histogram aggregation for groups of regions. Add a --filemap option to 'dmstats create' to allow mapping of files. Add dm_stats_create_regions_from_fd() to map file extents to regions. Version 1.02.130 - 6th July 2016 ================================ Minor fixes from coverity. Version 1.02.129 - 6th July 2016 ================================ Update default dmstats field selections for groups. Add 'obj_type', 'group_id', and 'statsname' fields to dmstats reports. Add --area, --region, and --group to dmstats to control object selection. Add --alias, --groupid, --regions to dmstats for group creation and deletion. Add 'group' and 'ungroup' commands to dmstats. Allow dm_stats_delete_group() to optionally delete all group members. Add dm_stats_get_object_type() to return the type of object present. Add dm_stats_walk_init() allowing control of objects visited by walks. Add dm_stats_get_group_descriptor() to return the member list as a string. Introduce dm_stats_get_nr_groups() and dm_stats_group_present(). Add dm_stats_{get,set}_alias() to set and retrieve alias names for groups. Add dm_stats_get_group_id() to return the group ID for a given region. Add dm_stats_{create,delete}_group() to allow grouping of stats regions. Add enum-driven dm_stats_get_{metric,counter}() interfaces. Add dm_bitset_parse_list() to parse a string representation of a bitset. Thin dmeventd plugin umounts lvm2 volume only when pool is 95% or more. Version 1.02.128 - 25th June 2016 ================================= Recognize 'all' keyword used in selection as synonym for "" (no selection). Add dm_report_set_selection to set selection for multiple output of report. Add DM_REPORT_OUTPUT_MULTIPLE_TIMES flag for multiple output of same report. Move field width handling/sort init from dm_report_object to dm_report_output. Add _LOG_BYPASS_REPORT flag for bypassing any log report currently set. Introduce DM_REPORT_GROUP_JSON for report group with JSON output format. Introduce DM_REPORT_GROUP_BASIC for report group with basic report output. Introduce DM_REPORT_GROUP_SINGLE for report group having single report only. Add dm_report_group_{create,push,pop,destroy} to support report grouping. Version 1.02.127 - 11th June 2016 ================================= Fix blkdeactivate regression causing skipping of dm + md devices. 
(1.02.126) Version 1.02.126 - 3rd June 2016 ================================ Report passthrough caching mode when parsing cache mode. Version 1.02.125 - 14th May 2016 ================================ Show library version in message even if dm driver version is unavailable. Version 1.02.124 - 30th April 2016 ================================== Add dm_udev_wait_immediate to libdevmapper for waiting outside the library. Version 1.02.123 - 23rd April 2016 ================================== Do not strip LVM- when debug reporting not found uuid. Version 1.02.122 - 9th April 2016 ================================= Change log_debug ioctl flags from single characters into words. Version 1.02.121 - 26th March 2016 ================================== Adjust raid status function. Version 1.02.120 - 11th March 2016 ================================== Improve parsing of cache status and report Fail, Error, needs_check, ro. Version 1.02.119 - 4th March 2016 ================================= Fix dm_config_write_node and variants to return error on subsection failures. Remove 4096 char limit due to buffer size if writing dm_config_node. Version 1.02.118 - 26th February 2016 ===================================== Fix string boundary check in _get_canonical_field_name(). Always initialized hist struct in _stats_parse_histogram(). Version 1.02.117 - 21st February 2016 ===================================== Improve status parsing for thin-pool and thin devices. Version 1.02.116 - 15th February 2016 ===================================== Use fully aligned allocations for dm_pool_strdup/strndup() (1.02.64). Fix thin-pool table parameter feature order to match kernel output. Version 1.02.115 - 25th January 2016 ==================================== Fix man page for dmsetup udevcreatecookie. Version 1.02.114 - 14th December 2015 ===================================== Better support for dmsetup static linkage. Extend validity checks on dmeventd client socket. Version 1.02.113 - 5th December 2015 ==================================== Mirror plugin in dmeventd uses dm_get_status_mirror(). Add dm_get_status_mirror() for parsing mirror status line. Version 1.02.112 - 28th November 2015 ===================================== Show error message when trying to create unsupported raid type. Improve preloading sequence of an active thin-pool target. Drop extra space from cache target line to fix unneded table reloads. Version 1.02.111 - 23rd November 2015 ===================================== Extend dm_hash to support multiple values with the same key. Add missing check for allocation inside dm_split_lvm_name(). Test dm_task_get_message_response for !NULL in dm_stats_print_region(). Add checks for failing dm_stats_create() in dmsetup. Add missing fifo close when failed to initialize client connection. Version 1.02.110 - 30th October 2015 ==================================== Disable thin monitoring plugin when it fails too often (>10 times). Fix/restore parsing of empty field '-' when processing dmeventd event. Enhance dm_tree_node_size_changed() to recognize size reduction. Support exit on idle for dmenventd (1 hour). Add support to allow unmonitor device from plugin itself. New design for thread co-operation in dmeventd. Dmeventd read device status with 'noflush'. Dmeventd closes control device when no device is monitored. Thin plugin for dmeventd improved percentage usage. Snapshot plugin for dmeventd improved percentage usage. Add dm_hold_control_dev to allow holding of control device open. 
Add dm_report_compact_given_fields to remove given empty fields from report. Use libdm status parsing and local mem raid dmeventd plugin. Use local mem pool and lock only lvm2 execution for mirror dmeventd plugin. Lock protect only lvm2 execution for snapshot and thin dmeventd plugin. Use local mempool for raid and mirror plugins. Reworked thread initialization for dmeventd plugins. Dmeventd handles snapshot overflow for now equally as invalid. Convert dmeventd to use common logging macro system from libdm. Return -ENOMEM when device registration fails instead of 0 (=success). Enforce writethrough mode for cleaner policy. Add support for recognition and deactivation of MD devices to blkdeactivate. Move target status functions out of libdm-deptree. Correct use of max_write_behind parameter when generating raid target line. Fix dm-event systemd service to make sure it is executed before mounting. Version 1.02.109 - 22nd September 2015 ====================================== Update man pages for dmsetup and dmstats. Improve help text for dmsetup. Use --noflush and --nolockfs when removing device with --force. Parse new Overflow status string for snapshot target. Check dir path components are valid if using dm_create_dir, error out if not. Fix /dev/mapper handling to remove dangling entries if symlinks are found. Make it possible to use blank value as selection for string list report field. Version 1.02.108 - 15th September 2015 ====================================== Do not check for full thin pool when activating without messages (1.02.107). Version 1.02.107 - 5th September 2015 ===================================== Parse thin-pool status with one single routine internally. Add --histogram to select default histogram fields for list and report. Add report fields for displaying latency histogram configuration and data. Add dmstats --bounds to specify histogram boundaries for a new region. Add dm_histogram_to_string() to format histogram data in string form. Add public methods to libdm to access numerical histogram config and data. Parse and store histogram data in dm_stats_list() and dm_stats_populate(). Add an argument to specify histogram bounds to dm_stats_create_region(). Add dm_histogram_bounds_from_{string,uint64_t}() to parse histogram bounds. Add dm_histogram handle type to represent a latency histogram and its bounds. Fix devmapper.pc pkgconfig file to not reference non-existent rt.pc file. Reinstate dm_task_get_info@Base to libdevmapper exports. (1.02.106) Version 1.02.106 - 26th August 2015 =================================== Add 'precise' column to statistics reports. Add --precise switch to 'dmstats create' to request nanosecond counters. Add precise argument to dm_stats_create_region(). Add support to libdm-stats for precise_timestamps Version 1.02.105 - 17th August 2015 =================================== Fix 'dmstats list -o all' segfault. Separate dmstats statistics fields from region information fields. Add interval and interval_ns fields to dmstats reports. Do not include internal glibc headers in libdm-timestamp.c (1.02.104) Exit immediately if no device is supplied to dmsetup wipe_table. Suppress dmsetup report headings when no data is output. (1.02.104) Adjust dmsetup usage/help output selection to match command invoked. Fix dmsetup -o all to select correct fields in splitname report. Restructure internal dmsetup argument handling across all commands. Add dm_report_is_empty() to indicate there is no data awaiting output. 
Add more arg validation for dm_tree_node_add_cache_target(). Add --alldevices switch to replace use of --force for stats create / delete. Version 1.02.104 - 10th August 2015 =================================== Add dmstats.8 man page Add dmstats --segments switch to create one region per device segment. Add dmstats --regionid, --allregions to specify a single / all stats regions. Add dmstats --allprograms for stats commands that filter by program ID. Add dmstats --auxdata and --programid args to specify aux data and program ID. Add report stats sub-command to provide repeating stats reports. Add clear, delete, list, and print stats sub-commands. Add create stats sub-command and --start, --length, --areas and --areasize. Recognize 'dmstats' as an alias for 'dmsetup stats' when run with this name. Add a 'stats' command to dmsetup to configure, manage and report stats data. Add statistics fields to dmsetup -o. Add libdm-stats library to allow management of device-mapper statistics. Add --nosuffix to suppress dmsetup unit suffixes in report output. Add --units to control dmsetup report field output units. Add support to redisplay column headings for repeating column reports. Fix report header and row resource leaks. Report timestamps of ioctls with dmsetup -vvv. Recognize report field name variants without any underscores too. Add dmsetup --interval and --count to repeat reports at specified intervals. Add dm_timestamp functions to libdevmapper. Recognise vg/lv name format in dmsetup. Move size display code to libdevmapper as dm_size_to_string. Version 1.02.103 - 24th July 2015 ================================= Introduce libdevmapper wrappers for all malloc-related functions. Version 1.02.102 - 7th July 2015 ================================ Include tool.h for default non-library use. Introduce format macros with embedded % such as FMTu64. Version 1.02.101 - 3rd July 2015 ================================ Add experimental support to passing messages in suspend tree. Add dm_report_value_cache_{set,get} to support caching during report/select. Add dm_report_reserved_handler to handle report reserved value actions. Support dynamic value in select: DM_REPORT_FIELD_RESERVED_VALUE_DYNAMIC_VALUE. Support fuzzy names in select: DM_REPORT_FIELD_RESERVED_VALUE_FUZZY_NAMES. Thin pool trace messages show a device name and major:minor. Version 1.02.100 - 30th June 2015 ================================= Add since, after, until and before time operators to be used in selection. Add support for time in reports and selection: DM_REPORT_FIELD_TYPE_TIME. Support report reserved value ranges: DM_REPORT_FIELD_RESERVED_VALUE_RANGE. Support report reserved value names: DM_REPORT_FIELD_RESERVED_VALUE_NAMED. Add DM_CONFIG_VALUE_FMT_{INT_OCTAL,STRING_NO_QUOTES} config value format flag. Add DM_CONFIG_VALUE_FMT_COMMON_{ARRAY,EXTRA_SPACE} config value format flag. Add dm_config_value_{get,set}_format_flags to get and set config value format. Version 1.02.99 - 20th June 2015 ================================ New dm_tree_node_set_thin_pool_read_only(DM_1_02_99) for read-only thin pool. Enhance error message when thin-pool message fails. Fix dmeventd logging to avoid threaded use of static variable. Remove redundant dmeventd SIGALRM coded. Version 1.02.98 - 12th June 2015 ================================ Add dm_task_get_errno() to return any unexpected errno from a dm ioctl call. Use copy of errno made after each dm ioctl call in case errno changes later. 
Version 1.02.97 - 15th May 2015 =============================== New dm_task_get_info(DM_1_02_97) supports internal_suspend state. New symbols are versioned and comes with versioned symbol name (DM_1_02_97). Version 1.02.96 - 2nd May 2015 ============================== Fix selection to not match if using reserved value in criteria with >,<,>=,<. Fix selection to not match reserved values for size fields if using >,<,>=,<. Include uuid or device number in log message after ioctl failure. Add DM_INTERNAL_SUSPEND_FLAG to dm-ioctl.h. Install blkdeactivate script and its man page with make install_device-mapper. Version 1.02.95 - 15th March 2015 ================================= Makefile regenerated. Version 1.02.94 - 4th March 2015 ================================ Add dm_report_object_is_selected for generalized interface for report/select. Version 1.02.93 - 21st January 2015 =================================== Reduce severity of ioctl error message when dmeventd waitevent is interrupted. Report 'unknown version' when incompatible version numbers were not obtained. Report more info from thin pool status (out of data, metadata-ro, fail). Support error_if_no_space for thin pool target. Fix segfault while using selection with regex and unbuffered reporting. Add dm_report_compact_fields to remove empty fields from report output. Remove unimplemented dm_report_set_output_selection from libdevmapper.h. Version 1.02.92 - 24th November 2014 ==================================== Fix memory corruption with sorting empty string lists (1.02.86). Fix man dmsetup.8 syntax warning of Groff Accept unquoted strings and / in place of {} when parsing configs. Version 1.02.91 - 11th November 2014 ==================================== Update cache creation and dm_config_node to pass policy. Allow activation of any thin-pool if transaction_id supplied is 0. Don't print uninitialized stack bytes when non-root uses dm_check_version(). Fix selection criteria to not match reserved values when using >, <, >=, <. Add DM_LIST_HEAD_INIT macro to libdevmapper.h. Fix dm_is_dm_major to not issue error about missing /proc lines for dm module. Version 1.02.90 - 1st September 2014 ==================================== Restore proper buffer size for parsing mountinfo line (1.02.89) Version 1.02.89 - 26th August 2014 ================================== Improve libdevmapper-event select() error handling. Add extra check for matching transation_id after message submitting. Add dm_report_field_string_list_unsorted for str. list report without sorting. Support --deferred with dmsetup remove to defer removal of open devices. Update dm-ioctl.h to include DM_DEFERRED_REMOVE flag. Add support for selection to match string list subset, recognize { } operator. Fix string list selection with '[value]' to not match list that's superset. Fix string list selection to match whole words only, not prefixes. Version 1.02.88 - 5th August 2014 ================================= Add dm_tree_set_optional_uuid_suffixes to handle upgrades. Version 1.02.87 - 23rd July 2014 ================================ Fix dm_report_field_string_list to handle delimiter with multiple chars. Add dm_report_field_reserved_value for per-field reserved value definition. Version 1.02.86 - 23rd June 2014 ================================ Make "help" and "?" reporting fields implicit. Recognize implicit "selected" field if using dm_report_init_with_selection. Add support for implicit reporting fields which are predefined in libdm. 
Add DM_REPORT_FIELD_TYPE_PERCENT: separate number and percent fields. Add dm_percent_range_t,dm_percent_to_float,dm_make_percent to libdm for reuse. Add dm_report_reserved_value to libdevmapper for reserved value definition. Also display field types when listing all fields in selection help. Recognize "help" keyword in selection string to show brief help for selection. Always order items reported as string list field lexicographically. Add dm_report_field_string_list to libdevmapper for direct string list report. Add DM_REPORT_FIELD_TYPE_STRING_LIST: separate string and string list fields. Add dm_str_list to libdevmapper for string list type definition and its reuse. Add dmsetup -S/--select to define selection criteria for dmsetup reports. Add dm_report_init_with_selection to intialize report with selection criteria. Add DM_REPORT_FIELD_TYPE_SIZE: separate number and size reporting fields. Use RemoveOnStop for dm-event.socket systemd unit. Document env var 'DM_DEFAULT_NAME_MANGLING_MODE' in dmsetup man page. Warn user about incorrect use of cookie with 'dmsetup remove --force'. Also recognize 'help'/'?' as reserved sort key name to show help. Add dm_units_to_factor for size unit parsing. Increase bitset size for minors for thin dmeventd plugin. Version 1.02.85 - 10th April 2014 ================================= Check for sprintf error when building internal device path. Check for sprintf error when creating path for dm control node. When buffer for dm_get_library_version() is too small, return error code. Always reinitialize _name_mangling_mode in dm_lib_init(). Add tracking flag about implicitly added devices into dm_tree. Stop timeout thread immediately when the last worker thread is finished. Fix dmeventd logging with parallel wait event processing. Reuse _node_send_messages() for validation of transaction_id in preload. Transaction_id could be lower by one only when messages are prepared. Do not call callback when preload fails. Wrap is_selinux_enabled() to be called just once. Use correctly signed 64b constant when working with raid volumes. Exit dmeventd with pidfile cleanup instead of raising SIGKILL on DIE request. Add new DM_EVENT_GET_PARAMETERS request to dmeventd protocol. Do not use systemd's reload for dmeventd restart, use dmeventd -R instead. Drop cryptsetup rules from 10-dm.rules - cryptsetup >= 1.1.3 sets them. Version 1.02.84 - 20th January 2014 =================================== Revert activation of activated nodes if a node preload callback fails. Avoid busy looping on CPU when dmeventd reads event DM_WAIT_RETRY. Ensure global mutex is held when working with dmeventd thread. Drop taking timeout mutex for un/registering dmeventd monitor. Allow section names in config file data to be quoted strings. Close fifos before exiting in dmeventd restart() error path. Move printf format string directly into dm_asprintf args list. Catch invalid use of string sort values when reporting numerical fields. Version 1.02.83 - 13th November 2013 ==================================== Consistently report on stderr when device is not found for dmsetup info. Skip race errors when non-udev dmsetup build runs on udev-enabled system. Skip error message when holders are not present in sysfs. Use __linux__ instead of linux define to make libdevmapper.h C compliant. Use mutex to avoid possible race while creating/destroying memory pools. Require libpthread to build now. 
Version 1.02.82 - 4th October 2013 ================================== Define symbolic names for subsystem udev flags in libdevmapper for easier use. Make subsystem udev rules responsible for importing DM_SUBSYSTEM_UDEV_FLAG*. Version 1.02.81 - 23rd September 2013 ===================================== Tidy dmeventd fifo initialisation. Version 1.02.80 - 20th September 2013 ===================================== Detect invalid sector supplied to 'dmsetup message'. Free any previously-set string if a dm_task_set_* function is called again. Do not allow passing empty new name for dmsetup rename. Display any output returned by 'dmsetup message'. Add dm_task_get_message_response to libdevmapper. Version 1.02.79 - 13th August 2013 ================================== Create dmeventd timeout threads as "detached" so exit status is freed. Add DM_ABORT_ON_INTERNAL_ERRORS env var support to abort on internal errors. Version 1.02.78 - 24th July 2013 ================================ Process thin messages once to active thin pool target for dm_tree. Optimize out setting the same value or read_ahead. Add DM_ARRAY_SIZE public macro. Move syslog code out of signal handle in dmeventd. Add DM_TO_STRING public macro. Always return success on dmeventd -V command call. Fix parsing of 64bit snapshot status in dmeventd snapshot plugin. Add dm_get_status_snapshot() for parsing snapshot status. Detecte mounted fs also via reading /proc/self/mountinfo. Add dm_mountinfo_read() for parsing /proc/self/mountinfo. Report error for nonexisting devices in dmeventd communication. Prevent double free error after dmeventd call of _fill_device_data(). Update dmevent structure message_data to simplify/fix error path handling. Validate passed params to dm_get_status_raid/thin/thin_pool(). Fix 'dmsetup splitname -o' to not fail if used without '-c' switch (1.02.68). Add dm_config_write_{node_out/one_node_out} for enhanced config output. Add dm_config_value_is_bool to check for boolean value in supported formats. Fix config node lookup inside empty sections to not return the section itself. Append discards and read-only fields to exported struct dm_status_thin_pool. Fix segfault for truncated string token in config file after the first '"'. Close open dmeventd FIFO file descriptors on exec (FD_CLOEXEC). Fix resource leak in error path of dmeventd's umount of thin volume. Automatically deactivate failed preloaded dm tree node. Add DM_DISABLE_UDEV environment variable to manage dev nodes by libdm only. Fix dm_task_set_cookie to properly process udev flags if udev_sync disabled. Version 1.02.77 - 15th October 2012 =================================== Support unmount of thin volumes from pool above thin pool threshold. Update man page to reflect that dm UUIDs are being mangled as well. Apply 'dmsetup mangle' for dm UUIDs besides dm names. Add 'mangled_uuid' and 'unmangled_uuid' fields to dmsetup info -c -o. Mangle device UUID on dm_task_set_uuid/newuuid call if necessary. Add dm_task_get_uuid_mangled/unmangled to libdevmapper. Always reset delay_resume_if_new flag when stacking thin pool over anything. Don't create value for dm_config_node and require dm_config_create_value call. Check for existing new_name for dmsetup rename. Fix memory leak in dmsetup _get_split_name() error path. Version 1.02.76 - 7th August 2012 ================================= Add dm_vasprintf to libdevmapper. Allow --noflush with dmsetup status and wait (for thin target). Add dm_config_write_one_node to libdevmapper. 
Support thin pool message release/reserve_metadata_snap in libdevmapper. Support thin pool discards and external origin features in libdevmapper. Add configure --enable-udev-rule-exec-detection to detect exec path in rules. Use sbindir in udev rules by default and remove executable path detection. Remove hard-coded paths for dmeventd fifos and use default-dm-run-dir. Add configure --with-lvmetad-pidfile to remove hard-coded value. Add configure --with-default-pid-dir for common directory with pid files. Add configure --with-default-dm-run-dir to set run directory for dm tools. Detect kernel_send() errors in cmirrord. Add __attribute__ instrumentation to libdevmapper.h. Print clean_bits instead of sync_bits in pull_state in cmirrord. Add tests for errors from closedir(), close() in cmirrord. Add documentation references in systemd units. Remove veritysetup. Now maintained with cryptsetup. Version 1.02.75 - 8th June 2012 =============================== Upstream source repo now fedorahosted.org git not sources.redhat.com CVS. Remove unsupported udev_get_dev_path libudev call used for checking udev dir. Set delay_resume_if_new on deptree snapshot origin. Log value chosen in _find_config_bool like other variable types do. Wait for dmeventd to exit after sending it DM_EVENT_CMD_DIE when restarting. Append 'Used' to {Blk}DevNames/DevNos dmsetup report headers for clarity. Add configure --with-veritysetup for independent veritysetup tool. Properly support supplied dmevent path in dm_event_register_handler(). Remove dmeventd fifos on exit if they are not managed by systemd. Use SD_ACTIVATION environment variable in systemd units to detect systemd. Only start a new dmeventd instance on restart if one was already running. Extend the time waited for input from dmeventd fifo to 5 secs. (1.02.73) Version 1.02.74 - 6th March 2012 ================================ Check for multiply-mangled names in auto mangling mode. Fix dm_task_get_name_unmangled to not unmangle already unmangled name. Check whether device names are properly mangled on ioctl return. Deactivation of failed thin check on thin pool returns success. Version 1.02.73 - 3rd March 2012 ================================ Test _thread_registry list with holding mutex in dmeventd. Add dm_tree_node_set_callback() for preload and deactivation hooks. Drop unsupported TRIM message for thin pool. Improve logging for fifo startup in dmeventd. Better detection of missing dmeventd fifo connection (1.02.71). Add a few pointer validations in dmsetup. Support dm_task_get_driver_version() query without version string. Log failure of pthread_join when cleaning unused threads in dmeventd. Fix empty string warning logic in _find_config_str. (1.02.68) Fix dm_task_set_name to properly resolve path to dm name (1.02.71). Add dm_strncpy() function as a faster strncpy() replacement. Version 1.02.72 - 23rd February 2012 ==================================== Avoid memory reallocation for dm_asprintf. Version 1.02.71 - 20th February 2012 ==================================== Switch to using built-in blkid in 13-dm-disk.rules. Add "watch" rule to 13-dm-disk.rules. Detect failing fifo and skip 20s retry communication period. Add DM_DEFAULT_NAME_MANGLING_MODE environment variable as an override. Add dm_lib_init to automatically initialise device-mapper library on load. Replace any '\' char with '\\' in dm table specification on input. Add mangle command to dmsetup to provide renaming to correct mangled form. Add 'mangled_name' and 'unmangled_name' fields to dmsetup info -c -o. 
Add --manglename option to dmsetup to select the name mangling mode. Add dm_task_get_name_mangled/unmangled to libdevmapper. Mangle device name on dm_task_set_name/newname call if necessary. Add dm_set/get_name_mangling_mode to set/get name mangling in libdevmapper. Add configure --with-default-name-mangling for udev-friendly dev name charset. Test for parsed words in _umount() dmeventd snapshot plugin. Fix memory leak in fail path of parse_loop_device_name() in dmsetup. Check for missing reply_uuid in dm_event_get_registered_device(). Check for allocation failure in dmeventd restart(). Add few missing allocation failures tests in dmsetup. Fix potential risk of writing in front of buffer in _sysfs_get_dm_name(). Version 1.02.70 - 12th February 2012 ==================================== Fix dm_event_get_version() check. Add pointer test for dependency check in _add_dev(). Validate name and uuid params of dm_tree_add_new_dev_with_udev_flags(). Do not crash for dm_report_init() sort_key == NULL and behave like "". Return error for failing allocation in dm_asprintf(). Add missing test for failing allocation in dm_realloc() code. Add test for memory allocation failures in regex matcher code. Simplify dm_task_set_geometry() and use dm_asprintf(). Set all parameters to 0 for dm_get_next_target() for NULL return. Fix fd resource leak in error path for _udev_notify_sem_create(). Leave space for '\0' for readline() call in _sysfs_get_kernel_name(). Version 1.02.69 - 1st February 2012 =================================== Clean up dmeventd systemd unit ordering and requirements. Version 1.02.68 - 26th January 2012 =================================== Reset all members of info struct in dm_tree_add_new_dev_with_udev_flags. Add dmsetup wipe_table to replace table with one that uses error target. Add 'blkdevname' and 'blkdevs_used' fields to dmsetup info -c -o. Add 'blkdevname' option to dmsetup ls --tree to see block device names. Add -o devno/blkdevname/devname to dmsetup deps and ls. Add dm_device_get_name to get map name or block device name for given devno. Remove empty devices when clearing left-over inactive tables in deptree. Add dm_uuid_prefix/dm_set_uuid_prefix to override hard-coded LVM- prefix. Improve dmsetup man page description of readahead parameter. Use sysfs to set/get readahead if possible. Fix lvm2-monitor init script to use normalized output when using vgs. Add test for max length (DM_MAX_TYPE_NAME) of target type name. Include a copy of kernel DM documentation in doc/kernel. Improve man page style for dmsetup and mention more targets. Fix _get_proc_number to be tolerant of malformed /proc/misc entries. Fix missing thread list manipulation protection in dmeventd. Add ExecReload to dm-event.service for systemd to reload dmeventd properly. Add dm_config_tree_find_str_allow_empty and dm_config_find_str_allow_empty. Fix compile-time pool memory locking with DEBUG_MEM. Fix valgrind error reports in free of pool chunks with DEBUG_MEM. Align size of structure chunk for fast pool allocator to 8 bytes. Simplify some pointer operations in dm_free_aux() debug code. Remove unused dbg_malloc.h file from source tree. Cleanup backtraces for _create_and_load_v4(). Fix alignment warning in bitcount calculation for raid segment. Allocate dm_tree structure from dm_tree pool. Update debug logging for _resume_node. Add functions to support thin provisioning target. Improve libdm-config error path reporting. Update dmsetup resume man with --addnodeonresume/create options. 
Add dependency for dm man pages to man subdirectory make all target. Add dm_tree_retry_remove to use retry logic for device removal in a dm_tree. Add dm_device_has_mounted_fs fn to check mounted filesystem on a device. Add dm_device_has_holders fn to to check use of the device by another device. Add dm_sysfs_dir to libdevmapper to retrieve sysfs location set. Add dm_set_sysfs_dir to libdevmapper to set sysfs location. Add --retry option for dmsetup remove to retry removal if not successful. Add dm_task_retry_remove fn to use retry logic for device removal. Remove unused passed parameters for _mirror_emit_segment_line(). Add dm_config and string character escaping functions to libdevmapper. Mark unreleased memory pools as internal error. Version 1.02.67 - 19th August 2011 ================================== Add dm_tree_node_add_null_area for temporarily-missing raid devs tracked. Version 1.02.66 - 12th August 2011 ================================== Release geometry buffer in dm_task_destroy. Update udev rules to skip DM flags decoding for removed devices. Add compile-time pool memory locking options (to debug shared VG structs). Remove device name prefix from dmsetup line output if -j & -m or -u supplied. Remove support for the original version 1 dm ioctls. Add missing check for allocation failure _create_dir_recursive(). Add support for systemd file descriptor handover in dmeventd. Fix memory leak in dmsetup _message() memory allocation error path. Use new oom killer adjustment interface (oom_score_adj) when available. Add systemd unit files for dmeventd. Fix read-only identical table reload supression. Version 1.02.65 - 8th July 2011 =============================== Remove dev name prefix from dmsetup line output if exactly one dev requested. Report internal error if suspending a device using an already-suspended dev. Report error if a table load requiring target parameters has none supplied. Add dmsetup --checks and dm_task_enable_checks framework to validate ioctls. Add age_in_minutes parameter to dmsetup udevcomplete_all. Return immediately from dm_lib_exit() if called more than once. Disable udev fallback by default and add --verifyudev option to dmsetup. Report internal error if any table is loaded while any dev is known suspended. Add dm_get_suspended_counter() for number of devs in suspended state by lib. Fix "all" report field prefix matching to include label fields with pv_all. Delay resuming new preloaded mirror devices with core logs in deptree code. Accept new kernel version 3 uname formats in initialisation. Version 1.02.64 - 29th April 2011 ================================== Require libudev >= 143 when compiling with udev support. Use word alignment for dm_pool_strdup() and dm_pool_strndup(). Use dm_snprintf() to fix signedness warning in dm_set_dev_dir(). Use unsigned loop counter to fix signedness warning in _other_node_ops(). Fix const cast in dmsetup calls of dm_report_field_string(). Streamline /dev/mapper/control node code for common cases. Use hard-coded dm control node device number for 2.6.36 kernels and above. Improve stack debug reporting in dm_task_create(). Fallback to control node creation only if node doesn't exist yet. Change dm_hash binary functions to take void *key instead of char *. Fix uninitialised memory use with empty params in _reload_with_suppression_v4. Lower severity of selabel_lookup and matchpathcon failure to log_debug. Add test for failed allocation from dm_task_set_uuid() in dmeventd. Add dm_event_get_version to dmeventd for use with -R. 
Avoid dmeventd core dumps when handling request with unknown command ID. Have dmeventd -R start up even when no existing copy is running. Accept multiple mapped device names on many dmsetup command lines. Fix dm_udev_wait calls in dmsetup to occur before readahead display not after. Include an implicit dm_task_update_nodes() within dm_udev_wait(). Fix _create_and_load_v4 not to lose the --addnodeoncreate setting (1.02.62). Add inactive table query support for kernel driver >= 4.11.6 (RHEL 5.7). Log debug open_count in _node_has_closed_parents(). Add a const to dm_report_field_string() data parameter. Version 1.02.63 - 9th February 2011 =================================== Reinstate DEBUG_MEM as it's part of the API. (1.02.62) Version 1.02.62 - 4th February 2011 =================================== Add configure --with-device-nodes-on=create for previous behaviour. Move creation of device nodes from 'create' to 'resume'. Add --addnodeonresume and --addnodeoncreate options to dmsetup. Add dm_task_set_add_node to libdevmapper to control dev node creation time. Add dm_task_secure_data to libdevmapper to wipe ioctl buffers in kernel. Log debug message when expected uevent is not generated. Only compile memory debugging code when DEBUG_MEM is set. Set DM_UDEV_DISABLE_OTHER_RULES_FLAG for suspended DM devices in udev rules. Begin a new pool object for each row in _output_as_rows() correctly. Version 1.02.61 - 10th January 2011 =================================== Add DM_COOKIE_AUTO_CREATE to libdevmapper.h. Export DM_CONTROL_NODE_UMASK and use it while creating /dev/mapper/control. Version 1.02.60 - 20th December 2010 ==================================== Check for unlink failure in remove_lockfile() in dmeventd. Use dm_free for dm_malloc-ed areas in _clog_ctr/_clog_dtr in cmirrord. Use char* arithmetic in _process_all() & _targets() in dmsetup. Change dm_regex_create() API to accept const char * const *patterns. Add new dm_prepare_selinux_context fn to libdevmapper and use it throughout. Detect existence of new SELinux selabel interface during configure. Version 1.02.59 - 6th December 2010 =================================== Add backtraces to _process_mapper_dir and _create_and_load_v4 error paths. Remove superfluous checks for NULL before calling dm_free. Version 1.02.58 - 22nd November 2010 ==================================== Fix _output_field crash from field_id free with DEBUG_MEM. (1.02.57) Version 1.02.57 - 8th November 2010 =================================== Fix regex optimiser not to ignore RHS of OR nodes in _find_leftmost_common. Add dmeventd -R to restart dmeventd without losing monitoring state. (1.02.56) Fix memory leak of field_id in _output_field function. Allocate buffer for reporting functions dynamically to support long outputs. Version 1.02.56 - 25th October 2010 =================================== Return const pointer from dm_basename() in libdevmapper. Implement dmeventd -R to restart without state loss. Add dm_zalloc and use it and dm_pool_zalloc throughout. Add --setuuid to dmsetup rename. Add dm_task_set_newuuid to set uuid of mapped device post-creation. Version 1.02.55 - 24th September 2010 ===================================== Fix the way regions are marked complete to avoid slow --nosync cmirror I/O. Add DM_REPORT_FIELD_TYPE_ID_LEN to libdevmapper.h. Version 1.02.54 - 18th August 2010 ================================== Fix dm-mod autoloading logic to not assume control node is set correctly. Add dmeventd/executable to lvm.conf to test alternative dmeventd. 
Export dm_event_handler_set_dmeventd_path to override built-in dmeventd path. Generate libdevmapper-event exported symbols. Remove superfluous NULL pointer tests before dm_free from dmeventd. Assume dm-mod autoloading support is in kernel 2.6.36 and higher, not 2.6.35. Fix udev rules to support udev database content generated by older rules. Reinstate detection of inappropriate uevent with DISK_RO set and suppress it. Fix regex ttree off-by-one error. Add --enable-valgrind-pool to configure. Fix segfault in regex matcher with characters of ordinal value > 127. Fix 'void*' arithmetic warnings in dbg_malloc.c and libdm-iface.c. Wait for node creation before displaying debug info in dmsetup. Fix return status 0 for "dmsetup info -c -o help". Add check for kernel semaphore support and disable udev_sync if not available. Version 1.02.53 - 28th July 2010 ================================ Revert failed table load preparation after "create, load and resume". Switch dmeventd to use dm_create_lockfile and drop duplicate code. Add dm_create_lockfile to libdm to handle pidfiles for all daemons. Replace lookup with next in struct dfa_state & calculate states on demand. Improve the regex matcher, reducing the number of charset nodes used. Add dm_regex_fingerprint to facilitate regex testing. Skip ffs(0) in _test_word in bitset functions. Use "nowatch" udev rule for inappropriate devices. Version 1.02.52 - 6th July 2010 =============================== Fix dmlosetup snprintf %llu compiler warning. Add parentheses to some libdevmapper.h macro arguments. Add printf format attributes to dm_{sn,as}printf and fix a caller. Move dmeventd man page from install_lvm2 to install_device-mapper. (1.02.50) Version 1.02.51 - 30th June 2010 ================================ Generate libdevmapper exported symbols from header file. Version 1.02.50 - 23rd June 2010 ================================ Fix INTERNAL_ERROR typo in ioctl iface unknown task message. Fix udev rules to handle spurious events properly. Use C99 [] not [0] in dm_ulog_request struct to avoid abort when fortified. Allow use of devmapper header file in C++ mode (extern "C" and __typeof__). Add dmeventd man page. Version 1.02.49 - 4th June 2010 =============================== Support autoloading of dm-mod module for kernels from 2.6.35. Document 'clear' in dmsetup man page. Fix semctl parameter (union) to avoid misaligned parameter on some arches. Add dm_tree_node_set_presuspend_node() to presuspend child when deactivating. Initial support for replicator target. Version 1.02.48 - 17th May 2010 ================================ Use -d to control level of messages sent to syslog by dmeventd. Change -d to -f to run dmeventd in foreground. Do not print encryption key in message debug output (cryptsetup luksResume). Fix dmeventd static build library dependencies. Fix udev flags on remove in create_and_load error path. Version 1.02.47 - 30th April 2010 ================================= Add support for new IMPORT{db} udev rule. Add DM_UDEV_PRIMARY_SOURCE_FLAG udev flag to recognize proper DM events. Also include udev libs in libdevmapper.pc when udev_sync is enabled. Cache bitset locations to speed up _calc_states. Add a regex optimisation pass for shared prefixes and suffixes. Add dm_bit_and and dm_bitset_equal to libdevmapper. Simplify dm_bitset_create. Speed up dm_bit_get_next with ffs(). Version 1.02.46 - 14th April 2010 ================================= Change dm_tree_deactivate_children to fail if device is open. 
Wipe memory buffers for dm-ioctl parameters before releasing. Strictly require libudev if udev_sync is used. Add support for ioctl's DM_UEVENT_GENERATED_FLAG. Version 1.02.45 - 9th March 2010 ================================ Add --showkeys parameter description to dmsetup man page. Add --help option as synonym for help command. Version 1.02.44 - 15th February 2010 ==================================== Add DM_UDEV_DISABLE_LIBRARY_FALLBACK udev flag to rely on udev only. Export dm_udev_create_cookie function to create new cookies on demand. Add --udevcookie, udevcreatecookie and udevreleasecookie to dmsetup. Set udev state automatically instead of using DM_UDEV_DISABLE_CHECKING. Version 1.02.43 - 21st January 2010 =================================== Remove bitset, hash and pool headers superceded by libdevmapper.h. Fix off-by-one error causing bad cluster mirror table construction. Version 1.02.42 - 14th January 2010 =================================== Add support for the "snapshot-merge" kernel target (2.6.33-rc1). Introduce a third activation_priority level in dm_tree_activate_children. Version 1.02.41 - 12th January 2010 =================================== If DM_UDEV_DISABLE_CHECKING is set in environment, disable udev warnings. Add dm_tree_add_dev_with_udev_flags to provide wider support for udev flags. Add --noudevrules option for dmsetup to disable /dev node management by udev. Fix 'dmsetup info -c -o all' to show all fields. Return errors if dm_tree_*_children functions fail. Fix coredump and memory leak for 'dmsetup help -c'. Disable udev rules for change events with DISK_RO set. Version 1.02.40 - 19th November 2009 ==================================== Fix install_device-mapper Makefile target to not build dmeventd plugins. Support udev flags even when udev_sync is disabled or not compiled in. Remove 'last_rule' from udev rules: honour DM_UDEV_DISABLE_OTHER_RULES_FLAG. Add dmsetup --inactive support. Add dm_task_query_inactive_table to libdevmapper for kernel driver >= 4.16. Fix hash lookup segfault when keys compared are different lengths. Version 1.02.39 - 26th October 2009 =================================== Remove strict default permissions for DM devices from 95-dm-notify.rules. Add dmsetup udevflags command to decode udev flags in given cookie value. Support udev flags in libdevmapper incl. dm_tree_add_new_dev_with_udev_flags. Make libdm ABI consistent when built with/without selinux support. Version 1.02.38 - 25th September 2009 ===================================== Export DM_DEV_DIR_UMASK, the default umask for /dev directories created. Handle any path supplied to dm_task_set_name by looking up in /dev/mapper. Add several examples to 12-dm-permissions.rules. Add splitname and --yes to dmsetup man page. Fix _mirror_emit_segment_line return code. Fix dmeventd _temporary_log_fn parameters. (2.02.50) Version 1.02.37 - 15th September 2009 ===================================== Add dmsetup manpage entries for udevcomplete_all and udevcookies. Check udev is running when processing cookies and retain state internally. Add y|--yes option to dmsetup for default 'yes' answer to prompts. Fix tools Makefile to process dmsetup sources separately. Restore umask when device node creation fails. Check kernel vsn to use 'block_on_error' or 'handle_errors' in mirror table. Add dm-log-userspace.h to tree for cmirrord builds. Version 1.02.36 - 6th August 2009 ================================= Add udevcookies, udevcomplete, udevcomplete_all and --noudevwait to dmsetup. 
Add libdevmapper functions to support synchronisation with udev. Version 1.02.35 - 28th July 2009 ================================ Add LOG_LINE_WITH_ERRNO macro. Use log_error macro consistently throughout in place of log_err. Version 1.02.34 - 15th July 2009 ================================ Use _exit() not exit() after forking to avoid flushing libc buffers twice. Rename plog macro to LOG_LINE & add LOG_MESG variant for dm_dump_memory_debug. Change plog to use dm_log_with_errno unless deprecated dm_log_init was used. Add dm_log_with_errno and dm_log_with_errno_init, deprecating the old fns. Fix whitespace in linear target line to fix identical table line detection. Add device number to more log messages during activation. Version 1.02.33 - 30th June 2009 ================================ Don't fallback to default major number: use dm_task_set_major_minor. (1.02.31) Do not fork daemon when dmeventd cannot be found. Add crypt target handling to libdevmapper tree nodes. Add splitname command to dmsetup. Add subsystem, vg_name, lv_name, lv_layer fields to dmsetup reports. Make mempool optional in dm_split_lvm_name(). Version 1.02.32 - 21st May 2009 =============================== Only generate libdevmapper.a when configured to link statically. Export dm_tree_node_size_changed() from libdevmapper. Propagate the table size_changed property up the dm device tree. Detect failure to free memory pools when releasing the library. Fix segfault when getopt processes dmsetup -U, -G and -M options. Version 1.02.31 - 3rd March 2009 ================================ If kernel supports only one dm major number, use in place of any supplied. Version 1.02.30 - 26th January 2009 ==================================== Add "all" field to reports expanding to all fields of report type. Enforce device name length and character limitations in libdm. Replace _dm_snprintf with EMIT_PARAMS macro for creating target lines. Version 1.02.29 - 10th November 2008 ==================================== Merge device-mapper into the LVM2 tree. Split out dm-logging.h from log.h. Use lvm-types.h. Add usrsbindir to configure. Version 1.02.28 - 18th September 2008 ===================================== Only resume devices in dm_tree_preload_children if size changes. Extend deptree buffers so the largest possible device numbers fit. Generate versioned libdevmapper-event.so. Underline longer report help text headings. Version 1.02.27 - 25th June 2008 ================================ Align struct memblock in dbg_malloc for sparc. Add --unquoted and --rows to dmsetup. Avoid compiler warning about cast in dmsetup.c's OFFSET_OF macro. Fix inverted no_flush debug message. Remove --enable-jobs from configure. (Set at runtime instead.) Bring configure.in and list.h into line with the lvm2 versions. Version 1.02.26 - 6th June 2008 =============================== Initialise params buffer to empty string in _emit_segment. Skip add_dev_node when ioctls disabled. Make dm_hash_iter safe against deletion. Accept a NULL pointer to dm_free silently. Add tables_loaded, readonly and suspended columns to reports. Add --nameprefixes to dmsetup. Add field name prefix option to reporting functions. Calculate string size within dm_pool_grow_object. Version 1.02.25 - 10th April 2008 ================================= Remove redundant if-before-free tests. Use log_warn for reporting field help text instead of log_print. 
Change cluster mirror log type name (s/clustered_/clustered-/) Version 1.02.24 - 20th December 2007 ==================================== Fix deptree to pass new name to _resume_node after a rename. Suppress other node operations if node is deleted. Add node operation stack debug messages. Report error when empty device name passed to readahead functions. Fix minimum readahead debug message. Version 1.02.23 - 5th December 2007 =================================== Update dm-ioctl.h after removal of compat code. Add readahead support to libdevmapper and dmsetup. Fix double free in a libdevmapper-event error path. Fix configure --with-dmeventd-path substitution. Allow a DM_DEV_DIR environment variable to override /dev in dmsetup. Create a libdevmapper.so.$LIB_VERSION symlink within the build tree. Avoid static link failure with some SELinux libraries that require libpthread. Remove obsolete dmfs code from tree and update INSTALL. Version 1.02.22 - 21st August 2007 ================================== Fix inconsistent licence notices: executables are GPLv2; libraries LGPLv2.1. Update to use autoconf 2.61, while still supporting 2.57. Avoid repeated dm_task free on some dm_event_get_registered_device errors. Introduce log_sys_* macros from LVM2. Export dm_fclose and dm_create_dir; remove libdm-file.h. Don't log EROFS mkdir failures in _create_dir_recursive (for LVM2). Add fclose wrapper dm_fclose that catches write failures (using ferror). Version 1.02.21 - 13th July 2007 ================================ Introduce _LOG_STDERR to send log_warn() messages to stderr not stdout. Fix dmsetup -o devno string termination. (1.02.20) Version 1.02.20 - 15th June 2007 ================================ Fix default dmsetup report buffering and add --unbuffered. Add tree-based and dependency fields to dmsetup reports. Version 1.02.19 - 27th April 2007 ================================= Standardise protective include file #defines. Add regex functions to library. Avoid trailing separator in reports when there are hidden sort fields. Fix segfault in 'dmsetup status' without --showkeys against crypt target. Deal with some more compiler warnings. Introduce _add_field() and _is_same_field() to libdm-report.c. Fix some libdevmapper-event and dmeventd memory leaks. Remove unnecessary memset() return value checks. Fix a few leaks in reporting error paths. [1.02.15+] Version 1.02.18 - 13th February 2007 ==================================== Improve dmeventd messaging protocol: drain pipe and tag messages. Version 1.02.17 - 29th January 2007 =================================== Add recent reporting options to dmsetup man page. Revise some report fields names. Add dmsetup 'help' command and update usage text. Use fixed-size fields in report interface and reorder. Version 1.02.16 - 25th January 2007 =================================== Add some missing close() and fclose() return value checks. Migrate dmsetup column-based output over to new libdevmapper report framework. Add descriptions to reporting field definitions. Add a dso-private variable to dmeventd dso interface. Add dm_event_handler_[gs]et_timeout functions. Streamline dm_report_field_* interface. Add cmdline debug & version options to dmeventd. Add DM_LIB_VERSION definition to configure.h. Suppress 'Unrecognised field' error if report field is 'help'. Add --separator and --sort to dmsetup (unused). Make alignment flag optional when specifying report fields. 
Version 1.02.15 - 17th January 2007 =================================== Add basic reporting functions to libdevmapper. Fix a malloc error path in dmsetup message. More libdevmapper-event interface changes and fixes. Rename dm_saprintf() to dm_asprintf(). Report error if NULL pointer is supplied to dm_strdup_aux(). Reinstate dm_event_get_registered_device. Version 1.02.14 - 11th January 2007 =================================== Add dm_saprintf(). Use CFLAGS when linking so mixed sparc builds can supply -m64. Add dm_tree_use_no_flush_suspend(). Lots of dmevent changes including revised interface. Export dm_basename(). Cope with a trailing space when comparing tables prior to possible reload. Fix dmeventd to cope if monitored device disappears. Version 1.02.13 - 28 Nov 2006 ============================= Update dmsetup man page (setgeometry & message). Fix dmsetup free after getline with debug. Suppress encryption key in 'dmsetup table' output unless --showkeys supplied. Version 1.02.12 - 13 Oct 2006 ============================= Avoid deptree attempting to suspend a device that's already suspended. Version 1.02.11 - 12 Oct 2006 ============================== Add suspend noflush support. Add basic dmsetup loop support. Switch dmsetup to use dm_malloc and dm_free. Version 1.02.10 - 19 Sep 2006 ============================= Add dm_snprintf(), dm_split_words() and dm_split_lvm_name() to libdevmapper. Reorder mm bounds_check code to reduce window for a dmeventd race. Version 1.02.09 - 15 Aug 2006 ============================= Add --table argument to dmsetup for a one-line table. Abort if errors are found during cmdline option processing. Add lockfs indicator to debug output. Version 1.02.08 - 17 July 2006 ============================== Append full patch to check in emails. Avoid duplicate dmeventd subdir with 'make distclean'. Update dmsetup man page. Add --force to dmsetup remove* to load error target. dmsetup remove_all also performs mknodes. Don't suppress identical table reloads if permission changes. Fix corelog segment line. Suppress some compiler warnings. Version 1.02.07 - 11 May 2006 ============================= Add DM_CORELOG flag to dm_tree_node_add_mirror_target(). Avoid a dmeventd compiler warning. Version 1.02.06 - 10 May 2006 ============================= Move DEFS into configure.h. Fix leaks in error paths found by coverity. Remove dmsetup line buffer limitation. Version 1.02.05 - 19 Apr 2006 ============================= Separate install_include target in makefiles. Separate out DEFS from CFLAGS. Support pkg-config. Check for libsepol. Version 1.02.04 - 14 Apr 2006 ============================= Bring dmsetup man page up-to-date. Use name-based device refs if kernel doesn't support device number refs. Fix memory leak (struct dm_ioctl) when struct dm_task is reused. If _create_and_load_v4 fails part way through, revert the creation. dmeventd thread/fifo fixes. Add file & line to dm_strdup_aux(). Add setgeometry. Version 1.02.03 - 7 Feb 2006 ============================ Add exported functions to set uid, gid and mode. Rename _log to dm_log and export. Add dm_tree_skip_lockfs. Fix dm_strdup debug definition. Fix hash function to avoid using a negative array offset. Don't inline _find in hash.c and tidy signed/unsigned etc. Fix libdevmapper.h #endif. Fix dmsetup version driver version. Add sync, nosync and block_on_error mirror log parameters. Add hweight32. Fix dmeventd build. Version 1.02.02 - 2 Dec 2005 ============================ dmeventd added. Export dm_task_update_nodes. 
Use names instead of numbers in messages when ioctls fail. Version 1.02.01 - 23 Nov 2005 ============================= Resume snapshot-origins last. Drop leading zeros from dm_format_dev. Suppress attempt to reload identical table. Additional LVM- prefix matching for transitional period. Version 1.02.00 - 10 Nov 2005 ============================= Added activation functions to library. Added return macros. Also suppress error if device doesn't exist with DM_DEVICE_STATUS. Export dm_set_selinux_context(). Add dm_driver_version(). Added dependency tree functions to library. Added hash, bitset, pool, dbg_malloc to library. Added ls --tree to dmsetup. Added dmsetup --nolockfs support for suspend/reload. Version 1.01.05 - 26 Sep 2005 ============================= Resync list.h with LVM2. Remember increased buffer size and use for subsequent calls. On 'buffer full' condition, double buffer size and repeat ioctl. Fix termination of getopt_long() option array. Report 'buffer full' condition with v4 ioctl as well as with v1. Version 1.01.04 - 2 Aug 2005 ============================ Fix dmsetup ls -j and status --target with empty table. Version 1.01.03 - 13 Jun 2005 ============================= Use matchpathcon mode parameter. Fix configure script to re-enable selinux. Version 1.01.02 - 17 May 2005 ============================= Call dm_lib_exit() and dm_lib_release() automatically now. Add --target filter to dmsetup table/status/ls. Add --exec to dmsetup ls. Fix dmsetup getopt_long usage. Version 1.01.01 - 29 Mar 2005 ============================= Update dmsetup man page. Drop-in devmap_name replacement. Add option to compile without ioctl for testing. Fix DM_LIB_VERSION sed. Version 1.01.00 - 17 Jan 2005 ============================= Add dm_task_no_open_count() to skip getting open_count. Version 1.00.21 - 7 Jan 2005 ============================ Fix /proc/devices parsing. Version 1.00.20 - 6 Jan 2005 ============================ Attempt to fix /dev/mapper/control transparently if it's wrong. Configuration-time option for setting uid/gid/mode for /dev/mapper nodes. Update kernel patches for 2.4.27/2.4.28-pre-4 (includes minor fixes). Add --noheadings columns option for colon-separated dmsetup output. Support device referencing by uuid or major/minor. Warn if kernel data didn't fit in buffer. Fix a printf. Version 1.00.19 - 3 July 2004 ============================= More autoconf fixes. Fix a dmsetup newline. Fix device number handling for 2.6 kernels. Version 1.00.18 - 20 Jun 2004 ============================= Fix a uuid free in libdm-iface. Fix a targets string size calc in driver. Add -c to dmsetup for column-based output. Add target message-passing ioctl. Version 1.00.17 - 17 Apr 2004 ============================= configure --with-owner= --with-group= to avoid -o and -g args to 'install' Fix library selinux linking. Version 1.00.16 - 16 Apr 2004 ============================= Ignore error setting selinux file context if fs doesn't support it. Version 1.00.15 - 7 Apr 2004 ============================ Fix status overflow check in kernel patches. Version 1.00.14 - 6 Apr 2004 ============================ Fix static selinux build. Version 1.00.13 - 6 Apr 2004 ============================ Add some basic selinux support. Version 1.00.12 - 6 Apr 2004 ============================ Fix dmsetup.static install. Version 1.00.11 - 5 Apr 2004 ============================ configure --enable-static_link does static build in addition to dynamic. Moved Makefile library targets definition into template. 
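A usage sketch for the dmsetup filtering options noted in the 1.01.02 and 1.00.20 entries above (--target on table/status/ls, --exec on ls); the target types and the executed command are illustrative.
  dmsetup ls --target snapshot
  dmsetup table --target linear
  dmsetup ls --exec "blockdev --getsize64"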
Version 1.00.10 - 2 Apr 2004 ============================ Fix DESTDIR handling. Static build installs to dmsetup.static. Basic support for internationalisation. Minor Makefile tidy-ups/fixes. Version 1.00.09 - 31 Mar 2004 ============================= Update copyright notices to Red Hat. Move full mknodes functionality from dmsetup into libdevmapper. Avoid sscanf %as for uClibc compatibility. Cope if DM_LIST_VERSIONS is not defined. Add DM_LIST_VERSIONS functionality to kernel patches. Generate new kernel patches for 2.4.26-rc1. Version 1.00.08 - 27 Feb 2004 ============================= Added 'dmsetup targets'. Added event_nr support to 'dmsetup wait'. Updated dmsetup man page. Allow logging function to be reset to use internal one. Bring log macros in line with LVM2 ones. Added 'make install_static_lib' which installs libdevmapper.a. Made configure/makefiles closer to LVM2 versions. Fixed DESTDIR for make install/install_static_lib. Updated README/INSTALL to reflect move to sources.redhat.com. Updated autoconf files to 2003-06-17. LVM2.2.02.176/test/0000755000000000000120000000000013176752421012303 5ustar rootwheelLVM2.2.02.176/test/Makefile.in0000644000000000000120000003352213176752421014355 0ustar rootwheel# Copyright (C) 2007-2015 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA #TEST_OPTS=--verbose --debug SHELL_PATH ?= $(SHELL) RM ?= rm -f subdir = $(shell pwd|sed 's,.*/,,') srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ abs_srcdir = @abs_srcdir@ abs_builddir = @abs_builddir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ datarootdir = @datarootdir@ LVM_TEST_RESULTS ?= results SUBDIRS = api unit SOURCES = lib/not.c lib/harness.c CXXSOURCES = lib/runner.cpp CXXFLAGS += $(EXTRA_EXEC_CFLAGS) include $(top_builddir)/make.tmpl T ?= . S ?= @ # never match anything by default VERBOSE ?= 0 ALL := $(shell find -L $(srcdir) \( -path \*/shell/\*.sh -or -path \*/api/\*.sh \) | $(SORT)) comma = , RUN := $(shell find -L $(srcdir) -regextype posix-egrep \( -path \*/shell/\*.sh -or -path \*/api/\*.sh \) -and -regex "$(srcdir)/.*($(subst $(comma),|,$(T))).*" -and -not -regex "$(srcdir)/.*($(subst $(comma),|,$(S))).*" | $(SORT)) RUN_BASE = $(subst $(srcdir)/,,$(RUN)) ifeq ("@BUILD_LVMETAD@", "yes") LVMETAD_RUN_BASE = $(RUN_BASE) LVMETAD_NDEV_FLAVOUR = ,ndev-lvmetad LVMETAD_UDEV_FLAVOUR = ,udev-lvmetad endif ifeq ("@BUILD_LVMPOLLD@", "yes") LVMPOLLD_RUN_BASE = $(RUN_BASE) LVMPOLLD_NDEV_FLAVOUR = ,ndev-lvmpolld,ndev-cluster-lvmpolld,ndev-lvmetad-lvmpolld LVMPOLLD_UDEV_FLAVOUR = ,udev-lvmpolld,udev-cluster-lvmpolld,udev-lvmetad-lvmpolld endif ifeq ("@BUILD_LVMLOCKD@", "yes") LVMLOCKD_RUN_BASE = $(RUN_BASE) LVMLOCKD_UDEV_FLAVOUR = ,udev-lvmlockd-test endif # Shell quote; SHELL_PATH_SQ := $(subst ','\'',$(SHELL_PATH)) ifeq ("@UDEV_SYNC@", "yes") dm_udev_synchronisation = 1 endif all: .tests-stamp help: @echo -e "\nAvailable targets:" @echo " all Default target, run check." @echo " check Run all tests." @echo " check_system Run all tests using udev." @echo " check_local Run tests without clvmd and lvmetad." 
@echo " check_cluster Run tests with cluster daemon." @echo " check_lvmetad Run tests with lvmetad daemon." @echo " check_lvmpolld Run tests with lvmpolld daemon." @echo " check_cluster_lvmpolld Run tests with clvmd and lvmpolld daemon." @echo " check_lvmetad_lvmpolld Run tests with lvmetad and lvmpolld daemon." @echo " check_all_lvmpolld Run all tests with lvmpolld daemon." @echo " check_lvmlockd_sanlock Run tests with lvmlockd and sanlock." @echo " check_lvmlockd_dlm Run tests with lvmlockd and dlm." @echo " check_lvmlockd_test Run tests with lvmlockd --test." @echo " clean Clean dir." @echo " help Display callable targets." @echo -e "\nSupported variables:" @echo " LVM_TEST_AUX_TRACE Set for verbose messages for aux scripts []." @echo " LVM_TEST_BACKING_DEVICE Set device used for testing (see also LVM_TEST_DIR)." @echo " LVM_TEST_CAN_CLOBBER_DMESG Allow to clobber dmesg buffer without /dev/kmsg. (1)" @echo " LVM_TEST_DEVDIR Set to '/dev' to run on real /dev." @echo " LVM_TEST_DIR Where to create test files [$(LVM_TEST_DIR)]." @echo " LVM_TEST_LOCKING Normal (1), Cluster (3)." @echo " LVM_TEST_LVMETAD Start lvmetad (1)." @echo " LVM_TEST_LVMETAD_DEBUG_OPTS Allows to override debug opts [-l all]." @echo " LVM_TEST_LVMPOLLD Start lvmpolld" @echo " LVM_TEST_NODEBUG Do not debug lvm commands." @echo " LVM_TEST_PARALLEL May skip agresive wipe of LVMTEST resources." @echo " LVM_TEST_RESULTS Where to create result files [results]." @echo " LVM_TEST_THIN_CHECK_CMD Command for thin_check [$(LVM_TEST_THIN_CHECK_CMD)]." @echo " LVM_TEST_THIN_DUMP_CMD Command for thin_dump [$(LVM_TEST_THIN_DUMP_CMD)]." @echo " LVM_TEST_THIN_REPAIR_CMD Command for thin_repair [$(LVM_TEST_THIN_REPAIR_CMD)]." @echo " LVM_TEST_THIN_RESTORE_CMD Command for thin_restore [$(LVM_TEST_THIN_RESTORE_CMD)]." @echo " LVM_TEST_CACHE_CHECK_CMD Command for cache_check [$(LVM_TEST_CACHE_CHECK_CMD)]." @echo " LVM_TEST_CACHE_DUMP_CMD Command for cache_dump [$(LVM_TEST_CACHE_DUMP_CMD)]." @echo " LVM_TEST_CACHE_REPAIR_CMD Command for cache_repair [$(LVM_TEST_CACHE_REPAIR_CMD)]." @echo " LVM_TEST_CACHE_RESTORE_CMD Command for cache_restore [$(LVM_TEST_CACHE_RESTORE_CMD)]." @echo " LVM_TEST_UNLIMITED Set to get unlimited test log (>32MB)" @echo " LVM_VALGRIND Enable valgrind testing, execs $$"VALGRIND. @echo " LVM_VALGRIND_CLVMD Enable valgrind testing of clvmd (1)." @echo " LVM_VALGRIND_DMEVENTD Enable valgrind testing of dmeventd (1)." @echo " LVM_VALGRIND_LVMETAD Enable valgrind testing of lvmetad (1)." @echo " LVM_STRACE Enable strace logging." @echo " LVM_DEBUG_LEVEL Sets debuging level for valgrind/strace (use > 0)." @echo " LVM_VERIFY_UDEV Default verify state for lvm.conf." @echo " LVM_LOG_FILE_MAX_LINES Maximum number of logged lines for lvm2 command [1000000]." @echo " S Skip given test(s) (regex)." @echo " T Run given test(s) (regex)." @echo " VERBOSE Verbose output (1), timing (2)." check: .tests-stamp VERBOSE=$(VERBOSE) ./lib/runner \ --testdir . --outdir $(LVM_TEST_RESULTS) \ --flavours ndev-vanilla,ndev-cluster$(LVMETAD_NDEV_FLAVOUR)$(LVMPOLLD_NDEV_FLAVOUR) --only $(T) --skip $(S) check_system: .tests-stamp VERBOSE=$(VERBOSE) ./lib/runner \ --testdir . --outdir $(LVM_TEST_RESULTS) \ --flavours udev-vanilla,udev-cluster$(LVMETAD_UDEV_FLAVOUR)$(LVMPOLLD_UDEV_FLAVOUR)$(LVMLOCKD_UDEV_FLAVOUR) --only $(T) --skip $(S) check_cluster: .tests-stamp VERBOSE=$(VERBOSE) ./lib/runner \ --testdir . 
--outdir $(LVM_TEST_RESULTS) \ --flavours ndev-cluster --only $(T) --skip $(S) check_local: .tests-stamp VERBOSE=$(VERBOSE) ./lib/runner \ --testdir . --outdir $(LVM_TEST_RESULTS) \ --flavours ndev-vanilla --only $(T) --skip $(S) ifeq ("@BUILD_LVMETAD@", "yes") check_lvmetad: .tests-stamp VERBOSE=$(VERBOSE) ./lib/runner \ --testdir . --outdir $(LVM_TEST_RESULTS) \ --flavours ndev-lvmetad --only $(T) --skip $(S) endif ifeq ("@BUILD_LVMPOLLD@", "yes") check_lvmpolld: .tests-stamp VERBOSE=$(VERBOSE) ./lib/runner \ --testdir . --outdir results \ --flavours ndev-lvmpolld --only $(T) --skip $(S) check_cluster_lvmpolld: .tests-stamp VERBOSE=$(VERBOSE) ./lib/runner \ --testdir . --outdir results \ --flavours ndev-cluster-lvmpolld --only $(T) --skip $(S) check_lvmetad_lvmpolld: .tests-stamp VERBOSE=$(VERBOSE) ./lib/runner \ --testdir . --outdir results \ --flavours ndev-lvmetad-lvmpolld --only $(T) --skip $(S) check_all_lvmpolld: .tests-stamp VERBOSE=$(VERBOSE) ./lib/runner \ --testdir . --outdir results \ --flavours ndev-lvmpolld,ndev-cluster-lvmpolld,ndev-lvmetad-lvmpolld --only $(T) --skip $(S) endif ifeq ("@BUILD_LVMLOCKD@", "yes") check_lvmlockd_sanlock: .tests-stamp VERBOSE=$(VERBOSE) ./lib/runner \ --testdir . --outdir results \ --flavours udev-lvmlockd-sanlock --only shell/aa-lvmlockd-sanlock-prepare.sh,$(T),shell/zz-lvmlockd-sanlock-remove.sh --skip $(S) endif ifeq ("@BUILD_LVMLOCKD@", "yes") check_lvmlockd_dlm: .tests-stamp VERBOSE=$(VERBOSE) ./lib/runner \ --testdir . --outdir results \ --flavours udev-lvmlockd-dlm --only shell/aa-lvmlockd-dlm-prepare.sh,$(T),shell/zz-lvmlockd-dlm-remove.sh --skip $(S) endif ifeq ("@BUILD_LVMLOCKD@", "yes") check_lvmlockd_test: .tests-stamp VERBOSE=$(VERBOSE) ./lib/runner \ --testdir . --outdir results \ --flavours udev-lvmlockd-test --only $(T) --skip $(S) endif DATADIR = $(datadir)/lvm2-testsuite EXECDIR = $(libexecdir)/lvm2-testsuite LIB_FLAVOURS = \ flavour-ndev-cluster-lvmpolld\ flavour-ndev-cluster\ flavour-ndev-lvmetad-lvmpolld\ flavour-ndev-lvmetad\ flavour-ndev-lvmpolld\ flavour-ndev-vanilla\ flavour-udev-cluster-lvmpolld\ flavour-udev-cluster\ flavour-udev-lvmetad-lvmpolld\ flavour-udev-lvmetad\ flavour-udev-lvmpolld\ flavour-udev-lvmlockd-sanlock\ flavour-udev-lvmlockd-dlm\ flavour-udev-lvmlockd-test\ flavour-udev-vanilla LIB_LVMLOCKD_CONF = \ test-corosync-conf \ test-dlm-conf \ test-sanlock-conf LIB_LOCAL = paths runner LIB_NOT = not LIB_LINK_NOT = invalid fail should LIB_SHARED = check aux inittest utils get lvm-wrapper install: .tests-stamp lib/paths-installed @echo $(srcdir) $(INSTALL_DIR) $(DATADIR)/{shell,api,lib,dbus} $(EXECDIR) $(INSTALL_DATA) shell/*.sh $(DATADIR)/shell $(INSTALL_DATA) api/*.sh $(DATADIR)/api $(INSTALL_DATA) lib/mke2fs.conf $(DATADIR)/lib $(INSTALL_PROGRAM) api/*.{t,py} $(DATADIR)/api $(INSTALL_PROGRAM) dbus/*.py $(DATADIR)/dbus/ $(INSTALL_DATA) lib/paths-installed $(DATADIR)/lib/paths cd lib && $(INSTALL_DATA) \ $(LIB_FLAVOURS) \ $(LIB_LVMLOCKD_CONF) \ dm-version-expected \ version-expected \ $(DATADIR)/lib @for i in cache-mq cache-smq thin-performance lvmdbusd ; do \ echo "$(INSTALL_DATA) $(abs_top_srcdir)/conf/$$i.profile $(DATADIR)/lib"; \ $(INSTALL_DATA) $(abs_top_srcdir)/conf/$$i.profile $(DATADIR)/lib; done cd lib && $(INSTALL_SCRIPT) $(LIB_SHARED) $(DATADIR)/lib @cd $(DATADIR)/lib && for i in $(CMDS); do \ echo "$(LN_S) -f lvm-wrapper $$i"; \ $(LN_S) -f lvm-wrapper $$i; done $(INSTALL_PROGRAM) lib/$(LIB_NOT) $(EXECDIR) @cd $(EXECDIR) && for i in $(LIB_LINK_NOT); do \ echo "$(LN_S) -f not $$i"; \ $(LN_S) -f 
not $$i; done $(INSTALL_PROGRAM) -D lib/runner $(bindir)/lvm2-testsuite lib/should: lib/not $(LN_S) -f not lib/should lib/invalid: lib/not $(LN_S) -f not lib/invalid lib/fail: lib/not $(LN_S) -f not lib/fail lib/runner: lib/runner.o .lib-dir-stamp $(CXX) $(LDFLAGS) $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS) -o $@ $< lib/runner.o: $(wildcard $(srcdir)/lib/*.h) CFLAGS_runner.o += $(EXTRA_EXEC_CFLAGS) lib/%: lib/%.o .lib-dir-stamp $(CC) $(CFLAGS) $(LDFLAGS) $(ELDFLAGS) -o $@ $< lib/%: $(srcdir)/lib/%.sh .lib-dir-stamp cp $< $@ $(CHMOD) +x $@ lib/flavour-%: $(srcdir)/lib/flavour-%.sh .lib-dir-stamp cp $< $@ lib/paths-common: $(srcdir)/Makefile.in .lib-dir-stamp Makefile echo 'DM_UDEV_SYNCHRONISATION=$(dm_udev_synchronisation)' >> $@-t echo 'THIN=@THIN@' >> $@-t echo 'RAID=@RAID@' >> $@-t echo 'CACHE=@CACHE@' >> $@-t echo 'LVMETAD_PIDFILE="@LVMETAD_PIDFILE@"' >> $@-t echo 'LVMPOLLD_PIDFILE="@LVMPOLLD_PIDFILE@"' >> $@-t echo 'DMEVENTD_PIDFILE="@DMEVENTD_PIDFILE@"' >> $@-t echo 'CLVMD_PIDFILE="@CLVMD_PIDFILE@"' >> $@-t echo 'LVM_TEST_THIN_CHECK_CMD=$${LVM_TEST_THIN_CHECK_CMD-@THIN_CHECK_CMD@}' >> $@-t echo 'LVM_TEST_THIN_DUMP_CMD=$${LVM_TEST_THIN_DUMP_CMD-@THIN_DUMP_CMD@}' >> $@-t echo 'LVM_TEST_THIN_REPAIR_CMD=$${LVM_TEST_THIN_REPAIR_CMD-@THIN_REPAIR_CMD@}' >> $@-t echo 'LVM_TEST_THIN_RESTORE_CMD=$${LVM_TEST_THIN_RESTORE_CMD-@THIN_RESTORE_CMD@}' >> $@-t echo 'LVM_TEST_CACHE_CHECK_CMD=$${LVM_TEST_CACHE_CHECK_CMD-@CACHE_CHECK_CMD@}' >> $@-t echo 'LVM_TEST_CACHE_DUMP_CMD=$${LVM_TEST_CACHE_DUMP_CMD-@CACHE_DUMP_CMD@}' >> $@-t echo 'LVM_TEST_CACHE_REPAIR_CMD=$${LVM_TEST_CACHE_REPAIR_CMD-@CACHE_REPAIR_CMD@}' >> $@-t echo 'LVM_TEST_CACHE_RESTORE_CMD=$${LVM_TEST_CACHE_RESTORE_CMD-@CACHE_RESTORE_CMD@}' >> $@-t echo 'export DM_UDEV_SYNCHRONISATION THIN RAID CACHE\' >> $@-t echo ' LVMETAD_PIDFILE LVMPOLLD_PIDFILE DMEVENTD_PIDFILE CLVMD_PIDFILE\' >> $@-t echo ' LVM_TEST_THIN_CHECK_CMD LVM_TEST_THIN_DUMP_CMD LVM_TEST_THIN_REPAIR_CMD LVM_TEST_THIN_RESTORE_CMD\' >> $@-t echo ' LVM_TEST_CACHE_CHECK_CMD LVM_TEST_CACHE_DUMP_CMD LVM_TEST_CACHE_REPAIR_CMD LVM_TEST_CACHE_RESTORE_CMD' >> $@-t mv $@-t $@ lib/paths-installed: lib/paths-common $(RM) $@-t cat lib/paths-common > $@-t echo 'installed_testsuite=1' >> $@-t echo 'export PATH=@libexecdir@/lvm2-testsuite:@datadir@/lvm2-testsuite/lib:@datadir@/lvm2-testsuite/api:$$PATH' >> $@-t mv $@-t $@ lib/paths: lib/paths-common $(RM) $@-t cat lib/paths-common > $@-t echo 'top_srcdir="$(top_srcdir)"' >> $@-t echo 'abs_top_builddir="$(abs_top_builddir)"' >> $@-t echo 'abs_top_srcdir="$(abs_top_srcdir)"' >> $@-t echo 'abs_srcdir="$(abs_srcdir)"' >> $@-t echo 'abs_builddir="$(abs_builddir)"' >> $@-t mv $@-t $@ lib/version-expected: $(top_srcdir)/VERSION .lib-dir-stamp cut -f 1 -d ' ' <$< >$@ lib/dm-version-expected: $(top_srcdir)/VERSION_DM .lib-dir-stamp cut -f 1 -d ' ' <$< >$@ CMDS = lvm $(shell cat $(top_builddir)/tools/.commands 2>/dev/null) LIB = $(addprefix lib/, $(LIB_SHARED) $(LIB_LOCAL) $(LIB_NOT) $(LIB_LINK_NOT) $(LIB_FLAVOURS)) .tests-stamp: $(ALL) $(LIB) $(SUBDIRS) lib/version-expected lib/dm-version-expected @if test "$(srcdir)" != . 
; then \ echo "Linking tests to builddir."; \ $(MKDIR_P) shell; \ for f in $(subst $(srcdir)/,,$(ALL)); do \ $(LN_S) -f $(abs_top_srcdir)/test/$$f $$f; \ done; \ fi @$(MKDIR_P) -m a=rwx $(LVM_TEST_RESULTS) touch $@ .lib-dir-stamp: $(MKDIR_P) lib for i in $(CMDS); do $(LN_S) -f lvm-wrapper lib/$$i; done for i in daemons/clvmd/clvmd daemons/dmeventd/dmeventd \ tools/dmsetup daemons/lvmetad/lvmetad \ daemons/lvmpolld/lvmpolld ; do \ $(LN_S) -f $(abs_top_builddir)/$$i lib/; done $(LN_S) -f $(abs_top_builddir)/tools/dmsetup lib/dmstats $(LN_S) -f $(abs_top_srcdir)/conf/lvmdbusd.profile lib/ $(LN_S) -f $(abs_top_srcdir)/conf/thin-performance.profile lib/ $(LN_S) -f $(abs_top_srcdir)/scripts/fsadm.sh lib/fsadm test "$(srcdir)" = . || for i in $(LIB_LVMLOCKD_CONF); do \ $(LN_S) -f $(abs_top_srcdir)/test/lib/$$i lib/; done touch $@ CLEAN_DIRS += $(LVM_TEST_RESULTS) ifneq (.,$(firstword $(srcdir))) CLEAN_TARGETS += $(RUN_BASE) $(addprefix lib/,$(LIB_LVMLOCKD_CONF)) endif CLEAN_TARGETS += .lib-dir-stamp .tests-stamp $(LIB) $(addprefix lib/,\ $(CMDS) clvmd dmeventd dmsetup dmstats lvmetad lvmpolld \ harness lvmdbusd.profile thin-performance.profile fsadm \ dm-version-expected version-expected \ paths-installed paths-installed-t paths-common paths-common-t) Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ .NOTPARALLEL: LVM2.2.02.176/test/dbus/0000755000000000000120000000000013176752421013240 5ustar rootwheelLVM2.2.02.176/test/dbus/lvmdbustest.py0000755000000000000120000014311713176752421016200 0ustar rootwheel#!/usr/bin/env python3 # Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
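A usage sketch for the test Makefile above, run from the test directory of a configured build tree; the T/S regexes and the LVM_TEST_DIR path are placeholders for the variables documented in the help target.
  make check_local T=lvcreate VERBOSE=1                      # run only tests matching 'lvcreate', with verbose output
  make check S=lvmetad                                       # run everything except tests matching 'lvmetad'
  make check_lvmpolld T=snapshot LVM_TEST_DIR=/tmp/lvmtest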
# noinspection PyUnresolvedReferences import dbus # noinspection PyUnresolvedReferences from dbus.mainloop.glib import DBusGMainLoop import unittest import pyudev from testlib import * import testlib from subprocess import Popen, PIPE from glob import glob import os g_tmo = 0 # Prefix on created objects to enable easier clean-up g_prefix = os.getenv('PREFIX', '') # Use the session bus instead of the system bus use_session = os.getenv('LVM_DBUSD_USE_SESSION', False) # Only use the devices listed in the ENV variable pv_device_list = os.getenv('LVM_DBUSD_PV_DEVICE_LIST', None) # Default is to test all modes # 0 == Only test fork & exec mode # 1 == Test both fork & exec & lvm shell mode (default) # Other == Test just lvm shell mode test_shell = os.getenv('LVM_DBUSD_TEST_MODE', 1) # LVM binary to use LVM_EXECUTABLE = os.getenv('LVM_BINARY', '/usr/sbin/lvm') # Empty options dictionary (EOD) EOD = dbus.Dictionary({}, signature=dbus.Signature('sv')) # Base interfaces on LV objects LV_BASE_INT = (LV_COMMON_INT, LV_INT) if use_session: bus = dbus.SessionBus(mainloop=DBusGMainLoop()) else: bus = dbus.SystemBus(mainloop=DBusGMainLoop()) # If we have multiple clients we will globally disable introspection # validation to limit the massive amount of introspection calls we make as # that method prevents things from executing concurrently if pv_device_list: testlib.validate_introspection = False def vg_n(): return g_prefix + rs(8, '_vg') def lv_n(suffix=None): if not suffix: s = '_lv' else: s = suffix return g_prefix + rs(8, s) def _is_testsuite_pv(pv_name): return g_prefix != "" and pv_name[-1].isdigit() and pv_name[:-1].endswith(g_prefix + "pv") def is_nested_pv(pv_name): return pv_name.count('/') == 3 and not _is_testsuite_pv(pv_name) def _root_pv_name(res, pv_name): if not is_nested_pv(pv_name): return pv_name vg_name = pv_name.split('/')[2] for v in res[VG_INT]: if v.Vg.Name == vg_name: pv = ClientProxy(bus, v.Vg.Pvs[0], interfaces=(PV_INT, )) return _root_pv_name(res, pv.Pv.Name) def _prune(res, pv_filter): if pv_filter: pv_lookup = {} pv_list = [] for p in res[PV_INT]: if _root_pv_name(res, p.Pv.Name) in pv_filter: pv_list.append(p) pv_lookup[p.object_path] = p res[PV_INT] = pv_list vg_list = [] for v in res[VG_INT]: # Only need to validate one of the PVs is in the selection set if v.Vg.Pvs[0] in pv_lookup: vg_list.append(v) res[VG_INT] = vg_list return res def get_objects(): rc = { MANAGER_INT: [], PV_INT: [], VG_INT: [], LV_INT: [], THINPOOL_INT: [], JOB_INT: [], SNAPSHOT_INT: [], LV_COMMON_INT: [], CACHE_POOL_INT: [], CACHE_LV_INT: []} object_manager_object = bus.get_object( BUS_NAME, "/com/redhat/lvmdbus1", introspect=False) manager_interface = dbus.Interface(object_manager_object, "org.freedesktop.DBus.ObjectManager") objects = manager_interface.GetManagedObjects() for object_path, v in objects.items(): proxy = ClientProxy(bus, object_path, v) for interface in v.keys(): rc[interface].append(proxy) # At this point we have a full population of everything, we now need to # prune the PV list and the VG list if we are using a sub selection return _prune(rc, pv_device_list), bus def set_execution(lvmshell, test_result): if lvmshell: m = 'lvm shell (non-fork)' else: m = "forking & exec'ing" lvm_manager = dbus.Interface(bus.get_object( BUS_NAME, "/com/redhat/lvmdbus1/Manager", introspect=False), "com.redhat.lvmdbus1.Manager") rc = lvm_manager.UseLvmShell(lvmshell) if rc: std_err_print('Successfully changed execution mode to "%s"' % m) else: std_err_print('ERROR: Failed to change execution mode to 
"%s"' % m) test_result.register_fail() return rc def call_lvm(command): """ Call lvm executable and return a tuple of exitcode, stdout, stderr :param command: Command to execute :type command: list :returns (exitcode, stdout, stderr) :rtype (int, str, str) """ # Prepend the full lvm executable so that we can run different versions # in different locations on the same box command.insert(0, LVM_EXECUTABLE) process = Popen(command, stdout=PIPE, stderr=PIPE, close_fds=True, env=os.environ) out = process.communicate() stdout_text = bytes(out[0]).decode("utf-8") stderr_text = bytes(out[1]).decode("utf-8") return process.returncode, stdout_text, stderr_text # noinspection PyUnresolvedReferences class TestDbusService(unittest.TestCase): def setUp(self): # Because of the sensitive nature of running LVM tests we will only # run if we have PVs and nothing else, so that we can be confident that # we are not mucking with someones data on their system self.objs, self.bus = get_objects() if len(self.objs[PV_INT]) == 0: std_err_print('No PVs present exiting!') sys.exit(1) if len(self.objs[MANAGER_INT]) != 1: std_err_print('Expecting a manager object!') sys.exit(1) if len(self.objs[VG_INT]) != 0: std_err_print('Expecting no VGs to exist!') sys.exit(1) self.pvs = [] for p in self.objs[PV_INT]: self.pvs.append(p.Pv.Name) def _recurse_vg_delete(self, vg_proxy, pv_proxy, nested_pv_hash): for pv_device_name, t in nested_pv_hash.items(): vg_name = str(vg_proxy.Vg.Name) if vg_name in pv_device_name: self._recurse_vg_delete(t[0], t[1], nested_pv_hash) break vg_proxy.update() self.handle_return(vg_proxy.Vg.Remove(dbus.Int32(g_tmo), EOD)) if is_nested_pv(pv_proxy.Pv.Name): rc = self._pv_remove(pv_proxy) self.assertTrue(rc == '/') def tearDown(self): # If we get here it means we passed setUp, so lets remove anything # and everything that remains, besides the PVs themselves self.objs, self.bus = get_objects() # The self.objs[PV_INT] list only contains those which we should be # mucking with, lets remove any embedded/nested PVs first, then proceed # to walk the base PVs and remove the VGs nested_pvs = {} non_nested = [] for p in self.objs[PV_INT]: if is_nested_pv(p.Pv.Name): if p.Pv.Vg != '/': v = ClientProxy(self.bus, p.Pv.Vg, interfaces=(VG_INT,)) nested_pvs[p.Pv.Name] = (v, p) else: # Nested PV with no VG, so just simply remove it! self._pv_remove(p) else: non_nested.append(p) for p in non_nested: # When we remove a VG for a PV it could ripple across multiple # PVs, so update each PV while removing each VG, to ensure # the properties are current and correct. 
p.update() if p.Pv.Vg != '/': v = ClientProxy(self.bus, p.Pv.Vg, interfaces=(VG_INT, )) self._recurse_vg_delete(v, p, nested_pvs) # Check to make sure the PVs we had to start exist, else re-create # them self.objs, self.bus = get_objects() if len(self.pvs) != len(self.objs[PV_INT]): for p in self.pvs: found = False for pc in self.objs[PV_INT]: if pc.Pv.Name == p: found = True break if not found: # print('Re-creating PV=', p) self._pv_create(p) def _check_consistency(self): # Only do consistency checks if we aren't running the unit tests # concurrently if pv_device_list is None: self.assertEqual(self._refresh(), 0) def handle_return(self, rc): if isinstance(rc, (tuple, list)): # We have a tuple returned if rc[0] != '/': return rc[0] else: return self._wait_for_job(rc[1]) else: if rc == '/': return rc else: return self._wait_for_job(rc) def _pv_create(self, device): pv_path = self.handle_return( self.objs[MANAGER_INT][0].Manager.PvCreate( dbus.String(device), dbus.Int32(g_tmo), EOD) ) self._validate_lookup(device, pv_path) self.assertTrue(pv_path is not None and len(pv_path) > 0) return pv_path def _manager(self): return self.objs[MANAGER_INT][0] def _refresh(self): return self._manager().Manager.Refresh() def test_refresh(self): self._check_consistency() def test_version(self): rc = self.objs[MANAGER_INT][0].Manager.Version self.assertTrue(rc is not None and len(rc) > 0) self._check_consistency() def _vg_create(self, pv_paths=None): if not pv_paths: pv_paths = [self.objs[PV_INT][0].object_path] vg_name = vg_n() vg_path = self.handle_return( self.objs[MANAGER_INT][0].Manager.VgCreate( dbus.String(vg_name), dbus.Array(pv_paths, signature=dbus.Signature('o')), dbus.Int32(g_tmo), EOD)) self._validate_lookup(vg_name, vg_path) self.assertTrue(vg_path is not None and len(vg_path) > 0) return ClientProxy(self.bus, vg_path, interfaces=(VG_INT, )) def test_vg_create(self): self._vg_create() self._check_consistency() def test_vg_delete(self): vg = self._vg_create().Vg self.handle_return( vg.Remove(dbus.Int32(g_tmo), EOD)) self._check_consistency() def _pv_remove(self, pv): rc = self.handle_return( pv.Pv.Remove(dbus.Int32(g_tmo), EOD)) return rc def test_pv_remove_add(self): target = self.objs[PV_INT][0] # Remove the PV rc = self._pv_remove(target) self.assertTrue(rc == '/') self._check_consistency() # Add it back rc = self._pv_create(target.Pv.Name)[0] self.assertTrue(rc == '/') self._check_consistency() def _create_raid5_thin_pool(self, vg=None): meta_name = "meta_r5" data_name = "data_r5" if not vg: pv_paths = [] for pp in self.objs[PV_INT]: pv_paths.append(pp.object_path) vg = self._vg_create(pv_paths).Vg lv_meta_path = self.handle_return( vg.LvCreateRaid( dbus.String(meta_name), dbus.String("raid5"), dbus.UInt64(mib(4)), dbus.UInt32(0), dbus.UInt32(0), dbus.Int32(g_tmo), EOD) ) self._validate_lookup("%s/%s" % (vg.Name, meta_name), lv_meta_path) lv_data_path = self.handle_return( vg.LvCreateRaid( dbus.String(data_name), dbus.String("raid5"), dbus.UInt64(mib(16)), dbus.UInt32(0), dbus.UInt32(0), dbus.Int32(g_tmo), EOD) ) self._validate_lookup("%s/%s" % (vg.Name, data_name), lv_data_path) thin_pool_path = self.handle_return( vg.CreateThinPool( dbus.ObjectPath(lv_meta_path), dbus.ObjectPath(lv_data_path), dbus.Int32(g_tmo), EOD) ) # Get thin pool client proxy thin_pool = ClientProxy(self.bus, thin_pool_path, interfaces=(LV_COMMON_INT, LV_INT, THINPOOL_INT)) return vg, thin_pool def test_meta_lv_data_lv_props(self): # Ensure that metadata lv and data lv for thin pools and cache pools # point to a valid 
LV (vg, thin_pool) = self._create_raid5_thin_pool() # Check properties on thin pool self.assertTrue(thin_pool.ThinPool.DataLv != '/') self.assertTrue(thin_pool.ThinPool.MetaDataLv != '/') (vg, cache_pool) = self._create_cache_pool(vg) self.assertTrue(cache_pool.CachePool.DataLv != '/') self.assertTrue(cache_pool.CachePool.MetaDataLv != '/') # Cache the thin pool cached_thin_pool_path = self.handle_return( cache_pool.CachePool.CacheLv( dbus.ObjectPath(thin_pool.object_path), dbus.Int32(g_tmo), EOD) ) # Get object proxy for cached thin pool cached_thin_pool_object = ClientProxy(self.bus, cached_thin_pool_path, interfaces=(LV_COMMON_INT, LV_INT, THINPOOL_INT)) # Check properties on cache pool self.assertTrue(cached_thin_pool_object.ThinPool.DataLv != '/') self.assertTrue(cached_thin_pool_object.ThinPool.MetaDataLv != '/') def _lookup(self, lvm_id): return self.objs[MANAGER_INT][0].\ Manager.LookUpByLvmId(dbus.String(lvm_id)) def _validate_lookup(self, lvm_name, object_path): t = self._lookup(lvm_name) self.assertTrue( object_path == t, "%s != %s for %s" % (object_path, t, lvm_name)) def test_lookup_by_lvm_id(self): # For the moment lets just lookup what we know about which is PVs # When we start testing VGs and LVs we will test lookups for those # during those unit tests for p in self.objs[PV_INT]: rc = self._lookup(p.Pv.Name) self.assertTrue(rc is not None and rc != '/') # Search for something which doesn't exist rc = self._lookup('/dev/null') self.assertTrue(rc == '/') def test_vg_extend(self): # Create a VG self.assertTrue(len(self.objs[PV_INT]) >= 2) if len(self.objs[PV_INT]) >= 2: pv_initial = self.objs[PV_INT][0] pv_next = self.objs[PV_INT][1] vg = self._vg_create([pv_initial.object_path]).Vg path = self.handle_return( vg.Extend( dbus.Array([pv_next.object_path], signature="o"), dbus.Int32(g_tmo), EOD) ) self.assertTrue(path == '/') self._check_consistency() # noinspection PyUnresolvedReferences def test_vg_reduce(self): self.assertTrue(len(self.objs[PV_INT]) >= 2) if len(self.objs[PV_INT]) >= 2: vg = self._vg_create( [self.objs[PV_INT][0].object_path, self.objs[PV_INT][1].object_path]).Vg path = self.handle_return( vg.Reduce( dbus.Boolean(False), dbus.Array([vg.Pvs[0]], signature='o'), dbus.Int32(g_tmo), EOD) ) self.assertTrue(path == '/') self._check_consistency() # noinspection PyUnresolvedReferences def test_vg_rename(self): vg = self._vg_create().Vg # Do a vg lookup path = self._lookup(vg.Name) vg_name_start = vg.Name prev_path = path self.assertTrue(path != '/', "%s" % (path)) # Create some LVs in the VG for i in range(0, 5): lv_t = self._create_lv(size=mib(4), vg=vg) full_name = "%s/%s" % (vg_name_start, lv_t.LvCommon.Name) lv_path = self._lookup(full_name) self.assertTrue(lv_path == lv_t.object_path) new_name = 'renamed_' + vg.Name path = self.handle_return( vg.Rename(dbus.String(new_name), dbus.Int32(g_tmo), EOD)) self.assertTrue(path == '/') self._check_consistency() # Do a vg lookup path = self._lookup(new_name) self.assertTrue(path != '/', "%s" % (path)) self.assertTrue(prev_path == path, "%s != %s" % (prev_path, path)) # Go through each LV and make sure it has the correct path back to the # VG vg.update() lv_paths = vg.Lvs self.assertTrue(len(lv_paths) == 5) for l in lv_paths: lv_proxy = ClientProxy(self.bus, l, interfaces=(LV_COMMON_INT,)).LvCommon self.assertTrue( lv_proxy.Vg == vg.object_path, "%s != %s" % (lv_proxy.Vg, vg.object_path)) full_name = "%s/%s" % (new_name, lv_proxy.Name) lv_path = self._lookup(full_name) self.assertTrue( lv_path == lv_proxy.object_path, "%s 
!= %s" % (lv_path, lv_proxy.object_path)) def _verify_hidden_lookups(self, lv_common_object, vgname): hidden_lv_paths = lv_common_object.HiddenLvs for h in hidden_lv_paths: h_lv = ClientProxy(self.bus, h, interfaces=(LV_COMMON_INT,)).LvCommon if len(h_lv.HiddenLvs) > 0: self._verify_hidden_lookups(h_lv, vgname) full_name = "%s/%s" % (vgname, h_lv.Name) # print("Hidden check %s" % (full_name)) lookup_path = self._lookup(full_name) self.assertTrue(lookup_path != '/') self.assertTrue(lookup_path == h_lv.object_path) # Lets's strip off the '[ ]' and make sure we can find full_name = "%s/%s" % (vgname, h_lv.Name[1:-1]) # print("Hidden check %s" % (full_name)) lookup_path = self._lookup(full_name) self.assertTrue(lookup_path != '/') self.assertTrue(lookup_path == h_lv.object_path) def test_vg_rename_with_thin_pool(self): (vg, thin_pool) = self._create_raid5_thin_pool() vg_name_start = vg.Name # noinspection PyTypeChecker self._verify_hidden_lookups(thin_pool.LvCommon, vg_name_start) for i in range(0, 5): lv_name = lv_n() thin_lv_path = self.handle_return( thin_pool.ThinPool.LvCreate( dbus.String(lv_name), dbus.UInt64(mib(16)), dbus.Int32(g_tmo), EOD)) self._validate_lookup( "%s/%s" % (vg_name_start, lv_name), thin_lv_path) self.assertTrue(thin_lv_path != '/') full_name = "%s/%s" % (vg_name_start, lv_name) lookup_lv_path = self._lookup(full_name) self.assertTrue( thin_lv_path == lookup_lv_path, "%s != %s" % (thin_lv_path, lookup_lv_path)) # Rename the VG new_name = 'renamed_' + vg.Name path = self.handle_return( vg.Rename(dbus.String(new_name), dbus.Int32(g_tmo), EOD)) self.assertTrue(path == '/') self._check_consistency() # Go through each LV and make sure it has the correct path back to the # VG vg.update() thin_pool.update() lv_paths = vg.Lvs for l in lv_paths: lv_proxy = ClientProxy(self.bus, l, interfaces=(LV_COMMON_INT,)).LvCommon self.assertTrue( lv_proxy.Vg == vg.object_path, "%s != %s" % (lv_proxy.Vg, vg.object_path)) full_name = "%s/%s" % (new_name, lv_proxy.Name) # print('Full Name %s' % (full_name)) lv_path = self._lookup(full_name) self.assertTrue( lv_path == lv_proxy.object_path, "%s != %s" % (lv_path, lv_proxy.object_path)) # noinspection PyTypeChecker self._verify_hidden_lookups(thin_pool.LvCommon, new_name) def _test_lv_create(self, method, params, vg, proxy_interfaces=None): lv = None path = self.handle_return(method(*params)) self.assertTrue(vg) if path: lv = ClientProxy(self.bus, path, interfaces=proxy_interfaces) # We are quick enough now that we can get VolumeType changes from # 'I' to 'i' between the time it takes to create a RAID and it returns # and when we refresh state here. Not sure how we can handle this as # we cannot just sit and poll all the time for changes... 
# self._check_consistency() return lv def test_lv_create(self): lv_name = lv_n() vg = self._vg_create().Vg lv = self._test_lv_create( vg.LvCreate, (dbus.String(lv_name), dbus.UInt64(mib(4)), dbus.Array([], signature='(ott)'), dbus.Int32(g_tmo), EOD), vg, LV_BASE_INT) self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path) def test_lv_create_job(self): lv_name = lv_n() vg = self._vg_create().Vg (object_path, job_path) = vg.LvCreate( dbus.String(lv_name), dbus.UInt64(mib(4)), dbus.Array([], signature='(ott)'), dbus.Int32(0), EOD) self.assertTrue(object_path == '/') self.assertTrue(job_path != '/') object_path = self._wait_for_job(job_path) self._validate_lookup("%s/%s" % (vg.Name, lv_name), object_path) self.assertTrue(object_path != '/') def test_lv_create_linear(self): lv_name = lv_n() vg = self._vg_create().Vg lv = self._test_lv_create( vg.LvCreateLinear, (dbus.String(lv_name), dbus.UInt64(mib(4)), dbus.Boolean(False), dbus.Int32(g_tmo), EOD), vg, LV_BASE_INT) self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path) def test_lv_create_striped(self): lv_name = lv_n() pv_paths = [] for pp in self.objs[PV_INT]: pv_paths.append(pp.object_path) vg = self._vg_create(pv_paths).Vg lv = self._test_lv_create( vg.LvCreateStriped, (dbus.String(lv_name), dbus.UInt64(mib(4)), dbus.UInt32(2), dbus.UInt32(8), dbus.Boolean(False), dbus.Int32(g_tmo), EOD), vg, LV_BASE_INT) self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path) def test_lv_create_mirror(self): lv_name = lv_n() pv_paths = [] for pp in self.objs[PV_INT]: pv_paths.append(pp.object_path) vg = self._vg_create(pv_paths).Vg lv = self._test_lv_create( vg.LvCreateMirror, (dbus.String(lv_name), dbus.UInt64(mib(4)), dbus.UInt32(2), dbus.Int32(g_tmo), EOD), vg, LV_BASE_INT) self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path) def test_lv_create_raid(self): lv_name = lv_n() pv_paths = [] for pp in self.objs[PV_INT]: pv_paths.append(pp.object_path) vg = self._vg_create(pv_paths).Vg lv = self._test_lv_create( vg.LvCreateRaid, (dbus.String(lv_name), dbus.String('raid5'), dbus.UInt64(mib(16)), dbus.UInt32(2), dbus.UInt32(8), dbus.Int32(g_tmo), EOD), vg, LV_BASE_INT) self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path) def _create_lv(self, thinpool=False, size=None, vg=None, suffix=None): lv_name = lv_n(suffix=suffix) interfaces = list(LV_BASE_INT) if thinpool: interfaces.append(THINPOOL_INT) if not vg: pv_paths = [] for pp in self.objs[PV_INT]: pv_paths.append(pp.object_path) vg = self._vg_create(pv_paths).Vg if size is None: size = mib(4) lv = self._test_lv_create( vg.LvCreateLinear, (dbus.String(lv_name), dbus.UInt64(size), dbus.Boolean(thinpool), dbus.Int32(g_tmo), EOD), vg, interfaces) self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path) return lv def test_lv_create_rounding(self): self._create_lv(size=(mib(2) + 13)) def test_lv_create_thin_pool(self): self._create_lv(True) def test_lv_rename(self): # Rename a regular LV lv = self._create_lv() path = self._lookup(lv.LvCommon.Name) prev_path = path new_name = 'renamed_' + lv.LvCommon.Name self.handle_return(lv.Lv.Rename(dbus.String(new_name), dbus.Int32(g_tmo), EOD)) path = self._lookup(new_name) self._check_consistency() self.assertTrue(prev_path == path, "%s != %s" % (prev_path, path)) def test_lv_thinpool_rename(self): # Rename a thin pool tp = self._create_lv(True) self.assertTrue( THINPOOL_LV_PATH in tp.object_path, "%s" % (tp.object_path)) new_name = 'renamed_' + tp.LvCommon.Name self.handle_return(tp.Lv.Rename( 
dbus.String(new_name), dbus.Int32(g_tmo), EOD)) tp.update() self._check_consistency() self.assertEqual(new_name, tp.LvCommon.Name) # noinspection PyUnresolvedReferences def test_lv_on_thin_pool_rename(self): # Rename a LV on a thin Pool # This returns a LV with the LV interface, need to get a proxy for # thinpool interface too vg = self._vg_create().Vg tp = self._create_lv(thinpool=True, vg=vg) lv_name = lv_n('_thin_lv') thin_path = self.handle_return( tp.ThinPool.LvCreate( dbus.String(lv_name), dbus.UInt64(mib(8)), dbus.Int32(g_tmo), EOD) ) self._validate_lookup("%s/%s" % (vg.Name, lv_name), thin_path) lv = ClientProxy(self.bus, thin_path, interfaces=(LV_COMMON_INT, LV_INT)) re_named = 'rename_test' + lv.LvCommon.Name rc = self.handle_return( lv.Lv.Rename( dbus.String(re_named), dbus.Int32(g_tmo), EOD) ) self._validate_lookup("%s/%s" % (vg.Name, re_named), thin_path) self.assertTrue(rc == '/') self._check_consistency() def test_lv_remove(self): lv = self._create_lv().Lv rc = self.handle_return( lv.Remove( dbus.Int32(g_tmo), EOD)) self.assertTrue(rc == '/') self._check_consistency() def test_lv_snapshot(self): lv_p = self._create_lv() ss_name = 'ss_' + lv_p.LvCommon.Name rc = self.handle_return(lv_p.Lv.Snapshot( dbus.String(ss_name), dbus.UInt64(0), dbus.Int32(g_tmo), EOD)) self.assertTrue(rc != '/') # noinspection PyUnresolvedReferences def _wait_for_job(self, j_path): rc = None j = ClientProxy(self.bus, j_path, interfaces=(JOB_INT, )).Job while True: j.update() if j.Complete: (ec, error_msg) = j.GetError self.assertTrue(ec == 0, "%d :%s" % (ec, error_msg)) if ec == 0: self.assertTrue(j.Percent == 100, "P= %f" % j.Percent) rc = j.Result j.Remove() break if j.Wait(1): j.update() self.assertTrue(j.Complete) return rc def test_lv_create_pv_specific(self): vg = self._vg_create().Vg lv_name = lv_n() pv = vg.Pvs pvp = ClientProxy(self.bus, pv[0], interfaces=(PV_INT,)) lv = self._test_lv_create( vg.LvCreate, ( dbus.String(lv_name), dbus.UInt64(mib(4)), dbus.Array([[pvp.object_path, 0, (pvp.Pv.PeCount - 1)]], signature='(ott)'), dbus.Int32(g_tmo), EOD), vg, LV_BASE_INT) self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path) def test_lv_resize(self): pv_paths = [] for pp in self.objs[PV_INT]: pv_paths.append(pp.object_path) vg = self._vg_create(pv_paths).Vg lv = self._create_lv(vg=vg, size=mib(16)) for size in \ [ lv.LvCommon.SizeBytes + 4194304, lv.LvCommon.SizeBytes - 4194304, lv.LvCommon.SizeBytes + 2048, lv.LvCommon.SizeBytes - 2048]: pv_in_use = [i[0] for i in lv.LvCommon.Devices] # Select a PV in the VG that isn't in use pv_empty = [p for p in vg.Pvs if p not in pv_in_use] prev = lv.LvCommon.SizeBytes if len(pv_empty): p = ClientProxy(self.bus, pv_empty[0], interfaces=(PV_INT,)) rc = self.handle_return( lv.Lv.Resize( dbus.UInt64(size), dbus.Array( [[p.object_path, 0, p.Pv.PeCount - 1]], '(oii)'), dbus.Int32(g_tmo), EOD)) else: rc = self.handle_return( lv.Lv.Resize( dbus.UInt64(size), dbus.Array([], '(oii)'), dbus.Int32(g_tmo), EOD)) self.assertEqual(rc, '/') self._check_consistency() lv.update() if prev < size: self.assertTrue(lv.LvCommon.SizeBytes > prev) else: # We are testing re-sizing to same size too... 
self.assertTrue(lv.LvCommon.SizeBytes <= prev) def test_lv_resize_same(self): pv_paths = [] for pp in self.objs[PV_INT]: pv_paths.append(pp.object_path) vg = self._vg_create(pv_paths).Vg lv = self._create_lv(vg=vg) with self.assertRaises(dbus.exceptions.DBusException): lv.Lv.Resize( dbus.UInt64(lv.LvCommon.SizeBytes), dbus.Array([], '(oii)'), dbus.Int32(-1), EOD) def test_lv_move(self): lv = self._create_lv() pv_path_move = str(lv.LvCommon.Devices[0][0]) # Test moving a specific LV rc = self.handle_return( lv.Lv.Move( dbus.ObjectPath(pv_path_move), dbus.Struct((0, 0), signature='(tt)'), dbus.Array([], '(ott)'), dbus.Int32(g_tmo), EOD)) self.assertTrue(rc == '/') self._check_consistency() lv.update() new_pv = str(lv.LvCommon.Devices[0][0]) self.assertTrue( pv_path_move != new_pv, "%s == %s" % (pv_path_move, new_pv)) def test_lv_activate_deactivate(self): lv_p = self._create_lv() lv_p.update() self.handle_return(lv_p.Lv.Deactivate( dbus.UInt64(0), dbus.Int32(g_tmo), EOD)) lv_p.update() self.assertFalse(lv_p.LvCommon.Active) self._check_consistency() self.handle_return(lv_p.Lv.Activate( dbus.UInt64(0), dbus.Int32(g_tmo), EOD)) lv_p.update() self.assertTrue(lv_p.LvCommon.Active) self._check_consistency() # Try control flags for i in range(0, 5): self.handle_return(lv_p.Lv.Activate( dbus.UInt64(1 << i), dbus.Int32(g_tmo), EOD)) self.assertTrue(lv_p.LvCommon.Active) self._check_consistency() def test_move(self): lv = self._create_lv() # Test moving without being LV specific vg = ClientProxy(self.bus, lv.LvCommon.Vg, interfaces=(VG_INT, )).Vg pv_to_move = str(lv.LvCommon.Devices[0][0]) rc = self.handle_return( vg.Move( dbus.ObjectPath(pv_to_move), dbus.Struct((0, 0), signature='tt'), dbus.Array([], '(ott)'), dbus.Int32(0), EOD)) self.assertEqual(rc, '/') self._check_consistency() vg.update() lv.update() location = lv.LvCommon.Devices[0][0] dst = None for p in vg.Pvs: if p != location: dst = p # Fetch the destination pv = ClientProxy(self.bus, dst, interfaces=(PV_INT, )).Pv # Test range, move it to the middle of the new destination job = self.handle_return( vg.Move( dbus.ObjectPath(location), dbus.Struct((0, 0), signature='tt'), dbus.Array([(dst, pv.PeCount / 2, 0), ], '(ott)'), dbus.Int32(g_tmo), EOD)) self.assertEqual(job, '/') self._check_consistency() def test_job_handling(self): pv_paths = [] for pp in self.objs[PV_INT]: pv_paths.append(pp.object_path) vg_name = vg_n() # Test getting a job right away vg_path, vg_job = self.objs[MANAGER_INT][0].Manager.VgCreate( dbus.String(vg_name), dbus.Array(pv_paths, 'o'), dbus.Int32(0), EOD) self.assertTrue(vg_path == '/') self.assertTrue(vg_job and len(vg_job) > 0) vg_path = self._wait_for_job(vg_job) self._validate_lookup(vg_name, vg_path) def _test_expired_timer(self, num_lvs): rc = False pv_paths = [] for pp in self.objs[PV_INT]: pv_paths.append(pp.object_path) # In small configurations lvm is pretty snappy, so lets create a VG # add a number of LVs and then remove the VG and all the contained # LVs which appears to consistently run a little slow. 
vg_proxy = self._vg_create(pv_paths) for i in range(0, num_lvs): lv_name = lv_n() vg_proxy.update() if vg_proxy.Vg.FreeCount > 0: lv_path = self.handle_return( vg_proxy.Vg.LvCreateLinear( dbus.String(lv_name), dbus.UInt64(mib(4)), dbus.Boolean(False), dbus.Int32(g_tmo), EOD)) self.assertTrue(lv_path != '/') self._validate_lookup( "%s/%s" % (vg_proxy.Vg.Name, lv_name), lv_path) else: # We ran out of space, test will probably fail break # Make sure that we are honoring the timeout start = time.time() remove_job = vg_proxy.Vg.Remove(dbus.Int32(1), EOD) end = time.time() tt_remove = float(end) - float(start) self.assertTrue(tt_remove < 2.0, "remove time %s" % (str(tt_remove))) # Depending on how long it took we could finish either way if remove_job != '/': # We got a job result = self._wait_for_job(remove_job) self.assertTrue(result == '/') rc = True else: # It completed before timer popped pass return rc def test_job_handling_timer(self): yes = False for pp in self.objs[PV_INT]: if '/dev/sd' not in pp.Pv.Name: std_err_print("Skipping test_job_handling_timer on loopback") return # This may not pass for i in [48, 64, 128]: yes = self._test_expired_timer(i) if yes: break std_err_print('Attempt (%d) failed, trying again...' % (i)) self.assertTrue(yes) def test_pv_tags(self): pvs = [] pv_paths = [] for pp in self.objs[PV_INT]: pv_paths.append(pp.object_path) vg = self._vg_create(pv_paths).Vg # Get the PVs for p in vg.Pvs: pvs.append(ClientProxy(self.bus, p, interfaces=(PV_INT, )).Pv) for tags_value in [['hello'], ['foo', 'bar']]: rc = self.handle_return( vg.PvTagsAdd( dbus.Array(vg.Pvs, 'o'), dbus.Array(tags_value, 's'), dbus.Int32(g_tmo), EOD)) self.assertTrue(rc == '/') for p in pvs: p.update() self.assertTrue(sorted(tags_value) == p.Tags) rc = self.handle_return( vg.PvTagsDel( dbus.Array(vg.Pvs, 'o'), dbus.Array(tags_value, 's'), dbus.Int32(g_tmo), EOD)) self.assertEqual(rc, '/') for p in pvs: p.update() self.assertTrue([] == p.Tags) def test_vg_tags(self): vg = self._vg_create().Vg t = ['Testing', 'tags'] self.handle_return( vg.TagsAdd( dbus.Array(t, 's'), dbus.Int32(g_tmo), EOD)) vg.update() self.assertTrue(t == vg.Tags) self.handle_return( vg.TagsDel( dbus.Array(t, 's'), dbus.Int32(g_tmo), EOD)) vg.update() self.assertTrue([] == vg.Tags) def test_lv_tags(self): vg = self._vg_create().Vg lv_name = lv_n() lv = self._test_lv_create( vg.LvCreateLinear, (dbus.String(lv_name), dbus.UInt64(mib(4)), dbus.Boolean(False), dbus.Int32(g_tmo), EOD), vg, LV_BASE_INT) self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path) t = ['Testing', 'tags'] self.handle_return( lv.Lv.TagsAdd( dbus.Array(t, 's'), dbus.Int32(g_tmo), EOD)) lv.update() self.assertTrue(t == lv.LvCommon.Tags) self.handle_return( lv.Lv.TagsDel( dbus.Array(t, 's'), dbus.Int32(g_tmo), EOD)) lv.update() self.assertTrue([] == lv.LvCommon.Tags) def test_vg_allocation_policy_set(self): vg = self._vg_create().Vg for p in ['anywhere', 'contiguous', 'cling', 'normal']: rc = self.handle_return( vg.AllocationPolicySet( dbus.String(p), dbus.Int32(g_tmo), EOD)) self.assertEqual(rc, '/') vg.update() prop = getattr(vg, 'Alloc' + p.title()) self.assertTrue(prop) def test_vg_max_pv(self): vg = self._vg_create().Vg # BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1280496 # TODO: Add a test back for larger values here when bug is resolved for p in [0, 1, 10, 100, 100, 1024, 2 ** 32 - 1]: rc = self.handle_return( vg.MaxPvSet( dbus.UInt64(p), dbus.Int32(g_tmo), EOD)) self.assertEqual(rc, '/') vg.update() self.assertTrue( vg.MaxPv == p, "Expected %s 
!= Actual %s" % (str(p), str(vg.MaxPv))) def test_vg_max_lv(self): vg = self._vg_create().Vg # BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1280496 # TODO: Add a test back for larger values here when bug is resolved for p in [0, 1, 10, 100, 100, 1024, 2 ** 32 - 1]: rc = self.handle_return( vg.MaxLvSet( dbus.UInt64(p), dbus.Int32(g_tmo), EOD)) self.assertEqual(rc, '/') vg.update() self.assertTrue( vg.MaxLv == p, "Expected %s != Actual %s" % (str(p), str(vg.MaxLv))) def test_vg_uuid_gen(self): vg = self._vg_create().Vg prev_uuid = vg.Uuid rc = self.handle_return( vg.UuidGenerate( dbus.Int32(g_tmo), EOD)) self.assertEqual(rc, '/') vg.update() self.assertTrue( vg.Uuid != prev_uuid, "Expected %s != Actual %s" % (vg.Uuid, prev_uuid)) def test_vg_activate_deactivate(self): vg = self._vg_create().Vg lv_name = lv_n() lv = self._test_lv_create( vg.LvCreateLinear, ( dbus.String(lv_name), dbus.UInt64(mib(4)), dbus.Boolean(False), dbus.Int32(g_tmo), EOD), vg, LV_BASE_INT) self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path) vg.update() rc = self.handle_return( vg.Deactivate( dbus.UInt64(0), dbus.Int32(g_tmo), EOD)) self.assertEqual(rc, '/') self._check_consistency() rc = self.handle_return( vg.Activate( dbus.UInt64(0), dbus.Int32(g_tmo), EOD)) self.assertEqual(rc, '/') self._check_consistency() # Try control flags for i in range(0, 5): self.handle_return( vg.Activate( dbus.UInt64(1 << i), dbus.Int32(g_tmo), EOD)) def test_pv_resize(self): self.assertTrue(len(self.objs[PV_INT]) > 0) if len(self.objs[PV_INT]) > 0: pv = ClientProxy(self.bus, self.objs[PV_INT][0].object_path, interfaces=(PV_INT, )).Pv original_size = pv.SizeBytes new_size = original_size / 2 self.handle_return( pv.ReSize( dbus.UInt64(new_size), dbus.Int32(g_tmo), EOD)) self._check_consistency() pv.update() self.assertTrue(pv.SizeBytes != original_size) self.handle_return( pv.ReSize( dbus.UInt64(0), dbus.Int32(g_tmo), EOD)) self._check_consistency() pv.update() self.assertTrue(pv.SizeBytes == original_size) def test_pv_allocation(self): pv_paths = [] for pp in self.objs[PV_INT]: pv_paths.append(pp.object_path) vg = self._vg_create(pv_paths).Vg pv = ClientProxy(self.bus, vg.Pvs[0], interfaces=(PV_INT, )).Pv self.handle_return( pv.AllocationEnabled( dbus.Boolean(False), dbus.Int32(g_tmo), EOD)) pv.update() self.assertFalse(pv.Allocatable) self.handle_return( pv.AllocationEnabled( dbus.Boolean(True), dbus.Int32(g_tmo), EOD)) self.handle_return( pv.AllocationEnabled( dbus.Boolean(True), dbus.Int32(g_tmo), EOD)) pv.update() self.assertTrue(pv.Allocatable) self._check_consistency() @staticmethod def _get_devices(): context = pyudev.Context() return context.list_devices(subsystem='block', MAJOR='8') def test_pv_scan(self): devices = TestDbusService._get_devices() mgr = self._manager().Manager self.assertEqual( self.handle_return( mgr.PvScan( dbus.Boolean(False), dbus.Boolean(True), dbus.Array([], 's'), dbus.Array([], '(ii)'), dbus.Int32(g_tmo), EOD)), '/') self._check_consistency() self.assertEqual( self.handle_return( mgr.PvScan( dbus.Boolean(False), dbus.Boolean(False), dbus.Array([], 's'), dbus.Array([], '(ii)'), dbus.Int32(g_tmo), EOD)), '/') self._check_consistency() block_path = [] for d in devices: block_path.append(d.properties['DEVNAME']) self.assertEqual( self.handle_return( mgr.PvScan( dbus.Boolean(False), dbus.Boolean(True), dbus.Array(block_path, 's'), dbus.Array([], '(ii)'), dbus.Int32(g_tmo), EOD)), '/') self._check_consistency() mm = [] for d in devices: mm.append((int(d.properties['MAJOR']), 
int(d.properties['MINOR']))) self.assertEqual( self.handle_return( mgr.PvScan( dbus.Boolean(False), dbus.Boolean(True), dbus.Array(block_path, 's'), dbus.Array(mm, '(ii)'), dbus.Int32(g_tmo), EOD)), '/') self._check_consistency() self.assertEqual( self.handle_return( mgr.PvScan( dbus.Boolean(False), dbus.Boolean(True), dbus.Array([], 's'), dbus.Array(mm, '(ii)'), dbus.Int32(g_tmo), EOD)), '/') self._check_consistency() @staticmethod def _write_some_data(device_path, size): blocks = int(size / 512) block = bytearray(512) for i in range(0, 512): block[i] = i % 255 with open(device_path, mode='wb') as lv: for i in range(0, blocks): lv.write(block) def test_snapshot_merge(self): # Create a non-thin LV and merge it ss_size = mib(8) lv_p = self._create_lv(size=mib(16)) ss_name = lv_p.LvCommon.Name + '_snap' snapshot_path = self.handle_return( lv_p.Lv.Snapshot( dbus.String(ss_name), dbus.UInt64(ss_size), dbus.Int32(g_tmo), EOD)) ss = ClientProxy(self.bus, snapshot_path, interfaces=(LV_COMMON_INT, LV_INT, SNAPSHOT_INT, )) # Write some data to snapshot so merge takes some time TestDbusService._write_some_data(ss.LvCommon.Path, ss_size / 2) job_path = self.handle_return( ss.Snapshot.Merge( dbus.Int32(g_tmo), EOD)) self.assertEqual(job_path, '/') def test_snapshot_merge_thin(self): # Create a thin LV, snapshot it and merge it vg = self._vg_create().Vg tp = self._create_lv(thinpool=True, vg=vg) lv_name = lv_n('_thin_lv') thin_path = self.handle_return( tp.ThinPool.LvCreate( dbus.String(lv_name), dbus.UInt64(mib(10)), dbus.Int32(g_tmo), EOD)) self._validate_lookup("%s/%s" % (vg.Name, lv_name), thin_path) lv_p = ClientProxy(self.bus, thin_path, interfaces=(LV_INT, LV_COMMON_INT)) ss_name = lv_p.LvCommon.Name + '_snap' snapshot_path = self.handle_return( lv_p.Lv.Snapshot( dbus.String(ss_name), dbus.UInt64(0), dbus.Int32(g_tmo), EOD)) ss = ClientProxy(self.bus, snapshot_path, interfaces=(LV_INT, LV_COMMON_INT, SNAPSHOT_INT)) job_path = self.handle_return( ss.Snapshot.Merge( dbus.Int32(g_tmo), EOD) ) self.assertTrue(job_path == '/') def _create_cache_pool(self, vg=None): if not vg: vg = self._vg_create().Vg md = self._create_lv(size=(mib(8)), vg=vg) data = self._create_lv(size=(mib(8)), vg=vg) cache_pool_path = self.handle_return( vg.CreateCachePool( dbus.ObjectPath(md.object_path), dbus.ObjectPath(data.object_path), dbus.Int32(g_tmo), EOD)) cp = ClientProxy(self.bus, cache_pool_path, interfaces=(CACHE_POOL_INT, )) return vg, cp def test_cache_pool_create(self): vg, cache_pool = self._create_cache_pool() self.assertTrue( '/com/redhat/lvmdbus1/CachePool' in cache_pool.object_path) def test_cache_lv_create(self): for destroy_cache in [True, False]: vg, cache_pool = self._create_cache_pool() lv_to_cache = self._create_lv(size=mib(8), vg=vg) c_lv_path = self.handle_return( cache_pool.CachePool.CacheLv( dbus.ObjectPath(lv_to_cache.object_path), dbus.Int32(g_tmo), EOD)) cached_lv = ClientProxy(self.bus, c_lv_path, interfaces=(LV_COMMON_INT, LV_INT, CACHE_LV_INT)) uncached_lv_path = self.handle_return( cached_lv.CachedLv.DetachCachePool( dbus.Boolean(destroy_cache), dbus.Int32(g_tmo), EOD)) self.assertTrue( '/com/redhat/lvmdbus1/Lv' in uncached_lv_path) rc = self.handle_return( vg.Remove(dbus.Int32(g_tmo), EOD)) self.assertTrue(rc == '/') def test_vg_change(self): vg_proxy = self._vg_create() result = self.handle_return(vg_proxy.Vg.Change( dbus.Int32(g_tmo), dbus.Dictionary({'-a': 'ay'}, 'sv'))) self.assertTrue(result == '/') result = self.handle_return( vg_proxy.Vg.Change( dbus.Int32(g_tmo), 
dbus.Dictionary({'-a': 'n'}, 'sv'))) self.assertTrue(result == '/') @staticmethod def _invalid_vg_lv_name_characters(): bad_vg_lv_set = set(string.printable) - \ set(string.ascii_letters + string.digits + '.-_+') return ''.join(bad_vg_lv_set) def test_invalid_names(self): mgr = self.objs[MANAGER_INT][0].Manager # Pv device path with self.assertRaises(dbus.exceptions.DBusException): self.handle_return( mgr.PvCreate( dbus.String("/dev/space in name"), dbus.Int32(g_tmo), EOD)) # VG Name testing... # Go through all bad characters pv_paths = [self.objs[PV_INT][0].object_path] bad_chars = TestDbusService._invalid_vg_lv_name_characters() for c in bad_chars: with self.assertRaises(dbus.exceptions.DBusException): self.handle_return( mgr.VgCreate( dbus.String("name%s" % (c)), dbus.Array(pv_paths, 'o'), dbus.Int32(g_tmo), EOD)) # Bad names for bad in [".", ".."]: with self.assertRaises(dbus.exceptions.DBusException): self.handle_return( mgr.VgCreate( dbus.String(bad), dbus.Array(pv_paths, 'o'), dbus.Int32(g_tmo), EOD)) # Exceed name length for i in [128, 1024, 4096]: with self.assertRaises(dbus.exceptions.DBusException): self.handle_return( mgr.VgCreate( dbus.String('T' * i), dbus.Array(pv_paths, 'o'), dbus.Int32(g_tmo), EOD)) # Create a VG and try to create LVs with different bad names vg_name = vg_n() vg_path = self.handle_return( mgr.VgCreate( dbus.String(vg_name), dbus.Array(pv_paths, 'o'), dbus.Int32(g_tmo), EOD)) self._validate_lookup(vg_name, vg_path) vg_proxy = ClientProxy(self.bus, vg_path, interfaces=(VG_INT, )) for c in bad_chars: with self.assertRaises(dbus.exceptions.DBusException): self.handle_return( vg_proxy.Vg.LvCreateLinear( dbus.String(lv_n() + c), dbus.UInt64(mib(4)), dbus.Boolean(False), dbus.Int32(g_tmo), EOD)) for reserved in ( "_cdata", "_cmeta", "_corig", "_mimage", "_mlog", "_pmspare", "_rimage", "_rmeta", "_tdata", "_tmeta", "_vorigin"): with self.assertRaises(dbus.exceptions.DBusException): self.handle_return( vg_proxy.Vg.LvCreateLinear( dbus.String(lv_n() + reserved), dbus.UInt64(mib(4)), dbus.Boolean(False), dbus.Int32(g_tmo), EOD)) for reserved in ("snapshot", "pvmove"): with self.assertRaises(dbus.exceptions.DBusException): self.handle_return( vg_proxy.Vg.LvCreateLinear( dbus.String(reserved + lv_n()), dbus.UInt64(mib(4)), dbus.Boolean(False), dbus.Int32(g_tmo), EOD)) _ALLOWABLE_TAG_CH = string.ascii_letters + string.digits + "._-+/=!:&#" def _invalid_tag_characters(self): bad_tag_ch_set = set(string.printable) - set(self._ALLOWABLE_TAG_CH) return ''.join(bad_tag_ch_set) def test_invalid_tags(self): mgr = self.objs[MANAGER_INT][0].Manager pv_paths = [self.objs[PV_INT][0].object_path] vg_name = vg_n() vg_path = self.handle_return( mgr.VgCreate( dbus.String(vg_name), dbus.Array(pv_paths, 'o'), dbus.Int32(g_tmo), EOD)) self._validate_lookup(vg_name, vg_path) vg_proxy = ClientProxy(self.bus, vg_path, interfaces=(VG_INT, )) for c in self._invalid_tag_characters(): with self.assertRaises(dbus.exceptions.DBusException): self.handle_return( vg_proxy.Vg.TagsAdd( dbus.Array([c], 's'), dbus.Int32(g_tmo), EOD)) for c in self._invalid_tag_characters(): with self.assertRaises(dbus.exceptions.DBusException): self.handle_return( vg_proxy.Vg.TagsAdd( dbus.Array(["a%sb" % (c)], 's'), dbus.Int32(g_tmo), EOD)) def test_tag_names(self): mgr = self.objs[MANAGER_INT][0].Manager pv_paths = [self.objs[PV_INT][0].object_path] vg_name = vg_n() vg_path = self.handle_return( mgr.VgCreate( dbus.String(vg_name), dbus.Array(pv_paths, 'o'), dbus.Int32(g_tmo), EOD)) self._validate_lookup(vg_name, 
vg_path) vg_proxy = ClientProxy(self.bus, vg_path, interfaces=(VG_INT, )) for i in range(1, 64): tag = rs(i, "", self._ALLOWABLE_TAG_CH) tmp = self.handle_return( vg_proxy.Vg.TagsAdd( dbus.Array([tag], 's'), dbus.Int32(g_tmo), EOD)) self.assertTrue(tmp == '/') vg_proxy.update() self.assertTrue( tag in vg_proxy.Vg.Tags, "%s not in %s" % (tag, str(vg_proxy.Vg.Tags))) self.assertEqual( i, len(vg_proxy.Vg.Tags), "%d != %d" % (i, len(vg_proxy.Vg.Tags))) def test_tag_regression(self): mgr = self.objs[MANAGER_INT][0].Manager pv_paths = [self.objs[PV_INT][0].object_path] vg_name = vg_n() vg_path = self.handle_return( mgr.VgCreate( dbus.String(vg_name), dbus.Array(pv_paths, 'o'), dbus.Int32(g_tmo), EOD)) self._validate_lookup(vg_name, vg_path) vg_proxy = ClientProxy(self.bus, vg_path, interfaces=(VG_INT, )) tag = '--h/K.6g0A4FOEatf3+k_nI/Yp&L_u2oy-=j649x:+dUcYWPEo6.IWT0c' tmp = self.handle_return( vg_proxy.Vg.TagsAdd( dbus.Array([tag], 's'), dbus.Int32(g_tmo), EOD)) self.assertTrue(tmp == '/') vg_proxy.update() self.assertTrue( tag in vg_proxy.Vg.Tags, "%s not in %s" % (tag, str(vg_proxy.Vg.Tags))) def _verify_existence(self, cmd, operation, resource_name): ec, stdout, stderr = call_lvm(cmd) if ec == 0: path = self._lookup(resource_name) self.assertTrue(path != '/') else: std_err_print( "%s failed with stdout= %s, stderr= %s" % (operation, stdout, stderr)) self.assertTrue(ec == 0, "%s exit code = %d" % (operation, ec)) def test_external_vg_create(self): # We need to ensure that if a user creates something outside of lvm # dbus service that things are sequenced correctly so that if a dbus # user calls into the service they will find the same information. vg_name = vg_n() # Get all the PV device paths pv_paths = [p.Pv.Name for p in self.objs[PV_INT]] cmd = ['vgcreate', vg_name] cmd.extend(pv_paths) self._verify_existence(cmd, cmd[0], vg_name) def test_external_lv_create(self): # Lets create a LV outside of service and see if we correctly handle # it's inclusion vg = self._vg_create().Vg lv_name = lv_n() full_name = "%s/%s" % (vg.Name, lv_name) cmd = ['lvcreate', '-L4M', '-n', lv_name, vg.Name] self._verify_existence(cmd, cmd[0], full_name) def test_external_pv_create(self): # Lets create a PV outside of service and see if we correctly handle # it's inclusion target = self.objs[PV_INT][0] # Remove the PV rc = self._pv_remove(target) self.assertTrue(rc == '/') self._check_consistency() # Make sure the PV we removed no longer exists self.assertTrue(self._lookup(target.Pv.Name) == '/') # Add it back with external command line cmd = ['pvcreate', target.Pv.Name] self._verify_existence(cmd, cmd[0], target.Pv.Name) def _create_nested(self, pv_object_path): vg = self._vg_create([pv_object_path]) pv = ClientProxy(self.bus, pv_object_path, interfaces=(PV_INT,)) self.assertEqual(pv.Pv.Vg, vg.object_path) self.assertIn(pv_object_path, vg.Vg.Pvs, "Expecting PV object path in Vg.Pvs") lv = self._create_lv(vg=vg.Vg, size=vg.Vg.FreeBytes, suffix="_pv") device_path = '/dev/%s/%s' % (vg.Vg.Name, lv.LvCommon.Name) new_pv_object_path = self._pv_create(device_path) vg.update() self.assertEqual(lv.LvCommon.Vg, vg.object_path) self.assertIn(lv.object_path, vg.Vg.Lvs, "Expecting LV object path in Vg.Lvs") new_pv_proxy = ClientProxy(self.bus, new_pv_object_path, interfaces=(PV_INT, )) self.assertEqual(new_pv_proxy.Pv.Name, device_path) return new_pv_object_path def test_nesting(self): # check to see if we handle an LV becoming a PV which has it's own # LV pv_object_path = self.objs[PV_INT][0].object_path for i in range(0, 5): 
pv_object_path = self._create_nested(pv_object_path) def test_pv_symlinks(self): # Lets take one of our test PVs, pvremove it, find a symlink to it # and re-create using the symlink to ensure we return an object # path to it. Additionally, we will take the symlink and do a lookup # (Manager.LookUpByLvmId) using it and the original device path to # ensure that we can find the PV. symlink = None pv = self.objs[PV_INT][0] pv_device_path = pv.Pv.Name self._pv_remove(pv) # Make sure we no longer find the pv rc = self._lookup(pv_device_path) self.assertEqual(rc, '/') # Lets locate a symlink for it devices = glob('/dev/disk/*/*') for d in devices: if pv_device_path == os.path.realpath(d): symlink = d break self.assertIsNotNone(symlink, "We expected to find at least 1 symlink!") # Make sure symlink look up fails too rc = self._lookup(symlink) self.assertEqual(rc, '/') pv_object_path = self._pv_create(symlink) self.assertNotEqual(pv_object_path, '/') pv_proxy = ClientProxy(self.bus, pv_object_path, interfaces=(PV_INT, )) self.assertEqual(pv_proxy.Pv.Name, pv_device_path) # Lets check symlink lookup self.assertEqual(pv_object_path, self._lookup(symlink)) self.assertEqual(pv_object_path, self._lookup(pv_device_path)) class AggregateResults(object): def __init__(self): self.no_errors = True def register_result(self, result): if not result.result.wasSuccessful(): self.no_errors = False def register_fail(self): self.no_errors = False def exit_run(self): if self.no_errors: sys.exit(0) sys.exit(1) if __name__ == '__main__': r = AggregateResults() mode = int(test_shell) if mode == 0: std_err_print('\n*** Testing only lvm fork & exec test mode ***\n') elif mode == 1: std_err_print('\n*** Testing fork & exec & lvm shell mode ***\n') else: std_err_print('\n*** Testing only lvm shell mode ***\n') for g_tmo in [0, 15]: if mode == 0: if set_execution(False, r): r.register_result(unittest.main(exit=False)) elif mode == 2: if set_execution(True, r): r.register_result(unittest.main(exit=False)) else: if set_execution(False, r): r.register_result(unittest.main(exit=False)) # Test lvm shell if set_execution(True, r): r.register_result(unittest.main(exit=False)) if not r.no_errors: break r.exit_run() LVM2.2.02.176/test/dbus/testlib.py0000644000000000000120000001771713176752421015275 0ustar rootwheel#!/usr/bin/env python3 # Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
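# The helpers defined below (DbusIntrospection, RemoteInterface, ClientProxy)
# wrap a raw dbus proxy so that each D-Bus interface becomes an attribute
# named after the last component of the interface name
# ('com.redhat.lvmdbus1.Manager' -> proxy.Manager), with property values
# type-checked against the introspection data.  A minimal usage sketch,
# modelled on validatestate.py further below (the bus and main-loop setup is
# that script's choice, not something this module requires):
#
#     import dbus
#     from dbus.mainloop.glib import DBusGMainLoop
#     import testlib
#
#     bus = dbus.SystemBus(mainloop=DBusGMainLoop())
#     mgr = testlib.ClientProxy(bus, testlib.MANAGER_OBJ)
#     rc = mgr.Manager.Refresh()   # validatestate.py exits with this value
#                                  # and expects it to be 0
#
# Method calls go through RemoteInterface._wrapper, which times each call and,
# when introspection data is available, verifies the D-Bus type of the return
# value.  Lookups that find nothing return the object path '/', which is why
# the tests above compare results against '/'.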
import string import random import functools import xml.etree.ElementTree as Et from collections import OrderedDict import dbus import os import sys import time BUS_NAME = os.getenv('LVM_DBUS_NAME', 'com.redhat.lvmdbus1') BASE_INTERFACE = 'com.redhat.lvmdbus1' MANAGER_INT = BASE_INTERFACE + '.Manager' MANAGER_OBJ = '/' + BASE_INTERFACE.replace('.', '/') + '/Manager' PV_INT = BASE_INTERFACE + ".Pv" VG_INT = BASE_INTERFACE + ".Vg" LV_INT = BASE_INTERFACE + ".Lv" THINPOOL_INT = BASE_INTERFACE + ".ThinPool" SNAPSHOT_INT = BASE_INTERFACE + ".Snapshot" LV_COMMON_INT = BASE_INTERFACE + ".LvCommon" JOB_INT = BASE_INTERFACE + ".Job" CACHE_POOL_INT = BASE_INTERFACE + ".CachePool" CACHE_LV_INT = BASE_INTERFACE + ".CachedLv" THINPOOL_LV_PATH = '/' + THINPOOL_INT.replace('.', '/') validate_introspection = True def rs(length, suffix, character_set=string.ascii_lowercase): return ''.join(random.choice(character_set) for _ in range(length)) + suffix def mib(s): return 1024 * 1024 * s def std_err_print(*args): sys.stderr.write(' '.join(map(str, args)) + '\n') sys.stderr.flush() class DbusIntrospection(object): @staticmethod def introspect(xml_representation): interfaces = {} root = Et.fromstring(xml_representation) for c in root: if c.tag == "interface": in_f = c.attrib['name'] interfaces[in_f] = dict(methods=OrderedDict(), properties={}) for nested in c: if nested.tag == "method": mn = nested.attrib['name'] interfaces[in_f]['methods'][mn] = OrderedDict() for arg in nested: if arg.tag == 'arg': arg_dir = arg.attrib['direction'] if arg_dir == 'in': n = arg.attrib['name'] else: n = 'RETURN_VALUE' arg_type = arg.attrib['type'] if n: v = dict( name=mn, a_dir=arg_dir, a_type=arg_type ) interfaces[in_f]['methods'][mn][n] = v elif nested.tag == 'property': pn = nested.attrib['name'] p_access = nested.attrib['access'] p_type = nested.attrib['type'] interfaces[in_f]['properties'][pn] = \ dict(p_access=p_access, p_type=p_type) else: pass # print('Interfaces...') # for k, v in list(interfaces.items()): # print('Interface %s' % k) # if v['methods']: # for m, args in list(v['methods'].items()): # print(' method: %s' % m) # for a, aa in args.items(): # print(' method arg: %s type %s' % # (a, aa['a_type'])) # if v['properties']: # for p, d in list(v['properties'].items()): # print(' Property: %s type= %s' % (p, d['p_type'])) # print('End interfaces') return interfaces def btsr(value): t = type(value) if t == dbus.Boolean: return 'b' elif t == dbus.ObjectPath: return 'o' elif t == dbus.String: return 's' elif t == dbus.Byte: return 'y' elif t == dbus.Int16: return 'n' elif t == dbus.Int32: return 'i' elif t == dbus.Int64: return 'x' elif t == dbus.UInt16: return 'q' elif t == dbus.UInt32: return 'u' elif t == dbus.UInt64: return 't' elif t == dbus.Double: return 'd' elif t == dbus.Struct: rc = '(' for vt in value: rc += btsr(vt) rc += ')' return rc elif t == dbus.Array: rc = "a" for i in value: rc += btsr(i) break return rc else: raise RuntimeError("Unhandled type %s" % str(t)) def verify_type(value, dbus_str_rep): actual_str_rep = btsr(value) if dbus_str_rep != actual_str_rep: # print("%s ~= %s" % (dbus_str_rep, actual_str_rep)) # Unless we have a full filled out type we won't match exactly if not dbus_str_rep.startswith(actual_str_rep): raise RuntimeError( "Incorrect type, expected= %s actual = %s object= %s" % (dbus_str_rep, actual_str_rep, str(type(value)))) class RemoteInterface(object): def _set_props(self, props=None): if not props: for _ in range(0, 3): try: prop_interface = dbus.Interface(self.dbus_object, 
'org.freedesktop.DBus.Properties') props = prop_interface.GetAll(self.interface) break except dbus.exceptions.DBusException as dbe: if "GetAll" not in str(dbe): raise dbe if props: for kl, vl in list(props.items()): # Verify type is correct! if self.introspect: verify_type(vl, self.introspect[self.interface] ['properties'][kl]['p_type']) setattr(self, kl, vl) @property def object_path(self): return self.dbus_object.object_path def __init__( self, dbus_object, interface, introspect, properties=None, timelimit=-1): self.dbus_object = dbus_object self.interface = interface self.introspect = introspect self.tmo = 0 if timelimit >= 0: self.tmo = float(timelimit) self.tmo *= 1.10 self.dbus_interface = dbus.Interface(self.dbus_object, self.interface) self._set_props(properties) def __getattr__(self, item): if hasattr(self.dbus_interface, item): return functools.partial(self._wrapper, item) else: return functools.partial(self, item) def _wrapper(self, _method_name, *args, **kwargs): # Lets see how long a method takes to execute, in call cases we should # return something when the time limit has been reached. start = time.time() result = getattr(self.dbus_interface, _method_name)(*args, **kwargs) end = time.time() diff = end - start if self.tmo > 0.0: if diff > self.tmo: std_err_print("\n Time exceeded: %f > %f %s" % (diff, self.tmo, _method_name)) if self.introspect: if 'RETURN_VALUE' in self.introspect[ self.interface]['methods'][_method_name]: r_type = self.introspect[ self.interface]['methods'][ _method_name]['RETURN_VALUE']['a_type'] verify_type(result, r_type) return result def update(self): self._set_props() class ClientProxy(object): @staticmethod def _intf_short_name(nm): return nm.split('.')[-1:][0] def get_introspect(self): i = dbus.Interface( self.dbus_object, 'org.freedesktop.DBus.Introspectable') return DbusIntrospection.introspect(i.Introspect()) def _common(self, interface, introspect, properties): short_name = ClientProxy._intf_short_name(interface) self.short_interface_names.append(short_name) ro = RemoteInterface(self.dbus_object, interface, introspect, properties, timelimit=self.tmo) setattr(self, short_name, ro) def __init__(self, bus, object_path, interface_prop_hash=None, interfaces=None, timelimit=-1): self.object_path = object_path self.short_interface_names = [] self.tmo = timelimit self.dbus_object = bus.get_object( BUS_NAME, self.object_path, introspect=False) if interface_prop_hash: assert interfaces is None if interfaces: assert interface_prop_hash is None if interface_prop_hash and not validate_introspection: # We have everything including the values of the properties for i, props in interface_prop_hash.items(): self._common(i, None, props) elif interfaces and not validate_introspection: # We are retrieving the values of the properties for i in interfaces: self._common(i, None, None) else: # We need to query the interfaces and gather all the properties # for each interface, as we have the introspection data we # will also utilize it to verify what we get back verifies introspect = self.get_introspect() if interface_prop_hash: introspect_interfaces = list(introspect.keys()) for object_manager_key in interface_prop_hash.keys(): assert object_manager_key in introspect_interfaces for i in list(introspect.keys()): self._common(i, introspect, None) def update(self): # Go through all interfaces and update them for sn in self.short_interface_names: getattr(self, sn).update() LVM2.2.02.176/test/dbus/validatestate.py0000755000000000000120000000162713176752421016455 0ustar 
rootwheel#!/usr/bin/env python3 # Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # # Simply connects to the dbus service and calls Refresh and ensures that the # value returned is zero import testlib import dbus from dbus.mainloop.glib import DBusGMainLoop import sys import os if __name__ == "__main__": use_session = os.getenv('LVMDBUSD_USE_SESSION', False) if use_session: bus = dbus.SessionBus(mainloop=DBusGMainLoop()) else: bus = dbus.SystemBus(mainloop=DBusGMainLoop()) mgr_proxy = testlib.ClientProxy(bus, testlib.MANAGER_OBJ) sys.exit(mgr_proxy.Manager.Refresh()) LVM2.2.02.176/test/unit/0000755000000000000120000000000013176752421013262 5ustar rootwheelLVM2.2.02.176/test/unit/Makefile.in0000644000000000000120000000233113176752421015326 0ustar rootwheel# Copyright (C) 2011-2017 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ VPATH = $(srcdir) UNITS = \ bitset_t.c\ config_t.c\ dmlist_t.c\ dmstatus_t.c\ matcher_t.c\ percent_t.c\ string_t.c\ run.c ifeq ("@TESTING@", "yes") SOURCES = $(UNITS) TARGETS = run endif include $(top_builddir)/make.tmpl ifeq ($(MAKECMDGOALS),distclean) SOURCES = $(UNITS) endif ifeq ("$(TESTING)", "yes") LDLIBS += -ldevmapper @CUNIT_LIBS@ CFLAGS += @CUNIT_CFLAGS@ check: unit $(TARGETS): $(OBJECTS) $(top_builddir)/libdm/libdevmapper.$(LIB_SUFFIX) $(CC) $(CFLAGS) $(LDFLAGS) $(EXTRA_EXEC_LDFLAGS) -L$(top_builddir)/libdm \ -o $@ $(OBJECTS) $(LDLIBS) unit: $(TARGETS) @echo Running unit tests LD_LIBRARY_PATH=$(top_builddir)/libdm ./$(TARGETS) endif LVM2.2.02.176/test/unit/percent_t.c0000644000000000000120000000607013176752421015414 0ustar rootwheel/* * Copyright (C) 2017 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "units.h" #include #include int percent_init(void) { return 0; } int percent_fini(void) { return 0; } static void test_percent_100(void) { char buf[32]; /* Check 100% is shown only for DM_PERCENT_100*/ dm_percent_t p_100 = dm_make_percent(100, 100); dm_percent_t p1_100 = dm_make_percent(100000, 100000); dm_percent_t n_100 = dm_make_percent(999999, 1000000); CU_ASSERT_EQUAL(p_100, DM_PERCENT_100); CU_ASSERT_EQUAL(p1_100, DM_PERCENT_100); CU_ASSERT_NOT_EQUAL(n_100, DM_PERCENT_100); dm_snprintf(buf, sizeof(buf), "%.2f", dm_percent_to_float(p_100)); CU_ASSERT_EQUAL(strcmp(buf, "100.00"), 0); dm_snprintf(buf, sizeof(buf), "%.2f", dm_percent_to_float(p1_100)); CU_ASSERT_EQUAL(strcmp(buf, "100.00"), 0); dm_snprintf(buf, sizeof(buf), "%.2f", dm_percent_to_float(n_100)); CU_ASSERT_NOT_EQUAL(strcmp(buf, "99.99"), 0); /* Would like to gett */ dm_snprintf(buf, sizeof(buf), "%.2f", dm_percent_to_round_float(n_100, 2)); CU_ASSERT_EQUAL(strcmp(buf, "99.99"), 0); dm_snprintf(buf, sizeof(buf), "%.3f", dm_percent_to_round_float(n_100, 3)); CU_ASSERT_EQUAL(strcmp(buf, "99.999"), 0); dm_snprintf(buf, sizeof(buf), "%.4f", dm_percent_to_round_float(n_100, 4)); CU_ASSERT_EQUAL(strcmp(buf, "99.9999"), 0); dm_snprintf(buf, sizeof(buf), "%d", (int)dm_percent_to_round_float(n_100, 0)); CU_ASSERT_EQUAL(strcmp(buf, "99"), 0); } static void test_percent_0(void) { char buf[32]; /* Check 0% is shown only for DM_PERCENT_0 */ dm_percent_t p_0 = dm_make_percent(0, 100); dm_percent_t p1_0 = dm_make_percent(0, 100000); dm_percent_t n_0 = dm_make_percent(1, 1000000); CU_ASSERT_EQUAL(p_0, DM_PERCENT_0); CU_ASSERT_EQUAL(p1_0, DM_PERCENT_0); CU_ASSERT_NOT_EQUAL(n_0, DM_PERCENT_0); dm_snprintf(buf, sizeof(buf), "%.2f", dm_percent_to_float(p_0)); CU_ASSERT_EQUAL(strcmp(buf, "0.00"), 0); dm_snprintf(buf, sizeof(buf), "%.2f", dm_percent_to_float(p1_0)); CU_ASSERT_EQUAL(strcmp(buf, "0.00"), 0); dm_snprintf(buf, sizeof(buf), "%.2f", dm_percent_to_float(n_0)); CU_ASSERT_NOT_EQUAL(strcmp(buf, "0.01"), 0); dm_snprintf(buf, sizeof(buf), "%.2f", dm_percent_to_round_float(n_0, 2)); CU_ASSERT_EQUAL(strcmp(buf, "0.01"), 0); dm_snprintf(buf, sizeof(buf), "%.3f", dm_percent_to_round_float(n_0, 3)); CU_ASSERT_EQUAL(strcmp(buf, "0.001"), 0); dm_snprintf(buf, sizeof(buf), "%d", (int)dm_percent_to_round_float(n_0, 0)); CU_ASSERT_EQUAL(strcmp(buf, "1"), 0); } CU_TestInfo percent_list[] = { { (char*)"percent_100", test_percent_100 }, { (char*)"percent_0", test_percent_0 }, CU_TEST_INFO_NULL }; LVM2.2.02.176/test/unit/string_t.c0000644000000000000120000000340213176752421015256 0ustar rootwheel/* * Copyright (C) 2012 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "units.h" #include #include static struct dm_pool *mem = NULL; int string_init(void) { mem = dm_pool_create("string test", 1024); return (mem == NULL); } int string_fini(void) { dm_pool_destroy(mem); return 0; } /* TODO: Add more string unit tests here */ static void test_strncpy(void) { const char st[] = "1234567890"; char buf[sizeof(st)]; CU_ASSERT_EQUAL(dm_strncpy(buf, st, sizeof(buf)), 1); CU_ASSERT_EQUAL(strcmp(buf, st), 0); CU_ASSERT_EQUAL(dm_strncpy(buf, st, sizeof(buf) - 1), 0); CU_ASSERT_EQUAL(strlen(buf) + 1, sizeof(buf) - 1); } static void test_asprint(void) { const char st0[] = ""; const char st1[] = "12345678901"; const char st2[] = "1234567890123456789012345678901234567890123456789012345678901234567"; char *buf; int a; a = dm_asprintf(&buf, "%s", st0); CU_ASSERT_EQUAL(strcmp(buf, st0), 0); CU_ASSERT_EQUAL(a, sizeof(st0)); free(buf); a = dm_asprintf(&buf, "%s", st1); CU_ASSERT_EQUAL(strcmp(buf, st1), 0); CU_ASSERT_EQUAL(a, sizeof(st1)); free(buf); a = dm_asprintf(&buf, "%s", st2); CU_ASSERT_EQUAL(a, sizeof(st2)); CU_ASSERT_EQUAL(strcmp(buf, st2), 0); free(buf); } CU_TestInfo string_list[] = { { (char*)"asprint", test_asprint }, { (char*)"strncpy", test_strncpy }, CU_TEST_INFO_NULL }; LVM2.2.02.176/test/unit/bitset_t.c0000644000000000000120000000645413176752421015254 0ustar rootwheel/* * Copyright (C) 2010 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "units.h" enum { NR_BITS = 137 }; static struct dm_pool *mem; int bitset_init(void) { mem = dm_pool_create("bitset test", 1024); return mem == NULL; } int bitset_fini(void) { dm_pool_destroy(mem); return 0; } static void test_get_next(void) { int i, j, last = 0, first; dm_bitset_t bs = dm_bitset_create(mem, NR_BITS); for (i = 0; i < NR_BITS; i++) CU_ASSERT(!dm_bit(bs, i)); for (i = 0, j = 1; i < NR_BITS; i += j, j++) dm_bit_set(bs, i); first = 1; for (i = 0, j = 1; i < NR_BITS; i += j, j++) { if (first) { last = dm_bit_get_first(bs); first = 0; } else last = dm_bit_get_next(bs, last); CU_ASSERT(last == i); } CU_ASSERT(dm_bit_get_next(bs, last) == -1); } static void bit_flip(dm_bitset_t bs, int bit) { int old = dm_bit(bs, bit); if (old) dm_bit_clear(bs, bit); else dm_bit_set(bs, bit); } static void test_equal(void) { dm_bitset_t bs1 = dm_bitset_create(mem, NR_BITS); dm_bitset_t bs2 = dm_bitset_create(mem, NR_BITS); int i, j; for (i = 0, j = 1; i < NR_BITS; i += j, j++) { dm_bit_set(bs1, i); dm_bit_set(bs2, i); } CU_ASSERT(dm_bitset_equal(bs1, bs2)); CU_ASSERT(dm_bitset_equal(bs2, bs1)); for (i = 0; i < NR_BITS; i++) { bit_flip(bs1, i); CU_ASSERT(!dm_bitset_equal(bs1, bs2)); CU_ASSERT(!dm_bitset_equal(bs2, bs1)); CU_ASSERT(dm_bitset_equal(bs1, bs1)); /* comparing with self */ bit_flip(bs1, i); } } static void test_and(void) { dm_bitset_t bs1 = dm_bitset_create(mem, NR_BITS); dm_bitset_t bs2 = dm_bitset_create(mem, NR_BITS); dm_bitset_t bs3 = dm_bitset_create(mem, NR_BITS); int i, j; for (i = 0, j = 1; i < NR_BITS; i += j, j++) { dm_bit_set(bs1, i); dm_bit_set(bs2, i); } dm_bit_and(bs3, bs1, bs2); CU_ASSERT(dm_bitset_equal(bs1, bs2)); CU_ASSERT(dm_bitset_equal(bs1, bs3)); CU_ASSERT(dm_bitset_equal(bs2, bs3)); dm_bit_clear_all(bs1); dm_bit_clear_all(bs2); for (i = 0; i < NR_BITS; i++) { if (i % 2) dm_bit_set(bs1, i); else dm_bit_set(bs2, i); } dm_bit_and(bs3, bs1, bs2); for (i = 0; i < NR_BITS; i++) CU_ASSERT(!dm_bit(bs3, i)); } CU_TestInfo bitset_list[] = { { (char*)"get_next", test_get_next }, { (char*)"equal", test_equal }, { (char*)"and", test_and }, CU_TEST_INFO_NULL }; LVM2.2.02.176/test/unit/run.c0000644000000000000120000000150413176752421014232 0ustar rootwheel#include "units.h" #include #include #include /* Setup SuiteInfo struct in a compatible way across different CUnit versions */ /* old version of CUnit has used char* for .pName, so using cast here */ #define USE(n) { \ .pName = (char*) #n, \ .pInitFunc = n##_init, \ .pCleanupFunc = n##_fini, \ .pTests = n##_list } CU_SuiteInfo suites[] = { USE(bitset), USE(config), USE(dmlist), USE(dmstatus), USE(regex), USE(percent), USE(string), CU_SUITE_INFO_NULL }; int main(int argc, char **argv) { if (CU_initialize_registry() != CUE_SUCCESS) { printf("Initialization of Test Registry failed.\n"); return CU_get_error(); } CU_register_suites(suites); CU_basic_set_mode(CU_BRM_VERBOSE); CU_basic_run_tests(); CU_cleanup_registry(); return (CU_get_number_of_failures() != 0); } LVM2.2.02.176/test/unit/units.h0000644000000000000120000000144513176752421014601 0ustar rootwheel/* * Copyright (C) 2015-2017 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. 
* * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _UNITS_H #define _UNITS_H #include "libdevmapper.h" #include #define DECL(n) \ extern CU_TestInfo n ## _list[];\ int n ## _init(void); \ int n ## _fini(void); DECL(bitset); DECL(config); DECL(dmlist); DECL(dmstatus); DECL(regex); DECL(percent); DECL(string); #endif LVM2.2.02.176/test/unit/dmstatus_t.c0000644000000000000120000000372613176752421015625 0ustar rootwheel/* * Copyright (C) 2015 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "units.h" static struct dm_pool *_mem; int dmstatus_init(void) { _mem = dm_pool_create("dmstatus test", 1024); return (_mem == NULL); } int dmstatus_fini(void) { dm_pool_destroy(_mem); return 0; } static void _test_mirror_status(void) { struct dm_status_mirror *s = NULL; CU_ASSERT(dm_get_status_mirror(_mem, "2 253:1 253:2 80/81 1 AD 3 disk 253:0 A", &s)); if (s) { CU_ASSERT_EQUAL(s->total_regions, 81); CU_ASSERT_EQUAL(s->insync_regions, 80); CU_ASSERT_EQUAL(s->dev_count, 2); CU_ASSERT_EQUAL(s->devs[0].health, 'A'); CU_ASSERT_EQUAL(s->devs[0].major, 253); CU_ASSERT_EQUAL(s->devs[0].minor, 1); CU_ASSERT_EQUAL(s->devs[1].health, 'D'); CU_ASSERT_EQUAL(s->devs[1].major, 253); CU_ASSERT_EQUAL(s->devs[1].minor, 2); CU_ASSERT_EQUAL(s->log_count, 1); CU_ASSERT_EQUAL(s->logs[0].major, 253); CU_ASSERT_EQUAL(s->logs[0].minor, 0); CU_ASSERT_EQUAL(s->logs[0].health, 'A'); CU_ASSERT(!strcmp(s->log_type, "disk")); } CU_ASSERT(dm_get_status_mirror(_mem, "4 253:1 253:2 253:3 253:4 10/10 1 ADFF 1 core", &s)); if (s) { CU_ASSERT_EQUAL(s->total_regions, 10); CU_ASSERT_EQUAL(s->insync_regions, 10); CU_ASSERT_EQUAL(s->dev_count, 4); CU_ASSERT_EQUAL(s->devs[3].minor, 4); CU_ASSERT_EQUAL(s->devs[3].health, 'F'); CU_ASSERT_EQUAL(s->log_count, 0); CU_ASSERT(!strcmp(s->log_type, "core")); } } CU_TestInfo dmstatus_list[] = { { (char*)"mirror_status", _test_mirror_status }, CU_TEST_INFO_NULL }; LVM2.2.02.176/test/unit/config_t.c0000644000000000000120000001202313176752421015214 0ustar rootwheel/* * Copyright (C) 2010 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "units.h" static struct dm_pool *mem; int config_init(void) { mem = dm_pool_create("config test", 1024); return mem == NULL; } int config_fini(void) { dm_pool_destroy(mem); return 0; } static const char *conf = "id = \"yada-yada\"\n" "seqno = 15\n" "status = [\"READ\", \"WRITE\"]\n" "flags = []\n" "extent_size = 8192\n" "physical_volumes {\n" " pv0 {\n" " id = \"abcd-efgh\"\n" " }\n" " pv1 {\n" " id = \"bbcd-efgh\"\n" " }\n" " pv2 {\n" " id = \"cbcd-efgh\"\n" " }\n" "}\n"; static const char *overlay = "id = \"yoda-soda\"\n" "flags = [\"FOO\"]\n" "physical_volumes {\n" " pv1 {\n" " id = \"hgfe-dcba\"\n" " }\n" " pv3 {\n" " id = \"dbcd-efgh\"\n" " }\n" "}\n"; static void test_parse(void) { struct dm_config_tree *tree = dm_config_from_string(conf); const struct dm_config_value *value; CU_ASSERT((long) tree); CU_ASSERT(dm_config_has_node(tree->root, "id")); CU_ASSERT(dm_config_has_node(tree->root, "physical_volumes")); CU_ASSERT(dm_config_has_node(tree->root, "physical_volumes/pv0")); CU_ASSERT(dm_config_has_node(tree->root, "physical_volumes/pv0/id")); CU_ASSERT(!strcmp(dm_config_find_str(tree->root, "id", "foo"), "yada-yada")); CU_ASSERT(!strcmp(dm_config_find_str(tree->root, "idt", "foo"), "foo")); CU_ASSERT(!strcmp(dm_config_find_str(tree->root, "physical_volumes/pv0/bb", "foo"), "foo")); CU_ASSERT(!strcmp(dm_config_find_str(tree->root, "physical_volumes/pv0/id", "foo"), "abcd-efgh")); CU_ASSERT(!dm_config_get_uint32(tree->root, "id", NULL)); CU_ASSERT(dm_config_get_uint32(tree->root, "extent_size", NULL)); /* FIXME: Currently everything parses as a list, even if it's not */ // CU_ASSERT(!dm_config_get_list(tree->root, "id", NULL)); // CU_ASSERT(!dm_config_get_list(tree->root, "extent_size", NULL)); CU_ASSERT(dm_config_get_list(tree->root, "flags", &value)); CU_ASSERT(value->next == NULL); /* an empty list */ CU_ASSERT(dm_config_get_list(tree->root, "status", &value)); CU_ASSERT(value->next != NULL); /* a non-empty list */ dm_config_destroy(tree); } static void test_clone(void) { struct dm_config_tree *tree = dm_config_from_string(conf); struct dm_config_node *n = dm_config_clone_node(tree, tree->root, 1); const struct dm_config_value *value; /* Check that the nodes are actually distinct. 
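The clone is a separate copy of the configuration subtree, so the cloned node,
its sibling chain and the sections reached through it must all be different
pointers from the originals; the assertions below compare addresses, not contents.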
*/ CU_ASSERT(n != tree->root); CU_ASSERT(n->sib != tree->root->sib); CU_ASSERT(dm_config_find_node(n, "physical_volumes") != NULL); CU_ASSERT(dm_config_find_node(tree->root, "physical_volumes") != NULL); CU_ASSERT(dm_config_find_node(n, "physical_volumes") != dm_config_find_node(tree->root, "physical_volumes")); CU_ASSERT(dm_config_has_node(n, "id")); CU_ASSERT(dm_config_has_node(n, "physical_volumes")); CU_ASSERT(dm_config_has_node(n, "physical_volumes/pv0")); CU_ASSERT(dm_config_has_node(n, "physical_volumes/pv0/id")); CU_ASSERT(!strcmp(dm_config_find_str(n, "id", "foo"), "yada-yada")); CU_ASSERT(!strcmp(dm_config_find_str(n, "idt", "foo"), "foo")); CU_ASSERT(!strcmp(dm_config_find_str(n, "physical_volumes/pv0/bb", "foo"), "foo")); CU_ASSERT(!strcmp(dm_config_find_str(n, "physical_volumes/pv0/id", "foo"), "abcd-efgh")); CU_ASSERT(!dm_config_get_uint32(n, "id", NULL)); CU_ASSERT(dm_config_get_uint32(n, "extent_size", NULL)); /* FIXME: Currently everything parses as a list, even if it's not */ // CU_ASSERT(!dm_config_get_list(tree->root, "id", NULL)); // CU_ASSERT(!dm_config_get_list(tree->root, "extent_size", NULL)); CU_ASSERT(dm_config_get_list(n, "flags", &value)); CU_ASSERT(value->next == NULL); /* an empty list */ CU_ASSERT(dm_config_get_list(n, "status", &value)); CU_ASSERT(value->next != NULL); /* a non-empty list */ dm_config_destroy(tree); } static void test_cascade(void) { struct dm_config_tree *t1 = dm_config_from_string(conf), *t2 = dm_config_from_string(overlay), *tree = dm_config_insert_cascaded_tree(t2, t1); CU_ASSERT(!strcmp(dm_config_tree_find_str(tree, "id", "foo"), "yoda-soda")); CU_ASSERT(!strcmp(dm_config_tree_find_str(tree, "idt", "foo"), "foo")); CU_ASSERT(!strcmp(dm_config_tree_find_str(tree, "physical_volumes/pv0/bb", "foo"), "foo")); CU_ASSERT(!strcmp(dm_config_tree_find_str(tree, "physical_volumes/pv1/id", "foo"), "hgfe-dcba")); CU_ASSERT(!strcmp(dm_config_tree_find_str(tree, "physical_volumes/pv3/id", "foo"), "dbcd-efgh")); dm_config_destroy(t1); dm_config_destroy(t2); } CU_TestInfo config_list[] = { { (char*)"parse", test_parse }, { (char*)"clone", test_clone }, { (char*)"cascade", test_cascade }, CU_TEST_INFO_NULL }; LVM2.2.02.176/test/unit/matcher_t.c0000644000000000000120000000346113176752421015400 0ustar rootwheel/* * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved. * Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "units.h" #include "matcher_data.h" static struct dm_pool *mem = NULL; int regex_init(void) { mem = dm_pool_create("bitset test", 1024); return mem == NULL; } int regex_fini(void) { dm_pool_destroy(mem); return 0; } static struct dm_regex *make_scanner(const char **rx) { struct dm_regex *scanner; int nrx = 0; for (; rx[nrx]; ++nrx); scanner = dm_regex_create(mem, rx, nrx); CU_ASSERT_FATAL(scanner != NULL); return scanner; } static void test_fingerprints(void) { struct dm_regex *scanner; scanner = make_scanner(dev_patterns); CU_ASSERT_EQUAL(dm_regex_fingerprint(scanner), 0x7f556c09); scanner = make_scanner(random_patterns); CU_ASSERT_EQUAL(dm_regex_fingerprint(scanner), 0x9f11076c); } static void test_matching(void) { struct dm_regex *scanner; int i; scanner = make_scanner(dev_patterns); for (i = 0; devices[i].str; ++i) CU_ASSERT_EQUAL(dm_regex_match(scanner, devices[i].str), devices[i].expected - 1); scanner = make_scanner(nonprint_patterns); for (i = 0; nonprint[i].str; ++i) CU_ASSERT_EQUAL(dm_regex_match(scanner, nonprint[i].str), nonprint[i].expected - 1); } CU_TestInfo regex_list[] = { { (char*)"fingerprints", test_fingerprints }, { (char*)"matching", test_matching }, CU_TEST_INFO_NULL }; LVM2.2.02.176/test/unit/matcher_data.h0000644000000000000120000005775013176752421016065 0ustar rootwheelstruct check_item { const char *str; int expected; }; static const char *dev_patterns[] = { "loop/[0-9]+", "hd[a-d][0-5]+", NULL }; static const char *nonprint_patterns[] = { "foo\x80" "bar", "foo\xc2" "b", "\x80", NULL }; static const struct check_item nonprint[] = { { "foo\x2e" "bar", 0 }, { "foo\x80" "bar", 3 }, { "foo\xc2" "b", 2 }, { "\x80", 3 }, { NULL, 0 } }; static const char *random_patterns[] = { "(((a?)(([Ub]*)|z))((([qr]|X)+)([Qn]*)))+", "[HZejtuw]*", "((B|s)*)|(((([Fv]l)(N+))(([el]|C)(tJ)))?)", "((([Ma]?)|(t*))*)|((([cm]E)|(M?))|(([BE][EV])|([Qj][Mh])))", "(((([bw]*)|([IO]*))((zK)*))|(((pU)|(i|q))|((z?)|([HL]?))))*", "((([Pt]?)|[Tr])?)((Hq)*)", "[HOXcfgikosvwxz]", "[BCEFGHNPTUWfjlprsy]", "((((aD)*)|([Xo]+))+)(([HKn](([Eq]|[JQ])(I*)))*)", "([LNWYeghv]|e)*", "(((y(L*))*)|((([EP]+)(W+))*))*", "U*", "((((R+)(W|[Qr]))|([py]+))+)([LM]*)", "(([DOjx](D(b?)))|([Ke]*))*", "((([ls](c|[FT]))*)([JS]*))*", "((l?)|(([Gz]+)|(D*)))*", "[ABgjn]", "(((q|[dg])?)|([Uk]*))((([Fl]?)|([Ry]+))|(([IR]|c)|(T?)))", "((([an]|P)|[Jw])((a*)|(m*)))*", "((((R[ht])(h+))?)|(([pz](n?))+))+", "(((([Dc]b)([Sp][Ii]))|((k|F)*))|[Uiovz])*", "[Res]*", "[Zl]|a", "^[ANZdf]$", "[En]|(((Q+)(U+))([pt]*))", "[ADEIMQUWXZhklrsvz]", "(((S(y*))*)|(j*))*", "n*", "[NUau]*", "((((Z*)(D|[Nd]))|(([np]|B)+))|(([Xy][Fi])*))+", "((([EZ]?)|(d[HR]))*)((([Hg]|q)(P+))*)", "q", "((m*)|(p|B))|((((x?)|(t+))(([Sb][PX])(O|[HM])))+)", "((((A*)(z[RS]))*)|(((z+)(Q*))+))*", "(((M*)([Uu]*))+)|[Uk]", "[imv]", "[GLSchtw](([Yw]((F[Dd])|([Tw]+)))?)", "([MOZj]*)(S|[Wknr])", "((G|q)*)[BHKN]", "((((NW)|([Ao]?))|((l|[UV])+))+)|((i|(z*))*)", "((((Z+)|([IR]?))|(L*))|([JKQ]+))+", "([Bdin](S*))+", "[HLNSTp]*", "(((J*)([Bq]|[Yu]))*)|([Kv]*)", "(((([BJ]|[Zy])(wI))*)(y*))+", "(((hF)+)|(H*))*", "((([QU][Pj])([GQ]?))+)|[PWo]", "(((([cq][BX])?)|((f[DI])*))*)(([GM]*)[SVYr])", "(([Zt]*)|((qx)|(([BV]+)(f?))))*", "[ILWYhsx]*", "(([Uy]*)|[sv])|([NSc]*)", "((c*)|([JUfhy]?))+", "(((q*)([So]*))(((g[jq])(j?))+))*", "((b+)|(((T+)([fw]T))?))*", 
"((([DS]?)|([Th]|u))(Q*))*", "[FKLX]|((([fw](L?))(([gq]*)|(O?)))?)", "((([HZ]+)u)*)|[APWijn]", "(e*)|(((v?)|((J+)(Hb)))?)", "(e|((w+)f))*", "[BEHKPQVdelnqy]", "((((B|N)(s*))|[Rr])(((g?)|([rv]+))+))+", "(((s*)|(K*))([AP]G))*", "[CELTp]", "(([Fq]?)|([Al]+))*", "((((r?)|(y[jx]))|([mp]*))+)|((B(S*))*)", "((([Eq]+)|(Y[ds]))|(x|(i|[Ku])))[IJNrvy]", "((([NO]*)[Ix])+)([Jenq]+)", "(((([HP]*)(j|y))*)[Ylqvy])*", "[PTv]+", "[AINSZhpx]|([EOYZ]*)", "([ABCFQv]*)((([Zx]|h)+)|([ej]*))", "((([pr]*)|(([Dq]|p)|(H?)))?)([NRUXmoq]*)", "(([er]*)|([mx]*))(((nV)([am]?))+)", "[BHPRlpu]", "(((([Ah]|[tx])|(e|[uy]))?)((([fl]+)([Vz]|v))*))*", "[AGdm]", "(((K*)^(O*)$)|(B?))*", "((([Ks]|[Ka])*)|([FSTab]?))?", "(([kw]+)[ei])(([Hy]*)(([Mc]*)|(G|f)))", "((((e*)|(Zf))|(R|[nq]))((([Jz]v)([Rj]+))+))*", "(((a?)|(e?))(([Uc]*)(S+)))*", "((((E+)([MZ]?))+)|(((s|[Az])|z)*))?", "((((i[MO])*)|((LH)*))|(((BA)|([AI]+))|[Ug]))*", "[EGHILcho]*", "(((Z[vw])?)((z|g)+))(((H|U)([iv]Q))|([qw]?))", "(([ehmr]|((L[Uw])*))+)((a+)I)", "[EKNSWYagj](((v|[TX])|([Uk]+))*)", "(((R[Mo])|(O*))|([Fm]|([qw]*)))((m*)|((S|[Ki])?))", "((((kP)|c)?)((([do]+)|([Gi]?))*))*", "((^(B|W)$|([Ww]+))([no]*))|((([iv]?)|(M*))|((x|L)?))", "[AEGPRSbcfhsy]", "[Wbcf]|((([MO]?)|([NT]|m))(([Oo]?)([Wg]*)))", "(((YZ)*)[PQVei])*", "[GJKYt][AEGWdegmnt]", "^[CDEGJKNUVYZagkv]$", "([DPWbx]*)|(((q|B)|(P|u))((M[Bq])*))", "[FHIJRTVYZdiorsuvz]*", "([MWoqvz]*)|^(l*)", "(((I|[Rx])*)((X[Mf])([Xa]L)))([Ha]|([HY]*))", "(((l|[Sd])*)((([Ix]+)|([XY]?))(Z*)))+", NULL }; struct check_item devices[] = { { "/dev", 0 }, { "/dev/.devfsd", 0 }, { "/dev/cpu", 0 }, { "/dev/cpu/mtrr", 0 }, { "/dev/netlink", 0 }, { "/dev/netlink/route", 0 }, { "/dev/netlink/skip", 0 }, { "/dev/netlink/USERSOCK", 0 }, { "/dev/netlink/fwmonitor", 0 }, { "/dev/netlink/ARPD", 0 }, { "/dev/netlink/ROUTE6", 0 }, { "/dev/netlink/IP6_FW", 0 }, { "/dev/netlink/tap0", 0 }, { "/dev/netlink/tap1", 0 }, { "/dev/netlink/tap2", 0 }, { "/dev/netlink/tap3", 0 }, { "/dev/netlink/tap4", 0 }, { "/dev/netlink/tap5", 0 }, { "/dev/netlink/tap6", 0 }, { "/dev/netlink/tap7", 0 }, { "/dev/netlink/tap8", 0 }, { "/dev/netlink/tap9", 0 }, { "/dev/netlink/tap10", 0 }, { "/dev/netlink/tap11", 0 }, { "/dev/netlink/tap12", 0 }, { "/dev/netlink/tap13", 0 }, { "/dev/netlink/tap14", 0 }, { "/dev/netlink/tap15", 0 }, { "/dev/shm", 0 }, { "/dev/mem", 0 }, { "/dev/kmem", 0 }, { "/dev/null", 0 }, { "/dev/port", 0 }, { "/dev/zero", 0 }, { "/dev/full", 0 }, { "/dev/random", 0 }, { "/dev/urandom", 0 }, { "/dev/tty", 0 }, { "/dev/console", 0 }, { "/dev/vc", 0 }, { "/dev/vc/1", 0 }, { "/dev/vc/2", 0 }, { "/dev/vc/3", 0 }, { "/dev/vc/4", 0 }, { "/dev/vc/5", 0 }, { "/dev/vc/6", 0 }, { "/dev/vc/7", 0 }, { "/dev/vc/8", 0 }, { "/dev/vc/9", 0 }, { "/dev/vc/10", 0 }, { "/dev/vc/11", 0 }, { "/dev/vc/12", 0 }, { "/dev/vc/13", 0 }, { "/dev/vc/14", 0 }, { "/dev/vc/15", 0 }, { "/dev/vc/16", 0 }, { "/dev/vc/17", 0 }, { "/dev/vc/18", 0 }, { "/dev/vc/19", 0 }, { "/dev/vc/20", 0 }, { "/dev/vc/21", 0 }, { "/dev/vc/22", 0 }, { "/dev/vc/23", 0 }, { "/dev/vc/24", 0 }, { "/dev/vc/25", 0 }, { "/dev/vc/26", 0 }, { "/dev/vc/27", 0 }, { "/dev/vc/28", 0 }, { "/dev/vc/29", 0 }, { "/dev/vc/30", 0 }, { "/dev/vc/31", 0 }, { "/dev/vc/32", 0 }, { "/dev/vc/33", 0 }, { "/dev/vc/34", 0 }, { "/dev/vc/35", 0 }, { "/dev/vc/36", 0 }, { "/dev/vc/37", 0 }, { "/dev/vc/38", 0 }, { "/dev/vc/39", 0 }, { "/dev/vc/40", 0 }, { "/dev/vc/41", 0 }, { "/dev/vc/42", 0 }, { "/dev/vc/43", 0 }, { "/dev/vc/44", 0 }, { "/dev/vc/45", 0 }, { "/dev/vc/46", 0 }, { "/dev/vc/47", 0 }, { "/dev/vc/48", 0 }, { "/dev/vc/49", 0 }, { 
"/dev/vc/50", 0 }, { "/dev/vc/51", 0 }, { "/dev/vc/52", 0 }, { "/dev/vc/53", 0 }, { "/dev/vc/54", 0 }, { "/dev/vc/55", 0 }, { "/dev/vc/56", 0 }, { "/dev/vc/57", 0 }, { "/dev/vc/58", 0 }, { "/dev/vc/59", 0 }, { "/dev/vc/60", 0 }, { "/dev/vc/61", 0 }, { "/dev/vc/62", 0 }, { "/dev/vc/63", 0 }, { "/dev/vc/0", 0 }, { "/dev/ptmx", 0 }, { "/dev/misc", 0 }, { "/dev/misc/psaux", 0 }, { "/dev/pty", 0 }, { "/dev/pty/m0", 0 }, { "/dev/pty/m1", 0 }, { "/dev/pty/m2", 0 }, { "/dev/pty/m3", 0 }, { "/dev/pty/m4", 0 }, { "/dev/pty/m5", 0 }, { "/dev/pty/m6", 0 }, { "/dev/pty/m7", 0 }, { "/dev/pty/m8", 0 }, { "/dev/pty/m9", 0 }, { "/dev/pty/m10", 0 }, { "/dev/pty/m11", 0 }, { "/dev/pty/m12", 0 }, { "/dev/pty/m13", 0 }, { "/dev/pty/m14", 0 }, { "/dev/pty/m15", 0 }, { "/dev/pty/m16", 0 }, { "/dev/pty/m17", 0 }, { "/dev/pty/m18", 0 }, { "/dev/pty/m19", 0 }, { "/dev/pty/m20", 0 }, { "/dev/pty/m21", 0 }, { "/dev/pty/m22", 0 }, { "/dev/pty/m23", 0 }, { "/dev/pty/m24", 0 }, { "/dev/pty/m25", 0 }, { "/dev/pty/m26", 0 }, { "/dev/pty/m27", 0 }, { "/dev/pty/m28", 0 }, { "/dev/pty/m29", 0 }, { "/dev/pty/m30", 0 }, { "/dev/pty/m31", 0 }, { "/dev/pty/m32", 0 }, { "/dev/pty/m33", 0 }, { "/dev/pty/m34", 0 }, { "/dev/pty/m35", 0 }, { "/dev/pty/m36", 0 }, { "/dev/pty/m37", 0 }, { "/dev/pty/m38", 0 }, { "/dev/pty/m39", 0 }, { "/dev/pty/m40", 0 }, { "/dev/pty/m41", 0 }, { "/dev/pty/m42", 0 }, { "/dev/pty/m43", 0 }, { "/dev/pty/m44", 0 }, { "/dev/pty/m45", 0 }, { "/dev/pty/m46", 0 }, { "/dev/pty/m47", 0 }, { "/dev/pty/m48", 0 }, { "/dev/pty/m49", 0 }, { "/dev/pty/m50", 0 }, { "/dev/pty/m51", 0 }, { "/dev/pty/m52", 0 }, { "/dev/pty/m53", 0 }, { "/dev/pty/m54", 0 }, { "/dev/pty/m55", 0 }, { "/dev/pty/m56", 0 }, { "/dev/pty/m57", 0 }, { "/dev/pty/m58", 0 }, { "/dev/pty/m59", 0 }, { "/dev/pty/m60", 0 }, { "/dev/pty/m61", 0 }, { "/dev/pty/m62", 0 }, { "/dev/pty/m63", 0 }, { "/dev/pty/m64", 0 }, { "/dev/pty/m65", 0 }, { "/dev/pty/m66", 0 }, { "/dev/pty/m67", 0 }, { "/dev/pty/m68", 0 }, { "/dev/pty/m69", 0 }, { "/dev/pty/m70", 0 }, { "/dev/pty/m71", 0 }, { "/dev/pty/m72", 0 }, { "/dev/pty/m73", 0 }, { "/dev/pty/m74", 0 }, { "/dev/pty/m75", 0 }, { "/dev/pty/m76", 0 }, { "/dev/pty/m77", 0 }, { "/dev/pty/m78", 0 }, { "/dev/pty/m79", 0 }, { "/dev/pty/m80", 0 }, { "/dev/pty/m81", 0 }, { "/dev/pty/m82", 0 }, { "/dev/pty/m83", 0 }, { "/dev/pty/m84", 0 }, { "/dev/pty/m85", 0 }, { "/dev/pty/m86", 0 }, { "/dev/pty/m87", 0 }, { "/dev/pty/m88", 0 }, { "/dev/pty/m89", 0 }, { "/dev/pty/m90", 0 }, { "/dev/pty/m91", 0 }, { "/dev/pty/m92", 0 }, { "/dev/pty/m93", 0 }, { "/dev/pty/m94", 0 }, { "/dev/pty/m95", 0 }, { "/dev/pty/m96", 0 }, { "/dev/pty/m97", 0 }, { "/dev/pty/m98", 0 }, { "/dev/pty/m99", 0 }, { "/dev/pty/m100", 0 }, { "/dev/pty/m101", 0 }, { "/dev/pty/m102", 0 }, { "/dev/pty/m103", 0 }, { "/dev/pty/m104", 0 }, { "/dev/pty/m105", 0 }, { "/dev/pty/m106", 0 }, { "/dev/pty/m107", 0 }, { "/dev/pty/m108", 0 }, { "/dev/pty/m109", 0 }, { "/dev/pty/m110", 0 }, { "/dev/pty/m111", 0 }, { "/dev/pty/m112", 0 }, { "/dev/pty/m113", 0 }, { "/dev/pty/m114", 0 }, { "/dev/pty/m115", 0 }, { "/dev/pty/m116", 0 }, { "/dev/pty/m117", 0 }, { "/dev/pty/m118", 0 }, { "/dev/pty/m119", 0 }, { "/dev/pty/m120", 0 }, { "/dev/pty/m121", 0 }, { "/dev/pty/m122", 0 }, { "/dev/pty/m123", 0 }, { "/dev/pty/m124", 0 }, { "/dev/pty/m125", 0 }, { "/dev/pty/m126", 0 }, { "/dev/pty/m127", 0 }, { "/dev/pty/m128", 0 }, { "/dev/pty/m129", 0 }, { "/dev/pty/m130", 0 }, { "/dev/pty/m131", 0 }, { "/dev/pty/m132", 0 }, { "/dev/pty/m133", 0 }, { "/dev/pty/m134", 0 }, { "/dev/pty/m135", 0 }, { 
"/dev/pty/m136", 0 }, { "/dev/pty/m137", 0 }, { "/dev/pty/m138", 0 }, { "/dev/pty/m139", 0 }, { "/dev/pty/m140", 0 }, { "/dev/pty/m141", 0 }, { "/dev/pty/m142", 0 }, { "/dev/pty/m143", 0 }, { "/dev/pty/m144", 0 }, { "/dev/pty/m145", 0 }, { "/dev/pty/m146", 0 }, { "/dev/pty/m147", 0 }, { "/dev/pty/m148", 0 }, { "/dev/pty/m149", 0 }, { "/dev/pty/m150", 0 }, { "/dev/pty/m151", 0 }, { "/dev/pty/m152", 0 }, { "/dev/pty/m153", 0 }, { "/dev/pty/m154", 0 }, { "/dev/pty/m155", 0 }, { "/dev/pty/m156", 0 }, { "/dev/pty/m157", 0 }, { "/dev/pty/m158", 0 }, { "/dev/pty/m159", 0 }, { "/dev/pty/m160", 0 }, { "/dev/pty/m161", 0 }, { "/dev/pty/m162", 0 }, { "/dev/pty/m163", 0 }, { "/dev/pty/m164", 0 }, { "/dev/pty/m165", 0 }, { "/dev/pty/m166", 0 }, { "/dev/pty/m167", 0 }, { "/dev/pty/m168", 0 }, { "/dev/pty/m169", 0 }, { "/dev/pty/m170", 0 }, { "/dev/pty/m171", 0 }, { "/dev/pty/m172", 0 }, { "/dev/pty/m173", 0 }, { "/dev/pty/m174", 0 }, { "/dev/pty/m175", 0 }, { "/dev/pty/m176", 0 }, { "/dev/pty/m177", 0 }, { "/dev/pty/m178", 0 }, { "/dev/pty/m179", 0 }, { "/dev/pty/m180", 0 }, { "/dev/pty/m181", 0 }, { "/dev/pty/m182", 0 }, { "/dev/pty/m183", 0 }, { "/dev/pty/m184", 0 }, { "/dev/pty/m185", 0 }, { "/dev/pty/m186", 0 }, { "/dev/pty/m187", 0 }, { "/dev/pty/m188", 0 }, { "/dev/pty/m189", 0 }, { "/dev/pty/m190", 0 }, { "/dev/pty/m191", 0 }, { "/dev/pty/m192", 0 }, { "/dev/pty/m193", 0 }, { "/dev/pty/m194", 0 }, { "/dev/pty/m195", 0 }, { "/dev/pty/m196", 0 }, { "/dev/pty/m197", 0 }, { "/dev/pty/m198", 0 }, { "/dev/pty/m199", 0 }, { "/dev/pty/m200", 0 }, { "/dev/pty/m201", 0 }, { "/dev/pty/m202", 0 }, { "/dev/pty/m203", 0 }, { "/dev/pty/m204", 0 }, { "/dev/pty/m205", 0 }, { "/dev/pty/m206", 0 }, { "/dev/pty/m207", 0 }, { "/dev/pty/m208", 0 }, { "/dev/pty/m209", 0 }, { "/dev/pty/m210", 0 }, { "/dev/pty/m211", 0 }, { "/dev/pty/m212", 0 }, { "/dev/pty/m213", 0 }, { "/dev/pty/m214", 0 }, { "/dev/pty/m215", 0 }, { "/dev/pty/m216", 0 }, { "/dev/pty/m217", 0 }, { "/dev/pty/m218", 0 }, { "/dev/pty/m219", 0 }, { "/dev/pty/m220", 0 }, { "/dev/pty/m221", 0 }, { "/dev/pty/m222", 0 }, { "/dev/pty/m223", 0 }, { "/dev/pty/m224", 0 }, { "/dev/pty/m225", 0 }, { "/dev/pty/m226", 0 }, { "/dev/pty/m227", 0 }, { "/dev/pty/m228", 0 }, { "/dev/pty/m229", 0 }, { "/dev/pty/m230", 0 }, { "/dev/pty/m231", 0 }, { "/dev/pty/m232", 0 }, { "/dev/pty/m233", 0 }, { "/dev/pty/m234", 0 }, { "/dev/pty/m235", 0 }, { "/dev/pty/m236", 0 }, { "/dev/pty/m237", 0 }, { "/dev/pty/m238", 0 }, { "/dev/pty/m239", 0 }, { "/dev/pty/m240", 0 }, { "/dev/pty/m241", 0 }, { "/dev/pty/m242", 0 }, { "/dev/pty/m243", 0 }, { "/dev/pty/m244", 0 }, { "/dev/pty/m245", 0 }, { "/dev/pty/m246", 0 }, { "/dev/pty/m247", 0 }, { "/dev/pty/m248", 0 }, { "/dev/pty/m249", 0 }, { "/dev/pty/m250", 0 }, { "/dev/pty/m251", 0 }, { "/dev/pty/m252", 0 }, { "/dev/pty/m253", 0 }, { "/dev/pty/m254", 0 }, { "/dev/pty/m255", 0 }, { "/dev/pts", 0 }, { "/dev/pts/0", 0 }, { "/dev/pts/1", 0 }, { "/dev/pts/2", 0 }, { "/dev/pts/3", 0 }, { "/dev/pts/4", 0 }, { "/dev/pts/5", 0 }, { "/dev/pts/6", 0 }, { "/dev/pts/7", 0 }, { "/dev/vcc", 0 }, { "/dev/vcc/0", 0 }, { "/dev/vcc/a", 0 }, { "/dev/vcc/1", 0 }, { "/dev/vcc/a1", 0 }, { "/dev/vcc/2", 0 }, { "/dev/vcc/a2", 0 }, { "/dev/vcc/3", 0 }, { "/dev/vcc/a3", 0 }, { "/dev/vcc/5", 0 }, { "/dev/vcc/a5", 0 }, { "/dev/vcc/4", 0 }, { "/dev/vcc/a4", 0 }, { "/dev/vcc/6", 0 }, { "/dev/vcc/a6", 0 }, { "/dev/vcc/7", 0 }, { "/dev/vcc/a7", 0 }, { "/dev/tts", 0 }, { "/dev/tts/0", 0 }, { "/dev/cua", 0 }, { "/dev/cua/0", 0 }, { "/dev/ide", 0 }, { "/dev/ide/host0", 0 }, { 
"/dev/ide/host0/bus0", 0 }, { "/dev/ide/host0/bus0/target0", 0 }, { "/dev/ide/host0/bus0/target0/lun0", 0 }, { "/dev/ide/host0/bus0/target0/lun0/disc", 0 }, { "/dev/ide/host0/bus0/target0/lun0/part1", 0 }, { "/dev/ide/host0/bus0/target0/lun0/part2", 0 }, { "/dev/ide/host0/bus0/target0/lun0/part3", 0 }, { "/dev/ide/host0/bus0/target0/lun0/part4", 0 }, { "/dev/ide/host0/bus0/target0/lun0/part5", 0 }, { "/dev/ide/host0/bus0/target0/lun0/part6", 0 }, { "/dev/ide/host0/bus0/target0/lun0/part7", 0 }, { "/dev/ide/host0/bus0/target0/lun0/part8", 0 }, { "/dev/ide/host0/bus0/target1", 0 }, { "/dev/ide/host0/bus0/target1/lun0", 0 }, { "/dev/ide/host0/bus0/target1/lun0/disc", 0 }, { "/dev/ide/host0/bus0/target1/lun0/part1", 0 }, { "/dev/ide/host0/bus1", 0 }, { "/dev/ide/host0/bus1/target0", 0 }, { "/dev/ide/host0/bus1/target0/lun0", 0 }, { "/dev/ide/host0/bus1/target0/lun0/disc", 0 }, { "/dev/ide/host0/bus1/target0/lun0/part1", 0 }, { "/dev/ide/host0/bus1/target1", 0 }, { "/dev/ide/host0/bus1/target1/lun0", 0 }, { "/dev/discs", 0 }, { "/dev/discs/disc0", 0 }, { "/dev/discs/disc1", 0 }, { "/dev/discs/disc2", 0 }, { "/dev/floppy", 0 }, { "/dev/floppy/0u1440", 0 }, { "/dev/floppy/0u1680", 0 }, { "/dev/floppy/0u1722", 0 }, { "/dev/floppy/0u1743", 0 }, { "/dev/floppy/0u1760", 0 }, { "/dev/floppy/0u1920", 0 }, { "/dev/floppy/0u1840", 0 }, { "/dev/floppy/0u1600", 0 }, { "/dev/floppy/0u360", 0 }, { "/dev/floppy/0u720", 0 }, { "/dev/floppy/0u820", 0 }, { "/dev/floppy/0u830", 0 }, { "/dev/floppy/0u1040", 0 }, { "/dev/floppy/0u1120", 0 }, { "/dev/floppy/0u800", 0 }, { "/dev/floppy/0", 0 }, { "/dev/loop", 0 }, { "/dev/loop/0", 1 }, { "/dev/loop/1", 1 }, { "/dev/loop/2", 1 }, { "/dev/loop/3", 1 }, { "/dev/loop/4", 1 }, { "/dev/loop/5", 1 }, { "/dev/loop/6", 1 }, { "/dev/loop/7", 1 }, { "/dev/cdroms", 0 }, { "/dev/sound", 0 }, { "/dev/sound/dsp", 0 }, { "/dev/sound/dsp1", 0 }, { "/dev/sound/mixer", 0 }, { "/dev/sound/midi", 0 }, { "/dev/usb", 0 }, { "/dev/root", 0 }, { "/dev/initctl", 0 }, { "/dev/xconsole", 0 }, { "/dev/fd", 0 }, { "/dev/stdin", 0 }, { "/dev/stdout", 0 }, { "/dev/stderr", 0 }, { "/dev/route", 0 }, { "/dev/skip", 0 }, { "/dev/USERSOCK", 0 }, { "/dev/fwmonitor", 0 }, { "/dev/ARPD", 0 }, { "/dev/ROUTE6", 0 }, { "/dev/IP6_FW", 0 }, { "/dev/tap0", 0 }, { "/dev/tap1", 0 }, { "/dev/tap2", 0 }, { "/dev/tap3", 0 }, { "/dev/tap4", 0 }, { "/dev/tap5", 0 }, { "/dev/tap6", 0 }, { "/dev/tap7", 0 }, { "/dev/tap8", 0 }, { "/dev/tap9", 0 }, { "/dev/tap10", 0 }, { "/dev/tap11", 0 }, { "/dev/tap12", 0 }, { "/dev/tap13", 0 }, { "/dev/tap14", 0 }, { "/dev/tap15", 0 }, { "/dev/tty1", 0 }, { "/dev/tty2", 0 }, { "/dev/tty3", 0 }, { "/dev/tty4", 0 }, { "/dev/tty5", 0 }, { "/dev/tty6", 0 }, { "/dev/tty7", 0 }, { "/dev/tty8", 0 }, { "/dev/tty9", 0 }, { "/dev/tty10", 0 }, { "/dev/tty11", 0 }, { "/dev/tty12", 0 }, { "/dev/tty13", 0 }, { "/dev/tty14", 0 }, { "/dev/tty15", 0 }, { "/dev/tty16", 0 }, { "/dev/tty17", 0 }, { "/dev/tty18", 0 }, { "/dev/tty19", 0 }, { "/dev/tty20", 0 }, { "/dev/tty21", 0 }, { "/dev/tty22", 0 }, { "/dev/tty23", 0 }, { "/dev/tty24", 0 }, { "/dev/tty25", 0 }, { "/dev/tty26", 0 }, { "/dev/tty27", 0 }, { "/dev/tty28", 0 }, { "/dev/tty29", 0 }, { "/dev/tty30", 0 }, { "/dev/tty31", 0 }, { "/dev/tty32", 0 }, { "/dev/tty33", 0 }, { "/dev/tty34", 0 }, { "/dev/tty35", 0 }, { "/dev/tty36", 0 }, { "/dev/tty37", 0 }, { "/dev/tty38", 0 }, { "/dev/tty39", 0 }, { "/dev/tty40", 0 }, { "/dev/tty41", 0 }, { "/dev/tty42", 0 }, { "/dev/tty43", 0 }, { "/dev/tty44", 0 }, { "/dev/tty45", 0 }, { "/dev/tty46", 0 }, { 
"/dev/tty47", 0 }, { "/dev/tty48", 0 }, { "/dev/tty49", 0 }, { "/dev/tty50", 0 }, { "/dev/tty51", 0 }, { "/dev/tty52", 0 }, { "/dev/tty53", 0 }, { "/dev/tty54", 0 }, { "/dev/tty55", 0 }, { "/dev/tty56", 0 }, { "/dev/tty57", 0 }, { "/dev/tty58", 0 }, { "/dev/tty59", 0 }, { "/dev/tty60", 0 }, { "/dev/tty61", 0 }, { "/dev/tty62", 0 }, { "/dev/tty63", 0 }, { "/dev/tty0", 0 }, { "/dev/psaux", 0 }, { "/dev/ptyp0", 0 }, { "/dev/ptyp1", 0 }, { "/dev/ptyp2", 0 }, { "/dev/ptyp3", 0 }, { "/dev/ptyp4", 0 }, { "/dev/ptyp5", 0 }, { "/dev/ptyp6", 0 }, { "/dev/ptyp7", 0 }, { "/dev/ptyp8", 0 }, { "/dev/ptyp9", 0 }, { "/dev/ptypa", 0 }, { "/dev/ptypb", 0 }, { "/dev/ptypc", 0 }, { "/dev/ptypd", 0 }, { "/dev/ptype", 0 }, { "/dev/ptypf", 0 }, { "/dev/ptyq0", 0 }, { "/dev/ptyq1", 0 }, { "/dev/ptyq2", 0 }, { "/dev/ptyq3", 0 }, { "/dev/ptyq4", 0 }, { "/dev/ptyq5", 0 }, { "/dev/ptyq6", 0 }, { "/dev/ptyq7", 0 }, { "/dev/ptyq8", 0 }, { "/dev/ptyq9", 0 }, { "/dev/ptyqa", 0 }, { "/dev/ptyqb", 0 }, { "/dev/ptyqc", 0 }, { "/dev/ptyqd", 0 }, { "/dev/ptyqe", 0 }, { "/dev/ptyqf", 0 }, { "/dev/ptyr0", 0 }, { "/dev/ptyr1", 0 }, { "/dev/ptyr2", 0 }, { "/dev/ptyr3", 0 }, { "/dev/ptyr4", 0 }, { "/dev/ptyr5", 0 }, { "/dev/ptyr6", 0 }, { "/dev/ptyr7", 0 }, { "/dev/ptyr8", 0 }, { "/dev/ptyr9", 0 }, { "/dev/ptyra", 0 }, { "/dev/ptyrb", 0 }, { "/dev/ptyrc", 0 }, { "/dev/ptyrd", 0 }, { "/dev/ptyre", 0 }, { "/dev/ptyrf", 0 }, { "/dev/ptys0", 0 }, { "/dev/ptys1", 0 }, { "/dev/ptys2", 0 }, { "/dev/ptys3", 0 }, { "/dev/ptys4", 0 }, { "/dev/ptys5", 0 }, { "/dev/ptys6", 0 }, { "/dev/ptys7", 0 }, { "/dev/ptys8", 0 }, { "/dev/ptys9", 0 }, { "/dev/ptysa", 0 }, { "/dev/ptysb", 0 }, { "/dev/ptysc", 0 }, { "/dev/ptysd", 0 }, { "/dev/ptyse", 0 }, { "/dev/ptysf", 0 }, { "/dev/ptyt0", 0 }, { "/dev/ptyt1", 0 }, { "/dev/ptyt2", 0 }, { "/dev/ptyt3", 0 }, { "/dev/ptyt4", 0 }, { "/dev/ptyt5", 0 }, { "/dev/ptyt6", 0 }, { "/dev/ptyt7", 0 }, { "/dev/ptyt8", 0 }, { "/dev/ptyt9", 0 }, { "/dev/ptyta", 0 }, { "/dev/ptytb", 0 }, { "/dev/ptytc", 0 }, { "/dev/ptytd", 0 }, { "/dev/ptyte", 0 }, { "/dev/ptytf", 0 }, { "/dev/ptyu0", 0 }, { "/dev/ptyu1", 0 }, { "/dev/ptyu2", 0 }, { "/dev/ptyu3", 0 }, { "/dev/ptyu4", 0 }, { "/dev/ptyu5", 0 }, { "/dev/ptyu6", 0 }, { "/dev/ptyu7", 0 }, { "/dev/ptyu8", 0 }, { "/dev/ptyu9", 0 }, { "/dev/ptyua", 0 }, { "/dev/ptyub", 0 }, { "/dev/ptyuc", 0 }, { "/dev/ptyud", 0 }, { "/dev/ptyue", 0 }, { "/dev/ptyuf", 0 }, { "/dev/ptyv0", 0 }, { "/dev/ptyv1", 0 }, { "/dev/ptyv2", 0 }, { "/dev/ptyv3", 0 }, { "/dev/ptyv4", 0 }, { "/dev/ptyv5", 0 }, { "/dev/ptyv6", 0 }, { "/dev/ptyv7", 0 }, { "/dev/ptyv8", 0 }, { "/dev/ptyv9", 0 }, { "/dev/ptyva", 0 }, { "/dev/ptyvb", 0 }, { "/dev/ptyvc", 0 }, { "/dev/ptyvd", 0 }, { "/dev/ptyve", 0 }, { "/dev/ptyvf", 0 }, { "/dev/ptyw0", 0 }, { "/dev/ptyw1", 0 }, { "/dev/ptyw2", 0 }, { "/dev/ptyw3", 0 }, { "/dev/ptyw4", 0 }, { "/dev/ptyw5", 0 }, { "/dev/ptyw6", 0 }, { "/dev/ptyw7", 0 }, { "/dev/ptyw8", 0 }, { "/dev/ptyw9", 0 }, { "/dev/ptywa", 0 }, { "/dev/ptywb", 0 }, { "/dev/ptywc", 0 }, { "/dev/ptywd", 0 }, { "/dev/ptywe", 0 }, { "/dev/ptywf", 0 }, { "/dev/ptyx0", 0 }, { "/dev/ptyx1", 0 }, { "/dev/ptyx2", 0 }, { "/dev/ptyx3", 0 }, { "/dev/ptyx4", 0 }, { "/dev/ptyx5", 0 }, { "/dev/ptyx6", 0 }, { "/dev/ptyx7", 0 }, { "/dev/ptyx8", 0 }, { "/dev/ptyx9", 0 }, { "/dev/ptyxa", 0 }, { "/dev/ptyxb", 0 }, { "/dev/ptyxc", 0 }, { "/dev/ptyxd", 0 }, { "/dev/ptyxe", 0 }, { "/dev/ptyxf", 0 }, { "/dev/ptyy0", 0 }, { "/dev/ptyy1", 0 }, { "/dev/ptyy2", 0 }, { "/dev/ptyy3", 0 }, { "/dev/ptyy4", 0 }, { "/dev/ptyy5", 0 }, { 
"/dev/ptyy6", 0 }, { "/dev/ptyy7", 0 }, { "/dev/ptyy8", 0 }, { "/dev/ptyy9", 0 }, { "/dev/ptyya", 0 }, { "/dev/ptyyb", 0 }, { "/dev/ptyyc", 0 }, { "/dev/ptyyd", 0 }, { "/dev/ptyye", 0 }, { "/dev/ptyyf", 0 }, { "/dev/ptyz0", 0 }, { "/dev/ptyz1", 0 }, { "/dev/ptyz2", 0 }, { "/dev/ptyz3", 0 }, { "/dev/ptyz4", 0 }, { "/dev/ptyz5", 0 }, { "/dev/ptyz6", 0 }, { "/dev/ptyz7", 0 }, { "/dev/ptyz8", 0 }, { "/dev/ptyz9", 0 }, { "/dev/ptyza", 0 }, { "/dev/ptyzb", 0 }, { "/dev/ptyzc", 0 }, { "/dev/ptyzd", 0 }, { "/dev/ptyze", 0 }, { "/dev/ptyzf", 0 }, { "/dev/ptya0", 0 }, { "/dev/ptya1", 0 }, { "/dev/ptya2", 0 }, { "/dev/ptya3", 0 }, { "/dev/ptya4", 0 }, { "/dev/ptya5", 0 }, { "/dev/ptya6", 0 }, { "/dev/ptya7", 0 }, { "/dev/ptya8", 0 }, { "/dev/ptya9", 0 }, { "/dev/ptyaa", 0 }, { "/dev/ptyab", 0 }, { "/dev/ptyac", 0 }, { "/dev/ptyad", 0 }, { "/dev/ptyae", 0 }, { "/dev/ptyaf", 0 }, { "/dev/ptyb0", 0 }, { "/dev/ptyb1", 0 }, { "/dev/ptyb2", 0 }, { "/dev/ptyb3", 0 }, { "/dev/ptyb4", 0 }, { "/dev/ptyb5", 0 }, { "/dev/ptyb6", 0 }, { "/dev/ptyb7", 0 }, { "/dev/ptyb8", 0 }, { "/dev/ptyb9", 0 }, { "/dev/ptyba", 0 }, { "/dev/ptybb", 0 }, { "/dev/ptybc", 0 }, { "/dev/ptybd", 0 }, { "/dev/ptybe", 0 }, { "/dev/ptybf", 0 }, { "/dev/ptyc0", 0 }, { "/dev/ptyc1", 0 }, { "/dev/ptyc2", 0 }, { "/dev/ptyc3", 0 }, { "/dev/ptyc4", 0 }, { "/dev/ptyc5", 0 }, { "/dev/ptyc6", 0 }, { "/dev/ptyc7", 0 }, { "/dev/ptyc8", 0 }, { "/dev/ptyc9", 0 }, { "/dev/ptyca", 0 }, { "/dev/ptycb", 0 }, { "/dev/ptycc", 0 }, { "/dev/ptycd", 0 }, { "/dev/ptyce", 0 }, { "/dev/ptycf", 0 }, { "/dev/ptyd0", 0 }, { "/dev/ptyd1", 0 }, { "/dev/ptyd2", 0 }, { "/dev/ptyd3", 0 }, { "/dev/ptyd4", 0 }, { "/dev/ptyd5", 0 }, { "/dev/ptyd6", 0 }, { "/dev/ptyd7", 0 }, { "/dev/ptyd8", 0 }, { "/dev/ptyd9", 0 }, { "/dev/ptyda", 0 }, { "/dev/ptydb", 0 }, { "/dev/ptydc", 0 }, { "/dev/ptydd", 0 }, { "/dev/ptyde", 0 }, { "/dev/ptydf", 0 }, { "/dev/ptye0", 0 }, { "/dev/ptye1", 0 }, { "/dev/ptye2", 0 }, { "/dev/ptye3", 0 }, { "/dev/ptye4", 0 }, { "/dev/ptye5", 0 }, { "/dev/ptye6", 0 }, { "/dev/ptye7", 0 }, { "/dev/ptye8", 0 }, { "/dev/ptye9", 0 }, { "/dev/ptyea", 0 }, { "/dev/ptyeb", 0 }, { "/dev/ptyec", 0 }, { "/dev/ptyed", 0 }, { "/dev/ptyee", 0 }, { "/dev/ptyef", 0 }, { "/dev/vcs", 0 }, { "/dev/vcsa", 0 }, { "/dev/vcs1", 0 }, { "/dev/vcsa1", 0 }, { "/dev/ttyS0", 0 }, { "/dev/cua0", 0 }, { "/dev/hda", 0 }, { "/dev/hda1", 2 }, { "/dev/hda2", 2 }, { "/dev/hda3", 2 }, { "/dev/hda4", 2 }, { "/dev/hda5", 2 }, { "/dev/hda6", 0 }, { "/dev/hda7", 0 }, { "/dev/hda8", 0 }, { "/dev/hdb", 0 }, { "/dev/hdb1", 2 }, { "/dev/hdc", 0 }, { "/dev/hdc1", 2 }, { "/dev/fd0u1440", 0 }, { "/dev/fd0u1680", 0 }, { "/dev/fd0u1722", 0 }, { "/dev/fd0u1743", 0 }, { "/dev/fd0u1760", 0 }, { "/dev/fd0u1920", 0 }, { "/dev/fd0u1840", 0 }, { "/dev/fd0u1600", 0 }, { "/dev/fd0u360", 0 }, { "/dev/fd0u720", 0 }, { "/dev/fd0u820", 0 }, { "/dev/fd0u830", 0 }, { "/dev/fd0u1040", 0 }, { "/dev/fd0u1120", 0 }, { "/dev/fd0u800", 0 }, { "/dev/fd0", 0 }, { "/dev/loop0", 0 }, { "/dev/loop1", 0 }, { "/dev/loop2", 0 }, { "/dev/loop3", 0 }, { "/dev/loop4", 0 }, { "/dev/loop5", 0 }, { "/dev/loop6", 0 }, { "/dev/loop7", 0 }, { "/dev/dsp", 0 }, { "/dev/dsp1", 0 }, { "/dev/mixer", 0 }, { "/dev/midi", 0 }, { "/dev/lvm", 0 }, { "/dev/vg0", 0 }, { "/dev/vg0/group", 0 }, { "/dev/vg0/packages", 0 }, { "/dev/vg0/photos", 0 }, { "/dev/vg0/music", 0 }, { "/dev/log", 0 }, { "/dev/MAKEDEV", 0 }, { "/dev/printer", 0 }, { "/dev/vcs2", 0 }, { "/dev/vcsa2", 0 }, { "/dev/vcs3", 0 }, { "/dev/vcsa3", 0 }, { "/dev/vcs5", 0 }, { "/dev/vcsa5", 0 }, 
{ "/dev/vcs4", 0 }, { "/dev/vcsa4", 0 }, { "/dev/vcs6", 0 }, { "/dev/vcsa6", 0 }, { "/dev/nvidia0", 0 }, { "/dev/nvidia1", 0 }, { "/dev/nvidia2", 0 }, { "/dev/nvidia3", 0 }, { "/dev/nvidiactl", 0 }, { "/dev/vcs7", 0 }, { "/dev/vcsa7", 0 }, { NULL, 0 } }; LVM2.2.02.176/test/unit/dmlist_t.c0000644000000000000120000000211513176752421015244 0ustar rootwheel/* * Copyright (C) 2015 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "units.h" int dmlist_init(void) { return 0; } int dmlist_fini(void) { return 0; } static void test_dmlist_splice(void) { struct dm_list a[10]; struct dm_list list1; struct dm_list list2; unsigned i; dm_list_init(&list1); dm_list_init(&list2); for (i = 0; i < DM_ARRAY_SIZE(a); i++) dm_list_add(&list1, &a[i]); dm_list_splice(&list2, &list1); CU_ASSERT_EQUAL(dm_list_size(&list1), 0); CU_ASSERT_EQUAL(dm_list_size(&list2), 10); } CU_TestInfo dmlist_list[] = { { (char*)"dmlist_splice", test_dmlist_splice }, //{ (char*)"dmlist", test_strncpy }, CU_TEST_INFO_NULL }; LVM2.2.02.176/test/shell/0000755000000000000120000000000013176752421013412 5ustar rootwheelLVM2.2.02.176/test/shell/lvchange-syncaction-raid.sh0000644000000000000120000000474713176752421020636 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014-2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # test activation race for raid's --syncaction check SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 # Current support for syncaction in cluster is broken # might get fixed one day though # meanwhile skipped SKIP_WITH_CLVMD=1 . lib/inittest # Proper mismatch count 1.5.2+ upstream, 1.3.5 < x < 1.4.0 in RHEL6 aux have_raid 1 3 5 && ! 
aux have_raid 1 4 0 || aux have_raid 1 5 2 || skip aux prepare_vg 3 lvcreate -n $lv1 $vg -l1 --type raid1 aux wait_for_sync $vg $lv1 START=$(get pv_field "$dev2" pe_start --units 1k) METASIZE=$(get lv_field $vg/${lv1}_rmeta_1 size -a --units 1k) SEEK=$((${START%\.00k} + ${METASIZE%\.00k})) # Overwrite some portion of _rimage_1 #aux delay_dev "$dev2" 10 10 dd if=/dev/urandom of="$dev2" bs=1K count=1 seek=$SEEK oflag=direct # FIXME # Some delay - there is currently a race in the upstream kernel # test may occasionally fail with: # device-mapper: message ioctl on failed: Device or resource busy # # Heinz's kernel seems to fix this particular issue but # has some other problem for now aux udev_wait lvchange --syncaction check $vg/$lv1 # Wait till scrubbing is finished aux wait_for_sync $vg $lv1 check lv_field $vg/$lv1 raid_mismatch_count "128" # Let's deactivate lvchange -an $vg/$lv1 lvchange -ay $vg/$lv1 # no one has it open and target is read & running dmsetup info -c | grep $vg #sleep 10 < "$DM_DEV_DIR/$vg/$lv1" & # "check" should find discrepancies but not change them # 'lvs' should show results # FIXME # this looks like some race with 'write' during activation # and syncaction... # For now it fails with: # device-mapper: message ioctl on failed: Device or resource busy # # As a solution for now - user needs to run --syncaction on synchronous raid array # aux wait_for_sync $vg $lv1 # Check raid array doesn't know about error yet check lv_field $vg/$lv1 raid_mismatch_count "0" # Start scrubbing lvchange --syncaction check $vg/$lv1 # Wait till scrubbing is finished aux wait_for_sync $vg $lv1 # Retest mismatch exists check lv_field $vg/$lv1 raid_mismatch_count "128" vgremove -ff $vg LVM2.2.02.176/test/shell/dmstats-create.sh0000644000000000000120000000156113176752421016671 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMPOLLD=1 . lib/inittest # Don't attempt to test stats with driver < 4.33.00 aux driver_at_least 4 33 || skip # ensure we can create devices (uses dmsetup, etc) aux prepare_devs 1 # basic dmstats create commands dmstats create "$dev1" dmstats create --start 0 --len 1 "$dev1" dmstats create --segments "$dev1" dmstats create --precise "$dev1" dmstats create --bounds 10ms,20ms,30ms "$dev1" LVM2.2.02.176/test/shell/pvmove-raid-segtypes.sh0000644000000000000120000000602613176752421020044 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description="ensure pvmove works with raid segment types" SKIP_WITH_LVMLOCKD=1 SKIP_WITH_CLVMD=1 .
lib/inittest which md5sum || skip aux have_raid 1 3 5 || skip aux prepare_pvs 5 20 get_devs vgcreate -c n -s 128k "$vg" "${DEVICES[@]}" for mode in "--atomic" "" do # Each of the following tests does: # 1) Create two LVs - one linear and one other segment type # The two LVs will share a PV. # 2) Move both LVs together # 3) Move only the second LV by name # Testing pvmove of RAID1 LV lvcreate -l 2 -n ${lv1}_foo $vg "$dev1" lvcreate -l 2 --type raid1 -m 1 -n $lv1 $vg "$dev1" "$dev2" check lv_tree_on $vg ${lv1}_foo "$dev1" check lv_tree_on $vg $lv1 "$dev1" "$dev2" aux mkdev_md5sum $vg $lv1 pvmove $mode "$dev1" "$dev5" check lv_tree_on $vg ${lv1}_foo "$dev5" check lv_tree_on $vg $lv1 "$dev2" "$dev5" check dev_md5sum $vg $lv1 pvmove $mode -n $lv1 "$dev5" "$dev4" check lv_tree_on $vg $lv1 "$dev2" "$dev4" check lv_tree_on $vg ${lv1}_foo "$dev5" check dev_md5sum $vg $lv1 lvremove -ff $vg # Testing pvmove of RAID10 LV lvcreate -l 2 -n ${lv1}_foo $vg "$dev1" lvcreate -l 4 --type raid10 -i 2 -m 1 -n $lv1 $vg \ "$dev1" "$dev2" "$dev3" "$dev4" check lv_tree_on $vg ${lv1}_foo "$dev1" check lv_tree_on $vg $lv1 "$dev1" "$dev2" "$dev3" "$dev4" aux mkdev_md5sum $vg $lv1 # Check collocation of SubLVs is prohibited not pvmove $mode -n ${lv1}_rimage_0 "$dev1" "$dev2" check lv_tree_on $vg $lv1 "$dev1" "$dev2" "$dev3" "$dev4" not pvmove $mode -n ${lv1}_rimage_1 "$dev2" "$dev1" check lv_tree_on $vg $lv1 "$dev1" "$dev2" "$dev3" "$dev4" not pvmove $mode -n ${lv1}_rmeta_0 "$dev1" "$dev3" check lv_tree_on $vg $lv1 "$dev1" "$dev2" "$dev3" "$dev4" pvmove $mode "$dev1" "$dev5" check lv_tree_on $vg ${lv1}_foo "$dev5" check lv_tree_on $vg $lv1 "$dev2" "$dev3" "$dev4" "$dev5" check dev_md5sum $vg $lv1 pvmove $mode -n $lv1 "$dev5" "$dev1" check lv_tree_on $vg $lv1 "$dev1" "$dev2" "$dev3" "$dev4" check lv_tree_on $vg ${lv1}_foo "$dev5" check dev_md5sum $vg $lv1 lvremove -ff $vg # Testing pvmove of RAID5 LV lvcreate -l 2 -n ${lv1}_foo $vg "$dev1" lvcreate -l 4 --type raid5 -i 2 -n $lv1 $vg \ "$dev1" "$dev2" "$dev3" check lv_tree_on $vg ${lv1}_foo "$dev1" check lv_tree_on $vg $lv1 "$dev1" "$dev2" "$dev3" aux mkdev_md5sum $vg $lv1 pvmove $mode "$dev1" "$dev5" check lv_tree_on $vg ${lv1}_foo "$dev5" check lv_tree_on $vg $lv1 "$dev2" "$dev3" "$dev5" check dev_md5sum $vg $lv1 pvmove $mode -n $lv1 "$dev5" "$dev4" check lv_tree_on $vg $lv1 "$dev2" "$dev3" "$dev4" check lv_tree_on $vg ${lv1}_foo "$dev5" check dev_md5sum $vg $lv1 lvremove -ff $vg done vgremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-cache-thin.sh0000644000000000000120000000252213176752421017772 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Exercise usage of stacked cache volume used in thin pool volumes SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux have_cache 1 3 0 || skip aux have_thin 1 0 0 || skip aux prepare_vg 5 80 lvcreate -L10 -n cpool $vg lvcreate -L10 -n tpool $vg lvcreate -L10 -n $lv1 $vg lvconvert --yes --cache --cachepool cpool $vg/tpool # Currently the only allowed stacking is cache thin data volume lvconvert --yes --type thin-pool $vg/tpool lvcreate -V10 -T -n $lv2 $vg/tpool aux mkdev_md5sum $vg $lv2 lvconvert --splitcache $vg/tpool check dev_md5sum $vg $lv2 lvchange -an $vg lvchange -ay $vg check dev_md5sum $vg $lv2 lvs -a $vg lvconvert --yes --cache --cachepool cpool $vg/tpool lvconvert --yes -T --thinpool $vg/tpool $vg/$lv1 check lv_field $vg/tpool segtype "thin-pool" check lv_field $vg/$lv1 segtype "thin" lvconvert --uncache $vg/tpool lvs -a $vg vgremove -f $vg LVM2.2.02.176/test/shell/pvmove-background.sh0000644000000000000120000000162413176752421017402 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Check pvmove behavior when it's progress and machine is rebooted SKIP_WITH_LVMLOCKD=1 . lib/inittest aux prepare_vg 3 for mode in "--atomic" "" do lvcreate -aey -l1 -n $lv1 $vg "$dev1" lvs -o +devices | tee out grep "$dev1" out LVM_TEST_TAG="kill_me_$PREFIX" pvmove $mode -i 1 -b "$dev1" "$dev2" sleep 5 # arbitrary... lvs -o +devices | tee out not grep "pvmove" out lvs -o +devices | tee out grep "$dev2" out lvremove -ff $vg done LVM2.2.02.176/test/shell/lvmetad-no-cluster.sh0000644000000000000120000000126513176752421017477 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITHOUT_CLVMD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 2 aux prepare_lvmetad vgs 2>&1 | tee out grep "WARNING: Not using lvmetad because locking_type is 3" out vgremove -ff $vg LVM2.2.02.176/test/shell/topology-support.sh0000644000000000000120000000735713176752421017350 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010-2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest which mkfs.ext3 || skip lvdev_() { echo "$DM_DEV_DIR/$1/$2" } test_snapshot_mount() { lvcreate -aey -L4M -n $lv1 $vg "$dev1" mkfs.ext3 "$(lvdev_ $vg $lv1)" mkdir test_mnt mount "$(lvdev_ $vg $lv1)" test_mnt lvcreate -L4M -n $lv2 -s $vg/$lv1 umount test_mnt aux udev_wait # mount the origin mount "$(lvdev_ $vg $lv1)" test_mnt umount test_mnt aux udev_wait # mount the snapshot mount "$(lvdev_ $vg $lv2)" test_mnt umount test_mnt rm -r test_mnt vgchange -an $vg lvremove -f $vg/$lv1 } # FIXME add more topology-specific tests and validation (striped LVs, etc) NUM_DEVS=1 PER_DEV_SIZE=34 DEV_SIZE=$(( NUM_DEVS * PER_DEV_SIZE )) # --------------------------------------------- # Create "desktop-class" 4K drive # (logical_block_size=512, physical_block_size=4096, alignment_offset=0): LOGICAL_BLOCK_SIZE=512 aux prepare_scsi_debug_dev $DEV_SIZE \ sector_size=$LOGICAL_BLOCK_SIZE physblk_exp=3 # Test that kernel supports topology if [ ! -e "/sys/block/$(basename "$(< SCSI_DEBUG_DEV)")/alignment_offset" ] ; then aux cleanup_scsi_debug_dev skip fi check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "$LOGICAL_BLOCK_SIZE" aux prepare_pvs $NUM_DEVS $PER_DEV_SIZE get_devs vgcreate $vg "${DEVICES[@]}" test_snapshot_mount vgremove $vg aux cleanup_scsi_debug_dev # --------------------------------------------- # Create "desktop-class" 4K drive w/ 63-sector DOS partition compensation # (logical_block_size=512, physical_block_size=4096, alignment_offset=3584): LOGICAL_BLOCK_SIZE=512 aux prepare_scsi_debug_dev $DEV_SIZE \ sector_size=$LOGICAL_BLOCK_SIZE physblk_exp=3 lowest_aligned=7 check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size $LOGICAL_BLOCK_SIZE aux prepare_pvs $NUM_DEVS $PER_DEV_SIZE vgcreate $vg "${DEVICES[@]}" test_snapshot_mount vgremove $vg aux cleanup_scsi_debug_dev # --------------------------------------------- # Create "enterprise-class" 4K drive # (logical_block_size=4096, physical_block_size=4096, alignment_offset=0): LOGICAL_BLOCK_SIZE=4096 aux prepare_scsi_debug_dev $DEV_SIZE \ sector_size=$LOGICAL_BLOCK_SIZE check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size $LOGICAL_BLOCK_SIZE aux prepare_pvs $NUM_DEVS $PER_DEV_SIZE vgcreate $vg "${DEVICES[@]}" test_snapshot_mount vgremove $vg aux cleanup_scsi_debug_dev # scsi_debug option opt_blks appeared in Oct 2010 aux kernel_at_least 2 6 37 || exit 0 # --------------------------------------------- # Create "enterprise-class" 512 drive w/ HW raid stripe_size = 768K # (logical_block_size=512, physical_block_size=512, alignment_offset=0): # - tests case where optimal_io_size=768k < default PE alignment=1MB LOGICAL_BLOCK_SIZE=512 aux prepare_scsi_debug_dev $DEV_SIZE \ sector_size=$LOGICAL_BLOCK_SIZE opt_blks=1536 check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size $LOGICAL_BLOCK_SIZE check sysfs "$(< SCSI_DEBUG_DEV)" queue/optimal_io_size 786432 aux prepare_pvs 1 $PER_DEV_SIZE # Kernel (3.19) could provide wrong results - in this case skip # test with incorrect result - lvm2 can't figure out good values. SHOULD="" check sysfs "$dev1" queue/optimal_io_size 786432 || SHOULD=should $SHOULD check pv_field "${DEVICES[@]}" pe_start 768.00k aux cleanup_scsi_debug_dev LVM2.2.02.176/test/shell/pv-range-overflow.sh0000644000000000000120000000230513176752421017326 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2011 Red Hat, Inc. All rights reserved. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # 'Ensure that pvmove diagnoses PE-range values 2^32 and larger.' SKIP_WITH_LVMLOCKD=1 . lib/inittest aux prepare_vg 2 lvcreate -L4 -n $lv $vg # Test for the bogus diagnostic reported in BZ 284771 # http://bugzilla.redhat.com/284771. # 'run pvmove with an unrecognized LV name to show bad diagnostic' not pvmove -v -nbogus "$dev1" "$dev2" 2> err grep "Logical volume bogus not found." err # With lvm-2.02.28 and earlier, on a system with 64-bit "long int", # the PE range parsing code would accept values up to 2^64-1, but would # silently truncate them to int32_t. I.e., $dev1:$(echo 2^32|bc) would be # treated just like $dev1:0. # 'run the offending pvmove command' not pvmove -v -n$lv "$dev1":4294967296 "$dev2" vgremove -ff $vg LVM2.2.02.176/test/shell/activate-missing.sh0000644000000000000120000000452413176752421017222 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test activation behaviour with devices missing. # - snapshots and their origins are only activated together; if one fails, both # fail # - partial mirrors are not activated (but maybe they should? maybe we should # instead lvconvert --repair them?) # - linear LVs with bits missing are not activated SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux prepare_vg 4 lvcreate -l1 -n linear1 $vg "$dev1" lvcreate -l1 -n linear2 $vg "$dev2" lvcreate -l2 -n linear12 $vg "$dev1":4 "$dev2":4 lvcreate -aey -l1 -n origin1 $vg "$dev1" lvcreate -s $vg/origin1 -l1 -n s_napshot2 "$dev2" lvcreate -aey -l1 --type mirror -m1 -n mirror12 --mirrorlog core $vg "$dev1" "$dev2" lvcreate -aey -l1 --type mirror -m1 -n mirror123 $vg "$dev1" "$dev2" "$dev3" vgchange -a n $vg aux disable_dev "$dev1" not vgchange -a y $vg not vgck $vg check inactive $vg linear1 check active $vg linear2 check inactive $vg origin1 check inactive $vg s_napshot2 check inactive $vg linear12 check inactive $vg mirror12 check inactive $vg mirror123 vgchange -a n $vg aux enable_dev "$dev1" aux disable_dev "$dev2" not vgchange -aey $vg not vgck $vg check active $vg linear1 check inactive $vg linear2 check inactive $vg linear12 check inactive $vg origin1 check inactive $vg s_napshot2 check inactive $vg mirror12 check inactive $vg mirror123 vgchange -a n $vg aux enable_dev "$dev2" aux disable_dev "$dev3" not vgchange -aey $vg not vgck $vg check active $vg origin1 check active $vg s_napshot2 check active $vg linear1 check active $vg linear2 check active $vg linear12 check inactive $vg mirror123 check active $vg mirror12 vgchange -a n $vg aux enable_dev "$dev3" aux disable_dev "$dev4" vgchange -aey $vg not vgck $vg check active $vg origin1 check active $vg s_napshot2 check active $vg linear1 check active $vg linear2 check active $vg linear12 check active $vg mirror12 check active $vg mirror123 vgremove -ff $vg LVM2.2.02.176/test/shell/lvcreate-thin-power2.sh0000644000000000000120000000215313176752421017730 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # test support for non-power-of-2 thin chunk size # SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest # # Main # aux have_thin 1 4 0 || skip aux prepare_pvs 2 64 get_devs vgcreate -s 64K "$vg" "${DEVICES[@]}" # create non-power-of-2 pool lvcreate -l100 -c 192 -T $vg/pool check lv_field $vg/pool discards "passdown" # check we cannot change discards settings not lvchange --discard ignore $vg/pool lvchange --discard nopassdown $vg/pool check lv_field $vg/pool discards "nopassdown" # must be multiple of 64KB not lvcreate -l100 -c 168 -T $vg/pool1 vgremove -ff $vg LVM2.2.02.176/test/shell/lvchange-raid.sh0000644000000000000120000002415613176752421016462 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest # Writemostly has been in every version since the begining # Device refresh in 1.5.1 upstream and 1.3.4 < x < 1.4.0 in RHEL6 # Sync action in 1.5.0 upstream and 1.3.3 < x < 1.4.0 in RHEL6 # Proper mismatch count 1.5.2 upstream,1.3.5 < x < 1.4.0 in RHEL6 # # We will simplify and simple test for 1.5.2 and 1.3.5 < x < 1.4.0 aux have_raid 1 3 5 && ! aux have_raid 1 4 0 || aux have_raid 1 5 2 || skip # DEVICE "$dev6" is reserved for non-RAID LVs that # will not undergo failure aux prepare_vg 6 # run_writemostly_check run_writemostly_check() { local vg=$1 local lv=${2}${THIN_POSTFIX} local segtype= local d0 local d1 segtype=$(get lv_field $vg/$lv segtype -a) d0=$(get lv_devices $vg/${lv}_rimage_0) d1=$(get lv_devices $vg/${lv}_rimage_1) printf "#\n#\n#\n# %s/%s (%s): run_writemostly_check\n#\n#\n#\n" \ $vg $lv $segtype # I've seen this sync fail. when it does, it looks like sync # thread has not been started... haven't repo'ed yet. aux wait_for_sync $vg $lv # No writemostly flag should be there yet. check lv_attr_bit health $vg/${lv}_rimage_0 "-" check lv_attr_bit health $vg/${lv}_rimage_1 "-" if [ "$segtype" != "raid1" ]; then not lvchange --writemostly $d0 $vg/$lv return fi # Set the flag lvchange --writemostly $d0 $vg/$lv check lv_attr_bit health $vg/${lv}_rimage_0 "w" # Running again should leave it set (not toggle) lvchange --writemostly $d0 $vg/$lv check lv_attr_bit health $vg/${lv}_rimage_0 "w" # Running again with ':y' should leave it set lvchange --writemostly $d0:y $vg/$lv check lv_attr_bit health $vg/${lv}_rimage_0 "w" # ':n' should unset it lvchange --writemostly $d0:n $vg/$lv check lv_attr_bit health $vg/${lv}_rimage_0 "-" # ':n' again should leave it unset lvchange --writemostly $d0:n $vg/$lv check lv_attr_bit health $vg/${lv}_rimage_0 "-" # ':t' toggle to set lvchange --writemostly $d0:t $vg/$lv check lv_attr_bit health $vg/${lv}_rimage_0 "w" # ':t' toggle to unset lvchange --writemostly $d0:t $vg/$lv check lv_attr_bit health $vg/${lv}_rimage_0 "-" # ':y' to set lvchange --writemostly $d0:y $vg/$lv check lv_attr_bit health $vg/${lv}_rimage_0 "w" # Toggle both at once lvchange --writemostly $d0:t --writemostly $d1:t $vg/$lv check lv_attr_bit health $vg/${lv}_rimage_0 "-" check lv_attr_bit health $vg/${lv}_rimage_1 "w" # Toggle both at once again lvchange --writemostly $d0:t --writemostly $d1:t $vg/$lv check lv_attr_bit health $vg/${lv}_rimage_0 "w" check lv_attr_bit health $vg/${lv}_rimage_1 "-" # Toggle one, unset the other lvchange --writemostly $d0:n --writemostly $d1:t $vg/$lv check lv_attr_bit health $vg/${lv}_rimage_0 "-" check lv_attr_bit health $vg/${lv}_rimage_1 "w" # Toggle one, set the other lvchange --writemostly $d0:y --writemostly $d1:t $vg/$lv check lv_attr_bit health $vg/${lv}_rimage_0 "w" check lv_attr_bit health $vg/${lv}_rimage_1 "-" # Partial flag supercedes writemostly flag aux disable_dev $d0 check lv_attr_bit health $vg/${lv}_rimage_0 "p" # It is possible for the kernel to detect the failed device before # we re-enable it. If so, the field will be set to 'r'efresh since # that also takes precedence over 'w'ritemostly. If this has happened, # we refresh the LV and then check for 'w'. 
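# Side note (illustrative only, not executed by this test): the per-image health
# bit inspected above is also exposed through lvs reporting fields, which can be
# handy when watching the 'p' -> 'r' -> 'w' transitions by hand; exact output is
# not relied upon here:
#   lvs -a -o name,lv_attr,lv_health_status $vg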
aux enable_dev $d0 check lv_attr_bit health $vg/${lv}_rimage_0 "r" && lvchange --refresh $vg/$lv check lv_attr_bit health $vg/${lv}_rimage_0 "w" # Catch Bad writebehind values invalid lvchange --writebehind "invalid" $vg/$lv invalid lvchange --writebehind -256 $vg/$lv # Set writebehind check lv_field $vg/$lv raid_write_behind "" lvchange --writebehind 512 $vg/$lv check lv_field $vg/$lv raid_write_behind "512" # Converting to linear should clear flags and writebehind not lvconvert -m 0 $vg/$lv $d1 lvconvert -y -m 0 $vg/$lv $d1 lvconvert -y --type raid1 -m 1 $vg/$lv $d1 check lv_field $vg/$lv raid_write_behind "" check lv_attr_bit health $vg/${lv}_rimage_0 "-" check lv_attr_bit health $vg/${lv}_rimage_1 "-" } # run_syncaction_check run_syncaction_check() { local device local seek local size local tmp local vg=$1 local lv=${2}${THIN_POSTFIX} printf "#\n#\n#\n# %s/%s (%s): run_syncaction_check\n#\n#\n#\n" \ $vg $lv "$(get lv_field "$vg/$lv" segtype -a)" aux wait_for_sync $vg $lv device=$(get lv_devices $vg/${lv}_rimage_1) size=$(get lv_field $vg/${lv}_rimage_1 size -a --units 1k) size=$(( ${size%\.00k} / 2 )) tmp=$(get pv_field "$device" mda_size --units 1k) seek=${tmp%\.00k} # Jump over MDA tmp=$(get lv_field $vg/${lv}_rmeta_1 size -a --units 1k) seek=$(( seek + ${tmp%\.00k} )) # Jump over RAID metadata image seek=$(( seek + size )) # Jump halfway through the RAID image check lv_attr_bit health $vg/$lv "-" check lv_field $vg/$lv raid_mismatch_count "0" # Overwrite the last half of one of the PVs with crap dd if=/dev/urandom of="$device" bs=1k count=$size seek=$seek sync # Cycle the LV so we don't grab stripe cache buffers instead # of reading disk. This can happen with RAID 4/5/6. You # may think this is bad because those buffers could prevent # us from seeing bad disk blocks, however, the stripe cache # is not long lived. (RAID1/10 are immediately checked.) lvchange -an $vg/$lv lvchange -ay $vg/$lv # "check" should find discrepancies but not change them # 'lvs' should show results lvchange --syncaction check $vg/$lv aux wait_for_sync $vg $lv check lv_attr_bit health $vg/$lv "m" not check lv_field $vg/$lv raid_mismatch_count "0" # "repair" will fix discrepancies lvchange --syncaction repair $vg/$lv aux wait_for_sync $vg $lv # Final "check" should show no mismatches # 'lvs' should show results lvchange --syncaction check $vg/$lv aux wait_for_sync $vg $lv check lv_attr_bit health $vg/$lv "-" check lv_field $vg/$lv raid_mismatch_count "0" } # run_refresh_check # Assumes "$dev2" is in the array run_refresh_check() { local size local sizelv local vg=$1 local lv=${2}${THIN_POSTFIX} printf "#\n#\n#\n# %s/%s (%s): run_refresh_check\n#\n#\n#\n" \ $vg $lv "$(get lv_field $vg/$lv segtype -a)" aux wait_for_sync $vg $lv sizelv=$vg/$lv test -z "$THIN_POSTFIX" || sizelv=$vg/thinlv size=$(get lv_field $sizelv size --units 1k) size=${size%\.00k} # Disable dev2 and do some I/O to make the kernel notice aux disable_dev "$dev2" dd if=/dev/urandom of="$DM_DEV_DIR/$sizelv" bs=1k count=$size sync # Check for 'p'artial flag check lv_attr_bit health $vg/$lv "p" dmsetup status lvs -a -o name,attr,devices $vg aux enable_dev "$dev2" dmsetup status lvs -a -o name,attr,devices $vg # Check for 'r'efresh flag check lv_attr_bit health $vg/$lv "r" lvchange --refresh $vg/$lv aux wait_for_sync $vg $lv check lv_attr_bit health $vg/$lv "-" # Writing random data above should mean that the devices # were out-of-sync. The refresh should have taken care # of properly reintegrating the device. 
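# For reference (sketch only, not executed here): the scrubbing state driven by
# --syncaction is also visible through lvs fields; as the assertions in this file
# rely on, 'check' only counts mismatches while 'repair' also rewrites them:
#   lvs -a -o name,raid_sync_action,raid_mismatch_count $vg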
lvchange --syncaction repair $vg/$lv aux wait_for_sync $vg $lv check lv_attr_bit health $vg/$lv "-" } # run_recovery_rate_check # Assumes "$dev2" is in the array run_recovery_rate_check() { local vg=$1 local lv=${2}${THIN_POSTFIX} printf "#\n#\n#\n# %s/%s (%s): run_recovery_rate_check\n#\n#\n#\n" \ $vg $lv "$(get lv_field $vg/$lv segtype -a)" lvchange --minrecoveryrate 50 $vg/$lv lvchange --maxrecoveryrate 100 $vg/$lv check lv_field $vg/$lv raid_min_recovery_rate "50" check lv_field $vg/$lv raid_max_recovery_rate "100" } # run_checks <"-"|snapshot_dev|"thinpool_data"|"thinpool_meta"> run_checks() { THIN_POSTFIX="" if [ -z "$3" ]; then printf "#\n#\n# run_checks: Too few arguments\n#\n#\n" return 1 elif [ '-' = "$3" ]; then printf "#\n#\n# run_checks: Simple check\n#\n#\n" run_writemostly_check $1 $2 run_syncaction_check $1 $2 run_refresh_check $1 $2 run_recovery_rate_check $1 $2 elif [ "thinpool_data" = "$3" ]; then printf "#\n#\n# run_checks: RAID as thinpool data\n#\n#\n" # Hey, specifying devices for thin allocation doesn't work # lvconvert -y --thinpool $1/$2 "$dev6" lvcreate -aey -L 2M -n ${2}_meta $1 "$dev6" lvconvert --thinpool $1/$2 --poolmetadata ${2}_meta lvcreate -T $1/$2 -V 1 -n thinlv THIN_POSTFIX="_tdata" run_writemostly_check $1 $2 run_syncaction_check $1 $2 run_refresh_check $1 $2 run_recovery_rate_check $1 $2 elif [ "thinpool_meta" = "$3" ]; then printf "#\n#\n# run_checks: RAID as thinpool metadata\n#\n#\n" lvrename $1/$2 ${2}_meta lvcreate -aey -L 2M -n $2 $1 "$dev6" lvconvert -y --thinpool $1/$2 --poolmetadata ${2}_meta lvcreate -T $1/$2 -V 1 -n thinlv THIN_POSTFIX="_tmeta" run_writemostly_check $1 $2 run_syncaction_check $1 $2 run_refresh_check $1 $2 run_recovery_rate_check $1 $2 elif [ "snapshot" = "$3" ]; then printf "#\n#\n# run_checks: RAID under snapshot\n#\n#\n" lvcreate -aey -s $1/$2 -l 4 -n snap "$dev6" run_writemostly_check $1 $2 run_syncaction_check $1 $2 run_refresh_check $1 $2 run_recovery_rate_check $1 $2 lvremove -ff $1/snap else printf "#\n#\n# run_checks: Invalid argument\n#\n#\n" return 1 fi } run_types() { for i in $TEST_TYPES ; do lvcreate -n $lv1 $vg -L2M --type "$@" run_checks $vg $lv1 $i lvremove -ff $vg done } ######################################################## # MAIN ######################################################## TEST_TYPES="- snapshot" # RAID works EX in cluster # thinpool works EX in cluster # but they don't work together in a cluster yet # (nor does thinpool+mirror work in a cluster yet) test ! -e LOCAL_CLVMD && aux have_thin 1 8 0 && TEST_TYPE="$TEST_TYPES thinpool_data thinpool_meta" # Implicit test for 'raid1' only if test "${TEST_RAID:-raid1}" = raid1 ; then run_types raid1 -m 1 "$dev1" "$dev2" vgremove -ff $vg fi LVM2.2.02.176/test/shell/lvconvert-mirror-basic-0.sh0000644000000000000120000000103613176752421020514 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA . ./shell/lvconvert-mirror-basic.sh test_many 0 vgremove -ff $vg LVM2.2.02.176/test/shell/vgcreate-usage.sh0000644000000000000120000001263413176752421016656 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2012 Red Hat, Inc. 
All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Exercise some vgcreate diagnostics' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 3 pvcreate "$dev1" "$dev2" pvcreate --metadatacopies 0 "$dev3" vg=${PREFIX}vg #COMM 'vgcreate accepts 8.00m physicalextentsize for VG' vgcreate $vg --physicalextentsize 8.00m "$dev1" "$dev2" check vg_field $vg vg_extent_size 8.00m vgremove $vg # try vgck and to remove it again - should fail (but not segfault) not vgremove $vg not vgck $vg #COMM 'vgcreate accepts smaller (128) maxlogicalvolumes for VG' vgcreate $vg --maxlogicalvolumes 128 "$dev1" "$dev2" check vg_field $vg max_lv 128 vgremove $vg #COMM 'vgcreate accepts smaller (128) maxphysicalvolumes for VG' vgcreate $vg --maxphysicalvolumes 128 "$dev1" "$dev2" check vg_field $vg max_pv 128 vgremove $vg #COMM 'vgcreate rejects a zero physical extent size' not vgcreate --physicalextentsize 0 $vg "$dev1" "$dev2" 2>err grep "Physical extent size may not be zero" err #COMM 'vgcreate rejects "inherit" allocation policy' not vgcreate --alloc inherit $vg "$dev1" "$dev2" 2>err grep "Volume Group allocation policy cannot inherit from anything" err #COMM 'vgcreate rejects vgname "."' vginvalid=.; not vgcreate $vginvalid "$dev1" "$dev2" 2>err grep "New volume group name \"$vginvalid\" is invalid" err #COMM 'vgcreate rejects vgname greater than 128 characters' vginvalid=thisnameisridiculouslylongtotestvalidationcodecheckingmaximumsizethisiswhathappenswhenprogrammersgetboredandorarenotcreativedonttrythisathome not vgcreate $vginvalid "$dev1" "$dev2" 2>err grep "New volume group name \"$vginvalid\" is invalid" err #COMM 'vgcreate rejects already existing vgname "/tmp/$vg"' #touch /tmp/$vg #not vgcreate $vg "$dev1" "$dev2" 2>err #grep "New volume group name \"$vg\" is invalid\$" err #COMM "vgcreate rejects repeated invocation (run 2 times) (bz178216)" vgcreate $vg "$dev1" "$dev2" not vgcreate $vg "$dev1" "$dev2" vgremove -ff $vg #COMM 'vgcreate rejects MaxLogicalVolumes > 255' not vgcreate --metadatatype 1 --maxlogicalvolumes 1024 $vg "$dev1" "$dev2" 2>err grep "Number of volumes may not exceed 255" err #COMM "vgcreate fails when the only pv has --metadatacopies 0" not vgcreate $vg "$dev3" # Test default (4MB) vg_extent_size as well as limits of extent_size not vgcreate --physicalextentsize 0k $vg "$dev1" "$dev2" vgcreate --physicalextentsize 1k $vg "$dev1" "$dev2" check vg_field $vg vg_extent_size 1.00k vgremove -ff $vg not vgcreate --physicalextentsize 3K $vg "$dev1" "$dev2" not vgcreate --physicalextentsize 1024t $vg "$dev1" "$dev2" #not vgcreate --physicalextentsize 1T $vg "$dev1" "$dev2" # FIXME: vgcreate allows physicalextentsize larger than pv size! 
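# The case the FIXME above refers to would look something like this (sketch only,
# not run by this test; the PVs prepared here are only a few MiB, so no whole
# extent of that size would fit on them):
#   vgcreate --physicalextentsize 1g $vg "$dev1"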
# Test default max_lv, max_pv, extent_size, alloc_policy, clustered vgcreate $vg "$dev1" "$dev2" check vg_field $vg vg_extent_size 4.00m check vg_field $vg max_lv 0 check vg_field $vg max_pv 0 ATTRS="wz--n-" test -e LOCAL_CLVMD && ATTRS="wz--nc" check vg_field $vg vg_attr $ATTRS vgremove -ff $vg # Implicit pvcreate tests, test pvcreate options on vgcreate # --force, --yes, --metadata{size|copies|type}, --zero # --dataalignment[offset] pvremove "$dev1" "$dev2" vgcreate --force --yes --zero y $vg "$dev1" "$dev2" vgremove -f $vg pvremove -f "$dev1" for i in 0 1 2 3 do # vgcreate (lvm2) succeeds writing LVM label at sector $i vgcreate --labelsector $i $vg "$dev1" dd if="$dev1" bs=512 skip=$i count=1 2>/dev/null | strings | grep LABELONE >/dev/null vgremove -f $vg pvremove -f "$dev1" done # pvmetadatacopies for i in 1 2 do vgcreate --pvmetadatacopies $i $vg "$dev1" check pv_field "$dev1" pv_mda_count $i vgremove -f $vg pvremove -f "$dev1" done not vgcreate --pvmetadatacopies 0 $vg "$dev1" pvcreate --metadatacopies 1 "$dev2" vgcreate --pvmetadatacopies 0 $vg "$dev1" "$dev2" check pv_field "$dev1" pv_mda_count 0 check pv_field "$dev2" pv_mda_count 1 vgremove -f $vg pvremove -f "$dev1" # metadatasize, dataalignment, dataalignmentoffset #COMM 'pvcreate sets data offset next to mda area' vgcreate --metadatasize 100k --dataalignment 100k $vg "$dev1" check pv_field "$dev1" pe_start 200.00k vgremove -f $vg pvremove -f "$dev1" # data area is aligned to 1M by default, # data area start is shifted by the specified alignment_offset pv_align=1052160 # 1048576 + (7*512) vgcreate --metadatasize 128k --dataalignmentoffset 7s $vg "$dev1" check pv_field "$dev1" pe_start ${pv_align}B --units b vgremove -f $vg pvremove -f "$dev1" if test -n "$LVM_TEST_LVM1" ; then mdatypes='1 2' else mdatypes='2' fi # metadatatype for i in $mdatypes do vgcreate -M $i $vg "$dev1" check vg_field $vg vg_fmt lvm$i vgremove -f $vg pvremove -f "$dev1" done # vgcreate fails if pv belongs to existing vg vgcreate $vg1 "$dev1" "$dev2" not vgcreate $vg2 "$dev2" vgremove -f $vg1 pvremove -f "$dev1" "$dev2" # all PVs exist in the VG after created pvcreate "$dev1" vgcreate $vg1 "$dev1" "$dev2" "$dev3" check pv_field "$dev1" vg_name $vg1 check pv_field "$dev2" vg_name $vg1 check pv_field "$dev3" vg_name $vg1 vgremove -f $vg1 LVM2.2.02.176/test/shell/lvmetad-disabled.sh0000644000000000000120000000415213176752421017151 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITHOUT_LVMETAD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 2 kill "$(< LOCAL_LVMETAD)" while test -e "$TESTDIR/lvmetad.socket"; do echo -n .; sleep .1; done # wait for the socket close test ! -e "$LVM_LVMETAD_PIDFILE" aux lvmconf "global/use_lvmetad = 0" pvcreate --metadatatype 1 "$dev1" pvcreate "$dev2" vgcreate -M1 $vg1 "$dev1" vgcreate $vg2 "$dev2" pvs 2>&1 | tee out grep "$dev1" out grep "$dev2" out vgs 2>&1 | tee out grep $vg1 out grep $vg2 out aux lvmconf "global/use_lvmetad = 1" lvmetad while ! 
test -e "$TESTDIR/lvmetad.socket"; do echo -n .; sleep .1; done # wait for the socket test -e "$LVM_LVMETAD_PIDFILE" cp "$LVM_LVMETAD_PIDFILE" LOCAL_LVMETAD pvscan --cache 2>&1 | tee out grep "WARNING: Disabling lvmetad cache" out pvs 2>&1 | tee out grep "$dev1" out grep "$dev2" out grep "WARNING: Not using lvmetad" out vgs 2>&1 | tee out grep $vg1 out grep $vg2 out grep "WARNING: Not using lvmetad" out vgremove $vg1 2>&1 | tee out grep "WARNING: Not using lvmetad" out pvremove "$dev1" 2>&1 | tee out grep "WARNING: Not using lvmetad" out pvscan --cache 2>&1 | tee out not grep "WARNING: Disabling lvmetad cache" out pvs 2>&1 | tee out not grep "$dev1" out grep "$dev2" out not grep "WARNING: Not using lvmetad" out vgs 2>&1 | tee out not grep $vg1 out grep $vg2 out not grep "WARNING: Not using lvmetad" out pvs --config 'global/use_lvmetad=0' 2>&1 | tee out not grep "$dev1" out grep "$dev2" out grep "WARNING: Not using lvmetad" out grep "use_lvmetad=0" out vgs --config 'global/use_lvmetad=0' 2>&1 | tee out not grep $vg1 out grep $vg2 out grep "WARNING: Not using lvmetad" out grep "use_lvmetad=0" out LVM2.2.02.176/test/shell/lvcreate-small-snap.sh0000644000000000000120000000215213176752421017620 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010-2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs get_devs vgcreate -s 1k "$vg" "${DEVICES[@]}" # 3 Chunks lvcreate -aey -n one -l 10 $vg lvcreate -s -l 12 -n snapA $vg/one lvcreate -s -c 4k -l 12 -n snapX1 $vg/one lvcreate -s -c 8k -l 24 -n snapX2 $vg/one # Check that snapshots that are too small are caught with correct error. not lvcreate -s -c 8k -l 8 -n snapX3 $vg/one 2>&1 | tee lvcreate.out not grep "suspend origin one" lvcreate.out grep "smaller" lvcreate.out not lvcreate -s -l 4 -n snapB $vg/one 2>&1 | tee lvcreate.out not grep "suspend origin one" lvcreate.out grep "smaller" lvcreate.out vgremove -ff $vg LVM2.2.02.176/test/shell/lvchange-raid1-writemostly.sh0000644000000000000120000000225713176752421021141 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA2110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest which mkfs.ext4 || skip aux have_raid 1 3 5 || skip aux prepare_vg 4 get_devs for d in "$dev1" "$dev2" "$dev3" "$dev4" do aux delay_dev "$d" 0 20 "$(get first_extent_sector "$d")" done # # Test writemostly prohibited on resynchronizing raid1 # # Create 4-way raid1 LV lvcreate -aey --ty raid1 -m 3 -Zn -L16M -n $lv1 $vg not lvchange -y --writemostly "$dev1" "$vg/$lv1" check lv_field $vg/$lv1 segtype "raid1" check lv_field $vg/$lv1 stripes 4 check lv_attr_bit health $vg/${lv1}_rimage_0 "-" aux enable_dev "${DEVICES[@]}" aux wait_for_sync $vg $lv1 lvchange -y --writemostly "$dev1" "$vg/$lv1" check lv_attr_bit health $vg/${lv1}_rimage_0 "w" vgremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-raid.sh0000644000000000000120000002276013176752421016714 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2011-2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # disable lvmetad logging as it bogs down test systems SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_LVMETAD_DEBUG_OPTS=${LVM_TEST_LVMETAD_DEBUG_OPTS-} . lib/inittest get_image_pvs() { local d local images=() images=( $(dmsetup ls | grep "${1}-${2}_.image_.*" | cut -f1 | sed -e s:-:/:) ) lvs --noheadings -a -o devices "${images[@]}" | sed s/\(.\)// } ######################################################## # MAIN ######################################################## aux have_raid 1 3 0 || skip aux prepare_pvs 9 get_devs # vgcreate -s 256k "$vg" "${DEVICES[@]}" vgcreate -s 2m "$vg" "${DEVICES[@]}" ########################################### # RAID1 convert tests ########################################### for under_snap in false true; do for i in 1 2 3; do for j in 1 2 3; do if [ $i -eq 1 ]; then from="linear" else from="$i-way" fi if [ $j -eq 1 ]; then to="linear" else to="$j-way" fi echo -n "Converting from $from to $to" if $under_snap; then echo -n " (while under a snapshot)" fi echo if [ $i -eq 1 ]; then # Shouldn't be able to create with just 1 image not lvcreate --type raid1 -m 0 -l 2 -n $lv1 $vg lvcreate -aey -l 2 -n $lv1 $vg else lvcreate --type raid1 -m $(( i - 1 )) -l 2 -n $lv1 $vg aux wait_for_sync $vg $lv1 fi if $under_snap; then lvcreate -aey -s $vg/$lv1 -n snap -l 2 fi mirrors=$((j - 1)) if [ $i -eq 1 ] then [ $mirrors -eq 0 ] && lvconvert -y -m $mirrors $vg/$lv1 else if [ $mirrors -eq 0 ] then not lvconvert -m $mirrors $vg/$lv1 lvconvert -y -m $mirrors $vg/$lv1 else lvconvert -y -m $mirrors $vg/$lv1 fi fi # FIXME: ensure no residual devices if [ $j -eq 1 ]; then check linear $vg $lv1 fi lvremove -ff $vg done done done ############################################## # RAID1 - shouldn't be able to add image # if created '--nosync', but should # be able to after 'lvchange --resync' ############################################## lvcreate --type raid1 -m 1 -l 2 -n $lv1 $vg --nosync not lvconvert -m +1 $vg/$lv1 lvchange --resync -y $vg/$lv1 aux wait_for_sync $vg $lv1 lvconvert -y -m +1 $vg/$lv1 lvremove -ff $vg # 3-way to 2-way convert while specifying devices lvcreate --type raid1 -m 2 -l 2 -n $lv1 $vg "$dev1" "$dev2" "$dev3" aux wait_for_sync $vg $lv1 lvconvert -y -m 1 $vg/$lv1 "$dev2" lvremove -ff $vg # # FIXME: Add tests that 
specify particular devices to be removed # ########################################### # RAID1 split tests ########################################### # 3-way to 2-way/linear lvcreate --type raid1 -m 2 -l 2 -n $lv1 $vg aux wait_for_sync $vg $lv1 lvconvert --splitmirrors 1 -n $lv2 $vg/$lv1 check lv_exists $vg $lv1 check linear $vg $lv2 check active $vg $lv2 # FIXME: ensure no residual devices lvremove -ff $vg # 2-way to linear/linear lvcreate --type raid1 -m 1 -l 2 -n $lv1 $vg aux wait_for_sync $vg $lv1 not lvconvert --splitmirrors 1 -n $lv2 $vg/$lv1 lvconvert --yes --splitmirrors 1 -n $lv2 $vg/$lv1 check linear $vg $lv1 check linear $vg $lv2 check active $vg $lv2 # FIXME: ensure no residual devices lvremove -ff $vg # 4-way lvcreate --type raid1 -m 4 -l 2 -n $lv1 $vg aux wait_for_sync $vg $lv1 lvconvert --splitmirrors 1 --name $lv2 $vg/$lv1 "$dev2" lvremove -ff $vg ########################################### # RAID1 split + trackchanges / merge with content check ########################################### # 3-way to 2-way/linear lvcreate --type raid1 -m 2 -l 1 -n $lv1 $vg mkfs.ext4 "$DM_DEV_DIR/$vg/$lv1" fsck.ext4 -fn "$DM_DEV_DIR/$vg/$lv1" aux wait_for_sync $vg $lv1 fsck.ext4 -fn "$DM_DEV_DIR/$vg/$lv1" lvconvert --splitmirrors 1 --trackchanges $vg/$lv1 check lv_exists $vg $lv1 check linear $vg ${lv1}_rimage_2 fsck.ext4 -fn "$DM_DEV_DIR/mapper/$vg-${lv1}_rimage_2" dd of="$DM_DEV_DIR/$vg/$lv1" if=/dev/zero bs=512 oflag=direct count="$(blockdev --getsz "$DM_DEV_DIR/$vg/$lv1")" not fsck.ext4 -fn "$DM_DEV_DIR/$vg/$lv1" fsck.ext4 -fn "$DM_DEV_DIR/mapper/$vg-${lv1}_rimage_2" # FIXME: needed on tiny loop but not on real block backend ? lvchange --refresh $vg/$lv1 lvconvert --merge $vg/${lv1}_rimage_2 aux wait_for_sync $vg $lv1 lvconvert --splitmirrors 1 --trackchanges $vg/$lv1 not fsck.ext4 -fn "$DM_DEV_DIR/mapper/$vg-${lv1}_rimage_2" # FIXME: ensure no residual devices lvremove -ff $vg # Check split track changes gets rejected w/o -y on 2-legged raid1 lvcreate --type raid1 -m 1 -l 1 -n $lv1 $vg mkfs.ext4 "$DM_DEV_DIR/$vg/$lv1" fsck.ext4 -fn "$DM_DEV_DIR/$vg/$lv1" aux wait_for_sync $vg $lv1 fsck.ext4 -fn "$DM_DEV_DIR/$vg/$lv1" not lvconvert --splitmirrors 1 --trackchanges $vg/$lv1 lvconvert --yes --splitmirrors 1 --trackchanges $vg/$lv1 # FIXME: ensure no residual devices lvremove -ff $vg ########################################### # Linear to RAID1 conversion ("raid1" default segtype) ########################################### lvcreate -aey -l 2 -n $lv1 $vg lvconvert -y -m 1 $vg/$lv1 \ --config 'global { mirror_segtype_default = "raid1" }' lvs --noheadings -o attr $vg/$lv1 | grep '^[[:space:]]*r' lvremove -ff $vg ########################################### # Linear to RAID1 conversion (override "mirror" default segtype) ########################################### lvcreate -aey -l 2 -n $lv1 $vg lvconvert --yes --type raid1 -m 1 $vg/$lv1 \ --config 'global { mirror_segtype_default = "mirror" }' lvs --noheadings -o attr $vg/$lv1 | grep '^[[:space:]]*r' lvremove -ff $vg ########################################### # Must not be able to convert non-EX LVs in a cluster ########################################### if [ -e LOCAL_CLVMD ]; then lvcreate -l 2 -n $lv1 $vg not lvconvert -y --type raid1 -m 1 $vg/$lv1 \ --config 'global { mirror_segtype_default = "mirror" }' lvremove -ff $vg fi ########################################### # Mirror to RAID1 conversion ########################################### for i in 1 2 3 ; do lvcreate -aey --type mirror -m $i -l 2 -n $lv1 $vg aux wait_for_sync $vg $lv1 
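# (Illustrative only, not part of the original loop:) the takeover performed by
# the lvconvert below could additionally be verified with the helper used
# elsewhere in this suite, e.g.:
#   check lv_field $vg/$lv1 segtype "raid1"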
lvconvert -y --type raid1 $vg/$lv1 lvremove -ff $vg done ########################################### # Upconverted RAID1 should not allow loss of primary # - don't allow removal of primary while syncing # - DO allow removal of secondaries while syncing ########################################### aux delay_dev "$dev2" 0 100 lvcreate -aey -l 2 -n $lv1 $vg "$dev1" lvconvert -y -m 1 $vg/$lv1 \ --config 'global { mirror_segtype_default = "raid1" }' "$dev2" lvs --noheadings -o attr $vg/$lv1 | grep '^[[:space:]]*r' not lvconvert --yes -m 0 $vg/$lv1 "$dev1" lvconvert --yes -m 0 $vg/$lv1 "$dev2" aux enable_dev "$dev2" lvremove -ff $vg ########################################### # lvcreated RAID1 should allow all down-conversion # - DO allow removal of primary while syncing # - DO allow removal of secondaries while syncing ########################################### aux delay_dev "$dev2" 0 100 lvcreate --type raid1 -m 2 -aey -l 2 -n $lv1 $vg "$dev1" "$dev2" "$dev3" lvconvert --yes -m 1 $vg/$lv1 "$dev3" lvconvert --yes -m 0 $vg/$lv1 "$dev1" aux enable_dev "$dev2" lvremove -ff $vg ########################################### # Converting from 2-way RAID1 to 3-way # - DO allow removal of one of primary sources # - Do not allow removal of all primary sources ########################################### lvcreate --type raid1 -m 1 -aey -l 2 -n $lv1 $vg "$dev1" "$dev2" aux wait_for_sync $vg $lv1 aux delay_dev "$dev3" 0 100 lvconvert --yes -m +1 $vg/$lv1 "$dev3" # should allow 1st primary to be removed lvconvert --yes -m -1 $vg/$lv1 "$dev1" # should NOT allow last primary to be removed not lvconvert --yes -m -1 $vg/$lv1 "$dev2" # should allow non-primary to be removed lvconvert --yes -m 0 $vg/$lv1 "$dev3" aux enable_dev "$dev3" lvremove -ff $vg ########################################### # Converting from 2-way RAID1 to 3-way # - Should allow removal of two devices, # as long as they aren't both primary ########################################### lvcreate --type raid1 -m 1 -aey -l 2 -n $lv1 $vg "$dev1" "$dev2" aux wait_for_sync $vg $lv1 aux delay_dev "$dev3" 0 100 lvconvert --yes -m +1 $vg/$lv1 "$dev3" # should NOT allow both primaries to be removed not lvconvert -m 0 $vg/$lv1 "$dev1" "$dev2" # should allow primary + non-primary lvconvert --yes -m 0 $vg/$lv1 "$dev1" "$dev3" aux enable_dev "$dev3" lvremove -ff $vg ########################################### # Device Replacement Testing ########################################### # RAID1: Replace up to n-1 devices - trying different combinations # Test for 2-way to 4-way RAID1 LVs for i in {1..3}; do lvcreate --type raid1 -m "$i" -l 2 -n $lv1 $vg for j in $(seq $(( i + 1 ))); do # The number of devs to replace at once for o in $(seq 0 $i); do # The offset into the device list replace=() devices=( $(get_image_pvs $vg $lv1) ) for k in $(seq "$j"); do index=$(( ( k + o ) % ( i + 1 ) )) replace+=( "--replace" ) replace+=( "${devices[$index]}" ) done aux wait_for_sync $vg $lv1 if [ "$j" -ge $(( i + 1 )) ]; then # Can't replace all at once. not lvconvert "${replace[@]}" $vg/$lv1 else lvconvert "${replace[@]}" $vg/$lv1 fi done done lvremove -ff $vg done vgremove -ff $vg LVM2.2.02.176/test/shell/snapshot-maxsize.sh0000644000000000000120000000172213176752421017265 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Testing calculation of snapshot space # https://bugzilla.redhat.com/show_bug.cgi?id=1035871 SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 1 get_devs vgcreate -s 1K "$vg" "${DEVICES[@]}" lvcreate -aey -L1 -n $lv1 $vg # Snapshot should be large enough to handle any writes lvcreate -L2 -s $vg/$lv1 -n $lv2 dd if=/dev/zero of="$DM_DEV_DIR/$vg/$lv2" bs=1M count=1 conv=fdatasync # Snapshot must not be 'I'nvalid here check lv_attr_bit state $vg/$lv2 "a" vgremove -f $vg LVM2.2.02.176/test/shell/lvcreate-large-raid10.sh0000644000000000000120000000262713176752421017730 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012,2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # 'Exercise some lvcreate diagnostics' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest # FIXME update test to make something useful on <16T aux can_use_16T || skip aux have_raid 1 3 0 || skip aux prepare_vg 5 # Fake ~2.5PiB volume group $vg1 via snapshot LVs for device in "$lv1" "$lv2" "$lv3" "$lv4" "$lv5" do lvcreate --type snapshot -s -l 20%FREE -n $device $vg --virtualsize 520T done aux extend_filter_LVMTEST pvcreate "$DM_DEV_DIR"/$vg/$lv[12345] vgcreate $vg1 "$DM_DEV_DIR"/$vg/$lv[12345] # # Create and extend large RAID10 LV # # We need '--nosync' or our virtual devices won't work lvcreate --type raid10 -m 1 -i 2 -L 200T -n $lv1 $vg1 --nosync check lv_field $vg1/$lv1 size "200.00t" lvextend -L +200T $vg1/$lv1 check lv_field $vg1/$lv1 size "400.00t" lvextend -L +100T $vg1/$lv1 check lv_field $vg1/$lv1 size "500.00t" lvextend -L 1P $vg1/$lv1 check lv_field $vg1/$lv1 size "1.00p" vgremove -ff $vg1 vgremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-raid-reshape-striped_to_linear.sh0000644000000000000120000000734613176752421024050 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA2110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest which mkfs.ext4 || skip aux have_raid 1 12 0 || skip # Temporarily skip reshape tests on single-core CPUs until there's a fix for # https://bugzilla.redhat.com/1443999 - AGK 2017/04/20 aux have_multi_core || skip aux prepare_vg 5 # # Test single step linear -> striped conversion # # Create 4-way striped LV lvcreate -aey -i 4 -I 32k -L 16M -n $lv1 $vg check lv_field $vg/$lv1 segtype "striped" check lv_field $vg/$lv1 data_stripes 4 check lv_field $vg/$lv1 stripes 4 check lv_field $vg/$lv1 stripesize "32.00k" check lv_field $vg/$lv1 reshape_len_le "" echo y|mkfs -t ext4 $DM_DEV_DIR/$vg/$lv1 fsck -fn $DM_DEV_DIR/$vg/$lv1 # Convert striped -> raid5(_n) lvconvert -y --ty raid5 -R 128k $vg/$lv1 fsck -fn $DM_DEV_DIR/$vg/$lv1 check lv_field $vg/$lv1 segtype "raid5_n" check lv_field $vg/$lv1 data_stripes 4 check lv_field $vg/$lv1 stripes 5 check lv_field $vg/$lv1 stripesize "32.00k" check lv_field $vg/$lv1 regionsize "128.00k" check lv_field $vg/$lv1 reshape_len_le 0 aux wait_for_sync $vg $lv1 fsck -fn $DM_DEV_DIR/$vg/$lv1 # Extend raid5_n LV by factor 4 to keep size once linear lvresize -y -L 64 $vg/$lv1 check lv_field $vg/$lv1 segtype "raid5_n" check lv_field $vg/$lv1 data_stripes 4 check lv_field $vg/$lv1 stripes 5 check lv_field $vg/$lv1 stripesize "32.00k" check lv_field $vg/$lv1 regionsize "128.00k" check lv_field $vg/$lv1 reshape_len_le "0" aux wait_for_sync $vg $lv1 fsck -fn $DM_DEV_DIR/$vg/$lv1 # Convert raid5_n LV to 1 stripe (2 legs total), # 64k stripesize and 1024k regionsize # FIXME: "--type" superfluous (cli fix needed) lvconvert -y -f --ty raid5_n --stripes 1 -I 64k -R 1024k $vg/$lv1 fsck -fn $DM_DEV_DIR/$vg/$lv1 check lv_first_seg_field $vg/$lv1 segtype "raid5_n" check lv_first_seg_field $vg/$lv1 data_stripes 1 check lv_first_seg_field $vg/$lv1 stripes 5 check lv_first_seg_field $vg/$lv1 stripesize "64.00k" check lv_first_seg_field $vg/$lv1 regionsize "1.00m" check lv_first_seg_field $vg/$lv1 reshape_len_le 10 # for slv in {0..4} # do # check lv_first_seg_field $vg/${lv1}_rimage_${slv} reshape_len_le 2 # done aux wait_for_sync $vg $lv1 1 fsck -fn $DM_DEV_DIR/$vg/$lv1 # Remove the now freed legs lvconvert -y --stripes 1 $vg/$lv1 check lv_first_seg_field $vg/$lv1 segtype "raid5_n" check lv_first_seg_field $vg/$lv1 data_stripes 1 check lv_first_seg_field $vg/$lv1 stripes 2 check lv_first_seg_field $vg/$lv1 stripesize "64.00k" check lv_first_seg_field $vg/$lv1 regionsize "1.00m" check lv_first_seg_field $vg/$lv1 reshape_len_le 4 # Convert raid5_n to raid1 lvconvert -y --type raid1 $vg/$lv1 fsck -fn $DM_DEV_DIR/$vg/$lv1 check lv_first_seg_field $vg/$lv1 segtype "raid1" check lv_first_seg_field $vg/$lv1 data_stripes 2 check lv_first_seg_field $vg/$lv1 stripes 2 check lv_first_seg_field $vg/$lv1 stripesize "0" check lv_first_seg_field $vg/$lv1 regionsize "1.00m" check lv_first_seg_field $vg/$lv1 reshape_len_le "" # Convert raid5_n -> linear lvconvert -y --type linear $vg/$lv1 fsck -fn $DM_DEV_DIR/$vg/$lv1 check lv_first_seg_field $vg/$lv1 segtype "linear" check lv_first_seg_field $vg/$lv1 data_stripes 1 check lv_first_seg_field $vg/$lv1 stripes 1 check lv_first_seg_field $vg/$lv1 stripesize "0" check lv_first_seg_field $vg/$lv1 regionsize "0" check lv_first_seg_field $vg/$lv1 reshape_len_le "" vgremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-repair-dmeventd.sh0000644000000000000120000000167113176752421021061 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest which mkfs.ext2 || skip aux mirror_recovery_works || skip aux prepare_dmeventd aux prepare_vg 5 lvcreate -aey --type mirror -m 3 --ignoremonitoring -L 1 -n 4way $vg lvchange --monitor y $vg/4way aux disable_dev "$dev2" "$dev4" mkfs.ext2 "$DM_DEV_DIR/$vg/4way" sleep 10 # FIXME: need a "poll" utility, akin to "check" aux enable_dev "$dev2" "$dev4" check mirror $vg 4way check mirror_legs $vg 4way 2 vgremove -ff $vg LVM2.2.02.176/test/shell/lvcreate-thin.sh0000644000000000000120000001726213176752421016523 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2011-2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # test currently needs to drop # 'return NULL' in _lv_create_an_lv after log_error("Can't create %s without using " SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest check_lv_field_modules_() { mod=$1 shift for d in "$@"; do check lv_field $vg/$d modules $mod done } # # Main # aux have_thin 1 0 0 || skip which mkfs.ext4 || skip aux prepare_pvs 2 64 get_devs vgcreate -s 64K "$vg" "${DEVICES[@]}" # Create named pool only lvcreate -l1 -T $vg/pool1 lvcreate -l1 -T --thinpool $vg/pool2 lvcreate -l1 -T --thinpool pool3 $vg invalid lvcreate -l1 --type thin $vg/pool4 invalid lvcreate -l1 --type thin --thinpool $vg/pool5 invalid lvcreate -l1 --type thin --thinpool pool6 $vg lvcreate -l1 --type thin-pool $vg/pool7 lvcreate -l1 --type thin-pool --thinpool $vg/pool8 lvcreate -l1 --type thin-pool --thinpool pool9 $vg lvremove -ff $vg/pool1 $vg/pool2 $vg/pool3 $vg/pool7 $vg/pool8 $vg/pool9 check vg_field $vg lv_count 0 # Let's pretend pool is like normal LV when using --type thin-pool support --name # Reject ambiguous thin pool names invalid lvcreate --type thin-pool -l1 --name pool1 $vg/pool2 invalid lvcreate --type thin-pool -l1 --name pool3 --thinpool pool4 $vg invalid lvcreate --type thin-pool -l1 --name pool5 --thinpool pool6 $vg/pool7 invalid lvcreate --type thin-pool -l1 --name pool8 --thinpool pool8 $vg/pool9 # no size specified and no origin name give for snapshot invalid lvcreate --thinpool pool $vg check vg_field $vg lv_count 0 lvcreate --type thin-pool -l1 --name pool1 $vg lvcreate --type thin-pool -l1 --name $vg/pool2 # If the thin pool name is unambiguous let it proceed lvcreate --type thin-pool -l1 --name pool3 $vg/pool3 lvcreate --type thin-pool -l1 --name pool4 --thinpool $vg/pool4 lvcreate --type thin-pool -l1 --name pool5 --thinpool $vg/pool5 $vg/pool5 check lv_field $vg/pool1 segtype "thin-pool" check lv_field $vg/pool2 segtype "thin-pool" check lv_field $vg/pool3 segtype "thin-pool" check lv_field $vg/pool4 segtype "thin-pool" check lv_field $vg/pool5 segtype "thin-pool" lvremove 
-ff $vg # Create default pool name lvcreate -l1 -T $vg invalid lvcreate -l1 --type thin $vg lvcreate -l1 --type thin-pool $vg lvremove -ff $vg check vg_field $vg lv_count 0 # Create default pool and default thin LV lvcreate -l1 -V2G -T $vg lvcreate -l1 -V2G --type thin $vg lvremove -ff $vg # Create named pool and default thin LV lvcreate -L4M -V2G --name lvo1 -T $vg/pool1 lvcreate -L4M -V2G --name lvo2 -T --thinpool $vg/pool2 lvcreate -L4M -V2G --name lvo3 -T --thinpool pool3 $vg lvcreate -L4M -V2G --name lvo4 --type thin $vg/pool4 lvcreate -L4M -V2G --name lvo5 --type thin --thinpool $vg/pool5 lvcreate -L4M -V2G --name lvo6 --type thin --thinpool pool6 $vg check lv_exists $vg lvo1 lvo2 lvo3 lvremove -ff $vg # Create named pool and named thin LV lvcreate -L4M -V2G -T $vg/pool1 --name lv1 lvcreate -L4M -V2G -T $vg/pool2 --name $vg/lv2 lvcreate -L4M -V2G -T --thinpool $vg/pool3 --name lv3 lvcreate -L4M -V2G -T --thinpool $vg/pool4 --name $vg/lv4 lvcreate -L4M -V2G -T --thinpool pool5 --name lv5 $vg lvcreate -L4M -V2G -T --thinpool pool6 --name $vg/lv6 $vg check lv_exists $vg lv1 lv2 lv3 lv4 lv5 lv6 lvremove -ff $vg lvcreate -L4M -V2G --type thin $vg/pool1 --name lv1 lvcreate -L4M -V2G --type thin $vg/pool2 --name $vg/lv2 lvcreate -L4M -V2G --type thin --thinpool $vg/pool3 --name lv3 lvcreate -L4M -V2G --type thin --thinpool $vg/pool4 --name $vg/lv4 lvcreate -L4M -V2G --type thin --thinpool pool5 --name lv5 $vg lvcreate -L4M -V2G --type thin --thinpool pool6 --name $vg/lv6 $vg check lv_exists $vg lv1 lv2 lv3 lv4 lv5 lv6 lvremove -ff $vg # Create default thin LV in existing pool lvcreate -L4M -T $vg/pool lvcreate -V2G --name lvo0 -T $vg/pool lvcreate -V2G --name lvo1 -T --thinpool $vg/pool lvcreate -V2G --name lvo2 -T --thinpool pool $vg lvcreate -V2G --name lvo3 --type thin $vg/pool lvcreate -V2G --name lvo4 --type thin --thinpool $vg/pool lvcreate -V2G --name lvo5 --type thin --thinpool pool $vg check lv_exists $vg lvo0 lvo1 lvo2 lvo3 lvo4 lvo5 # Create named thin LV in existing pool lvcreate -V2G -T $vg/pool --name lv1 lvcreate -V2G -T $vg/pool --name $vg/lv2 lvcreate -V2G -T --thinpool $vg/pool --name lv3 lvcreate -V2G -T --thinpool $vg/pool --name $vg/lv4 lvcreate -V2G -T --thinpool pool --name lv5 $vg lvcreate -V2G -T --thinpool pool --name $vg/lv6 $vg lvcreate -V2G --type thin $vg/pool --name lv7 lvcreate -V2G --type thin $vg/pool --name $vg/lv8 lvcreate -V2G --type thin --thinpool $vg/pool --name lv9 lvcreate -V2G --type thin --thinpool $vg/pool --name $vg/lv10 lvcreate -V2G --type thin --thinpool pool --name lv11 $vg lvcreate -V2G --type thin --thinpool pool --name $vg/lv12 $vg check lv_exists $vg lv1 lv2 lv3 lv4 lv5 lv6 lv7 lv8 lv9 lv10 lv11 lv12 check vg_field $vg lv_count 19 check lv_field $vg/lv1 thin_id 7 lvremove -ff $vg check vg_field $vg lv_count 0 # Create thin snapshot of thinLV lvcreate -L10M -I4 -i2 -V10M -T $vg/pool --name lv1 mkfs.ext4 "$DM_DEV_DIR/$vg/lv1" lvcreate -K -s $vg/lv1 --name snap_lv1 fsck -n "$DM_DEV_DIR/$vg/snap_lv1" lvcreate -s $vg/lv1 --name lv2 lvcreate -s $vg/lv1 --name $vg/lv3 invalid lvcreate --type snapshot $vg/lv1 --name lv6 invalid lvcreate --type snapshot $vg/lv1 --name lv4 invalid lvcreate --type snapshot $vg/lv1 --name $vg/lv5 lvdisplay --maps $vg check_lv_field_modules_ thin,thin-pool lv1 snap_lv1 lv2 lv3 check vg_field $vg lv_count 5 lvremove -ff $vg # Normal Snapshots of thinLV lvcreate -L4M -V2G -T $vg/pool --name lv1 lvcreate -s $vg/lv1 -l1 --name snap_lv1 lvcreate -s $vg/lv1 -l1 --name lv2 lvcreate -s $vg/lv1 -l1 --name $vg/lv3 lvcreate -s 
lv1 -L4M --name $vg/lv4 check_lv_field_modules_ snapshot snap_lv1 lv2 lv3 lv4 check vg_field $vg lv_count 6 lvremove -ff $vg check vg_field $vg lv_count 0 # Fail cases # Too small pool size (1 extent 64KB) for given chunk size not lvcreate --chunksize 256 -l1 -T $vg/pool1 # Too small chunk size (min is 64KB - 128 sectors) not lvcreate --chunksize 32 -l1 -T $vg/pool1 # Too large chunk size (max is 1GB) not lvcreate -L4M --chunksize 2G -T $vg/pool1 # Cannot specify --minor with pool fail lvcreate -L10M --minor 100 -T $vg/pool_minor # FIXME: Currently ambigous - is it for thin, thin-pool, both ? fail lvcreate -L4M -Mn -m0 -T --readahead 32 -V20 -n $lv $vg/pool_normal # Check read-ahead setting will also pass with -Mn -m0 lvcreate -L4M -Mn -m0 -T --readahead 64k $vg/pool_readahead lvcreate -V20M -Mn -m0 -T --readahead 128k -n thin_readahead $vg/pool_readahead check lv_field $vg/pool_readahead lv_read_ahead "64.00k" check lv_field $vg/thin_readahead lv_read_ahead "128.00k" if test ! -d /sys/block/dm-2345; then # Check some unused minor and support for --minor with thins lvcreate --minor 2345 -T -V20M -n thin_minor $vg/pool_readahead check lv_field $vg/thin_minor lv_minor "2345" fi # Test creation of inactive pool lvcreate -an -L4M -T $vg/pool1 lvcreate -V2G --name lv1 -T $vg/pool1 # Check we are able remove spare volume if we want to lvremove -f $vg/lvol0_pmspare # Origin name is not accepted not lvcreate -s $vg/lv1 -L4M -V2G --name $vg/lv4 # Check we cannot create mirror/raid1 and thin or thinpool together not lvcreate -T mirpool -L4M --alloc anywhere -m1 $vg not lvcreate --thinpool mirpool -L4M --alloc anywhere -m1 $vg vgremove -ff $vg LVM2.2.02.176/test/shell/thin-flags.sh0000644000000000000120000000555613176752421016015 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # test presence of various thin-pool/thin flags SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest # # Main # aux have_thin 1 3 0 || skip aux thin_pool_error_works_32 || skip aux prepare_vg 2 256 ############################################### # Testing failing thin-pool metadata device # ############################################### lvcreate -T -L1M --errorwhenfull y $vg/pool lvcreate -V2 -n $lv2 $vg/pool aux error_dev "$dev2" 2054:2 # Check our 'lvs' is not flushing pool - should be still OK check lv_attr_bit health $vg/pool "-" # Enforce flush on thin pool device to notice error device. dmsetup status $vg-pool-tpool check lv_attr_bit health $vg/pool "F" check lv_attr_bit health $vg/$lv2 "F" aux enable_dev "$dev2" lvchange -an $vg # Overfill data area lvchange -ay $vg dd if=/dev/zero of="$DM_DEV_DIR/mapper/$vg-$lv2" bs=1M count=2 check lv_attr_bit health $vg/pool "D" # TODO use spaces ?? 
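# Summary of the attribute characters this test asserts (as checked below):
#  health bit: '-' healthy pool, 'F' pool failed, 'D' out of data space,
#  'M' metadata read only; state bit: 'c' (or 'C' while suspended) marks a
#  thin-pool that still needs thin_check.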
check lv_field $vg/pool lv_health_status "out_of_data" lvremove -ff $vg ####################################################### # Testing what happens on system without thin-check # ####################################################### lvcreate -L200M --errorwhenfull y -T $vg/pool lvcreate -V2 -n $lv2 $vg/pool lvchange -an $vg # Drop usage of thin_check aux lvmconf 'global/thin_check_executable = ""' # Prepare some fake metadata prefilled to ~100% lvcreate -L2 -n $lv1 $vg # tmp for metadata aux prepare_thin_metadata 490 1 | tee data "$LVM_TEST_THIN_RESTORE_CMD" -i data -o "$DM_DEV_DIR/mapper/$vg-$lv1" # Swap volume with restored fake metadata lvconvert -y --thinpool $vg/pool --poolmetadata $vg/$lv1 lvchange -ay $vg lvchange -ay $vg/$lv2 # Provisiong and last free bits in metadata dd if=/dev/zero of="$DM_DEV_DIR/mapper/$vg-$lv2" bs=32K count=1 check lv_attr_bit health $vg/pool "M" # TODO - use spaces ?? check lv_field $vg/pool lv_health_status "metadata_read_only" check lv_attr_bit health $vg/$lv2 "-" # needs_check needs newer version if aux have_thin 1 16 0 ; then check lv_attr_bit state $vg/pool "c" check lv_field $vg/pool lv_check_needed "check needed" dmsetup suspend $vg-pool-tpool # suspended thin-pool with Capital 'c' check lv_attr_bit state $vg/pool "C" dmsetup resume $vg-pool-tpool lvresize -L+2M $vg/pool_tmeta # still require thin_check check lv_attr_bit state $vg/pool "c" fi vgremove -ff $vg LVM2.2.02.176/test/shell/lvextend-percent-extents.sh0000644000000000000120000000751513176752421020735 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2011 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # 'Check extents percentage arguments' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 2 128 get_devs aux vgcreate "$vg" "${DEVICES[@]}" lvcreate -L64 -n $lv $vg # 'lvextend rejects both size and extents without PVs' not lvextend -l 10 -L 64m $vg/$lv 2>err grep "Please specify either size or extents but not both." err # 'lvextend rejects both size and extents with PVs' not lvextend -l 10 -L 64m $vg/$lv "$dev1" 2>err grep "Please specify either size or extents but not both." err # 'lvextend accepts no size or extents but one PV - bz154691' lvextend $vg/$lv "$dev1" | tee out grep "Logical volume $vg/$lv successfully resized" out check pv_field "$dev1" pv_free "0" lvremove -f $vg/$lv # 'lvextend computes necessary free space correctly - bz213552' vgsize=$(get vg_field $vg vg_extent_count) lvcreate -l $vgsize -n $lv $vg lvreduce -f -l $(( vgsize / 2 )) $vg/$lv lvextend -l $vgsize $vg/$lv # 'Reset LV to original size' lvremove -f $vg/$lv lvcreate -L 64m -n $lv $vg # 'lvextend accepts no size but extents 100%PVS and two PVs - bz154691' lvextend -l +100%PVS $vg/$lv "$dev1" "$dev2" | tee out grep "Logical volume $vg/$lv successfully resized" out check pv_field "$dev1" pv_free "0" check pv_field "$dev2" pv_free "0" # Exercise the range overlap code. Allocate every 2 extents. 
# # Physical Extents # 1 2 #012345678901234567890123 # #aaXXaaXXaaXXaaXXaaXXaaXX - (a)llocated #rrrXXXrrrXXXrrrXXXrrrXXX - (r)ange on cmdline #ooXXXXXXoXXXooXXXXXXoXXX - (o)verlap of range and allocated # # Key: a - allocated # F - free # r - part of a range on the cmdline # N - not on cmdline # # Create the LV with 12 extents, allocated every other 2 extents. # Then extend it, with a range of PVs on the cmdline of every other 3 extents. # Total number of extents should be 12 + overlap = 12 + 6 = 18. # Thus, total size for the LV should be 18 * 4M = 72M # # 'Reset LV to 12 extents, allocate every other 2 extents' create_pvs=$(for i in $(seq 0 4 20); do echo -n "$dev1:$i-$(( i + 1 )) "; done) lvremove -f $vg/$lv lvcreate -l 12 -n $lv $vg $create_pvs check lv_field $vg/$lv lv_size "48.00m" # 'lvextend with partially allocated PVs and extents 100%PVS with PE ranges' extend_pvs=$(for i in $(seq 0 6 18); do echo -n "$dev1:$i-$(( i + 2 )) "; done) lvextend -l +100%PVS $vg/$lv $extend_pvs | tee out grep "Logical volume $vg/$lv successfully resized" out check lv_field $vg/$lv lv_size "72.00m" # Simple seg_count validation; initially create the LV with half the # of # extents (should be 1 lv segment), extend it (should go to 2 segments), # then reduce (should be back to 1) # FIXME: test other segment fields such as seg_size, pvseg_start, pvseg_size lvremove -f $vg/$lv pe_count=$(get pv_field "$dev1" pv_pe_count) pe1=$(( pe_count / 2 )) lvcreate -l $pe1 -n $lv $vg pesize=$(get lv_field $vg/$lv vg_extent_size --units b --nosuffix) segsize=$(( pe1 * pesize / 1024 / 1024 ))m check lv_field $vg/$lv seg_count "1" check lv_field $vg/$lv seg_start "0" check lv_field $vg/$lv seg_start_pe "0" #check lv_field $vg/$lv seg_size $segsize lvextend -l +$(( pe_count * 1 )) $vg/$lv check lv_field $vg/$lv seg_count "2" lvreduce -f -l -$(( pe_count * 1 )) $vg/$lv check lv_field $vg/$lv seg_count "1" # do not reduce to 0 extents lvremove -f $vg/$lv lvcreate -i2 -I 64k -l10 -n $lv $vg lvreduce -f -l1 $vg/$lv check lv_field $vg/$lv lv_size "8.00m" vgremove -ff $vg LVM2.2.02.176/test/shell/profiles.sh0000644000000000000120000001500313176752421015570 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # test basic profile functionality # SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest MSG_FAILED_TO_APPLY_CMD_PROFILE="Failed to apply command profile" MSG_IGNORING_INVALID_CMD_PROFILE="Ignoring invalid command profile" MSG_FAILED_TO_APPLY_MDA_PROFILE="Failed to apply metadata profile" MSG_IGNORING_INVALID_MDA_PROFILE="Ignoring invalid metadata profile" MSG_NOT_PROFILABLE="not customizable by a profile" MSG_CMD_PROFILABLE_ONLY="customizable by command profile only, not metadata profile" MSG_MDA_PROFILABLE_ONLY="customizable by metadata profile only, not command profile" # fail if the profile requested by --profile cmdline option is not present not pvs --profile nonexistent 2>&1 | grep "$MSG_FAILED_TO_APPLY_CMD_PROFILE" # config/checks=1: warning message about setting not being profilable + # summary error message about invalid profile # config/checks=0: just summary error message about invalid profile aux profileconf invalid 'log/prefix=" "' aux lvmconf 'config/checks = 0' not pvs --profile invalid 2>msg not grep "$MSG_NOT_PROFILABLE" msg grep "$MSG_IGNORING_INVALID_CMD_PROFILE" msg grep "$MSG_FAILED_TO_APPLY_CMD_PROFILE" msg aux lvmconf 'config/checks = 1' not pvs --profile invalid 2>msg grep "$MSG_NOT_PROFILABLE" msg grep "$MSG_IGNORING_INVALID_CMD_PROFILE" msg grep "$MSG_FAILED_TO_APPLY_CMD_PROFILE" msg aux lvmconf 'allocation/thin_pool_zero = 1' # all profilable items listed here - should pass aux profileconf valid_cmd_profile 'global/units = "h"' \ 'global/si_unit_consistency = 1' \ 'global/suffix = 1' \ 'global/lvdisplay_shows_full_device_path = 0' \ 'report/aligned = 1' \ 'report/buffered = 1' \ 'report/headings = 1' \ 'report/separator = " "' \ 'report/prefixes = 0' \ 'report/quoted = 1' \ 'report/columns_as_rows = 0' \ 'report/devtypes_sort = "devtype_name"' \ 'report/devtypes_cols = "devtype_name,devtype_max_partitions,devtype_description"' \ 'report/devtypes_cols_verbose = "devtype_name,devtype_max_partitions,devtype_description"' \ 'report/lvs_sort = "vg_name,lv_name"' \ 'report/lvs_cols = "lv_name,vg_name,lv_attr,lv_size,pool_lv,origin,data_percent,move_pv,mirror_log,copy_percent,convert_lv"' \ 'report/lvs_cols_verbose = "lv_name,vg_name,seg_count,lv_attr,lv_size,lv_major,lv_minor,lv_kernel_major,lv_kernel_minor,pool_lv,origin,data_percent,metadata_percent,move_pv,copy_percent,mirror_log,convert_lv,lv_uuid,lv_profile"' \ 'report/vgs_sort = "vg_name"' \ 'report/vgs_cols = "vg_name,pv_count,lv_count,snap_count,vg_attr,vg_size,vg_free"' \ 'report/vgs_cols_verbose = "vg_name,vg_attr,vg_extent_size,pv_count,lv_count,snap_count,vg_size,vg_free,vg_uuid,vg_profile"' \ 'report/pvs_sort = "pv_name"' \ 'report/pvs_cols = "pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free"' \ 'report/pvs_cols_verbose = "pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,dev_size,pv_uuid"' \ 'report/segs_sort = "vg_name,lv_name,seg_start"' \ 'report/segs_cols = "lv_name,vg_name,lv_attr,stripes,segtype,seg_size"' \ 'report/segs_cols_verbose = "lv_name,vg_name,lv_attr,seg_start,seg_size,stripes,segtype,stripesize,chunksize"' \ 'report/pvsegs_sort = "pv_name,pvseg_start"' \ 'report/pvsegs_cols = "pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,pvseg_start,pvseg_size"' \ 'report/pvsegs_cols_verbose = "pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,pvseg_start,pvseg_size,lv_name,seg_start_pe,segtype,seg_pe_ranges"' aux profileconf valid_mda_profile 'allocation/thin_pool_zero = 0' \ 'allocation/thin_pool_discards = "passdown"' \ 'allocation/thin_pool_chunk_size = 64' \ 'activation/thin_pool_autoextend_threshold = 100' \ 'activation/thin_pool_autoextend_percent = 20' aux profileconf 
extra_mda_profile 'allocation/thin_pool_chunk_size = 128' pvs --profile valid_cmd_profile 2>msg not grep "$MSG_NOT_PROFILABLE" msg not grep "$MSG_IGNORING_INVALID_CMD_PROFILE" msg not grep "$MSG_IGNORING_INVALID_MDA_PROFILE" msg not pvs --profile valid_mda_profile 2>msg grep "$MSG_MDA_PROFILABLE_ONLY" msg grep "$MSG_IGNORING_INVALID_CMD_PROFILE" msg not grep "$MSG_IGNORING_INVALID_MDA_PROFILE" msg # attaching/detaching profiles to VG/LV aux prepare_pvs 1 8 pvcreate "$dev1" vgcreate $vg1 "$dev1" check vg_field $vg1 vg_profile "" lvcreate -l 1 -n $lv1 $vg1 check lv_field $vg1/$lv1 lv_profile "" vgchange --profile valid_mda_profile $vg1 check vg_field $vg1 vg_profile valid_mda_profile check lv_field $vg1/$lv1 lv_profile "" lvchange --profile extra_mda_profile $vg1/$lv1 check vg_field $vg1 vg_profile valid_mda_profile check lv_field $vg1/$lv1 lv_profile extra_mda_profile vgchange --detachprofile $vg1 check vg_field $vg1 vg_profile "" check lv_field $vg1/$lv1 lv_profile extra_mda_profile lvchange --detachprofile $vg1/$lv1 check vg_field $vg1 vg_profile "" check lv_field $vg1/$lv1 lv_profile "" # dumpconfig and merged lvm.conf + profile aux lvmconf 'global/units="m"' aux profileconf extra_cmd_profile 'global/units="h"' lvm dumpconfig &>out grep 'units="m"' out lvm dumpconfig --profile extra_cmd_profile --mergedconfig >out grep 'units="h"' out # dumpconfig --profilable output must be usable as a profile lvm dumpconfig --type profilable-command --file etc/profile/generated.profile pvs --profile generated &> msg not grep "$MSG_NOT_PROFILABLE" msg not grep "$MSG_IGNORING_INVALID_CMD_PROFILE" msg vgremove -ff $vg1 LVM2.2.02.176/test/shell/lvcreate-thin-external.sh0000644000000000000120000000625113176752421020337 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013-2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test creation of thin snapshots using external origin SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . 
lib/inittest which mkfs.ext2 || skip which fsck || skip # # Main # aux have_thin 1 3 0 || skip aux prepare_pvs 2 64 get_devs vgcreate -s 64K "$vg" "${DEVICES[@]}" # Newer thin-pool target (>= 1.13) supports unaligned external origin # But this test is written to test and expect older behavior aux lvmconf 'global/thin_disabled_features = [ "external_origin_extend" ]' # Test validation for external origin being multiple of thin pool chunk size lvcreate -L10M -T $vg/pool192 -c 192k lvcreate -an -pr -Zn -l1 -n $lv1 $vg not lvcreate -s $vg/$lv1 --thinpool $vg/pool192 lvcreate -an -pr -Zn -l5 -n $lv2 $vg not lvcreate -s $vg/$lv2 --thinpool $vg/pool192 lvremove -f $vg # Prepare pool and external origin with filesystem lvcreate -L10M -V10M -T $vg/pool --name $lv1 mkfs.ext2 "$DM_DEV_DIR/$vg/$lv1" lvcreate -L4M -n $lv2 $vg mkfs.ext2 "$DM_DEV_DIR/$vg/$lv2" # Fail to create external origin snapshot of rw LV not lvcreate -s $vg/$lv2 --thinpool $vg/pool lvchange -p r $vg/$lv2 # Fail to create snapshot of active r LV # FIXME: kernel update needed not lvcreate -s $vg/$lv2 --thinpool $vg/pool # Deactivate LV we want to use as external origin # once kernel will ensure read-only this condition may go away lvchange -an $vg/$lv2 lvcreate -s $vg/$lv2 --thinpool $vg/pool # Fail with --thin and --snapshot not lvcreate -s $vg/$lv5 --name $vg/$lv7 -T $vg/newpool # Cannot specify size and thin pool. # TODO: maybe with --poolsize invalid lvcreate -s $vg/$lv2 -L10 --thinpool $vg/pool invalid lvcreate -s -K $vg/$lv2 --name $vg/$lv3 -L20 --chunksize 128 --thinpool $vg/newpool not lvcreate -s $vg/$lv2 --chunksize 64 --thinpool $vg/pool not lvcreate -s $vg/$lv2 --zero y --thinpool $vg/pool not lvcreate -s $vg/$lv2 --poolmetadata $vg/$lv1 --thinpool $vg/pool # Fail with nonexistent pool not lvcreate -s $vg/$lv2 --thinpool $vg/newpool # Create pool and snap lvcreate -T --name $vg/$lv3 -V10 -L20 --chunksize 128 --thinpool $vg/newpool lvcreate -s -K $vg/$lv3 --name $vg/$lv4 lvcreate -s -K $vg/$lv2 --name $vg/$lv5 --thinpool $vg/newpool # Make normal thin snapshot lvcreate -s -K $vg/$lv5 --name $vg/$lv6 # We do not need to specify thinpool when doing thin snap, but it should work lvcreate -s -K $vg/$lv5 --name $vg/$lv7 --thinpool $vg/newpool check inactive $vg $lv2 lvchange -ay $vg/$lv2 lvcreate -s -K $vg/$lv2 --name $vg/$lv8 --thinpool $vg/newpool lvs -o+chunksize $vg check active $vg $lv3 check active $vg $lv4 check active $vg $lv5 check active $vg $lv6 check active $vg $lv7 fsck -n "$DM_DEV_DIR/$vg/$lv1" fsck -n "$DM_DEV_DIR/$vg/$lv7" vgremove -ff $vg LVM2.2.02.176/test/shell/backup-read-only.sh0000644000000000000120000000333713176752421017111 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_CLVMD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest which mkfs.ext3 || skip aux prepare_vg 2 # Note: inittest.sh sets LVM_SYSTEM_DIR to 'just' etc etc_lv="$DM_DEV_DIR/$vg/$lv1" cleanup_mounted_and_teardown() { umount "$mount_dir" || true aux teardown } vgreduce $vg "$dev2" lvcreate -n $lv1 -l 20%FREE $vg mkfs.ext3 -b4096 -j "$etc_lv" # # check read-only archive dir # mount_dir="etc/archive" trap 'cleanup_mounted_and_teardown' EXIT mkdir -p "$mount_dir" mount -n -r "$etc_lv" "$mount_dir" aux lvmconf "backup/archive = 1" "backup/backup = 1" # cannot archive to read-only - requires user to specify -An not lvcreate -n $lv2 -l 10%FREE $vg lvcreate -An -n $lv2 -l 10%FREE $vg not vgextend $vg "$dev2" vgextend -An $vg "$dev2" umount "$mount_dir" || true vgreduce $vg "$dev2" # # check read-only backup dir # mount_dir="etc/backup" mount -n -r "$etc_lv" "$mount_dir" # Must not fail on making backup vgscan lvcreate -An -n $lv3 -l 10%FREE $vg vgextend $vg "$dev2" # # Now check both archive & backup read-only # rm -rf etc/archive ln -s backup etc/archive # Must not fail on making backup vgscan lvcreate -An -n $lv4 -l 10%FREE $vg umount "$mount_dir" || true # TODO maybe also support --ignorelockingfailure ?? vgremove -ff $vg LVM2.2.02.176/test/shell/vgreduce-removemissing-snapshot.sh0000644000000000000120000000232213176752421022273 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2011 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_CLVMD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest # # Snapshots of 'mirrors' are not supported. They can no longer be created. # This file could be used to test some aspect of vgreduce, snapshot, and # RAID at some point though... # aux prepare_vg 5 lvcreate --type mirror -m 3 -L 2M -n 4way $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5":0 lvcreate -s $vg/4way -L 2M -n snap lvcreate -i 2 -L 2M $vg "$dev1" "$dev2" -n stripe aux disable_dev "$dev2" "$dev4" echo n | lvconvert --repair $vg/4way aux enable_dev "$dev2" "$dev4" #not vgreduce --removemissing $vg vgreduce -v --removemissing --force $vg # "$dev2" "$dev4" lvs -a -o +devices $vg | not grep unknown lvs -a -o +devices $vg check mirror $vg 4way "$dev5" vgremove -ff $vg LVM2.2.02.176/test/shell/pv-ext-flags.sh0000644000000000000120000001235513176752421016271 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux prepare_devs 2 # PV_EXT_USED flag MARKED_AS_USED_MSG="is used by a VG but its metadata is missing" ###################################### ### CHECK PV WITH 0 METADATA AREAS ### ###################################### pvcreate -ff -y --metadatacopies 0 "$dev1" pvcreate -ff -y --metadatacopies 1 "$dev2" # $dev1 and $dev2 not in any VG - pv_in_use field should be blank check pv_field "$dev1" pv_in_use "" check pv_field "$dev2" pv_in_use "" # $dev1 and $dev now in a VG - pv_in_use should display "used" vgcreate $vg1 "$dev1" "$dev2" check pv_field "$dev1" pv_in_use "used" check pv_field "$dev2" pv_in_use "used" # disable $dev2 and dev1 with 0 MDAs remains, but still # marked as used, so pvcreate/vgcreate/pvremove should fail aux disable_dev "$dev2" pvscan --cache check pv_field "$dev1" pv_in_use "used" not pvcreate "$dev1" 2>err cat err grep "$MARKED_AS_USED_MSG" err not pvchange -u "$dev1" 2>err grep "$MARKED_AS_USED_MSG" err not vgcreate $vg2 "$dev1" 2>err grep "$MARKED_AS_USED_MSG" err not pvremove "$dev1" 2>err grep "$MARKED_AS_USED_MSG" err # save PV signature from dev1 for reuse later on in this # test so we don't need to initialize all the VG stuff again dd if="$dev1" of=dev1_backup bs=1M # pvcreate and pvremove can be forced even if the PV is marked as used pvremove -ff -y "$dev1" dd if=dev1_backup of="$dev1" bs=1M pvcreate -ff -y "$dev1" dd if=dev1_backup of="$dev1" bs=1M # prepare a VG with $dev1 and $dev both having 1 MDA aux enable_dev "$dev2" vgremove -ff $vg1 pvcreate --metadatacopies 1 "$dev1" vgcreate $vg1 "$dev1" "$dev2" # disable $dev1, then repair the VG - $dev1 is removed from VG aux disable_dev "$dev1" vgreduce --removemissing $vg1 # now, enable $dev1, automatic repair will happen on pvs call # (or any other lvm command that does vg_read with repair inside) aux enable_dev "$dev1" # FIXME: once persistent cache does not cause races with timestamps # causing LVM tools to not see the VG inconsistency and once # VG repair is always done, delete this line which removes # persistent .cache as a workaround rm -f "$TESTDIR/etc/.cache" vgck $vg1 # check $dev1 does not contain the PV_EXT_FLAG anymore - it # should be removed as part of the repaid during vg_read since # $dev1 is not part of $vg1 anymore check pv_field "$dev1" pv_in_use "" ############################################# ### CHECK PV WITH DISABLED METADATA AREAS ### ############################################# pvcreate -ff -y --metadatacopies 1 "$dev1" pvcreate -ff -y --metadatacopies 1 "$dev2" # $dev1 and $dev2 not in any VG - pv_in_use field should be blank check pv_field "$dev1" pv_in_use "" check pv_field "$dev2" pv_in_use "" # $dev1 and $dev now in a VG - pv_in_use should display "used" vgcreate $vg1 "$dev1" "$dev2" check pv_field "$dev1" pv_in_use "used" check pv_field "$dev2" pv_in_use "used" pvchange --metadataignore y "$dev1" aux disable_dev "$dev2" pvscan --cache check pv_field "$dev1" pv_in_use "used" not pvcreate "$dev1" 2>err grep "$MARKED_AS_USED_MSG" err not pvchange -u "$dev1" 2>err grep "$MARKED_AS_USED_MSG" err not vgcreate $vg2 "$dev1" 2>err grep "$MARKED_AS_USED_MSG" err not pvremove "$dev1" 2>err grep "$MARKED_AS_USED_MSG" err # save PV signature from dev1 for reuse later on in this # test so we don't need to initialize all the VG stuff again dd if="$dev1" of=dev1_backup bs=1M # pvcreate and pvremove can be forced even if the PV is marked as used pvremove -ff -y "$dev1" dd if=dev1_backup of="$dev1" bs=1M pvcreate -ff -y "$dev1" dd if=dev1_backup of="$dev1" bs=1M # prepare a VG with 
$dev1 and $dev both having 1 MDA aux enable_dev "$dev2" vgremove -ff $vg1 pvcreate --metadatacopies 1 "$dev1" vgcreate $vg1 "$dev1" "$dev2" # disable $dev1, then repair the VG - $dev1 is removed from VG aux disable_dev "$dev1" vgreduce --removemissing $vg1 # now, enable $dev1, automatic repair will happen on pvs call # (or any other lvm command that does vg_read with repair inside) aux enable_dev "$dev1" # FIXME: once persistent cache does not cause races with timestamps # causing LVM tools to not see the VG inconsistency and once # VG repair is always done, delete this line which removes # persistent .cache as a workaround rm -f "$TESTDIR/etc/.cache" vgck $vg1 # check $dev1 does not contain the PV_EXT_FLAG anymore - it # should be removed as part of the repaid during vg_read since # $dev1 is not part of $vg1 anymore check pv_field "$dev1" pv_in_use "" ########################### # OTHER PV-RELATED CHECKS # ########################### # vgcfgrestore should also set PV_EXT_FLAG on PVs where VG is restored vgcfgbackup -f vg_backup $vg1 check pv_field "$dev2" pv_in_use "used" vgremove -ff $vg1 check pv_field "$dev2" pv_in_use "" vgcfgrestore -f vg_backup $vg1 check pv_field "$dev2" pv_in_use "used" LVM2.2.02.176/test/shell/lvconvert-raid-reshape.sh0000644000000000000120000001460513176752421020340 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA2110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest if [[ "$TESTDIR" == /dev/shm/* ]]; then echo "Disabled. This tests is permanently causing /dev/shm exhaustion. RHBZ#1501145" false fi which mkfs.ext4 || skip aux have_raid 1 12 0 || skip # Temporarily skip reshape tests on single-core CPUs until there's a fix for # https://bugzilla.redhat.com/1443999 - AGK 2017/04/20 #aux have_multi_core || skip # dropping single-core limitation with 1.12 target aux prepare_pvs 65 32 get_devs vgcreate -s 1M "$vg" "${DEVICES[@]}" function _lvcreate { local level=$1 local req_stripes=$2 local stripes=$3 local size=$4 local vg=$5 local lv=$6 lvcreate -y -aey --type $level -i $req_stripes -L $size -n $lv $vg check lv_first_seg_field $vg/$lv segtype "$level" check lv_first_seg_field $vg/$lv datastripes $req_stripes check lv_first_seg_field $vg/$lv stripes $stripes mkfs.ext4 "$DM_DEV_DIR/$vg/$lv" fsck -fn "$DM_DEV_DIR/$vg/$lv" } function _lvconvert { local req_level=$1 local level=$2 local data_stripes=$3 local stripes=$4 local vg=$5 local lv=$6 local region_size=${7-} local wait_and_check=1 local R="" [ -n "$region_size" ] && R="-R $region_size" [ "${level:0:7}" = "striped" ] && wait_and_check=0 [ "${level:0:5}" = "raid0" ] && wait_and_check=0 lvconvert -y --ty $req_level $R $vg/$lv || return $? 
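	# Verify the takeover result: segment type, data stripe count and total
	# stripe count must match the requested layout (and region size, when one
	# was passed in).  Unless the target is striped/raid0*, the LV still has
	# to resynchronize, so wait for sync and fsck to prove the filesystem
	# survived the conversion.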
check lv_first_seg_field $vg/$lv segtype "$level" check lv_first_seg_field $vg/$lv data_stripes $data_stripes check lv_first_seg_field $vg/$lv stripes $stripes [ -n "$region_size" ] && check lv_field $vg/$lv regionsize $region_size if [ "$wait_and_check" -eq 1 ] then fsck -fn "$DM_DEV_DIR/$vg/$lv" aux wait_for_sync $vg $lv fi fsck -fn "$DM_DEV_DIR/$vg/$lv" } function _reshape_layout { local type=$1 shift local data_stripes=$1 shift local stripes=$1 shift local vg=$1 shift local lv=$1 shift local opts="$*" local ignore_a_chars=0 [[ "$opts" =~ "--stripes" ]] && ignore_a_chars=1 lvconvert -vvvv -y --ty $type $opts $vg/$lv check lv_first_seg_field $vg/$lv segtype "$type" check lv_first_seg_field $vg/$lv data_stripes $data_stripes check lv_first_seg_field $vg/$lv stripes $stripes aux wait_for_sync $vg $lv $ignore_a_chars fsck -fn "$DM_DEV_DIR/$vg/$lv" } # Delay leg so that rebuilding status characters # can be read before resync finished too quick. # aux delay_dev "$dev1" 1 # # Start out with raid5(_ls) # # Create 3-way striped raid5 (4 legs total) _lvcreate raid5_ls 3 4 16M $vg $lv1 check lv_first_seg_field $vg/$lv1 segtype "raid5_ls" aux wait_for_sync $vg $lv1 # Reshape it to 256K stripe size _reshape_layout raid5_ls 3 4 $vg $lv1 --stripesize 256K check lv_first_seg_field $vg/$lv1 stripesize "256.00k" # Convert raid5(_n) -> striped testing raid5_ls gets rejected not _lvconvert striped striped 3 3 $vg $lv1 512k _reshape_layout raid5_n 3 4 $vg $lv1 _lvconvert striped striped 3 3 $vg $lv1 # Convert striped -> raid5_n _lvconvert raid5_n raid5_n 3 4 $vg $lv1 "" 1 # Convert raid5_n -> raid5_ls _reshape_layout raid5_ls 3 4 $vg $lv1 # Convert raid5_ls to 5 stripes _reshape_layout raid5_ls 5 6 $vg $lv1 --stripes 5 # Convert raid5_ls back to 3 stripes _reshape_layout raid5_ls 3 6 $vg $lv1 --stripes 3 --force _reshape_layout raid5_ls 3 4 $vg $lv1 --stripes 3 # Convert raid5_ls to 7 stripes _reshape_layout raid5_ls 7 8 $vg $lv1 --stripes 7 # Convert raid5_ls to 9 stripes _reshape_layout raid5_ls 9 10 $vg $lv1 --stripes 9 # Convert raid5_ls to 14 stripes _reshape_layout raid5_ls 14 15 $vg $lv1 --stripes 14 # Convert raid5_ls to 63 stripes _reshape_layout raid5_ls 63 64 $vg $lv1 --stripes 63 # Convert raid5_ls back to 27 stripes _reshape_layout raid5_ls 27 64 $vg $lv1 --stripes 27 --force _reshape_layout raid5_ls 27 28 $vg $lv1 --stripes 27 # Convert raid5_ls back to 4 stripes checking # conversion to striped/raid* gets rejected # with existing LVs to be removed afer reshape _reshape_layout raid5_ls 4 28 $vg $lv1 --stripes 4 --force # No we got the data reshaped and the freed SubLVs still present # -> check takeover request gets rejected not lvconvert --yes --type striped $vg/$lv1 not lvconvert --yes --type raid0 $vg/$lv1 not lvconvert --yes --type raid0_meta $vg/$lv1 not lvconvert --yes --type raid6 $vg/$lv1 # Remove the freed SubLVs _reshape_layout raid5_ls 4 5 $vg $lv1 --stripes 4 # Convert raid5_ls back to 3 stripes _reshape_layout raid5_ls 3 5 $vg $lv1 --stripes 3 --force _reshape_layout raid5_ls 3 4 $vg $lv1 --stripes 3 # Convert raid5_ls -> raid5_rs _reshape_layout raid5_rs 3 4 $vg $lv1 # Convert raid5_rs -> raid5_la _reshape_layout raid5_la 3 4 $vg $lv1 # Convert raid5_la -> raid5_ra _reshape_layout raid5_ra 3 4 $vg $lv1 # Convert raid5_ra -> raid6_ra_6 _lvconvert raid6_ra_6 raid6_ra_6 3 5 $vg $lv1 "4.00m" 1 # Convert raid5_la -> raid6(_zr) _reshape_layout raid6 3 5 $vg $lv1 # Convert raid6(_zr) -> raid6_nc _reshape_layout raid6_nc 3 5 $vg $lv1 # Convert raid6(_nc) -> raid6_nr _reshape_layout 
raid6_nr 3 5 $vg $lv1 # Convert raid6_nr) -> raid6_rs_6 _reshape_layout raid6_rs_6 3 5 $vg $lv1 # Convert raid6_rs_6 to 5 stripes _reshape_layout raid6_rs_6 5 7 $vg $lv1 --stripes 5 # Convert raid6_rs_6 to 4 stripes _reshape_layout raid6_rs_6 4 7 $vg $lv1 --stripes 4 --force _reshape_layout raid6_rs_6 4 6 $vg $lv1 --stripes 4 check lv_first_seg_field $vg/$lv1 stripesize "256.00k" # Convert raid6_rs_6 to raid6_n_6 _reshape_layout raid6_n_6 4 6 $vg $lv1 # Convert raid6_n_6 -> striped _lvconvert striped striped 4 4 $vg $lv1 check lv_first_seg_field $vg/$lv1 stripesize "256.00k" # Convert striped -> raid10(_near) _lvconvert raid10 raid10 4 8 $vg $lv1 # Convert raid10 to 10 stripes and 64K stripesize # FIXME: change once we support odd numbers of raid10 stripes not _reshape_layout raid10 4 9 $vg $lv1 --stripes 9 --stripesize 64K _reshape_layout raid10 10 20 $vg $lv1 --stripes 10 --stripesize 64K check lv_first_seg_field $vg/$lv1 stripesize "64.00k" # Convert raid6_n_6 -> striped _lvconvert striped striped 10 10 $vg $lv1 check lv_first_seg_field $vg/$lv1 stripesize "64.00k" vgremove -ff $vg LVM2.2.02.176/test/shell/thin-large.sh0000644000000000000120000000276613176752421016013 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # 'Exercise logic around boundary sizes of thin-pool data and chunksize SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest # FIXME update test to make something useful on <16T aux can_use_16T || skip aux have_thin 1 0 0 || skip # Prepare ~1P sized devices aux prepare_vg 1 1000000000 lvcreate -an -T -L250T $vg/pool250 lvcreate -an -T -L250T --poolmetadatasize 16G $vg/pool16 fail lvcreate -an -T -L250T --chunksize 64K --poolmetadatasize 16G $vg/pool64 # Creation of thin-pool with proper chunk-size but not enough metadata size # which can grow later needs to pass lvcreate -an -T -L250T --chunksize 1M --poolmetadatasize 4G $vg/pool1024 # Creation of chunk should fit lvcreate -an -T -L12T --chunksize 64K --poolmetadatasize 16G $vg/pool64 check lv_field $vg/pool64 chunksize "64.00k" lvremove -ff $vg ### Check also lvconvert ### lvcreate -an -L250T -n pool $vg fail lvconvert -y --chunksize 64 --thinpool $vg/pool lvconvert -y --chunksize 1M --thinpool $vg/pool check lv_field $vg/pool chunksize "1.00m" vgremove -ff $vg LVM2.2.02.176/test/shell/process-each-duplicate-vgnames.sh0000644000000000000120000000231413176752421021730 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. test_description='Test vgs with duplicate vg names' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux prepare_devs 2 pvcreate "$dev1" pvcreate "$dev2" aux disable_dev "$dev1" "$dev2" aux enable_dev "$dev1" vgscan vgcreate $vg1 "$dev1" UUID1=$(vgs --noheading -o vg_uuid $vg1) aux disable_dev "$dev1" aux enable_dev "$dev2" vgscan vgcreate $vg1 "$dev2" UUID2=$(vgs --noheading -o vg_uuid $vg1) aux enable_dev "$dev1" # need vgscan after enabling/disabling devs # so that the next commands properly see them vgscan pvs "$dev1" pvs "$dev2" vgs -o+vg_uuid | tee err grep $UUID1 err grep $UUID2 err # should we specify and test which should be displayed? # vgs --noheading -o vg_uuid $vg1 >err # grep $UUID1 err aux disable_dev "$dev2" vgs -o+vg_uuid | tee err grep $UUID1 err not grep $UUID2 err aux enable_dev "$dev2" vgscan aux disable_dev "$dev1" vgs -o+vg_uuid | tee err grep $UUID2 err not grep $UUID1 err aux enable_dev "$dev1" vgscan LVM2.2.02.176/test/shell/lvmetad-client-filter.sh0000644000000000000120000000132713176752421020144 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITHOUT_LVMETAD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 2 pvs --config 'devices { filter = [ "r%.*%" ] }' 2>&1 | grep rejected pvs --config 'devices { filter = [ "r%.*%" ] }' 2>&1 | not grep 'No device found' LVM2.2.02.176/test/shell/lvcreate-external-dmeventd.sh0000644000000000000120000000237413176752421021205 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test check converted external origin remains monitored SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest # # Main # aux have_thin 1 3 0 || skip aux have_raid 1 3 0 || skip aux prepare_dmeventd aux prepare_vg 2 # Test validation for external origin being multiple of thin pool chunk size lvcreate -L10M -T $vg/pool # Create raid LV (needs monitoring) for external origin. lvcreate -m1 -L1 -n $lv1 $vg lvconvert -T --thinpool $vg/pool --originname $lv2 $vg/$lv1 # Check raid LV now as external origing with $lv2 name is still monitored check lv_first_seg_field $vg/$lv2 seg_monitor "monitored" lvchange -an $vg lvchange -ay $vg/$lv1 check lv_first_seg_field $vg/$lv2 seg_monitor "monitored" vgremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-raid-status-validation.sh0000644000000000000120000001270513176752421022363 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA2110-1301 USA ####################################################################### # This series of tests is meant to validate the correctness of # 'dmsetup status' for RAID LVs - especially during various sync action # transitions, like: recover, resync, check, repair, idle, reshape, etc ####################################################################### SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_LVMETAD_DEBUG_OPTS=${LVM_TEST_LVMETAD_DEBUG_OPTS-} . lib/inittest # check for version 1.9.0 # - it is the point at which linear->raid1 uses "recover" # check for version 1.13.0 instead # - it is the point at which a finishing "recover" doesn't print all 'a's aux have_raid 1 13 0 || skip aux prepare_pvs 9 get_devs vgcreate -s 2m "$vg" "${DEVICES[@]}" ########################################### # Upconverted RAID1 should never have all 'a's in status output ########################################### aux delay_dev "$dev2" 0 50 lvcreate -aey -l 2 -n $lv1 $vg "$dev1" lvconvert --type raid1 -y -m 1 $vg/$lv1 "$dev2" while ! check in_sync $vg $lv1; do a=( $(dmsetup status $vg-$lv1) ) || die "Unable to get status of $vg/$lv1" b=( $(echo "${a[6]}" | sed s:/:' ':) ) if [ "${b[0]}" -ne "${b[1]}" ]; then # First, 'check in_sync' should only need to check the ratio # If we are here, it is probably doing more than that. # If not in-sync, then we should only ever see "Aa" [ "${a[5]}" == "Aa" ] else [ "${a[5]}" != "aa" ] should [ "${a[5]}" == "AA" ] # RHBZ 1507719 fi sleep .1 done aux enable_dev "$dev2" lvremove -ff $vg ########################################### # Upconverted RAID1 should not be at 100% right after upconvert ########################################### aux delay_dev "$dev2" 0 50 lvcreate -aey -l 2 -n $lv1 $vg "$dev1" lvconvert --type raid1 -y -m 1 $vg/$lv1 "$dev2" a=( $(dmsetup status $vg-$lv1) ) || die "Unable to get status of $vg/$lv1" b=( $(echo "${a[6]}" | sed s:/:' ':) ) should [ "${b[0]}" -ne "${b[1]}" ] # RHBZ 1507729 aux enable_dev "$dev2" lvremove -ff $vg ########################################### # Catch anything suspicious with linear -> RAID1 upconvert ########################################### aux delay_dev "$dev2" 0 50 lvcreate -aey -l 2 -n $lv1 $vg "$dev1" lvconvert --type raid1 -y -m 1 $vg/$lv1 "$dev2" while true; do a=( $(dmsetup status $vg-$lv1) ) || die "Unable to get status of $vg/$lv1" b=( $(echo "${a[6]}" | sed s:/:' ':) ) if [ "${b[0]}" -ne "${b[1]}" ]; then # If the sync operation ("recover" in this case) is not # finished, then it better be as follows: [ "${a[5]}" = "Aa" ] # Might be transitioning from "idle" to "recover". # Kernel could check mddev->recovery for the intent to # begin a "recover" and report that... probably would be # better. 
RHBZ 1507719 should [ "${a[7]}" = "recover" ] else # Tough to tell the INVALID case, # Before starting sync thread: "Aa X/X recover" # from the valid case, # Just finished sync thread: "Aa X/X recover" should [ "${a[5]}" = "AA" ] # RHBZ 1507719 should [ "${a[7]}" = "idle" ] # RHBZ 1507719 break fi sleep .1 done aux enable_dev "$dev2" lvremove -ff $vg ########################################### # Catch anything suspicious with RAID1 2-way -> 3-way upconvert ########################################### aux delay_dev "$dev3" 0 50 lvcreate --type raid1 -m 1 -aey -l 2 -n $lv1 $vg "$dev1" "$dev2" aux wait_for_sync $vg $lv1 lvconvert -y -m +1 $vg/$lv1 "$dev3" while true; do a=( $(dmsetup status $vg-$lv1) ) || die "Unable to get status of $vg/$lv1" b=( $(echo "${a[6]}" | sed s:/:' ':) ) if [ "${b[0]}" -ne "${b[1]}" ]; then # If the sync operation ("recover" in this case) is not # finished, then it better be as follows: [ "${a[5]}" = "AAa" ] [ "${a[7]}" = "recover" ] else # Tough to tell the INVALID case, # Before starting sync thread: "AAa X/X recover" # from the valid case, # Just finished sync thread: "AAa X/X recover" should [ "${a[5]}" = "AAA" ] # RHBZ 1507719 should [ "${a[7]}" = "idle" ] # RHBZ 1507719 break fi sleep .1 done aux enable_dev "$dev3" lvremove -ff $vg ########################################### # Catch anything suspicious with RAID1 initial resync ########################################### aux delay_dev "$dev2" 0 50 lvcreate --type raid1 -m 1 -aey -l 2 -n $lv1 $vg "$dev1" "$dev2" while true; do a=( $(dmsetup status $vg-$lv1) ) || die "Unable to get status of $vg/$lv1" b=( $(echo "${a[6]}" | sed s:/:' ':) ) if [ "${b[0]}" -ne "${b[1]}" ]; then # If the sync operation ("resync" in this case) is not # finished, then it better be as follows: [ "${a[5]}" = "aa" ] # Should be in "resync", but it is possible things are only # just getting going - in which case, it could be "idle" # with 0% sync ratio [ "${a[7]}" = "resync" ] || \ [[ "${a[7]}" = "idle" && "${b[0]}" -eq "0" ]] else should [ "${a[5]}" = "AA" ] # RHBZ 1507719 should [ "${a[7]}" = "idle" ] # RHBZ 1507719 break fi sleep .1 done aux enable_dev "$dev2" lvremove -ff $vg vgremove -ff $vg LVM2.2.02.176/test/shell/pvremove-thin.sh0000644000000000000120000000147413176752421016557 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Checks we are not reading our own devices # https://bugzilla.redhat.com/show_bug.cgi?id=1064374 SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg aux have_thin 1 8 0 || skip aux extend_filter_LVMTEST lvcreate -L10 -V10 -n $lv1 -T $vg/pool1 pvcreate "$DM_DEV_DIR/$vg/$lv1" pvremove "$DM_DEV_DIR/$vg/$lv1" vgremove -ff $vg LVM2.2.02.176/test/shell/thin-foreign-dmeventd.sh0000644000000000000120000000517413176752421020152 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # test foreing user of thin-pool SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest MOUNT_DIR=mnt cleanup_mounted_and_teardown() { umount "$MOUNT_DIR" || true dmsetup remove $THIN vgremove -ff $vg aux teardown } percent_() { get lv_field $vg/pool data_percent | cut -d. -f1 } # # Main # aux have_thin 1 0 0 || skip which mkfs.ext4 || skip # Use our mkfs config file to get approximately same results # TODO: maybe use it for all test via some 'prepare' function export MKE2FS_CONFIG="$TESTOLDPWD/lib/mke2fs.conf" aux prepare_dmeventd aux prepare_vg 2 64 # Create named pool only lvcreate --errorwhenfull y -L2 -T $vg/pool POOL="$vg-pool" THIN="${PREFIX}_thin" # Foreing user is using own ioctl command to create thin devices dmsetup message $POOL 0 "create_thin 0" dmsetup message $POOL 0 "set_transaction_id 0 1" dmsetup status # Once the transaction id has changed, lvm2 shall not be able to create thinLV fail lvcreate -V10 $vg/pool trap 'cleanup_mounted_and_teardown' EXIT # 20M thin device dmsetup create $THIN --table "0 40960 thin $DM_DEV_DIR/mapper/$POOL 0" dmsetup table dmsetup info -c mkdir "$MOUNT_DIR" # This mkfs should fill 2MB pool over 95% # no autoresize is configured mkfs.ext4 "$DM_DEV_DIR/mapper/$THIN" test "$(percent_)" -gt 95 mount "$DM_DEV_DIR/mapper/$THIN" "$MOUNT_DIR" pvchange -x n "$dev1" "$dev2" test "$(percent_)" -gt 95 # Configure autoresize aux lvmconf 'activation/thin_pool_autoextend_percent = 10' \ 'activation/thin_pool_autoextend_threshold = 75' # Give it some time to left dmeventd do some (failing to resize) work sleep 20 # And check foreign thin device is still mounted mount | grep "$MOUNT_DIR" | grep "$THIN" test "$(percent_)" -gt 95 pvchange -x y "$dev1" "$dev2" # FIXME: ATM tell dmeventd explicitely we've changed metadata # however dmeventd shall be aware of any metadata change # and automagically retry resize operation after that. lvchange --refresh $vg/pool # Give it some time and let dmeventd do some work for i in $(seq 1 15) ; do test "$(percent_)" -ge 75 || break sleep 1 done test "$(percent_)" -lt 75 # And check foreign thin device is still mounted mount | grep "$MOUNT_DIR" | grep "$THIN" LVM2.2.02.176/test/shell/lvconvert-repair-replace.sh0000644000000000000120000000702413176752421020664 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 . 
lib/inittest aux prepare_vg 6 aux lvmconf 'allocation/maximise_cling = 0' \ 'allocation/mirror_logs_require_separate_pvs = 1' # 3-way, disk log # multiple failures, full replace lvcreate -aey --mirrorlog disk --type mirror -m 2 --ignoremonitoring --nosync -L 1 -n 3way $vg "$dev1" "$dev2" "$dev3" "$dev4":0-1 aux disable_dev "$dev1" "$dev2" lvconvert -y --repair $vg/3way 2>&1 | tee 3way.out lvs -a -o +devices $vg | not grep unknown not grep "WARNING: Failed" 3way.out vgreduce --removemissing $vg check mirror $vg 3way aux enable_dev "$dev1" "$dev2" vgremove -ff $vg # 3-way, disk log # multiple failures, partial replace vgcreate $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" lvcreate -aey --mirrorlog disk --type mirror -m 2 --ignoremonitoring --nosync -L 1 -n 3way $vg "$dev1" "$dev2" "$dev3" "$dev4" aux disable_dev "$dev1" "$dev2" lvconvert -y --repair $vg/3way 2>&1 | tee 3way.out grep "WARNING: Failed" 3way.out lvs -a -o +devices $vg | not grep unknown vgreduce --removemissing $vg check mirror $vg 3way aux enable_dev "$dev1" "$dev2" vgremove -ff $vg vgcreate $vg "$dev1" "$dev2" "$dev3" lvcreate -aey --mirrorlog disk --type mirror -m 1 --ignoremonitoring --nosync -l 1 -n 2way $vg "$dev1" "$dev2" "$dev3" aux disable_dev "$dev1" lvconvert -y --repair $vg/2way 2>&1 | tee 2way.out grep "WARNING: Failed" 2way.out lvs -a -o +devices $vg | not grep unknown vgreduce --removemissing $vg check mirror $vg 2way aux enable_dev "$dev1" "$dev2" vgremove -ff $vg # FIXME - exclusive activation for mirrors should work here # conversion of inactive cluster logs is also unsupported test -e LOCAL_CLVMD && exit 0 # Test repair of inactive mirror with log failure # Replacement should fail, but convert should succeed (switch to corelog) vgcreate $vg "$dev1" "$dev2" "$dev3" "$dev4" lvcreate -aey --type mirror -m 2 --ignoremonitoring -l 2 -n mirror2 $vg "$dev1" "$dev2" "$dev3" "$dev4":0 vgchange -a n $vg pvremove -ff -y "$dev4" lvconvert -y --repair $vg/mirror2 check mirror $vg mirror2 vgs $vg vgremove -ff $vg if aux kernel_at_least 3 0 0; then # 2-way, mirrored log # Double log failure, full replace vgcreate $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" lvcreate -aey --mirrorlog mirrored --type mirror -m 1 --ignoremonitoring --nosync -L 1 -n 2way $vg \ "$dev1" "$dev2" "$dev3":0 "$dev4":0 aux disable_dev "$dev3" "$dev4" lvconvert -y --repair $vg/2way 2>&1 | tee 2way.out lvs -a -o +devices $vg | not grep unknown not grep "WARNING: Failed" 2way.out vgreduce --removemissing $vg check mirror $vg 2way aux enable_dev "$dev3" "$dev4" vgremove -ff $vg fi # 3-way, mirrored log # Single log failure, replace vgcreate $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" lvcreate -aey --mirrorlog mirrored --type mirror -m 2 --ignoremonitoring --nosync -L 1 -n 3way $vg \ "$dev1" "$dev2" "$dev3" "$dev4":0 "$dev5":0 aux disable_dev "$dev4" lvconvert -y --repair $vg/3way 2>&1 | tee 3way.out lvs -a -o +devices $vg | not grep unknown not grep "WARNING: Failed" 3way.out vgreduce --removemissing $vg check mirror $vg 3way aux enable_dev "$dev4" vgremove -ff $vg LVM2.2.02.176/test/shell/activate-partial.sh0000644000000000000120000000171313176752421017202 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 3 lvcreate -aey --type mirror -m 1 -l 1 --nosync -n mirror $vg lvchange -a n $vg/mirror aux disable_dev "$dev1" not vgreduce --removemissing $vg not lvchange -v -aey $vg/mirror lvchange -v --partial -aey $vg/mirror not lvchange -v --refresh $vg/mirror lvchange -v --refresh --partial $vg/mirror # also check that vgchange works vgchange -a n --partial $vg vgchange -aey --partial $vg # check vgremove vgremove -ff $vg LVM2.2.02.176/test/shell/system_id.sh0000644000000000000120000005250413176752421015754 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Test system_id' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 print_lvmlocal() { { echo "local {"; printf "%s\n" "$@"; echo "}"; } >"$LVMLOCAL" } . lib/inittest aux prepare_devs 1 SIDFILE="etc/lvm_test.conf" LVMLOCAL="etc/lvmlocal.conf" # with clvm enabled, vgcreate with no -c option creates a clustered vg, # which should have no system id if [ -e LOCAL_CLVMD ]; then SID1=sidfoolocal SID2="" print_lvmlocal " system_id = $SID1" aux lvmconf "global/system_id_source = lvmlocal" vgcreate $vg1 "$dev1" vgs -o+systemid $vg1 check vg_field $vg1 systemid "$SID2" vgremove $vg1 rm -f "$LVMLOCAL" exit 0 fi # create vg with system_id using each source ## none SID="" aux lvmconf "global/system_id_source = none" vgcreate $vg1 "$dev1" check vg_field $vg1 systemid "$SID" vgremove $vg1 # FIXME - print 'life' config data eval "$(lvmconfig global/etc 2>/dev/null || lvmconfig --type default global/etc)" ## machineid if [ -e "$etc/machine-id" ]; then SID=$(cat "$etc/machine-id") aux lvmconf "global/system_id_source = machineid" vgcreate $vg1 "$dev1" vgs -o+systemid $vg1 check vg_field $vg1 systemid "$SID" vgremove $vg1 fi ## uname SID1=$(uname -n) if [ -n "$SID1" ]; then aux lvmconf "global/system_id_source = uname" SID2=$(lvm systemid | awk '{ print $3 }') vgcreate $vg1 "$dev1" vgs -o+systemid $vg1 check vg_field $vg1 systemid "$SID2" vgremove $vg1 fi ## lvmlocal SID=sidfoolocal print_lvmlocal " system_id = $SID" aux lvmconf "global/system_id_source = lvmlocal" vgcreate $vg1 "$dev1" vgs -o+systemid $vg1 check vg_field $vg1 systemid "$SID" vgremove $vg1 rm -f "$LVMLOCAL" ## file SID=sidfoofile echo "$SID" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" vgcreate $vg1 "$dev1" vgs -o+systemid $vg1 check vg_field $vg1 systemid "$SID" vgremove $vg1 # override system_id to create a foreign vg, then fail to use the vg SID1=sidfoofile1 SID2=sidfoofile2 echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg, overriding the local system_id so the vg looks foreign vgcreate --systemid "$SID2" "$vg1" "$dev1" # normal vgs is not an error and does not see the vg vgs >err not grep $vg1 err # vgs on the foreign vg is an error and not 
displayed not vgs $vg1 >err not grep $vg1 err # fail to remove foreign vg not vgremove $vg1 # using --foreign we can see foreign vg vgs --foreign >err grep $vg1 err vgs --foreign $vg1 >err grep $vg1 err # change the local system_id to the second value, making the vg not foreign echo "$SID2" > "$SIDFILE" # we can now see and remove the vg vgs $vg1 >err grep $vg1 err vgremove $vg1 # create a vg, then change the local system_id, making the vg foreign SID1=sidfoofile1 SID2=sidfoofile2 echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg vgcreate $vg1 "$dev1" # normal vgs sees the vg vgs >err grep $vg1 err # change the local system_id, making the vg foreign echo "$SID2" > "$SIDFILE" # normal vgs doesn't see the vg vgs >err not grep $vg1 err # using --foreign we can see the vg vgs --foreign >err grep $vg1 err # change the local system_id back to the first value, making the vg not foreign echo "$SID1" > "$SIDFILE" vgs >err grep $vg1 err vgremove $vg1 # create a vg, then change the vg's system_id, making it foreign SID1=sidfoofile1 SID2=sidfoofile2 echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg vgcreate $vg1 "$dev1" # normal vgs sees the vg vgs >err grep $vg1 err # change the vg's system_id, making the vg foreign vgchange --yes --systemid "$SID2" $vg1 # normal vgs doesn't see the vg vgs >err not grep $vg1 err # using --foreign we can see the vg vgs --foreign >err grep $vg1 err # change the local system_id to the second system_id so we can remove the vg echo "$SID2" > "$SIDFILE" vgs >err grep $vg1 err vgremove $vg1 # create a vg, create active lvs in it, change our system_id, making # the VG foreign, verify that we can still see the foreign VG, # and can deactivate the LVs SID1=sidfoofile1 SID2=sidfoofile2 echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg vgcreate $vg1 "$dev1" lvcreate -n $lv1 -l 2 $vg1 # normal vgs sees the vg and lv vgs >err grep $vg1 err check lv_exists $vg1 $lv1 # change our system_id, making the vg foreign, but accessible echo "$SID2" > "$SIDFILE" vgs >err grep $vg1 err check lv_exists $vg1 $lv1 # can deactivate the lv lvchange -an $vg1/$lv1 # now that the foreign vg has no active lvs, we can't access it not lvremove $vg1/$lv1 not vgremove $vg1 # change our system_id back to match the vg so it's not foreign echo "$SID1" > "$SIDFILE" vgs >err grep $vg1 err lvremove $vg1/$lv1 vgremove $vg1 # local system has no system_id, so it can't access a vg with a system_id SID1=sidfoofile1 echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg vgcreate $vg1 "$dev1" aux lvmconf "global/system_id_source = none" vgs >err not grep $vg1 err not vgs $vg1 >err not grep $vg1 err aux lvmconf "global/system_id_source = file" vgs >err grep $vg1 err vgremove $vg1 # local system has a system_id, and can use a vg without a system_id SID1=sidfoofile1 rm -f "$SIDFILE" # create a vg with no system_id aux lvmconf "global/system_id_source = none" vgcreate $vg1 "$dev1" check vg_field $vg1 systemid "" # set a local system_id echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # check we can see and use the vg with no system_id vgs >err grep $vg1 err vgs $vg1 >err grep $vg1 err vgremove $vg1 # vgexport clears system_id, vgimport sets system_id 
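# Illustrative note, hedged (not part of the original test): besides the
# systemid column checked below, the exported state itself shows up as an 'x'
# in the third character of vg_attr, so both effects of vgexport/vgimport can
# be watched with a single read-only report, e.g.:
#   vgs -o vg_name,vg_attr,systemid $vg1
# (this assumes the vg_attr and systemid report fields of this LVM2 version).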
SID1=sidfoofile1 echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg vgcreate $vg1 "$dev1" # normal vgs sees the vg vgs -o+systemid >err grep $vg1 err grep "$SID1" err # after vgexport there is no systemid vgexport $vg1 vgs -o+systemid >err grep $vg1 err not grep "$SID1" err # after vgimport there is a systemid vgimport $vg1 vgs -o+systemid >err grep $vg1 err grep "$SID1" err vgremove $vg1 # vgchange -cy clears system_id, vgchange -cn sets system_id SID1=sidfoofile1 echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg vgcreate $vg1 "$dev1" # normal vgs sees the vg vgs -o+systemid >err grep $vg1 err grep "$SID1" err # after vgchange -cy there is no systemid vgchange --yes -cy $vg1 vgs --config 'global { locking_type=0 }' -o+systemid $vg1 >err grep $vg1 err not grep "$SID1" err # after vgchange -cn there is a systemid vgchange --config 'global { locking_type=0 }' -cn $vg1 vgs -o+systemid >err grep $vg1 err grep "$SID1" err vgremove $vg1 # Test max system_id length (128) and invalid system_id characters. # The 128 length limit is imposed before invalid characters are omitted. # 120 numbers followed by 8 letters (max len) SID1=012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789abcdefgh # 120 numbers followed by 9 letters (too long by 1 character, the last is omitted) SID2=012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789abcdefghi # max len system_id should appear normally echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg vgcreate $vg1 "$dev1" # normal vgs sees the vg vgs -o+systemid $vg1 >err grep $vg1 err grep "$SID1" err vgremove $vg1 # max+1 len system_id should be missing the last character echo "$SID2" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg vgcreate $vg1 "$dev1" # normal vgs sees the vg vgs -o+systemid $vg1 >err grep $vg1 err grep "$SID1" err not grep "$SID2" err vgremove $vg1 # max len system_id containing an invalid character should appear without # the invalid character # 120 numbers followed by invalid '%' character followed by 8 letters (too long by 1 character) SID1=012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789%abcdefgh # After the invalid character is omitted from SID1 # The string is truncated to max length (128) before the invalid character is omitted SID2=012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789abcdefg echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg vgcreate $vg1 "$dev1" # normal vgs sees the vg vgs -o+systemid $vg1 >err grep $vg1 err not grep $SID1 err grep $SID2 err vgremove $vg1 # contains a bunch of invalid characters SID1="?%$&A.@1]" # SID1 without the invalid characters SID2=A.1 echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg vgcreate $vg1 "$dev1" # normal vgs sees the vg vgs -o+systemid $vg1 >err grep $vg1 err not grep "$SID1" err grep "$SID2" err vgremove $vg1 # pvs: pv in a foreign vg not reported # pvs --foreign: pv in a foreign vg is 
reported SID1=sidfoofile1 SID2=sidfoofile2 echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg vgcreate $vg1 "$dev1" # normal pvs sees the vg and pv pvs >err grep $vg1 err grep "$dev1" err # change the local system_id, making the vg foreign echo "$SID2" > "$SIDFILE" # normal pvs does not see the vg or pv pvs >err not grep $vg1 err not grep "$dev1" err # pvs --foreign does see the vg and pv pvs --foreign >err grep $vg1 err grep "$dev1" err # change the local system_id back so the vg can be removed echo "$SID1" > "$SIDFILE" vgremove $vg1 rm -f "$SIDFILE" # lvs: lvs in a foreign vg not reported # lvs --foreign: lvs in a foreign vg are reported SID1=sidfoofile1 SID2=sidfoofile2 echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg vgcreate $vg1 "$dev1" lvcreate -n $lv1 -l 2 $vg1 lvchange -an $vg1/$lv1 # normal lvs sees the vg and lv lvs >err grep $vg1 err grep $lv1 err # change the local system_id, making the vg foreign echo "$SID2" > "$SIDFILE" # normal lvs does not see the vg or lv lvs >err not grep $vg1 err not grep $lv1 err # lvs --foreign does see the vg and lv lvs --foreign >err grep $vg1 err grep $lv1 err # change the local system_id back so the vg can be removed echo "$SID1" > "$SIDFILE" lvremove $vg1/$lv1 vgremove $vg1 rm -f "$SIDFILE" # use extra_system_ids to read a foreign VG SID1=sidfoofile1 SID2=sidfoofile2 rm -f "$LVMLOCAL" echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg vgcreate $vg1 "$dev1" # normal vgs sees the vg vgs >err grep $vg1 err # change the local system_id, making the vg foreign echo "$SID2" > "$SIDFILE" # normal vgs doesn't see the vg vgs >err not grep $vg1 err # using --foreign we can see the vg vgs --foreign >err grep $vg1 err # add the first system_id to extra_system_ids so we can see the vg print_lvmlocal " extra_system_ids = [ $SID1 ] " vgs >err grep $vg1 err vgremove $vg1 rm -f "$LVMLOCAL" # vgcreate --systemid "" creates a vg without a system_id even if source is set SID1=sidfoofile1 echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg vgcreate --systemid "" $vg1 "$dev1" # normal vgs sees the vg vgs >err grep $vg1 err # our system_id is not displayed for the vg vgs -o+systemid >err not grep "$SID1" err vgremove $vg1 rm -f "$SIDFILE" # vgchange --systemid "" clears the system_id on owned vg SID1=sidfoofile1 echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg vgcreate $vg1 "$dev1" # normal vgs sees the vg vgs >err grep $vg1 err # the vg has our system_id vgs -o+systemid >err grep $SID1 err # clear the system_id vgchange --yes --systemid "" $vg1 # normal vgs sees the vg vgs >err grep $vg1 err # the vg does not have our system_id vgs -o+systemid >err not grep "$SID1" err vgremove $vg1 # vgchange --systemid does not set the system_id on foreign vg SID1=sidfoofile1 SID2=sidfoofile2 rm -f "$LVMLOCAL" echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg vgcreate $vg1 "$dev1" # normal vgs sees the vg vgs >err grep $vg1 err # change the local system_id, making the vg foreign echo "$SID2" > "$SIDFILE" # normal vgs doesn't see the vg vgs >err not grep $vg1 err # using --foreign we can see the vg vgs --foreign >err grep $vg1 err # 
cannot clear the system_id of the foreign vg not vgchange --yes --systemid "" $vg1 # cannot set the system_id of the foreign vg not vgchange --yes --systemid foo $vg1 # change our system_id back so we can remove the vg echo "$SID1" > "$SIDFILE" vgremove $vg1 # vgcfgbackup backs up foreign vg with --foreign SID1=sidfoofile1 SID2=sidfoofile2 rm -f "$LVMLOCAL" echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg vgcreate $vg1 "$dev1" # normal vgs sees the vg vgs >err grep $vg1 err # change the local system_id, making the vg foreign echo "$SID2" > "$SIDFILE" # normal vgs doesn't see the vg vgs >err not grep $vg1 err # using --foreign we can back up the vg not vgcfgbackup $vg1 vgcfgbackup --foreign $vg1 # change our system_id back so we can remove the vg echo "$SID1" > "$SIDFILE" vgremove $vg1 rm -f "$SIDFILE" # Test handling of bad system_id source configurations # The commands should proceed without a system_id. # Look at the warning/error messages. # vgcreate with source machineid, where no $etc/machine-id file exists if [ ! -e "$etc/machine-id" ]; then SID="" aux lvmconf "global/system_id_source = machineid" vgcreate $vg1 "$dev1" 2>&1 | tee err vgs -o+systemid $vg1 check vg_field $vg1 systemid $SID grep "No system ID found from system_id_source" err vgremove $vg1 fi # vgcreate with source uname, but uname is localhost # TODO: don't want to change the hostname on the test machine... # vgcreate with source lvmlocal, but no lvmlocal.conf file SID="" rm -f $LVMLOCAL aux lvmconf "global/system_id_source = lvmlocal" vgcreate $vg1 "$dev1" 2>&1 | tee err vgs -o+systemid $vg1 check vg_field $vg1 systemid $SID grep "No system ID found from system_id_source" err vgremove $vg1 # vgcreate with source lvmlocal, but no system_id = "x" entry SID="" print_lvmlocal # " system_id = $SID" aux lvmconf "global/system_id_source = lvmlocal" vgcreate $vg1 "$dev1" 2>&1 | tee err vgs -o+systemid $vg1 check vg_field $vg1 systemid $SID grep "No system ID found from system_id_source" err vgremove $vg1 # vgcreate with source lvmlocal, and empty string system_id = "" SID="" print_lvmlocal " system_id = \"\"" aux lvmconf "global/system_id_source = lvmlocal" vgcreate $vg1 "$dev1" 2>&1 | tee err vgs -o+systemid $vg1 check vg_field $vg1 systemid "$SID" grep "No system ID found from system_id_source" err vgremove $vg1 rm -f $LVMLOCAL # vgcreate with source file, but no system_id_file config SID="" rm -f "$SIDFILE" aux lvmconf "global/system_id_source = file" vgcreate $vg1 "$dev1" 2>&1 | tee err vgs -o+systemid $vg1 check vg_field $vg1 systemid "$SID" grep "No system ID found from system_id_source" err vgremove $vg1 # vgcreate with source file, but system_id_file does not exist SID="" rm -f "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" vgcreate $vg1 "$dev1" 2>&1 | tee err vgs -o+systemid $vg1 check vg_field $vg1 systemid "$SID" grep "No system ID found from system_id_source" err vgremove $vg1 # Test cases where lvmetad cache of a foreign VG are out of date # because the foreign owner has changed the VG. test ! -e LOCAL_LVMETAD && exit 0 # When a foreign vg is newer on disk than in lvmetad, using --foreign # should find the newer version. This simulates a foreign host changing # foreign vg by turning off lvmetad when we create an lv in the vg. 
SID1=sidfoofile1 SID2=sidfoofile2 echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg with an lv vgcreate $vg1 "$dev1" lvcreate -n $lv1 -l 2 -an $vg1 # normal vgs sees the vg and lv vgs >err grep $vg1 err check lv_exists $vg1 $lv1 # go around lvmetad to create another lv in the vg, # forcing the lvmetad copy to be older than on disk. aux lvmconf 'global/use_lvmetad = 0' lvcreate -n $lv2 -l 2 -an $vg1 aux lvmconf 'global/use_lvmetad = 1' # verify that the second lv is not in lvmetad lvs $vg1 >err grep $lv1 err not grep $lv2 err # change our system_id, making the vg foreign echo "$SID2" > "$SIDFILE" vgs >err not grep $vg1 err # using --foreign, we will get the latest vg from disk lvs --foreign $vg1 >err grep $vg1 err grep $lv1 err grep $lv2 err # change our system_id back to match the vg so it's not foreign echo "$SID1" > "$SIDFILE" lvremove $vg1/$lv1 lvremove $vg1/$lv2 vgremove $vg1 # vgimport should find the exported vg on disk even though # lvmetad's copy of the vg shows it's foreign. SID1=sidfoofile1 SID2=sidfoofile2 echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg with an lv vgcreate $vg1 "$dev1" lvcreate -n $lv1 -l 2 -an $vg1 # normal vgs sees the vg and lv vgs >err grep $vg1 err check lv_exists $vg1 $lv1 # go around lvmetad to export the vg so that lvmetad still # has the original vg owned by SID1 in its cache aux lvmconf 'global/use_lvmetad = 0' vgexport $vg1 aux lvmconf 'global/use_lvmetad = 1' # change the local system_id so the lvmetad copy of the vg is foreign echo "$SID2" > "$SIDFILE" # verify that lvmetad thinks the vg is foreign # (don't use --foreign to verify this because that will cause # the lvmetad cache to be updated, which we don't want yet) not vgs $vg1 # attempt to import the vg that has been exported, but # which lvmetad thinks is foreign vgimport $vg1 # verify that the imported vg has our system_id vgs -o+systemid $vg1 >err grep $vg1 err grep $SID2 err check lv_exists $vg1 $lv1 lvremove $vg1/$lv1 vgremove $vg1 rm -f "$SIDFILE" # pvscan --cache should cause the latest version of a foreign VG to be # cached in lvmetad. Without the --cache option, pvscan will see the old # version of the VG. SID1=sidfoofile1 SID2=sidfoofile2 echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg with an lv vgcreate $vg1 "$dev1" lvcreate -n $lv1 -l 2 -an $vg1 # normal vgs sees the vg and lv vgs >err grep $vg1 err check lv_exists $vg1 $lv1 # go around lvmetad to create another lv in the vg, # forcing the lvmetad copy to be older than on disk. 
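# Added descriptive comment, hedged (not from the original test): setting
# use_lvmetad to 0 makes the following lvcreate scan devices and write the VG
# metadata directly on disk without notifying the daemon, so lvmetad keeps
# serving its older cached copy of the VG - exactly the stale state this
# test wants to produce.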
aux lvmconf 'global/use_lvmetad = 0' lvcreate -n $lv2 -l 2 -an $vg1 aux lvmconf 'global/use_lvmetad = 1' # verify that the second lv is not in lvmetad lvs $vg1 >err grep $lv1 err not grep $lv2 err # verify that after pvscan without --cache, lvmetad still # reports the old version pvscan lvs $vg1 >err grep $lv1 err not grep $lv2 err # change our system_id, making the vg foreign echo "$SID2" > "$SIDFILE" not vgs $vg1 >err not grep $vg1 err # use pvscan --cache to update the foreign vg in lvmetad pvscan --cache not vgs $vg1 >err not grep $vg1 err # change our system_id back to SID1 so we can check that # lvmetad has the latest copy of the vg (without having # to use --foreign to check) echo "$SID1" > "$SIDFILE" vgs $vg1 >err grep $vg1 err lvs $vg1 >err grep $lv1 err grep $lv2 err lvremove $vg1/$lv1 lvremove $vg1/$lv2 vgremove $vg1 # repeat the same test for vgscan instead of pvscan SID1=sidfoofile1 SID2=sidfoofile2 echo "$SID1" > "$SIDFILE" aux lvmconf "global/system_id_source = file" \ "global/system_id_file = \"$SIDFILE\"" # create a vg with an lv vgcreate $vg1 "$dev1" lvcreate -n $lv1 -l 2 -an $vg1 # normal vgs sees the vg and lv vgs >err grep $vg1 err check lv_exists $vg1 $lv1 # go around lvmetad to create another lv in the vg, # forcing the lvmetad copy to be older than on disk. aux lvmconf 'global/use_lvmetad = 0' lvcreate -n $lv2 -l 2 -an $vg1 aux lvmconf 'global/use_lvmetad = 1' # verify that the second lv is not in lvmetad lvs $vg1 >err grep $lv1 err not grep $lv2 err # verify that after vgscan without --cache, lvmetad still # reports the old version vgscan lvs $vg1 >err grep $lv1 err not grep $lv2 err # change our system_id, making the vg foreign echo "$SID2" > "$SIDFILE" not vgs $vg1 >err not grep $vg1 err # use vgscan --cache to update the foreign vg in lvmetad vgscan --cache not vgs $vg1 >err not grep $vg1 err # change our system_id back to SID1 so we can check that # lvmetad has the latest copy of the vg (without having # to use --foreign to check) echo "$SID1" > "$SIDFILE" vgs $vg1 >err grep $vg1 err lvs $vg1 >err grep $lv1 err grep $lv2 err lvremove $vg1/$lv1 lvremove $vg1/$lv2 vgremove $vg1 LVM2.2.02.176/test/shell/lock-parallel.sh0000644000000000000120000000224013176752421016466 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014-2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test parallel use of lvm commands and check locks aren't dropped # RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1049296 SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest which mkfs.ext3 || skip which fsck || skip aux prepare_vg lvcreate -L10 -n $lv1 $vg lvcreate -l1 -n $lv2 $vg mkfs.ext3 "$DM_DEV_DIR/$vg/$lv1" # Slowdown PV for resized LV aux delay_dev "$dev1" 50 50 "$(get first_extent_sector "$dev1"):" lvresize -L-5 -r $vg/$lv1 & # Let's wait till resize starts for i in $(seq 1 300); do pgrep fsck && break sleep .1 done lvremove -f $vg/$lv2 wait aux enable_dev "$dev1" # Check removed $lv2 does not reappear not check lv_exists $vg $lv2 vgremove -ff $vg LVM2.2.02.176/test/shell/mda-rollback.sh0000644000000000000120000000163413176752421016302 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 3 vgcreate --metadatasize 128k $vg1 "$dev1" "$dev2" "$dev3" vgreduce $vg1 "$dev1" dd if="$dev1" of=badmda bs=256K count=1 vgextend $vg1 "$dev1" dd if=badmda of="$dev1" bs=256K count=1 # dev1 is part of vg1 (as witnessed by metadata on dev2 and dev3), but its mda # was corrupt (written over by a backup from time dev1 was an orphan) check pv_field "$dev1" vg_name $vg1 LVM2.2.02.176/test/shell/vgcfgbackup-usage.sh0000644000000000000120000000413113176752421017331 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 4 get_devs pvcreate --metadatacopies 0 "$dev4" # No automatic backup aux lvmconf "backup/backup = 0" # vgcfgbackup handles similar VG names (bz458941) vg1=${PREFIX}vg00 vg2=${PREFIX}vg01 vgcreate $vg1 "$dev1" vgcreate $vg2 "$dev2" # Enforces system backup test ! -e etc/backup/$vg1 test ! -e etc/backup/$vg2 vgcfgbackup test -e etc/backup/$vg1 test -e etc/backup/$vg2 aux lvmconf "backup/archive = 1" vgcfgbackup -f "bak-%s" >out grep "Volume group \"$vg1\" successfully backed up." out grep "Volume group \"$vg2\" successfully backed up." 
out # increase seqno lvcreate -an -Zn -l1 $vg1 invalid vgcfgrestore -f "bak-$vg1" $vg1-inv@lid invalid vgcfgrestore -f "bak-$vg1" $vg1 $vg2 vgcfgrestore -l $vg1 | tee out test "$(grep -c Description out)" -eq 2 vgcfgrestore -l -f "bak-$vg1" $vg1 vgremove -ff $vg1 $vg2 # vgcfgbackup correctly stores metadata with missing PVs # and vgcfgrestore able to restore them when device reappears pv1_uuid=$(get pv_field "$dev1" pv_uuid) pv2_uuid=$(get pv_field "$dev2" pv_uuid) vgcreate "$vg" "${DEVICES[@]}" lvcreate -l1 -n $lv1 $vg "$dev1" lvcreate -l1 -n $lv2 $vg "$dev2" lvcreate -l1 -n $lv3 $vg "$dev3" vgchange -a n $vg pvcreate -ff -y "$dev1" pvcreate -ff -y "$dev2" vgcfgbackup -f "backup.$$" $vg sed 's/flags = \[\"MISSING\"\]/flags = \[\]/' "backup.$$" > "backup.$$1" pvcreate -ff -y --norestorefile -u $pv1_uuid "$dev1" pvcreate -ff -y --norestorefile -u $pv2_uuid "$dev2" # Try to recover nonexisting vgname not vgcfgrestore -f "backup.$$1" ${vg}_nonexistent vgcfgrestore -f "backup.$$1" $vg vgchange -an $vg vgremove -f $vg LVM2.2.02.176/test/shell/lvcreate-missing.sh0000644000000000000120000000121213176752421017216 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 2 aux disable_dev "$dev1" not lvcreate -n "foo" $vg -l 1 aux enable_dev "$dev1" vgremove -ff $vg LVM2.2.02.176/test/shell/lvchange-partial-raid10.sh0000644000000000000120000000161313176752421020246 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux have_raid 1 3 2 || skip aux prepare_vg 4 # rhbz 889358 # Should be able to activate when RAID10 # has failed devs in different mirror sets. lvcreate --type raid10 -m 1 -i 2 -l 2 -n $lv1 $vg aux wait_for_sync $vg $lv1 lvchange -an $vg/$lv1 aux disable_dev "$dev1" "$dev3" lvchange -ay $vg/$lv1 --partial lvchange -an $vg/$lv1 aux enable_dev "$dev1" vgremove -ff $vg LVM2.2.02.176/test/shell/snapshot-reactivate.sh0000644000000000000120000000277313176752421017743 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Checking consistency of old-snapshot metadata after de/activation # Validates recent snapshot target kernel updates and error # is triggered by kernel 3.14-rc[1..5] # http://www.redhat.com/archives/dm-devel/2014-March/msg00005.html # SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest # Snapshot should remain unmodified check_s_() { check dev_md5sum $vg s #diff data "$DM_DEV_DIR/$vg/s" } which md5sum || skip aux prepare_vg # 8M file with some random data dd if=/dev/urandom of=data bs=1M count=1 conv=fdatasync dd if=data of=data bs=1M count=7 seek=1 conv=fdatasync echo "$(md5sum data | cut -d' ' -f1) $DM_DEV_DIR/$vg/s" >md5.${vg}-s lvcreate -aey -L 8M -n o $vg dd if=data of="$DM_DEV_DIR/$vg/o" bs=1M conv=fdatasync lvcreate -L 8M -s -n s $vg/o check_s_ dd if=data of="$DM_DEV_DIR/$vg/o" bs=1234567 count=1 skip=1 conv=fdatasync check_s_ lvchange -an $vg lvchange -ay $vg check_s_ dd if=data of="$DM_DEV_DIR/$vg/o" bs=1234567 count=2 skip=1 conv=fdatasync check_s_ lvchange -an $vg lvchange -ay $vg check_s_ vgremove -f $vg LVM2.2.02.176/test/shell/clvmd-restart.sh0000644000000000000120000000333113176752421016535 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2011-2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # set before test's clvmd is started, so it's passed in environ export LVM_CLVMD_BINARY=clvmd export LVM_BINARY=lvm SKIP_WITH_LVMLOCKD=1 SKIP_WITHOUT_CLVMD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest # only clvmd based test, skip otherwise read -r LOCAL_CLVMD < LOCAL_CLVMD # TODO read from build, for now hardcoded CLVMD_SOCKET="/var/run/lvm/clvmd.sock" restart_clvmd_() { "$LVM_CLVMD_BINARY" -S ls -la "$CLVMD_SOCKET" || true for i in $(seq 1 20) ; do test -S "$CLVMD_SOCKET" && break sleep .1 done # restarted clvmd has the same PID (no fork, only execvp) NEW_LOCAL_CLVMD=$(pgrep clvmd) test "$LOCAL_CLVMD" -eq "$NEW_LOCAL_CLVMD" } aux prepare_vg lvcreate -an --zero n -n $lv1 -l1 $vg lvcreate -an --zero n -n $lv2 -l1 $vg lvcreate -l1 $vg lvchange -aey $vg/$lv1 lvchange -aey $vg/$lv2 restart_clvmd_ # try restart once more restart_clvmd_ # FIXME: Hmm - how could we test exclusivity is preserved in singlenode ? lvchange -an $vg/$lv1 lvchange -aey $vg/$lv1 lvcreate -s -l3 -n snap $vg/$lv1 "$LVM_CLVMD_BINARY" -R vgchange -an $vg # Test what happens after 'reboot' kill "$LOCAL_CLVMD" while test -e "$CLVMD_PIDFILE"; do echo -n .; sleep .1; done # wait for the pid removal aux prepare_clvmd vgchange -ay $vg lvremove -f $vg/snap vgremove -ff $vg LVM2.2.02.176/test/shell/snapshot-cluster.sh0000644000000000000120000000141013176752421017260 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Testing renaming snapshots in cluster # https://bugzilla.redhat.com/show_bug.cgi?id=1136925 SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 1 lvcreate -aey -L1 -n $lv1 $vg lvcreate -s -L1 -n $lv2 $vg/$lv1 lvrename $vg/$lv2 $vg/$lv3 lvremove -f $vg/$lv1 vgremove -f $vg LVM2.2.02.176/test/shell/process-each-pvresize.sh0000644000000000000120000005452313176752421020200 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Exercise toollib process_each_pv' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 14 # # process_each_pv is used by a number of pv commands: # pvdisplay # pvresize # pvs # vgreduce # # # set up # # use dev10 instead of dev1 because a simple grep for # dev1 matches dev10,dev11,etc # vgcreate $vg1 "$dev10" vgcreate $vg2 "$dev2" "$dev3" "$dev4" "$dev5" vgcreate $vg3 "$dev6" "$dev7" "$dev8" "$dev9" pvchange --addtag V2D3 "$dev3" pvchange --addtag V2D4 "$dev4" pvchange --addtag V2D45 "$dev4" pvchange --addtag V2D5 "$dev5" pvchange --addtag V2D45 "$dev5" pvchange --addtag V3 "$dev6" "$dev7" "$dev8" "$dev9" pvchange --addtag V3D9 "$dev9" # orphan pvcreate "$dev11" # dev (a non-pv device) pvcreate "$dev12" pvremove "$dev12" # dev13 is intentionally untouched so we can # test that it is handled appropriately as a non-pv # orphan pvcreate "$dev14" # # test pvresize without orphans and without non-pv devs # # For pvs in vgs, pvresize setphysicalvolumesize does not give us # the size requested, but reduces the requested size by some # amount for alignment, metadata areas and pv headers. So, when we resize # to 30M, the result is 28M, and when we resize to 20M, the result is 16M. # For orphans, the resulting size is the same as requested. # We suspect that these reduction amounts might be inconsistent, and # depend on other changing factors, so it may be that we eventually # want to give up checking the exact resulting size, but just check # that the result is less than the original size.
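# Hedged illustration (not part of the original test): the gap between the
# requested and the reported size comes from the PV header, the embedded
# metadata area and extent alignment. A read-only report such as the one
# below would show the offsets directly; it is left commented out so the
# test flow is unchanged (assumes the dev_size and pe_start pvs fields):
#   pvs -o pv_name,dev_size,pv_size,pe_start --units m "$dev2" "$dev11"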
old_request="30.00m" old_reduced="28.00m" new_request="20.00m" new_reduced="16.00m" pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev9" pv_size $old_reduced # one pv pvresize --setphysicalvolumesize $new_request -y "$dev10" check pv_field "$dev10" pv_size $new_reduced # unchanged check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev9" pv_size $old_reduced # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" # two pvs in separate vgs pvresize --setphysicalvolumesize $new_request -y "$dev2" "$dev6" check pv_field "$dev2" pv_size $new_reduced check pv_field "$dev6" pv_size $new_reduced # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev9" pv_size $old_reduced # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" # one tag on one pv pvresize --setphysicalvolumesize $new_request -y @V2D4 check pv_field "$dev4" pv_size $new_reduced # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev9" pv_size $old_reduced # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" # one tag on all pvs in one vg pvresize --setphysicalvolumesize $new_request -y @V3 check pv_field "$dev6" pv_size $new_reduced check pv_field "$dev7" pv_size $new_reduced check pv_field "$dev8" pv_size $new_reduced check pv_field "$dev9" pv_size $new_reduced # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" # one tag on some pvs in one vg pvresize --setphysicalvolumesize $new_request -y @V2D45 check pv_field "$dev4" pv_size $new_reduced check pv_field "$dev5" pv_size $new_reduced # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced 
check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev9" pv_size $old_reduced # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" # one tag on multiple pvs in separate vgs pvchange --addtag V12 "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" pvresize --setphysicalvolumesize $new_request -y @V12 check pv_field "$dev10" pv_size $new_reduced check pv_field "$dev2" pv_size $new_reduced check pv_field "$dev3" pv_size $new_reduced check pv_field "$dev4" pv_size $new_reduced check pv_field "$dev5" pv_size $new_reduced # unchanged check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev9" pv_size $old_reduced # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" # one pv and one tag on different pv pvresize --setphysicalvolumesize $new_request -y "$dev10" @V3D9 check pv_field "$dev10" pv_size $new_reduced check pv_field "$dev9" pv_size $new_reduced # unchanged check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" # redundant pv and tag pvresize --setphysicalvolumesize $new_request -y "$dev9" @V3D9 check pv_field "$dev9" pv_size $new_reduced # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" # two tags on pvs in separate vgs pvresize --setphysicalvolumesize $new_request -y @V3D9 @V2D3 check pv_field "$dev9" pv_size $new_reduced check pv_field "$dev3" pv_size $new_reduced # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" # # test pvresize with orphans # old_request="30.00m" old_reduced="28.00m" old_orphan="30.00m" new_request="20.00m" new_reduced="16.00m" new_orphan="20.00m" pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" pvresize --setphysicalvolumesize $old_request -y "$dev11" "$dev14" check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced 
check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev9" pv_size $old_reduced check pv_field "$dev11" pv_size $old_orphan check pv_field "$dev14" pv_size $old_orphan # one orphan pvresize --setphysicalvolumesize $new_request -y "$dev11" check pv_field "$dev11" pv_size $new_orphan # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev9" pv_size $old_reduced check pv_field "$dev14" pv_size $old_orphan # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" pvresize --setphysicalvolumesize $old_request -y "$dev11" "$dev14" # two orphans pvresize --setphysicalvolumesize $new_request -y "$dev11" "$dev14" check pv_field "$dev11" pv_size $new_orphan check pv_field "$dev14" pv_size $new_orphan # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev9" pv_size $old_reduced # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" pvresize --setphysicalvolumesize $old_request -y "$dev11" "$dev14" # one orphan, one tag pvresize --setphysicalvolumesize $new_request -y @V3D9 "$dev14" check pv_field "$dev9" pv_size $new_reduced check pv_field "$dev14" pv_size $new_orphan # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev11" pv_size $old_orphan # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" pvresize --setphysicalvolumesize $old_request -y "$dev11" "$dev14" # one pv, one orphan, one tag pvresize --setphysicalvolumesize $new_request -y @V3D9 "$dev14" "$dev10" check pv_field "$dev9" pv_size $new_reduced check pv_field "$dev10" pv_size $new_reduced check pv_field "$dev14" pv_size $new_orphan # unchanged check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev11" pv_size $old_orphan # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" pvresize --setphysicalvolumesize $old_request -y "$dev11" "$dev14" # # test pvresize with non-pv devs # # one dev (non-pv) not pvresize --setphysicalvolumesize $new_request -y "$dev13" # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field 
"$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev9" pv_size $old_reduced check pv_field "$dev11" pv_size $old_orphan check pv_field "$dev14" pv_size $old_orphan # one orphan and one dev (non-pv) not pvresize --setphysicalvolumesize $new_request -y "$dev14" "$dev13" check pv_field "$dev14" pv_size $new_orphan # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev9" pv_size $old_reduced check pv_field "$dev11" pv_size $old_orphan # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" pvresize --setphysicalvolumesize $old_request -y "$dev11" "$dev14" # one pv and one dev (non-pv) not pvresize --setphysicalvolumesize $new_request -y "$dev9" "$dev13" check pv_field "$dev9" pv_size $new_reduced # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev11" pv_size $old_orphan check pv_field "$dev14" pv_size $old_orphan # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" pvresize --setphysicalvolumesize $old_request -y "$dev11" "$dev14" # one tag and one dev (non-pv) not pvresize --setphysicalvolumesize $new_request -y @V3D9 "$dev13" check pv_field "$dev9" pv_size $new_reduced # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev11" pv_size $old_orphan check pv_field "$dev14" pv_size $old_orphan # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" pvresize --setphysicalvolumesize $old_request -y "$dev11" "$dev14" # one pv, one orphan, one tag, one dev not pvresize --setphysicalvolumesize $new_request -y @V3D9 "$dev13" "$dev14" "$dev10" check pv_field "$dev9" pv_size $new_reduced check pv_field "$dev10" pv_size $new_reduced check pv_field "$dev14" pv_size $new_orphan # unchanged check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev11" pv_size $old_orphan # reset back to old size pvresize --setphysicalvolumesize $old_request 
-y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" pvresize --setphysicalvolumesize $old_request -y "$dev11" "$dev14" # # pvresize including pvs without mdas # pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" pvresize --setphysicalvolumesize $old_request -y "$dev11" "$dev14" check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev9" pv_size $old_reduced check pv_field "$dev11" pv_size $old_orphan check pv_field "$dev14" pv_size $old_orphan # one pv without mda pvresize --setphysicalvolumesize $new_request -y "$dev2" check pv_field "$dev2" pv_size $new_reduced # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev9" pv_size $old_reduced check pv_field "$dev11" pv_size $old_orphan check pv_field "$dev14" pv_size $old_orphan # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" pvresize --setphysicalvolumesize $old_request -y "$dev11" "$dev14" # two pvs without mdas pvresize --setphysicalvolumesize $new_request -y "$dev6" "$dev7" check pv_field "$dev6" pv_size $new_reduced check pv_field "$dev7" pv_size $new_reduced # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev9" pv_size $old_reduced check pv_field "$dev11" pv_size $old_orphan check pv_field "$dev14" pv_size $old_orphan # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" pvresize --setphysicalvolumesize $old_request -y "$dev11" "$dev14" # one pv with mda and one pv without mda pvresize --setphysicalvolumesize $new_request -y "$dev8" "$dev9" check pv_field "$dev8" pv_size $new_reduced check pv_field "$dev9" pv_size $new_reduced # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev11" pv_size $old_orphan check pv_field "$dev14" pv_size $old_orphan # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" pvresize --setphysicalvolumesize $old_request -y "$dev11" "$dev14" # one orphan with mda pvresize --setphysicalvolumesize $new_request -y "$dev11" check pv_field "$dev11" pv_size $new_orphan # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field 
"$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev9" pv_size $old_reduced check pv_field "$dev14" pv_size $old_orphan # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" pvresize --setphysicalvolumesize $old_request -y "$dev11" "$dev14" # one orphan without mda pvresize --setphysicalvolumesize $new_request -y "$dev14" check pv_field "$dev14" pv_size $new_orphan # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev9" pv_size $old_reduced check pv_field "$dev11" pv_size $old_orphan # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" pvresize --setphysicalvolumesize $old_request -y "$dev11" "$dev14" # one orphan with mda and one orphan without mda pvresize --setphysicalvolumesize $new_request -y "$dev14" "$dev11" check pv_field "$dev11" pv_size $new_orphan check pv_field "$dev14" pv_size $new_orphan # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced check pv_field "$dev8" pv_size $old_reduced check pv_field "$dev9" pv_size $old_reduced # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" pvresize --setphysicalvolumesize $old_request -y "$dev11" "$dev14" # one pv with mda and one pv without mda, and # one orphan with mda and one orphan without mda pvresize --setphysicalvolumesize $new_request -y "$dev8" "$dev9" "$dev14" "$dev11" check pv_field "$dev8" pv_size $new_reduced check pv_field "$dev9" pv_size $new_reduced check pv_field "$dev11" pv_size $new_orphan check pv_field "$dev14" pv_size $new_orphan # unchanged check pv_field "$dev10" pv_size $old_reduced check pv_field "$dev2" pv_size $old_reduced check pv_field "$dev3" pv_size $old_reduced check pv_field "$dev4" pv_size $old_reduced check pv_field "$dev5" pv_size $old_reduced check pv_field "$dev6" pv_size $old_reduced check pv_field "$dev7" pv_size $old_reduced # reset back to old size pvresize --setphysicalvolumesize $old_request -y "$dev10" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8" "$dev9" pvresize --setphysicalvolumesize $old_request -y "$dev11" "$dev14" LVM2.2.02.176/test/shell/snapshot-usage.sh0000644000000000000120000001300313176752421016704 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # no automatic extensions please SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest MKFS=mkfs.ext2 which $MKFS || skip fill() { dd if=/dev/zero of="$DM_DEV_DIR/${2:-$vg1/lvol0}" bs=$1 count=1 oflag=direct || \ die "Snapshot does not fit $1" } cleanup_tail() { test -z "$SLEEP_PID" || kill $SLEEP_PID || true wait vgremove -ff $vg1 || true vgremove -ff $vg aux teardown } TSIZE=15P aux can_use_16T || TSIZE=15T # With different snapshot target drivers we may obtain different results. # Older targets have a metadata leak bug which needs extra compensation. # Ancient targets do not even provide separate info for metadata. EXPECT1="16.00k" EXPECT2="512.00k" EXPECT3="32.00k" EXPECT4="66.67" if aux target_at_least dm-snapshot 1 10 0 ; then # Extra metadata size EXPECT4="0.00" if aux target_at_least dm-snapshot 1 12 0 ; then # When the leak is fixed, expect smaller sizes EXPECT1="12.00k" EXPECT2="384.00k" EXPECT3="28.00k" fi fi aux prepare_pvs 1 get_devs vgcreate -s 4M "$vg" "${DEVICES[@]}" # Play with 1 extent lvcreate -aey -l1 -n $lv $vg # 100%LV is not supported for snapshot fail lvcreate -s -l 100%LV -n snap $vg/$lv 2>&1 | tee out grep 'Please express size as %FREE, %ORIGIN, %PVS or %VG' out # 100%ORIGIN needs to have enough space for all data and needs to round-up lvcreate -s -l 100%ORIGIN -n $lv1 $vg/$lv # everything needs to fit fill 4M $vg/$lv1 lvremove -f $vg # Automatically activates exclusively in cluster lvcreate --type snapshot -s -l 100%FREE -n $lv $vg --virtualsize $TSIZE aux extend_filter_LVMTEST aux lvmconf "activation/snapshot_autoextend_percent = 20" \ "activation/snapshot_autoextend_threshold = 50" # Check usability with smallest (1k) extent size ($lv has 15P) pvcreate --yes --setphysicalvolumesize 4T "$DM_DEV_DIR/$vg/$lv" trap 'cleanup_tail' EXIT vgcreate -s 1K $vg1 "$DM_DEV_DIR/$vg/$lv" # Play with small 1k 128 extents lvcreate -aey -L128K -n $lv $vg1 # 100%ORIGIN needs to have enough space for all data lvcreate -s -l 100%ORIGIN -n snap100 $vg1/$lv # everything needs to fit fill 128k $vg1/snap100 # 50%ORIGIN needs to have enough space for 50% of data lvcreate -s -l 50%ORIGIN -n snap50 $vg1/$lv fill 64k $vg1/snap50 lvcreate -s -l 25%ORIGIN -n snap25 $vg1/$lv fill 32k $vg1/snap25 # Check we do not provide too much extra space not fill 33k $vg1/snap25 lvs -a $vg1 lvremove -f $vg1 # Test virtual snapshot over /dev/zero lvcreate --type snapshot -V50 -L10 -n $lv1 -s $vg1 CHECK_ACTIVE="active" test !
-e LOCAL_CLVMD || CHECK_ACTIVE="local exclusive" check lv_field $vg1/$lv1 lv_active "$CHECK_ACTIVE" lvchange -an $vg1 # On cluster snapshot gets exclusive activation lvchange -ay $vg1 check lv_field $vg1/$lv1 lv_active "$CHECK_ACTIVE" # Test removal of opened (but unmounted) snapshot (device busy) for a while SLEEP_PID=$(aux hold_device_open $vg1 $lv1 60) # Opened virtual snapshot device is not removable # it should retry device removal for a few seconds not lvremove -f $vg1/$lv1 kill $SLEEP_PID SLEEP_PID= # Wait for killed task, so there is no device holder wait lvremove -f $vg1/$lv1 check lv_not_exists $vg1 $lv1 # Check border size lvcreate -aey -L4095G $vg1 lvcreate -s -L100K $vg1/lvol0 fill 1K check lv_field $vg1/lvol1 data_percent "12.00" lvremove -ff $vg1 # Create 1KB snapshot, does not need to be active here lvcreate -an -Zn -l1 -n $lv1 $vg1 not lvcreate -s -l1 $vg1/$lv1 not lvcreate -s -l3 $vg1/$lv1 lvcreate -s -l30 -n $lv2 $vg1/$lv1 check lv_field $vg1/$lv2 size "$EXPECT1" not lvcreate -s -c512 -l512 $vg1/$lv1 lvcreate -s -c128 -l1700 -n $lv3 $vg1/$lv1 # 3 * 128 check lv_field $vg1/$lv3 size "$EXPECT2" lvremove -ff $vg1 lvcreate -aey -l20 $vg1 lvcreate -s -l12 $vg1/lvol0 # Fill 1KB -> 100% snapshot (1x 4KB chunk) fill 1K check lv_field $vg1/lvol1 data_percent "100.00" # Check it resizes 100% full valid snapshot to fit threshold lvextend --use-policies $vg1/lvol1 check lv_field $vg1/lvol1 data_percent "50.00" fill 4K lvextend --use-policies $vg1/lvol1 check lv_field $vg1/lvol1 size "24.00k" lvextend -l+33 $vg1/lvol1 check lv_field $vg1/lvol1 size "$EXPECT3" fill 20K lvremove -f $vg1 # Check snapshot really deletes COW header for read-only snapshot # Test needs special relation between chunk size and extent size # This test expects extent size 1K aux lvmconf "allocation/wipe_signatures_when_zeroing_new_lvs = 1" lvcreate -aey -L4 -n $lv $vg1 lvcreate -c 8 -s -L1 -n snap $vg1/$lv # Populate snapshot #dd if=/dev/urandom of="$DM_DEV_DIR/$vg1/$lv" bs=4096 count=10 $MKFS "$DM_DEV_DIR/$vg1/$lv" lvremove -f $vg1/snap # Undeleted header would trigger attempt to access # beyond end of COW device # Fails to create when chunk size is different lvcreate -s -pr -l12 -n snap $vg1/$lv # When header is undelete, fails to read snapshot without read errors #dd if="$DM_DEV_DIR/$vg1/snap" of=/dev/null bs=1M count=2 fsck -n "$DM_DEV_DIR/$vg1/snap" # This test would trigger read of weird percentage for undeleted header # And since older snapshot target counts with metadata sectors # we have 2 valid results (unsure about correct version number) check lv_field $vg1/snap data_percent "$EXPECT4" vgremove -ff $vg1 LVM2.2.02.176/test/shell/test-partition.sh0000644000000000000120000000152113176752421016733 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Testcase for bugzilla #621173 # excercises partition table scanning code path # SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 LVM_TEST_CONFIG_DEVICES="types = [\"device-mapper\", 142]" . 
lib/inittest which sfdisk || skip aux prepare_pvs 1 30 pvs "$dev1" # create small partition table echo "1 2" | sfdisk --force "$dev1" aux notify_lvmetad "$dev1" not pvs "$dev1" LVM2.2.02.176/test/shell/zz-lvmlockd-dlm-remove.sh0000644000000000000120000000151613176752421020272 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Remove the dlm test setup' . lib/inittest [ -z "$LVM_TEST_LOCK_TYPE_DLM" ] && skip; # FIXME: collect debug logs (only if a test failed?) # lvmlockctl -d > lvmlockd-debug.txt # dlm_tool dump > dlm-debug.txt lvmlockctl --stop-lockspaces sleep 1 killall lvmlockd sleep 1 killall lvmlockd || true sleep 1 systemctl stop dlm systemctl stop corosync LVM2.2.02.176/test/shell/lvchange-rebuild-raid.sh0000644000000000000120000001157113176752421020103 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux have_raid 1 3 2 || skip aux prepare_vg 8 get_devs _sync() { aux enable_dev "${DEVICES[@]}" aux wait_for_sync $vg $lv1 test "$#" -eq 0 || check raid_leg_status $vg $lv1 "$@" # restore to delay_dev tables for all devices aux restore_from_devtable "${DEVICES[@]}" } # Delay legs so that rebuilding status characters can be read for d in "${DEVICES[@]}" do aux delay_dev "$d" 0 50 "$(get first_extent_sector "$d")" done # rhbz 1064592 ############################################## # Create an 8-way striped raid10 with 4 mirror # groups and rebuild selected PVs. lvcreate --type raid10 -m 1 -i 4 -l 2 -n $lv1 $vg _sync # Rebuild 1st and 2nd device would rebuild a # whole mirror group and needs to be rejected. not lvchange --yes --rebuild "$dev1" --rebuild "$dev2" $vg/$lv1 not check raid_leg_status $vg $lv1 "aAaAAAAA" _sync "AAAAAAAA" # Rebuild 1st and 3rd device from different mirror groups is fine. lvchange --yes --rebuild "$dev1" --rebuild "$dev3" $vg/$lv1 aux have_raid 1 9 && check raid_leg_status $vg $lv1 "aAaAAAAA" _sync "AAAAAAAA" # Rebuild devices 1, 3, 6 from different mirror groups is fine. lvchange --yes --rebuild "$dev1" --rebuild "$dev3" --rebuild "$dev6" $vg/$lv1 aux have_raid 1 9 && check raid_leg_status $vg $lv1 "aAaAAaAA" _sync "AAAAAAAA" # Rebuild devices 1, 3, 5 and 6 with 5+6 being # being a whole mirror group needs to be rejected. not lvchange --yes --rebuild "$dev1" --rebuild "$dev3" --rebuild "$dev6" --rebuild "$dev5" $vg/$lv1 not check raid_leg_status $vg $lv1 "aAaAaaAA" _sync "AAAAAAAA" # Rebuild devices 1, 3, 5 and 7 from different mirror groups is fine. 
lvchange --yes --rebuild "$dev1" --rebuild "$dev3" --rebuild "$dev5" --rebuild "$dev7" $vg/$lv1 aux have_raid 1 9 && check raid_leg_status $vg $lv1 "aAaAaAaA" _sync # Rebuild devices 2, 4, 6 and 8 from different mirror groups is fine. lvchange --yes --rebuild "$dev2" --rebuild "$dev4" --rebuild "$dev6" --rebuild "$dev8" $vg/$lv1 aux have_raid 1 9 && check raid_leg_status $vg $lv1 "AaAaAaAa" _sync "AAAAAAAA" ############################################## # Create an 8-legged raid1 and rebuild selected PVs lvremove --yes $vg/$lv1 lvcreate --yes --type raid1 -m 7 -l 2 -n $lv1 $vg _sync "AAAAAAAA" # Rebuilding all raid1 legs needs to be rejected. not lvchange --yes --rebuild "$dev1" --rebuild "$dev2" --rebuild "$dev3" --rebuild "$dev4" \ --rebuild "$dev5" --rebuild "$dev6" --rebuild "$dev7" --rebuild "$dev8" $vg/$lv1 not check raid_leg_status $vg $lv1 "aaaaaaaa" _sync "AAAAAAAA" # Rebuilding all but the raid1 master leg is fine. lvchange --yes --rebuild "$dev2" --rebuild "$dev3" --rebuild "$dev4" \ --rebuild "$dev5" --rebuild "$dev6" --rebuild "$dev7" --rebuild "$dev8" $vg/$lv1 aux have_raid 1 9 && check raid_leg_status $vg $lv1 "Aaaaaaaa" _sync "AAAAAAAA" # Rebuilding the raid1 master leg is fine. lvchange --yes --rebuild "$dev1" $vg/$lv1 aux have_raid 1 9 && check raid_leg_status $vg $lv1 "aAAAAAAA" _sync "AAAAAAAA" # Rebuild legs on devices 2, 4, 6 and 8 is fine. lvchange --yes --rebuild "$dev2" --rebuild "$dev4" --rebuild "$dev6" --rebuild "$dev8" $vg/$lv1 aux have_raid 1 9 && check raid_leg_status $vg $lv1 "AaAaAaAa" _sync "AAAAAAAA" ############################################## # Create an 6-legged raid6 and rebuild selected PVs lvremove --yes $vg/$lv1 lvcreate --yes --type raid6 -i 4 -l 2 -n $lv1 $vg _sync "AAAAAA" # Rebuilding all raid6 stripes needs to be rejected. not lvchange --yes --rebuild "$dev1" --rebuild "$dev2" --rebuild "$dev3" \ --rebuild "$dev4" --rebuild "$dev5" --rebuild "$dev6" $vg/$lv1 not check raid_leg_status $vg $lv1 "aaaaaa" _sync "AAAAAA" # Rebuilding more than 2 raid6 stripes needs to be rejected. not lvchange --yes --rebuild "$dev2" --rebuild "$dev4" --rebuild "$dev6" $vg/$lv1 not check raid_leg_status $vg $lv1 "AaAaAa" _sync "AAAAAA" # Rebuilding any 1 raid6 stripe is fine. lvchange --yes --rebuild "$dev2" $vg/$lv1 aux have_raid 1 9 && check raid_leg_status $vg $lv1 "AaAAAA" _sync lvchange --yes --rebuild "$dev5" $vg/$lv1 aux have_raid 1 9 && check raid_leg_status $vg $lv1 "AAAAaA" _sync "AAAAAA" # Rebuilding any 2 raid6 stripes is fine. lvchange --yes --rebuild "$dev2" --rebuild "$dev4" $vg/$lv1 aux have_raid 1 9 && check raid_leg_status $vg $lv1 "AaAaAA" _sync "AAAAAA" lvchange --yes --rebuild "$dev1" --rebuild "$dev5" $vg/$lv1 aux have_raid 1 9 && check raid_leg_status $vg $lv1 "aAAAaA" _sync "AAAAAA" vgremove -ff $vg LVM2.2.02.176/test/shell/lvcreate-usage.sh0000644000000000000120000001615613176752421016666 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # 'Exercise some lvcreate diagnostics' SKIP_WITH_LVMLOCKD=1 . 
lib/inittest aux prepare_pvs 4 get_devs aux pvcreate --metadatacopies 0 "$dev1" aux vgcreate "$vg" "${DEVICES[@]}" invalid lvcreate --type free -l1 -n $lv1 $vg 2>err grep "Invalid argument for --type" err invalid lvcreate --type $RANDOM -l1 -n $lv1 $vg invalid lvcreate --type unknown -l1 -n $lv1 $vg invalid lvcreate -L10000000000000000000 -n $lv $vg 2>&1 | tee err grep "Size is too big" err invalid lvcreate -L+-10 -n $lv $vg 2>&1 | tee err grep "Multiple sign" err invalid lvcreate -L-.1 -n $lv $vg 2>&1 | tee err grep "Size may not be negative" err invalid lvcreate -L..1 -n $lv $vg 2>&1 | tee err grep "Can't parse size" err lvcreate --type linear -aey -m0 -l1 -n $lv1 $vg lvcreate --type snapshot -l1 -n $lv2 $vg/$lv1 # Supporting decimal point with size lvcreate -L.1 -n $lv3 $vg # Reject repeated invocation (run 2 times) (bz178216) lvcreate -n $lv -l 4 $vg not lvcreate -n $lv -l 4 $vg lvremove -ff $vg/$lv # Try to remove it again - should fail (but not segfault) not lvremove -ff $vg/$lv # Reject a negative stripe_size invalid lvcreate -L 64m -n $lv -i2 --stripesize -4 $vg 2>err; grep "may not be negative" err # Reject a too-large stripesize invalid lvcreate -L 64m -n $lv -i2 --stripesize 4294967291 $vg 2>err grep "Stripe size cannot be larger than" err # w/single stripe succeeds with diagnostics to stdout lvcreate -L 64m -n $lv -i1 --stripesize 4 $vg 2> err | tee out grep "Ignoring stripesize argument with single stripe" out lvdisplay $vg lvremove -ff $vg # w/default (64KB) stripe size succeeds with diagnostics to stdout lvcreate -L 64m -n $lv -i2 $vg > out grep "Using default stripesize" out lvdisplay $vg check lv_field $vg/$lv stripesize "64.00k" lvremove -ff $vg # Reject an invalid number of stripes invalid lvcreate -L 64m -n $lv -i129 $vg 2>err grep "Number of stripes (129) must be between 1 and 128" err # Reject an invalid stripe size invalid lvcreate -L 64m -n $lv -i2 --stripesize 3 $vg 2>err grep "Invalid stripe size" err # Verify that the LV was not created via lvdisplay empty output test -z "$(lvdisplay $vg)" # Setting max_lv works. 
(bz490298) check vg_field $vg max_lv "0" vgchange -l 3 $vg check vg_field $vg max_lv "3" lvcreate -aey -l1 -n $lv1 $vg lvcreate -l1 -s -n $lv2 $vg/$lv1 lvcreate -l1 -n $lv3 $vg fail lvcreate -l1 -n $lv4 $vg lvremove -ff $vg/$lv3 # Check snapshot of inactive origin lvchange -an $vg/$lv1 lvcreate -l1 -s -n $lv3 $vg/$lv1 fail lvcreate -l1 -n $lv4 $vg fail lvcreate -l1 --type mirror -m1 -n $lv4 $vg lvremove -ff $vg/$lv3 lvcreate -aey -l1 --type mirror -m1 -n $lv3 $vg not lvcreate -l1 -n $lv4 $vg not lvcreate -l1 --type mirror -m1 -n $lv4 $vg lvconvert -m0 $vg/$lv3 lvconvert -m2 --type mirror -i 1 $vg/$lv3 lvconvert -m1 $vg/$lv3 fail vgchange -l 2 check vg_field $vg max_lv "3" vgchange -l 4 check vg_field $vg max_lv "4" lvremove -ff $vg vgchange -l 0 $vg check vg_field $vg max_lv "0" # Rejects invalid chunksize, accepts between 4K and 512K # and validate origin_size lvcreate -aey -L 32m -n $lv1 $vg not lvcreate -L 8m -n $lv2 -s --chunksize 3k $vg/$lv1 not lvcreate -L 8m -n $lv2 -s --chunksize 1024k $vg/$lv1 lvcreate -L 8m -n $lv2 -s --chunksize 4k $vg/$lv1 check lv_field $vg/$lv2 chunk_size "4.00k" check lv_field $vg/$lv2 origin_size "32.00m" lvcreate -L 8m -n $lv3 -s --chunksize 512k $vg/$lv1 check lv_field $vg/$lv3 chunk_size "512.00k" check lv_field $vg/$lv3 origin_size "32.00m" lvremove -f $vg # Mirror regionsize must be # - nonzero (bz186013) # - a power of 2 and a multiple of page size # - <= size of LV invalid lvcreate --type mirror -m 1 -L 32m -n $lv -R 0 $vg 2>err grep "may not be zero" err invalid lvcreate --type mirror -m 1 -L 32m -n $lv -R 11k $vg invalid lvcreate --type mirror -m 1 -L 32m -n $lv -R 1k $vg lvcreate -aey -L 32m -n $lv --regionsize 128m --type mirror -m 1 $vg check lv_field $vg/$lv regionsize "32.00m" lvremove -f $vg lvcreate -aey -L 32m -n $lv --regionsize 4m --type mirror -m 1 $vg check lv_field $vg/$lv regionsize "4.00m" # -m0 is creating non-mirrored segment and give info about redundant option lvcreate -m 0 -l1 -n $lv1 $vg 2>&1 | tee err grep "Redundant" err check lv_field $vg/$lv1 segtype "linear" lvremove -ff $vg # Old --type snapshot works with -s lvcreate --type snapshot -s -V64 -L32 -n $lv1 $vg check lv_field $vg/$lv1 segtype "linear" lvcreate --type snapshot -V64 -L32 -n $lv2 $vg check lv_field $vg/$lv2 segtype "linear" lvremove -ff $vg # --virtualoriginsize always makes old snapshot lvcreate -s --virtualoriginsize 64m -L 32m -n $lv1 $vg check lv_field $vg/$lv1 segtype "linear" lvrename $vg/$lv1 $vg/$lv2 lvcreate -s --virtualoriginsize 64m -L 32m -n $lv1 $vg lvchange -a n $vg/$lv1 lvremove -ff $vg/$lv1 lvremove -ff $vg # readahead default (auto), none, #, auto lvcreate -L 8 -n $lv1 $vg check lv_field $vg/$lv1 lv_read_ahead "auto" lvcreate -L 8 -n $lv2 --readahead none $vg check lv_field $vg/$lv2 lv_read_ahead "0" check lv_field $vg/$lv2 lv_kernel_read_ahead "0" lvcreate -L 8 -n $lv3 --readahead 8k $vg check lv_field $vg/$lv3 lv_read_ahead "8.00k" check lv_field $vg/$lv3 lv_kernel_read_ahead "8.00k" lvcreate -L 8 -n $lv4 --readahead auto $vg check lv_field $vg/$lv4 lv_read_ahead "auto" check lv_field $vg/$lv4 lv_kernel_read_ahead "128.00k" lvcreate -L 8 -n $lv5 -i2 --stripesize 16k --readahead auto $vg check lv_field $vg/$lv5 lv_read_ahead "auto" check lv_field $vg/$lv5 lv_kernel_read_ahead "128.00k" lvcreate -L 8 -n $lv6 -i2 --stripesize 128k --readahead auto $vg check lv_field $vg/$lv6 lv_read_ahead "auto" check lv_field $vg/$lv6 lv_kernel_read_ahead "512.00k" lvremove -ff $vg # # Validate --major --minor, we need to know VG, thus failing # fail lvcreate 
-My --major 234 -l1 $vg # cannot specify --major or --minor with -Mn fail lvcreate -Mn --major 234 -l1 $vg fail lvcreate --persistent n --minor 234 -l1 $vg # out-of-range minor value fail lvcreate --minor 9999999 -l1 $vg if aux kernel_at_least 2 4 0; then # On >2.4 we ignore --major lvcreate --major 234 -l1 $vg 2>&1 | tee err; grep "Ignoring" err # Try some bigger possibly unused minor if test ! -d /sys/block/dm-2345; then lvcreate --minor 2345 -l1 -n $lv1 $vg check lv_field $vg/$lv1 lv_kernel_minor "2345" fi if test ! -d /sys/block/dm-23456; then lvcreate -My --minor 23456 -j 122 -l1 -n $lv2 $vg check lv_field $vg/$lv2 lv_kernel_minor "23456" fi fi # 2.4 lvremove -f $vg # prohibited names for i in pvmove snapshot ; do invalid lvcreate -l1 -n ${i}1 $vg done for i in _cdata _cmeta _mimage _mlog _pmspare _tdata _tmeta _vorigin ; do invalid lvcreate -l1 -n s_${i}_1 $vg done # Check invalid error for pool-only options invalid lvcreate --poolmetadataspare y -l1 $vg invalid lvcreate --poolmetadatasize 10 -l1 $vg invalid lvcreate --discards passdown -l1 $vg LVM2.2.02.176/test/shell/inconsistent-metadata.sh0000644000000000000120000000471313176752421020251 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 3 12 get_devs lvcreate -aye --type mirror -m 1 -l 1 -n mirror $vg lvcreate -l 1 -n resized $vg lvchange -a n $vg/mirror aux backup_dev "${DEVICES[@]}" init() { aux restore_dev "${DEVICES[@]}" not check lv_field $vg/resized lv_size "8.00m" lvresize -L 8192K $vg/resized aux restore_dev "$dev1" } # vgscan fixes up metadata (needs --cache option for direct scan if lvmetad is used) cache="" test -e LOCAL_LVMETAD && cache="--cache" init vgscan $cache 2>&1 | tee cmd.out grep "Inconsistent metadata found for VG $vg" cmd.out test -e LOCAL_LVMETAD && vgrename $vg foo && vgrename foo $vg # trigger a write vgscan $cache 2>&1 | tee cmd.out not grep "Inconsistent metadata found for VG $vg" cmd.out check lv_field $vg/resized lv_size "8.00m" # only vgscan would have noticed metadata inconsistencies when lvmetad is active if test ! 
-e LOCAL_LVMETAD; then # vgdisplay fixes init vgdisplay $vg 2>&1 | tee cmd.out grep "Inconsistent metadata found for VG $vg" cmd.out vgdisplay $vg 2>&1 | tee cmd.out not grep "Inconsistent metadata found for VG $vg" cmd.out check lv_field $vg/resized lv_size "8.00m" # lvs fixes up init lvs $vg 2>&1 | tee cmd.out grep "Inconsistent metadata found for VG $vg" cmd.out vgdisplay $vg 2>&1 | tee cmd.out not grep "Inconsistent metadata found for VG $vg" cmd.out check lv_field $vg/resized lv_size "8.00m" # vgs fixes up as well init vgs $vg 2>&1 | tee cmd.out grep "Inconsistent metadata found for VG $vg" cmd.out vgs $vg 2>&1 | tee cmd.out not grep "Inconsistent metadata found for VG $vg" cmd.out check lv_field $vg/resized lv_size "8.00m" fi echo Check auto-repair of failed vgextend - metadata written to original pv but not new pv vgremove -f $vg pvremove -ff "${DEVICES[@]}" pvcreate "${DEVICES[@]}" aux backup_dev "$dev2" vgcreate $vg "$dev1" vgextend $vg "$dev2" aux restore_dev "$dev2" vgscan $cache should check compare_fields vgs $vg vg_mda_count pvs "$dev2" vg_mda_count vgremove -ff $vg LVM2.2.02.176/test/shell/lvresize-raid10.sh0000644000000000000120000000167213176752421016675 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux have_raid 1 3 0 || skip aux prepare_vg 5 for deactivate in true false; do # Extend RAID10 (2-stripes, 2-mirror) lvcreate --type raid10 -m 1 -i 2 -l 2 -n $lv1 $vg test $deactivate && lvchange -an $vg/$lv1 lvresize -l +2 $vg/$lv1 #check raid_images_contiguous $vg $lv1 # Reduce RAID10 (2-stripes, 2-mirror) should lvresize -y -l -2 $vg/$lv1 #check raid_images_contiguous $vg $lv1 lvremove -ff $vg done LVM2.2.02.176/test/shell/lvconvert-mirror-basic-2.sh0000644000000000000120000000103613176752421020516 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA . ./shell/lvconvert-mirror-basic.sh test_many 2 vgremove -ff $vg LVM2.2.02.176/test/shell/lvcreate-thin-external-size.sh0000644000000000000120000000446713176752421021316 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test unaligned size of external origin and thin pool chunk size SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest which cmp || skip # # Main # # Test needs thin-pool target with unaligned ext-orig size support aux have_thin 1 13 0 || skip aux prepare_pvs 2 640 get_devs # Use 8K extent size vgcreate -s 8K "$vg" "${DEVICES[@]}" # Prepare some numeric pattern with ~64K size seq -s ' ' -w 0 10922 > 64K d1="$DM_DEV_DIR/$vg/$lv1" d2="$DM_DEV_DIR/$vg/$lv2" # Prepare external origin LV with size not being a multiple of thin pool chunk size lvcreate -l47 -n $lv1 $vg # Fill end with pattern dd if=64K of="$d1" bs=8192 seek=45 count=2 conv=fdatasync # Switch to read-only volume lvchange -an $vg/$lv1 lvchange -pr $vg/$lv1 lvcreate -L2M -T $vg/pool -c 192K lvcreate -s $vg/$lv1 --name $lv2 --thinpool $vg/pool # Check the tail of $lv2 matches $lv1 dd if="$d2" of=16K bs=8192 skip=45 count=2 cmp -n 16384 -l 64K 16K # Now extend and rewrite lvextend -l+2 $vg/$lv2 dd if=64K of="$d2" bs=8192 seek=46 count=3 conv=fdatasync dd if="$d2" of=24K bs=8192 skip=46 count=3 iflag=direct cmp -n 24576 -l 64K 24K # Consumes 2 192K chunks -> 66.67% check lv_field $vg/$lv2 data_percent "66.67" lvreduce -f -l-24 $vg/$lv2 dd if=64K of="$d2" bs=8192 seek=24 count=1 conv=fdatasync dd if="$d2" of=8K bs=8192 skip=24 count=1 iflag=direct cmp -n 8192 -l 64K 8K # Check extension still works lvextend -l+2 $vg/$lv2 lvremove -f $vg/pool lvcreate -L256M -T $vg/pool -c 64M lvcreate -s $vg/$lv1 --name $lv2 --thinpool $vg/pool lvextend -l+2 $vg/$lv2 dd if=64K of="$d2" bs=8192 seek=45 count=4 conv=fdatasync dd if="$d2" of=32K bs=8192 skip=45 count=4 iflag=direct cmp -n 32768 -l 64K 32K lvextend -L+64M $vg/$lv2 # Consumes 64M chunk -> 50% check lv_field $vg/$lv2 data_percent "50.00" vgremove -ff $vg LVM2.2.02.176/test/shell/lvextend-thin.sh0000644000000000000120000000141213176752421016535 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest aux have_thin 1 0 0 || skip aux prepare_vg 3 lvcreate -i2 -l2 -T $vg/pool2 lvextend -l+2 $vg/pool2 "$dev2" "$dev3" should lvextend -l+100%FREE $vg/pool2 vgremove -ff $vg LVM2.2.02.176/test/shell/vgmerge-operation.sh0000644000000000000120000000442513176752421017405 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2007-2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Test vgmerge operation' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 4 64 # 'vgmerge succeeds with single linear LV in source VG' vgcreate $vg1 "$dev1" "$dev2" vgcreate $vg2 "$dev3" "$dev4" lvcreate -l 4 -n $lv1 $vg1 "$dev1" vgchange -an $vg1 check pvlv_counts $vg1 2 1 0 check pvlv_counts $vg2 2 0 0 vgmerge $vg2 $vg1 check pvlv_counts $vg2 4 1 0 vgremove -f $vg2 # 'vgmerge succeeds with single linear LV in source and destination VG' vgcreate $vg1 "$dev1" "$dev2" vgcreate $vg2 "$dev3" "$dev4" lvcreate -l 4 -n $lv1 $vg1 lvcreate -l 4 -n $lv2 $vg2 vgchange -an $vg1 vgchange -an $vg2 check pvlv_counts $vg1 2 1 0 check pvlv_counts $vg2 2 1 0 vgmerge $vg2 $vg1 check pvlv_counts $vg2 4 2 0 vgremove -f $vg2 # 'vgmerge succeeds with linear LV + snapshots in source VG' vgcreate $vg1 "$dev1" "$dev2" vgcreate $vg2 "$dev3" "$dev4" lvcreate -aey -l 16 -n $lv1 $vg1 lvcreate -l 4 -s -n $lv2 $vg1/$lv1 vgchange -an $vg1 check pvlv_counts $vg1 2 2 1 check pvlv_counts $vg2 2 0 0 vgmerge $vg2 $vg1 check pvlv_counts $vg2 4 2 1 lvremove -f $vg2/$lv2 vgremove -f $vg2 # 'vgmerge succeeds with mirrored LV in source VG' vgcreate $vg1 "$dev1" "$dev2" "$dev3" vgcreate $vg2 "$dev4" lvcreate -aey -l 4 -n $lv1 --type mirror -m1 $vg1 vgchange -an $vg1 check pvlv_counts $vg1 3 1 0 check pvlv_counts $vg2 1 0 0 vgmerge $vg2 $vg1 check pvlv_counts $vg2 4 1 0 lvremove -f $vg2/$lv1 vgremove -f $vg2 # 'vgmerge rejects LV name collision' vgcreate $vg1 "$dev1" "$dev2" vgcreate $vg2 "$dev3" "$dev4" lvcreate -l 4 -n $lv1 $vg1 lvcreate -l 4 -n $lv1 $vg2 vgchange -an $vg1 check pvlv_counts $vg1 2 1 0 check pvlv_counts $vg2 2 1 0 not vgmerge $vg2 $vg1 2>err grep "Duplicate logical volume name \"$lv1\" in \"$vg2\" and \"$vg1" err check pvlv_counts $vg1 2 1 0 check pvlv_counts $vg2 2 1 0 vgremove -f $vg1 $vg2 LVM2.2.02.176/test/shell/snapshot-merge.sh0000644000000000000120000000756713176752421016721 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010-2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 . 
lib/inittest # test if snapshot-merge target is available aux target_at_least dm-snapshot-merge 1 0 0 || skip which mkfs.ext3 || skip lvdev_() { echo "$DM_DEV_DIR/$1/$2" } snap_lv_name_() { echo "${1}_snap" } setup_merge_() { local VG_NAME=$1 local LV_NAME=$2 local NUM_EXTRA_SNAPS=${3:-0} local BASE_SNAP_LV_NAME BASE_SNAP_LV_NAME=$(snap_lv_name_ $LV_NAME) lvcreate -aey -n $LV_NAME -l 50%FREE $VG_NAME lvcreate -s -n $BASE_SNAP_LV_NAME -l 20%FREE ${VG_NAME}/${LV_NAME} mkfs.ext3 "$(lvdev_ $VG_NAME $LV_NAME)" if [ $NUM_EXTRA_SNAPS -gt 0 ]; then for i in $(seq 1 $NUM_EXTRA_SNAPS); do lvcreate -s -n ${BASE_SNAP_LV_NAME}_${i} -l 20%ORIGIN ${VG_NAME}/${LV_NAME} done fi } aux prepare_vg 1 100 mkdir test_mnt # test full merge of a single LV setup_merge_ $vg $lv1 # make sure lvconvert --merge requires explicit LV listing not lvconvert --merge lvconvert --merge "$vg/$(snap_lv_name_ "$lv1")" lvremove -f $vg/$lv1 setup_merge_ $vg $lv1 lvconvert --mergesnapshot "$vg/$(snap_lv_name_ "$lv1")" lvremove -f $vg/$lv1 # test that an actively merging snapshot may not be removed setup_merge_ $vg $lv1 lvconvert -i+100 --merge --background "$vg/$(snap_lv_name_ "$lv1")" not lvremove -f "$vg/$(snap_lv_name_ "$lv1")" lvremove -f $vg/$lv1 # "onactivate merge" test setup_merge_ $vg $lv1 mount "$(lvdev_ $vg $lv1)" test_mnt lvconvert --merge "$vg/$(snap_lv_name_ "$lv1")" # -- refresh LV while FS is still mounted (merge must not start), # verify 'snapshot-origin' target is still being used lvchange --refresh $vg/$lv1 umount test_mnt dm_table $vg-$lv1 | grep " snapshot-origin " # -- refresh LV to start merge (now that FS is unmounted), # an active merge uses the 'snapshot-merge' target lvchange --refresh $vg/$lv1 # check whether it's still merging - or maybe got already merged (slow test) dm_table $vg-$lv1 | grep " snapshot-merge " || dm_table $vg-$lv1 | grep " linear " # -- don't care if merge is still active; lvremove at this point # may test stopping an active merge lvremove -f $vg/$lv1 # "onactivate merge" test # -- deactivate/remove after disallowed merge attempt, tests # to make sure preload of origin's metadata is _not_ performed setup_merge_ $vg $lv1 mount "$(lvdev_ $vg $lv1)" test_mnt lvconvert --merge "$vg/$(snap_lv_name_ "$lv1")" # -- refresh LV while FS is still mounted (merge must not start), # verify 'snapshot-origin' target is still being used lvchange --refresh $vg/$lv1 umount test_mnt dm_table $vg-$lv1 | grep " snapshot-origin " >/dev/null lvremove -f $vg/$lv1 # test multiple snapshot merge; tests copy out that is driven by merge setup_merge_ $vg $lv1 1 lvconvert --merge "$vg/$(snap_lv_name_ "$lv1")" lvremove -f $vg/$lv1 # test merging multiple snapshots that share the same tag setup_merge_ $vg $lv1 setup_merge_ $vg $lv2 lvchange --addtag this_is_a_test "$vg/$(snap_lv_name_ "$lv1")" lvchange --addtag this_is_a_test "$vg/$(snap_lv_name_ "$lv2")" lvconvert --merge @this_is_a_test lvs $vg | tee out not grep "$(snap_lv_name_ "$lv1")" out not grep "$(snap_lv_name_ "$lv2")" out lvremove -f $vg/$lv1 $vg/$lv2 # FIXME following tests would need to poll merge progress, via periodic lvs? # Background processes don't lend themselves to lvm testsuite... # test: onactivate merge of a single lv # test: do onactivate, deactivate the origin LV, reactivate the LV, merge should resume # test: multiple onactivate merge vgremove -f $vg LVM2.2.02.176/test/shell/vgmerge-usage.sh0000644000000000000120000000416413176752421016511 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2011 Red Hat, Inc. All rights reserved. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # 'Test vgmerge command options for validity' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 4 # 'vgmerge normal operation' # ensure ordering does not matter vgcreate $vg1 "$dev1" "$dev2" vgcreate $vg2 "$dev3" "$dev4" vgmerge $vg1 $vg2 vgremove $vg1 vgcreate $vg2 "$dev1" "$dev2" vgcreate $vg1 "$dev3" "$dev4" vgmerge $vg2 $vg1 vgremove $vg2 # 'vgmerge rejects duplicate vg name' vgcreate $vg1 "$dev1" "$dev2" vgcreate $vg2 "$dev3" "$dev4" not vgmerge $vg1 $vg1 2>err grep "Duplicate volume group name \"$vg1\"\$" err vgremove $vg1 $vg2 # 'vgmerge rejects vgs with incompatible extent_size' vgcreate --physicalextentsize 4M $vg1 "$dev1" "$dev2" vgcreate --physicalextentsize 8M $vg2 "$dev3" "$dev4" not vgmerge $vg1 $vg2 2>err grep "Extent sizes differ" err vgremove $vg1 $vg2 # 'vgmerge rejects vgmerge because max_pv is exceeded' vgcreate --maxphysicalvolumes 2 $vg1 "$dev1" "$dev2" vgcreate --maxphysicalvolumes 2 $vg2 "$dev3" "$dev4" not vgmerge $vg1 $vg2 2>err grep "Maximum number of physical volumes (2) exceeded" err vgremove $vg1 $vg2 # 'vgmerge rejects vg with active lv' vgcreate $vg1 "$dev1" "$dev2" vgcreate $vg2 "$dev3" "$dev4" lvcreate -l 4 -n lv1 $vg2 not vgmerge $vg1 $vg2 2>err grep "Logical volumes in \"$vg2\" must be inactive" err vgremove -f $vg1 $vg2 # 'vgmerge rejects vgmerge because max_lv is exceeded' vgcreate --maxlogicalvolumes 2 $vg1 "$dev1" "$dev2" vgcreate --maxlogicalvolumes 2 $vg2 "$dev3" "$dev4" lvcreate -l 4 -n lv1 $vg1 lvcreate -l 4 -n lv2 $vg1 lvcreate -l 4 -n lv3 $vg2 vgchange -an $vg1 $vg2 not vgmerge $vg1 $vg2 2>err grep "Maximum number of logical volumes (2) exceeded" err vgremove -f $vg1 $vg2 LVM2.2.02.176/test/shell/pvremove-usage.sh0000644000000000000120000000455613176752421016725 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 3 pvcreate "$dev1" pvcreate --metadatacopies 0 "$dev2" pvcreate --metadatacopies 2 "$dev3" # Fails without give argument not pvremove pvremove "$dev2" # failing, but still removing everything what can be removed # is somewhat odd as default, what do we have -f for? 
pvs | not grep "$dev2" pvs -a | grep "$dev2" # bz1108394 no crash on nonPV label listing pvs -a -o+devices pvcreate --metadatacopies 0 "$dev2" # check pvremove refuses to remove pv in a vg vgcreate $vg "$dev1" "$dev2" not pvremove "$dev2" "$dev3" for mdacp in 0 1 2; do # check pvremove truly wipes the label (pvscan wont find) (---metadatacopies $mdacp) pvcreate --metadatacopies $mdacp "$dev3" pvremove "$dev3" # try to remove agail - should fail cleanly not pvremove "$dev3" pvscan | not grep "$dev3" # bz179473 refuse to wipe non-PV device without -f not pvremove "$dev3" pvremove -f "$dev3" # reset setup vgremove -ff $vg pvcreate --metadatacopies $mdacp "$dev1" pvcreate "$dev2" vgcreate $vg "$dev1" "$dev2" # pvremove -f fails when pv in a vg (---metadatacopies $mdacp) not pvremove -f "$dev1" 2>&1 | tee out grep "is used" out pvs "$dev1" # pvremove -ff fails without confirmation when pv in a vg (---metadatacopies $mdacp) not pvremove -ff "$dev1" 2>&1 | tee out grep "is used" out # pvremove -ff succeds with confirmation when pv in a vg (---metadatacopies $mdacp) pvremove -ffy "$dev1" 2>&1 | tee out grep "is used" out not pvs "$dev1" vgreduce --removemissing $vg pvcreate --metadatacopies $mdacp "$dev1" vgextend $vg "$dev1" # pvremove -ff -y is sufficient when pv in a vg (---metadatacopies $mdacp)" ' pvremove -ff -y "$dev1" vgreduce --removemissing $vg pvcreate --metadatacopies $mdacp "$dev1" vgextend $vg "$dev1" done vgremove -ff $vg LVM2.2.02.176/test/shell/process-each-pv-nomda-all.sh0000644000000000000120000000255613176752421020617 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Test process_each_pv with zero mda' SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 14 # for vg1 pvcreate "$dev10" # for vg2 pvcreate "$dev2" --metadatacopies 0 pvcreate "$dev3" pvcreate "$dev4" pvcreate "$dev5" # for vg3 pvcreate "$dev6" --metadatacopies 0 pvcreate "$dev7" --metadatacopies 0 pvcreate "$dev8" --metadatacopies 0 pvcreate "$dev9" # orphan with mda pvcreate "$dev11" # orphan without mda pvcreate "$dev14" --metadatacopies 0 # non-pv devs # dev12 # dev13 vgcreate $SHARED $vg1 "$dev10" vgcreate $SHARED $vg2 "$dev2" "$dev3" "$dev4" "$dev5" vgcreate $SHARED $vg3 "$dev6" "$dev7" "$dev8" "$dev9" pvs -a | tee err grep "$dev10" err grep "$dev2" err grep "$dev3" err grep "$dev4" err grep "$dev5" err grep "$dev6" err grep "$dev7" err grep "$dev8" err grep "$dev9" err grep "$dev11" err grep "$dev12" err grep "$dev13" err grep "$dev14" err vgremove $vg1 $vg2 $vg3 LVM2.2.02.176/test/shell/vgchange-partial.sh0000644000000000000120000000224113176752421017161 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 2 aux disable_dev "$dev1" # # Test for allowable metadata changes # addtag_ARG # deltag_ARG vgchange --addtag foo $vg vgchange --deltag foo $vg # # Test for disallowed metadata changes # # maxphysicalvolumes_ARG not vgchange -p 10 $vg # resizeable_ARG not vgchange -x n $vg # uuid_ARG not vgchange -u $vg # physicalextentsize_ARG not vgchange -s 2M $vg # clustered_ARG not vgchange -c y $vg # alloc_ARG not vgchange --alloc anywhere $vg # vgmetadatacopies_ARG not vgchange --vgmetadatacopies 2 $vg # # Ensure that allowed args don't cause disallowed args to get through # not vgchange -p 10 --addtag foo $vg vgremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-raid-regionsize.sh0000644000000000000120000000526113176752421021065 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest which mkfs.ext4 || skip aux have_raid 1 9 0 || skip aux prepare_vg 6 function _test_regionsize { local type=$1 local regionsize=$2 local regionsize_str=$3 local vg=$4 local lv=$5 lvconvert --type "$type" --yes -R "$regionsize" "$vg/$lv" check lv_field $vg/$lv regionsize "$regionsize_str" not lvconvert --regionsize "$regionsize" "$vg/$lv" 2>err grep "is already" err fsck -fn "$DM_DEV_DIR/$vg/$lv" } function _test_regionsizes { # FIXME: have to provide raid type or region size ain't set until cli validation merged local type=$1 # Test RAID regionsize changes _test_regionsize "$type" 128K "128.00k" $vg $lv1 _test_regionsize "$type" 256K "256.00k" $vg $lv1 not _test_regionsize "$type" 1K "1.00k" $vg $lv1 _test_regionsize "$type" 1m "1.00m" $vg $lv1 not _test_regionsize "$type" 1G "1.00g" $vg $lv1 not _test_regionsize "$type" 16K "16.00k" $vg $lv1 } # Create 3-way raid1 lvcreate --yes -aey --type raid1 -m 2 -R64K -L8M -n $lv1 $vg check lv_field $vg/$lv1 segtype "raid1" check lv_field $vg/$lv1 stripes 3 check lv_field $vg/$lv1 regionsize "64.00k" mkfs.ext4 "$DM_DEV_DIR/$vg/$lv1" aux wait_for_sync $vg $lv1 fsck -fn "$DM_DEV_DIR/$vg/$lv1" _test_regionsizes raid1 # Clean up lvremove --yes $vg if aux have_raid 1 10 1; then # Create 5-way raid6 lvcreate --yes -aey --type raid6 -i 3 --stripesize 128K -R 256K -L8M -n $lv1 $vg check lv_field $vg/$lv1 segtype "raid6" check lv_field $vg/$lv1 stripes 5 check lv_field $vg/$lv1 stripesize "128.00k" check lv_field $vg/$lv1 regionsize "256.00k" mkfs.ext4 "$DM_DEV_DIR/$vg/$lv1" aux wait_for_sync $vg $lv1 fsck -fn "$DM_DEV_DIR/$vg/$lv1" _test_regionsizes raid6 # Clean up lvremove --yes $vg else echo "Skipping RAID6 tests" fi if aux have_raid 1 10 1; then # Create 6-way raid01 lvcreate --yes -aey --type raid10 -i 3 -m 1 --stripesize 128K -R 256K -L8M -n $lv1 $vg check lv_field $vg/$lv1 segtype "raid10" check lv_field $vg/$lv1 stripes 6 check lv_field $vg/$lv1 stripesize "128.00k" check lv_field $vg/$lv1 regionsize "256.00k" mkfs.ext4 -t 
ext4 "$DM_DEV_DIR/$vg/$lv1" aux wait_for_sync $vg $lv1 fsck -fn "$DM_DEV_DIR/$vg/$lv1" _test_regionsizes raid10 else echo "Skipping RAID10 tests" fi vgremove -ff $vg LVM2.2.02.176/test/shell/lvextend-snapshot-dmeventd.sh0000644000000000000120000000346013176752421021243 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010-2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest extend() { lvextend --use-policies --config "activation { snapshot_autoextend_threshold = $1 }" $vg/snap } write_() { dd if=/dev/zero of="$DM_DEV_DIR/$vg/snap" bs=1k count=$2 seek=$1 oflag=direct } percent_() { get lv_field $vg/snap snap_percent | cut -d. -f1 } wait_for_change_() { # dmeventd only checks every 10 seconds :( for i in $(seq 1 25) ; do test "$(percent_)" != "$1" && return sleep 1 done return 1 # timeout } aux prepare_dmeventd aux prepare_vg 2 lvcreate -aey -L16M -n base $vg lvcreate -s -L4M -n snap $vg/base write_ 0 1000 test 24 -eq "$(percent_)" lvchange --monitor y $vg/snap write_ 1000 1700 pre=$(percent_) # Normally the usage should be ~66% here, however on slower systems # dmeventd could be actually 'fast' enough to have COW already resized now # so mark test skipped if we are below 50% by now test "$pre" -gt 50 || skip wait_for_change_ $pre test "$pre" -gt "$(percent_)" # check that a second extension happens; we used to fail to extend when the # utilisation ended up between THRESH and (THRESH + 10)... see RHBZ 754198 # (the utilisation after the write should be 57 %) write_ 2700 2000 pre=$(percent_) # Mark test as skipped if already resized... test "$pre" -gt 70 || skip wait_for_change_ $pre test "$pre" -gt "$(percent_)" vgremove -f $vg LVM2.2.02.176/test/shell/vgcreate-many-pvs.sh0000644000000000000120000000316613176752421017324 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest # # Test to exercise larger number of PVs in a VG # Related to https://bugzilla.redhat.com/show_bug.cgi?id=736027 # # Original measured times of the whole test case before # and with the acceleration patch from my bare metal hw # (Lenovo T61, 2.2GHz, 4G RAM, rawhide 2015-03-06 with ndebug kernel): # # export LVM_TEST_PVS=300 # # make check_local ~52sec (U:29s, S:13s) # make check_lvmetad ~20sec (U: 4s, S: 5s) # # With patch from 2015-03-06: # # make check_local ~30sec (U:10s, S:12s) # make check_lvmetad ~20sec (U: 4s, S: 5s) # # TODO: extend test suite to monitor performance and report regressions... 
# Use just 100 to get 'decent' speed on slow boxes LVM_TEST_PVS=${LVM_TEST_PVS:-100} #aux prepare_devs $LVM_TEST_PVS 8 #vgcreate $vg $(< DEVICES) # prepare_vg is now directly using steps above aux prepare_vg $LVM_TEST_PVS # Check we have decent speed with typical commands vgs lvs pvs lvcreate -l1 -n $lv1 $vg lvremove -f $vg/$lv1 vgremove -ff $vg # # TODO Turn this into another test case: # #for i in $(seq 1 $LVM_TEST_PVS); do # vgcreate ${vg}$i "$DM_DEV_DIR/mapper/${PREFIX}pv$i" #done LVM2.2.02.176/test/shell/snapshot-rename.sh0000644000000000000120000000142513176752421017054 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Testing renaming snapshots (had problem in cluster) # https://bugzilla.redhat.com/show_bug.cgi?id=1136925 SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 1 lvcreate -aey -L1 -n $lv1 $vg lvcreate -s -L1 -n $lv2 $vg/$lv1 lvrename $vg/$lv2 $vg/$lv3 lvremove -f $vg/$lv1 vgremove -f $vg LVM2.2.02.176/test/shell/lvchange-cache.sh0000644000000000000120000000676213176752421016611 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux have_cache 1 3 0 || skip aux prepare_vg 3 aux lvmconf 'global/cache_disabled_features = [ "policy_smq" ]' lvcreate --type cache-pool -an -v -L 2 -n cpool $vg lvcreate -H -L 4 -n corigin --cachepool $vg/cpool lvcreate -n noncache -l 1 $vg # cannot change major minor for pools not lvchange --yes -M y --minor 235 --major 253 $vg/cpool not lvchange -M n $vg/cpool not lvchange --cachepolicy mq $vg/noncache not lvchange --cachesettings foo=bar $vg/noncache lvchange --cachepolicy cleaner $vg/corigin check lv_field $vg/corigin kernel_cache_policy "cleaner" # Skip these test on older cache driver as it shows errors with these lvchanges # device-mapper: space map common: index_check failed: blocknr 17179869216 != wanted 11 if aux have_cache 1 5 0 ; then lvchange --cachepolicy mq --cachesettings migration_threshold=333 $vg/corigin # TODO once mq->smq happens we will get here some 0 for mq settings check lv_field $vg/corigin kernel_cache_policy "mq" get lv_field $vg/corigin kernel_cache_settings | grep 'migration_threshold=333' lvchange --refresh $vg/corigin get lv_field $vg/corigin kernel_cache_settings | grep 'migration_threshold=333' lvchange -an $vg lvchange -ay $vg get lv_field $vg/corigin kernel_cache_settings | grep 'migration_threshold=333' lvchange --cachesettings 'migration_threshold = 233 sequential_threshold = 13' $vg/corigin get lv_field $vg/corigin kernel_cache_settings | tee out grep 'migration_threshold=233' out if grep 'sequential_threshold=13' out ; then lvchange --cachesettings 'migration_threshold = 17' $vg/corigin get lv_field $vg/corigin kernel_cache_settings | tee out grep 'migration_threshold=17' out grep 'sequential_threshold=13' out lvchange --cachesettings 'migration_threshold = default' $vg/corigin get lv_field $vg/corigin kernel_cache_settings | tee out grep 'migration_threshold=2048' out grep 'sequential_threshold=13' out lvchange --cachesettings 'migration_threshold = 233 sequential_threshold = 13 random_threshold = 1' $vg/corigin lvchange --cachesettings 'random_threshold = default migration_threshold = default' $vg/corigin get lv_field $vg/corigin kernel_cache_settings | tee out grep 'migration_threshold=2048' out grep 'sequential_threshold=13' out grep 'random_threshold=4' out lvchange --cachesettings migration_threshold=233 --cachesettings sequential_threshold=13 --cachesettings random_threshold=1 $vg/corigin get lv_field $vg/corigin kernel_cache_settings | tee out grep 'migration_threshold=233' out grep 'sequential_threshold=13' out grep 'random_threshold=1' out lvchange --cachesettings random_threshold=default --cachesettings migration_threshold=default $vg/corigin get lv_field $vg/corigin kernel_cache_settings | tee out grep 'migration_threshold=2048' out grep 'sequential_threshold=13' out grep 'random_threshold=4' out else # When MQ is emulated by SMQ policy it does not hold settings. # So just skip testing of param changes when sequential_threshold=0 grep 'sequential_threshold=0' out fi fi # have_cache 1 5 0 vgremove -f $vg LVM2.2.02.176/test/shell/lvconvert-thin-external.sh0000644000000000000120000001136313176752421020554 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test conversion to thin external origin SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest which mkfs.ext2 || skip which fsck || skip # # Main # aux have_thin 1 5 0 || skip aux prepare_pvs 2 64 get_devs vgcreate "$vg" --metadatasize 128K -s 64K "${DEVICES[@]}" if test 0 -eq 1 ; then # FIXME: needs patch to allow inactive old-snap creation lvcreate -l10 -T $vg/pool lvcreate -an -pr --zero n -l10 --name $lv1 $vg lvcreate -s $vg/$lv1 --name $lv2 --thinpool $vg/pool vgchange -an $vg # oldstyle read-only inactive snapshot lvcreate -an -s $vg/$lv2 -l10 -p r --name $lv3 lvcreate -s $vg/$lv3 --name $lv4 --thinpool $vg/pool lvremove -ff $vg/$lv3 lvremove -ff $vg fi #lvcreate -L20M --name orig $vg #lvconvert -T --thinpool $vg/pool $vg/orig #lvcreate -s -aey -L10M $vg/orig #lvremove -f $vg #exit 0 lvcreate -l10 -T $vg/pool # Can't convert pool to external origin lvcreate -l10 -T $vg/pool1 -c 192k not lvconvert -T --thinpool $vg/pool1 $vg/pool --originname origin # Create pool1 chunk_size unaligned LV and check failing conversion lvcreate -l2 -n $lv1 $vg # Newer thin-pool target (>= 1.13) supports unaligned external origin aux lvmconf 'global/thin_disabled_features = [ "external_origin_extend" ]' not lvconvert -T --thinpool $vg/pool1 $vg/$lv1 lvremove -f $vg/pool1 $vg/$lv1 # create plain LV (will be used for external origin) lvcreate -L8M -n $lv1 $vg # Can't convert same LV to the thin pool and thin volume not lvconvert --thinpool $vg/$lv1 -T $vg/$lv1 check lv_field $vg/$lv1 segtype linear mkfs.ext2 "$DM_DEV_DIR/$vg/$lv1" mkdir mnt mount "$DM_DEV_DIR/$vg/$lv1" mnt dd if=/dev/zero of=mnt/test1 bs=1M count=1 # convert plain LV into thin external snapshot volume # during conversion dd above could be still flushed lvconvert -T --originname extorg --thinpool $vg/pool $vg/$lv1 check active $vg $lv1 # FIXME handling attr is ... 
get lv_field $vg/extorg attr | grep "^ori" check inactive $vg extorg touch mnt/test umount mnt # check fs is without errors fsck -n "$DM_DEV_DIR/$vg/$lv1" lvchange -aey $vg/extorg lvchange -an $vg/$lv1 check active $vg extorg check inactive $vg $lv1 # fsck in read-only mode fsck -n "$DM_DEV_DIR/$vg/extorg" not lvresize -l+8 $vg/extorg not lvresize -l-4 $vg/extorg not lvchange -p rw $vg/extorg #lvresize -L+8M $vg/$lv1 #lvresize -L-4M $vg/$lv1 #lvchange -p r $vg/$lv1 #lvchange -p rw $vg/$lv1 lvchange -aey $vg lvs -a -o+origin_size,seg_size $vg # Chain external origins lvconvert --type thin --originname extorg1 --thinpool $vg/pool $vg/extorg check inactive $vg extorg1 lvconvert --originname extorg2 --thinpool $vg/pool -T $vg/extorg1 check inactive $vg extorg1 check inactive $vg extorg2 lvchange -an $vg/extorg lvchange -ay $vg/extorg1 lvcreate -l4 -s $vg/$lv1 -n $lv2 lvcreate -l8 -s $vg/extorg -n $lv3 lvcreate -l12 -s $vg/extorg1 -n $lv4 lvcreate -l16 -s $vg/extorg2 -n $lv5 #vgchange -aey $vg #lvremove -f $vg/extorg2 #exit 0 # Converting old-snapshot into external origin is not supported not lvconvert -T --thinpool $vg/pool --originname lv5origin $vg/$lv4 lvs -a -o +segtype $vg check lv_field $vg/$lv1 segtype thin check lv_field $vg/$lv2 segtype linear check lv_field $vg/$lv3 segtype linear check lv_field $vg/$lv4 segtype linear check lv_field $vg/$lv5 segtype linear check lv_field $vg/extorg segtype thin check lv_field $vg/extorg1 segtype thin check lv_field $vg/extorg2 segtype linear vgchange -ay $vg lvs -a -o+origin_size,seg_size $vg lvchange -an $vg/extorg2 check inactive $vg extorg2 # Remove all volumes dependent on external origin lvs -a -o+origin_size,seg_size,segtype $vg lvremove -f $vg/extorg2 # Only pool is left check vg_field $vg lv_count 1 lvremove -ff $vg # Test conversion to the pool and thin external at the same time (rhbz #1003461) lvcreate -l50 -n pool $vg lvcreate -l100 -n thin $vg lvconvert --yes --thin --thinpool $vg/pool $vg/thin --originname thin-origin check lv_field $vg/thin segtype thin check lv_field $vg/thin-origin segtype linear lvremove -ff $vg # Test conversion with non-zeroing thin-pool, should not WARN about zeroing lvcreate -l50 -n pool $vg lvcreate -l100 -n thin $vg lvconvert --yes --thin --thinpool $vg/pool $vg/thin --zero n --originname thin-origin 2>&1 | tee out not grep "not zeroed" out check lv_field $vg/pool zero "" vgremove -ff $vg LVM2.2.02.176/test/shell/process-each-pv.sh0000644000000000000120000005174313176752421016757 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Exercise toollib process_each_pv' SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 14 # # process_each_pv is used by a number of pv commands: # pvdisplay # pvresize # pvs # # process-each-pvresize.sh covers pvresize. # process-each-vgreduce.sh covers vgreduce. 
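# For orientation (editorial note, not part of the original test), the
# selection forms exercised below look roughly like this; the devices and
# tags are the ones created in the set up section that follows:
#   pvdisplay -s "$dev10"       # a single PV, given by device path
#   pvdisplay -s @V2D3 @V2D45   # PVs selected by tag
#   pvs -a                      # every device, including non-PVs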
# # # set up # # use use dev10 instead of dev1 because simple grep for # dev1 matchines dev10,dev11,etc # vgcreate $SHARED $vg1 "$dev10" vgcreate $SHARED $vg2 "$dev2" "$dev3" "$dev4" "$dev5" vgcreate $SHARED $vg3 "$dev6" "$dev7" "$dev8" "$dev9" pvchange --addtag V2D3 "$dev3" pvchange --addtag V2D4 "$dev4" pvchange --addtag V2D45 "$dev4" pvchange --addtag V2D5 "$dev5" pvchange --addtag V2D45 "$dev5" pvchange --addtag V3 "$dev6" "$dev7" "$dev8" "$dev9" pvchange --addtag V3D9 "$dev9" # orphan pvcreate "$dev11" # dev (a non-pv device) pvcreate "$dev12" pvremove "$dev12" # dev13 is intentionally untouched so we can # test that it is handled appropriately as a non-pv # orphan pvcreate "$dev14" # # test pvdisplay # # pv in vg pvdisplay -s "$dev10" | tee err grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # pv not in vg (one orphan) pvdisplay -s "$dev11" | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # dev is not a pv not pvdisplay -s "$dev12" | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # two pvs in different vgs pvdisplay -s "$dev10" "$dev2" | tee err grep "$dev10" err grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # -a is invalid when used alone not pvdisplay -a | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # one pv and one orphan pvdisplay -s "$dev10" "$dev11" | tee err grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # one pv and one dev (dev refers to a non-pv device) not pvdisplay -s "$dev10" "$dev12" | tee err grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # one orphan and one dev not pvdisplay -s "$dev11" "$dev12" | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # all pvs (pvs in vgs and orphan pvs) pvdisplay -s | tee err grep "$dev10" err grep "$dev2" err grep "$dev3" err grep "$dev4" 
err grep "$dev5" err grep "$dev6" err grep "$dev7" err grep "$dev8" err grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err grep "$dev14" err # all devs (pvs in vgs, orphan pvs, and devs) pvdisplay -a -C | tee err grep "$dev10" err grep "$dev2" err grep "$dev3" err grep "$dev4" err grep "$dev5" err grep "$dev6" err grep "$dev7" err grep "$dev8" err grep "$dev9" err grep "$dev11" err grep "$dev12" err grep "$dev13" err grep "$dev14" err # pv and orphan and dev not pvdisplay -s "$dev9" "$dev11" "$dev12" | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # -s option not allowed with -a -C not pvdisplay -s -a -C | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # pv and all (all ignored) pvdisplay -a -C "$dev9" | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # orphan and all (all ignored) pvdisplay -a -C "$dev11" | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # one tag pvdisplay -s @V2D3 | tee err not grep "$dev10" err not grep "$dev2" err grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # two tags pvdisplay -s @V2D3 @V2D45 | tee err not grep "$dev10" err not grep "$dev2" err grep "$dev3" err grep "$dev4" err grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # tag and pv pvdisplay -s @V2D3 "$dev4" | tee err not grep "$dev10" err not grep "$dev2" err grep "$dev3" err grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # tag and orphan pvdisplay -s @V2D3 "$dev11" | tee err not grep "$dev10" err not grep "$dev2" err grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # tag and dev not pvdisplay -s @V2D3 "$dev12" | tee err not grep "$dev10" err not grep "$dev2" err grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # tag and all (all ignored) pvdisplay @V2D3 -a -C | tee err not grep "$dev10" err not grep "$dev2" err grep "$dev3" 
err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # tag and pv redundant pvdisplay -s @V2D3 "$dev3" | tee err not grep "$dev10" err not grep "$dev2" err grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # # test pvs # # pv in vg pvs "$dev10" | tee err grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # pv not in vg (one orphan) pvs "$dev11" | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # dev is not a pv not pvs "$dev12" | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # two pvs in different vgs pvs "$dev10" "$dev2" | tee err grep "$dev10" err grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # one pv and one orphan pvs "$dev10" "$dev11" | tee err grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # one pv and one dev not pvs "$dev10" "$dev12" | tee err grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # one orphan and one dev not pvs "$dev11" "$dev12" | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # all pvs (pvs in vgs and orphan pvs) pvs | tee err grep "$dev10" err grep "$dev2" err grep "$dev3" err grep "$dev4" err grep "$dev5" err grep "$dev6" err grep "$dev7" err grep "$dev8" err grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err grep "$dev14" err # all devs (pvs in vgs, orphan pvs, and devs) pvs -a | tee err grep "$dev10" err grep "$dev2" err grep "$dev3" err grep "$dev4" err grep "$dev5" err grep "$dev6" err grep "$dev7" err grep "$dev8" err grep "$dev9" err grep "$dev11" err grep "$dev12" err grep "$dev13" err grep "$dev14" err # pv and orphan and dev not pvs "$dev9" "$dev11" "$dev12" | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not 
grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # pv and all (all ignored) pvs -a "$dev9" | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # orphan and all (all ignored) pvs -a "$dev11" | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # one tag pvs @V2D3 | tee err not grep "$dev10" err not grep "$dev2" err grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # two tags pvs @V2D3 @V2D45 | tee err not grep "$dev10" err not grep "$dev2" err grep "$dev3" err grep "$dev4" err grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # tag and pv pvs @V2D3 "$dev4" | tee err not grep "$dev10" err not grep "$dev2" err grep "$dev3" err grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # tag and orphan pvs @V2D3 "$dev11" | tee err not grep "$dev10" err not grep "$dev2" err grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # tag and dev not pvs @V2D3 "$dev12" | tee err not grep "$dev10" err not grep "$dev2" err grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # tag and all (all ignored) pvs @V2D3 -a | tee err not grep "$dev10" err not grep "$dev2" err grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # tag and pv redundant pvs @V2D3 "$dev3" | tee err not grep "$dev10" err not grep "$dev2" err grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # # tests including pvs without mdas # # remove old config vgremove $vg1 vgremove $vg2 vgremove $vg3 pvremove "$dev11" pvremove "$dev14" # new config with some pvs that have zero mdas # for vg1 pvcreate "$dev10" # for vg2 pvcreate "$dev2" --metadatacopies 0 pvcreate "$dev3" pvcreate "$dev4" pvcreate "$dev5" # for vg3 pvcreate "$dev6" --metadatacopies 0 pvcreate "$dev7" --metadatacopies 0 pvcreate "$dev8" --metadatacopies 0 pvcreate "$dev9" # orphan with mda pvcreate "$dev11" # orphan 
without mda pvcreate "$dev14" --metadatacopies 0 # non-pv devs # dev12 # dev13 vgcreate $SHARED $vg1 "$dev10" vgcreate $SHARED $vg2 "$dev2" "$dev3" "$dev4" "$dev5" vgcreate $SHARED $vg3 "$dev6" "$dev7" "$dev8" "$dev9" pvchange --addtag V2D3 "$dev3" pvchange --addtag V2D4 "$dev4" pvchange --addtag V2D45 "$dev4" pvchange --addtag V2D5 "$dev5" pvchange --addtag V2D45 "$dev5" pvchange --addtag V3 "$dev6" "$dev7" "$dev8" "$dev9" pvchange --addtag V3D8 "$dev8" pvchange --addtag V3D9 "$dev9" # # pvdisplay including pvs without mdas # # pv with mda pvdisplay -s "$dev10" | tee err grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # pv without mda pvdisplay -s "$dev2" | tee err not grep "$dev10" err grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # orphan with mda pvdisplay -s "$dev11" | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # orphan without mda pvdisplay -s "$dev14" | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err grep "$dev14" err # pv with mda, pv without mda, orphan with mda, orphan without mda pvdisplay -s "$dev10" "$dev2" "$dev11" "$dev14" | tee err grep "$dev10" err grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err grep "$dev14" err # tag refering to pv with mda and pv without mda pvdisplay -s @V3 | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err grep "$dev6" err grep "$dev7" err grep "$dev8" err grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # tag refering to one pv without mda pvdisplay -s @V3D8 | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # all pvs (pvs in vgs and orphan pvs) pvdisplay -s | tee err grep "$dev10" err grep "$dev2" err grep "$dev3" err grep "$dev4" err grep "$dev5" err grep "$dev6" err grep "$dev7" err grep "$dev8" err grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err grep "$dev14" err # all devs (pvs in vgs, orphan pvs, and devs) pvdisplay -a -C | tee err grep "$dev10" err grep "$dev2" err grep "$dev3" err grep "$dev4" err grep "$dev5" err grep "$dev6" err grep "$dev7" err grep "$dev8" err grep "$dev9" err grep "$dev11" err grep "$dev12" err grep "$dev13" err grep "$dev14" err # # pvs including pvs without mdas # # pv with mda pvs "$dev10" | tee 
err grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # pv without mda pvs "$dev2" | tee err not grep "$dev10" err grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # orphan with mda pvs "$dev11" | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # orphan without mda pvs "$dev14" | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err grep "$dev14" err # pv with mda, pv without mda, orphan with mda, orphan without mda pvs "$dev10" "$dev2" "$dev11" "$dev14" | tee err grep "$dev10" err grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err not grep "$dev8" err not grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err grep "$dev14" err # tag refering to pv with mda and pv without mda pvs @V3 | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err grep "$dev6" err grep "$dev7" err grep "$dev8" err grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # tag refering to one pv without mda pvs @V3D8 | tee err not grep "$dev10" err not grep "$dev2" err not grep "$dev3" err not grep "$dev4" err not grep "$dev5" err not grep "$dev6" err not grep "$dev7" err grep "$dev8" err not grep "$dev9" err not grep "$dev11" err not grep "$dev12" err not grep "$dev13" err not grep "$dev14" err # all pvs (pvs in vgs and orphan pvs) pvs | tee err grep "$dev10" err grep "$dev2" err grep "$dev3" err grep "$dev4" err grep "$dev5" err grep "$dev6" err grep "$dev7" err grep "$dev8" err grep "$dev9" err grep "$dev11" err not grep "$dev12" err not grep "$dev13" err grep "$dev14" err # all devs (pvs in vgs, orphan pvs, and devs) pvs -a | tee err grep "$dev10" err grep "$dev2" err grep "$dev3" err grep "$dev4" err grep "$dev5" err grep "$dev6" err grep "$dev7" err grep "$dev8" err grep "$dev9" err grep "$dev11" err grep "$dev12" err grep "$dev13" err grep "$dev14" err vgremove $vg1 $vg2 $vg3 LVM2.2.02.176/test/shell/vgsplit-usage.sh0000644000000000000120000001414313176752421016543 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2007-2011 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test vgsplit command options for validity SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux prepare_devs 5 get_devs if test -n "$LVM_TEST_LVM1" ; then mdatypes='1 2' else mdatypes='2' fi for mdatype in $mdatypes do pvcreate -M$mdatype "${DEVICES[@]}" # ensure name order does not matter # NOTE: if we're using lvm1, we must use -M on vgsplit vgcreate -M$mdatype "$vg1" "${DEVICES[@]}" vgsplit -M$mdatype $vg1 $vg2 "$dev1" vgremove $vg1 $vg2 vgcreate -M$mdatype "$vg2" "${DEVICES[@]}" vgsplit -M$mdatype $vg2 $vg1 "$dev1" vgremove $vg1 $vg2 # vgsplit accepts new vg as destination of split # lvm1 -- bz244792 vgcreate -M$mdatype "$vg1" "${DEVICES[@]}" vgsplit $vg1 $vg2 "$dev1" 1>err grep "New volume group \"$vg2\" successfully split from \"$vg1\"" err vgremove $vg1 $vg2 # vgsplit accepts existing vg as destination of split vgcreate -M$mdatype $vg1 "$dev1" "$dev2" vgcreate -M$mdatype $vg2 "$dev3" "$dev4" vgsplit $vg1 $vg2 "$dev1" 1>err grep "Existing volume group \"$vg2\" successfully split from \"$vg1\"" err vgremove $vg1 $vg2 # vgsplit accepts --maxphysicalvolumes 128 on new VG vgcreate -M$mdatype $vg1 "$dev1" "$dev2" vgsplit --maxphysicalvolumes 128 $vg1 $vg2 "$dev1" check vg_field $vg2 max_pv 128 vgremove $vg1 $vg2 # vgsplit accepts --maxlogicalvolumes 128 on new VG vgcreate -M$mdatype $vg1 "$dev1" "$dev2" vgsplit --maxlogicalvolumes 128 $vg1 $vg2 "$dev1" check vg_field $vg2 max_lv 128 vgremove $vg1 $vg2 # vgsplit rejects split because max_pv of destination would be exceeded vgcreate -M$mdatype --maxphysicalvolumes 2 $vg1 "$dev1" "$dev2" vgcreate -M$mdatype --maxphysicalvolumes 2 $vg2 "$dev3" "$dev4" not vgsplit $vg1 $vg2 "$dev1" 2>err; grep "Maximum number of physical volumes (2) exceeded" err vgremove $vg1 $vg2 # vgsplit rejects split because maxphysicalvolumes given with existing vg vgcreate -M$mdatype --maxphysicalvolumes 2 $vg1 "$dev1" "$dev2" vgcreate -M$mdatype --maxphysicalvolumes 2 $vg2 "$dev3" "$dev4" not vgsplit --maxphysicalvolumes 2 $vg1 $vg2 "$dev1" 2>err; grep "Volume group \"$vg2\" exists, but new VG option specified" err vgremove $vg1 $vg2 # vgsplit rejects split because maxlogicalvolumes given with existing vg vgcreate -M$mdatype --maxlogicalvolumes 2 $vg1 "$dev1" "$dev2" vgcreate -M$mdatype --maxlogicalvolumes 2 $vg2 "$dev3" "$dev4" not vgsplit --maxlogicalvolumes 2 $vg1 $vg2 "$dev1" 2>err grep "Volume group \"$vg2\" exists, but new VG option specified" err vgremove $vg1 $vg2 # vgsplit rejects split because alloc given with existing vg vgcreate -M$mdatype --alloc cling $vg1 "$dev1" "$dev2" vgcreate -M$mdatype --alloc cling $vg2 "$dev3" "$dev4" not vgsplit --alloc cling $vg1 $vg2 "$dev1" 2>err; grep "Volume group \"$vg2\" exists, but new VG option specified" err vgremove $vg1 $vg2 # vgsplit rejects split because clustered given with existing vg vgcreate -M$mdatype --clustered n $vg1 "$dev1" "$dev2" vgcreate -M$mdatype --clustered n $vg2 "$dev3" "$dev4" not vgsplit --clustered n $vg1 $vg2 "$dev1" 2>err grep "Volume group \"$vg2\" exists, but new VG option specified" err vgremove $vg1 $vg2 # vgsplit rejects vg with active lv pvcreate -M$mdatype -ff "$dev3" "$dev4" vgcreate -M$mdatype $vg1 "$dev1" "$dev2" vgcreate -M$mdatype $vg2 "$dev3" "$dev4" lvcreate -l 4 -n $lv1 $vg1 not vgsplit $vg1 $vg2 "$dev1" 2>err; grep "Logical volumes in \"$vg1\" must be inactive\$" err vgremove -f $vg1 $vg2 # vgsplit rejects split because max_lv is exceeded vgcreate -M$mdatype --maxlogicalvolumes 2 $vg1 "$dev1" "$dev2" vgcreate -M$mdatype --maxlogicalvolumes 2 $vg2 "$dev3" "$dev4" lvcreate -l 4 -n $lv1 $vg1 lvcreate -l 4 -n $lv2 $vg1 lvcreate -l 4 -n $lv3 $vg2 vgchange -an 
$vg1 $vg2 not vgsplit $vg1 $vg2 "$dev1" 2>err; grep "Maximum number of logical volumes (2) exceeded" err vgremove -f $vg1 $vg2 # vgsplit verify default - max_lv attribute from new VG is same as source VG" \ vgcreate -M$mdatype $vg1 "$dev1" "$dev2" lvcreate -l 4 -n $lv1 $vg1 vgchange -an $vg1 vgsplit $vg1 $vg2 "$dev1" check compare_vg_field $vg1 $vg2 max_lv vgremove -f $vg1 $vg2 # vgsplit verify default - max_pv attribute from new VG is same as source VG" \ vgcreate -M$mdatype $vg1 "$dev1" "$dev2" lvcreate -l 4 -n $lv1 $vg1 vgchange -an $vg1 vgsplit $vg1 $vg2 "$dev1" check compare_vg_field $vg1 $vg2 max_pv vgremove -f $vg1 $vg2 # vgsplit verify default - vg_fmt attribute from new VG is same as source VG" \ vgcreate -M$mdatype $vg1 "$dev1" "$dev2" lvcreate -l 4 -n $lv1 $vg1 vgchange -an $vg1 vgsplit $vg1 $vg2 "$dev1" check compare_vg_field $vg1 $vg2 vg_fmt vgremove -f $vg2 $vg1 # vgsplit rejects split because PV not in VG vgcreate -M$mdatype $vg1 "$dev1" "$dev2" vgcreate -M$mdatype $vg2 "$dev3" "$dev4" lvcreate -l 4 -n $lv1 $vg1 lvcreate -l 4 -n $lv2 $vg1 vgchange -an $vg1 not vgsplit $vg1 $vg2 "$dev3" 2>err; vgremove -f $vg2 $vg1 # Restart clvm because using the same # devs as lvm1 and then lvm2 causes problems. if test -e LOCAL_CLVMD ; then kill "$(< LOCAL_CLVMD)" for i in $(seq 1 100) ; do test $i -eq 100 && die "Shutdown of clvmd is too slow." pgrep clvmd || break sleep .1 done # wait for the pid removal aux prepare_clvmd fi done if test -z "$LVM_TEST_LVM1" ; then # ONLY LVM2 metadata # setup PVs" ' pvcreate --metadatacopies 0 "$dev5" # vgsplit rejects to give away pv with the last mda copy vgcreate $vg1 "$dev5" "$dev2" lvcreate -l 10 -n $lv1 $vg1 lvchange -an $vg1/$lv1 check pvlv_counts $vg1 2 1 0 not vgsplit $vg1 $vg2 "$dev5"; check pvlv_counts $vg1 2 1 0 vgremove -f $vg1 # vgsplit rejects split because metadata types differ pvcreate -ff -M1 "$dev3" "$dev4" pvcreate -ff "$dev1" "$dev2" vgcreate -M1 $vg1 "$dev3" "$dev4" vgcreate $vg2 "$dev1" "$dev2" not vgsplit $vg1 $vg2 "$dev3" 2>err; grep "Metadata types differ" err vgremove -f $vg1 $vg2 fi LVM2.2.02.176/test/shell/relative-sign-options.sh0000644000000000000120000000413713176752421020215 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Exercise toollib process_each_lv' SKIP_WITH_LVMPOLLD=1 # disable lvmetad logging as it bogs down test systems export LVM_TEST_LVMETAD_DEBUG_OPTS=${LVM_TEST_LVMETAD_DEBUG_OPTS-} . 
lib/inittest aux prepare_vg 1 256 lvcreate -an -n $lv1 -l4 $vg lvcreate -an -n $lv2 -L4 $vg lvcreate -an -n $lv3 -l+4 $vg lvcreate -an -n $lv4 -L+4 $vg not lvcreate -n $lv5 -l-4 $vg not lvcreate -n $lv5 -L-4 $vg lvremove $vg/$lv1 lvremove $vg/$lv2 lvremove $vg/$lv3 lvremove $vg/$lv4 lvcreate -an -n $lv1 -l4 $vg lvresize -y -l8 $vg/$lv1 lvresize -y -L16 $vg/$lv1 lvresize -y -l+1 $vg/$lv1 lvresize -y -L+1 $vg/$lv1 lvresize -y -l-1 $vg/$lv1 lvresize -y -L-1 $vg/$lv1 lvcreate -an -n $lv2 -l4 $vg lvextend -y -l8 $vg/$lv2 lvextend -y -L16 $vg/$lv2 lvextend -y -l+1 $vg/$lv2 lvextend -y -L+1 $vg/$lv2 not lvextend -y -l-1 $vg/$lv2 not lvextend -y -L-1 $vg/$lv2 lvcreate -an -n $lv3 -l64 $vg lvreduce -y -l32 $vg/$lv3 lvreduce -y -L8 $vg/$lv3 lvreduce -y -l-1 $vg/$lv3 lvreduce -y -L-1 $vg/$lv3 not lvreduce -y -l+1 $vg/$lv3 not lvreduce -y -L+1 $vg/$lv3 # relative with percent extents lvcreate -an -n $lv6 -l+100%FREE $vg lvremove $vg/$lv6 lvcreate -an -n $lv6 -l1 $vg lvextend -y -l+100%FREE $vg/$lv6 lvremove $vg/$lv6 lvcreate -an -n $lv6 -l1 $vg lvresize -y -l+100%FREE $vg/$lv6 lvremove $vg/$lv6 if aux have_thin 1 0 0 ; then # relative poolmetadatasize lvcreate --type thin-pool -L64 --poolmetadatasize 32 -n $lv7 $vg lvresize --poolmetadatasize 64 $vg/$lv7 lvresize --poolmetadatasize +8 $vg/$lv7 not lvresize -y --poolmetadatasize -8 $vg/$lv7 lvextend --poolmetadatasize +8 $vg/$lv7 not lvextend -y --poolmetadatasize -8 $vg/$lv7 fi vgremove -y $vg LVM2.2.02.176/test/shell/zero-usage.sh0000644000000000000120000000200013176752421016017 0ustar rootwheel#!/bin/bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Basic usage of zero target SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest which md5sum || skip aux prepare_vg 1 lvcreate --type zero -L1 -n $lv1 $vg lvextend -L+1 $vg/$lv1 sum1=$(dd if=/dev/zero bs=2M count=1 | md5sum | cut -f1 -d' ') sum2=$(dd if="$DM_DEV_DIR/$vg/$lv1" bs=2M count=1 | md5sum | cut -f1 -d' ') # has to match test "$sum1" = "$sum2" check lv_field $vg/$lv1 lv_modules "zero" check lv_field $vg/$lv1 segtype "zero" check lv_field $vg/$lv1 seg_count "1" check lv_field $vg/$lv1 seg_size_pe "4" # 4 * 512 vgremove -ff $vg LVM2.2.02.176/test/shell/vgextend-restoremissing.sh0000644000000000000120000000360413176752421020650 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux prepare_vg 3 lvcreate -an -Zn --type mirror -m 1 -l 1 -n mirror $vg lvcreate -l 1 -n lv1 $vg "$dev1" # vgextend require vgname invalid vgextend # --metadatacopies => use --pvmetadatacopies invalid vgextend --metadatacopies 3 $vg "$dev1" 2>&1 | tee out #grep -- "use --pvmetadatacopies" out grep -E -- "unrecognized option.*--metadatacopies" out # VG name should exist fail vgextend --restoremissing $vg-invalid "$dev1" # try to just change metadata; we expect the new version (with MISSING_PV set # on the reappeared volume) to be written out to the previously missing PV aux disable_dev "$dev1" lvremove $vg/mirror # try restore the still existing device fail vgextend --restore $vg "$dev1" aux enable_dev "$dev1" not vgck $vg 2>&1 | tee log grep "missing 1 physical volume" log not lvcreate -aey --type mirror -m 1 -l 1 -n mirror $vg # write operations fail # try restore the non-missing device fail vgextend --restore $vg "$dev2" # try restore the non-existing device fail vgextend --restore $vg "$dev2-invalid" # restore the missing device vgextend --restore $vg "$dev1" vgreduce $vg "$dev3" vgchange --metadatacopies 1 $vg # 'n' failing to change volume group fail vgextend --metadataignore y --pvmetadatacopies 2 $vg "$dev3" vgextend --yes --metadataignore y --pvmetadatacopies 2 $vg "$dev3" vgck $vg lvcreate -an -Zn --type mirror -m 1 -l 1 -n mirror $vg vgremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-raid-takeover.sh0000644000000000000120000002332713176752421020532 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016,2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA2110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest which mkfs.ext4 || skip aux have_raid 1 9 0 || skip correct_raid4_layout=0 aux have_raid 1 9 1 && correct_raid4_layout=1 aux prepare_vg 8 # FIXME: lvconvert leaks 'error' devices detect_error_leak_() { local err="" for i in $(dmsetup info -c -o name --noheadings) ; do case "$i" in "$vg*") (dmsetup table "$i" | grep "error ") && err="$err $i" ;; esac done test -z "$err" || { dmsetup table | grep $vg dmsetup ls --tree die "Device(s) $err should not be here." 
} } function _lvcreate { local level=$1 local req_stripes=$2 local stripes=$3 local size=$4 local vg=$5 local lv=$6 lvcreate -y -aey --type $level -i $req_stripes -L $size -n $lv $vg check lv_field $vg/$lv segtype "$level" check lv_field $vg/$lv data_stripes $req_stripes check lv_field $vg/$lv stripes $stripes mkfs.ext4 "$DM_DEV_DIR/$vg/$lv" fsck -fn "$DM_DEV_DIR/$vg/$lv" } function _lvconvert { local req_level=$1 local level=$2 local data_stripes=$3 local stripes=$4 local vg=$5 local lv=$6 local region_size=${7-} local wait_and_check=1 local R="" [ -n "$region_size" ] && R="-R $region_size" [ "${level:0:7}" = "striped" ] && wait_and_check=0 [ "${level:0:5}" = "raid0" ] && wait_and_check=0 lvconvert -y --ty $req_level $R $vg/$lv detect_error_leak_ check lv_field $vg/$lv segtype "$level" check lv_field $vg/$lv data_stripes $data_stripes check lv_field $vg/$lv stripes $stripes if [ "$wait_and_check" -eq 1 ] then fsck -fn "$DM_DEV_DIR/$vg/$lv" aux wait_for_sync $vg $lv fi fsck -fn "$DM_DEV_DIR/$vg/$lv" } function _invalid_raid5_conversions { local vg=$1 local lv=$2 not _lvconvert striped 4 4 $vg $lv1 not _lvconvert raid0 raid0 4 4 $vg $lv1 not _lvconvert raid0_meta raid0_meta 4 4 $vg $lv1 not _lvconvert raid4 raid4 4 5 $vg $lv1 not _lvconvert raid5_ls raid5_ls 4 5 $vg $lv1 not _lvconvert raid5_rs raid5_rs 4 5 $vg $lv1 not _lvconvert raid5_la raid5_la 4 5 $vg $lv1 not _lvconvert raid5_ra raid5_ra 4 5 $vg $lv1 not _lvconvert raid6_zr raid6_zr 4 6 $vg $lv1 not _lvconvert raid6_nr raid6_nr 4 6 $vg $lv1 not _lvconvert raid6_nc raid6_nc 4 6 $vg $lv1 not _lvconvert raid6_n_6 raid6_n_6 4 6 $vg $lv1 not _lvconvert raid6 raid6_n_6 4 6 $vg $lv1 } # Check raid6 conversion constraints of minimum 3 stripes _lvcreate striped 2 2 4m $vg $lv1 not _lvconvert raid6 raid6_n_6 2 4 $vg $lv1 lvremove -y $vg _lvcreate raid0 3 3 4m $vg $lv1 _lvconvert raid6 raid6_n_6 3 5 $vg $lv1 lvremove -y $vg # Delay 1st leg so that rebuilding status characters # can be read before resync finishes too quickly.
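#
# (Editor's note, derived from the helper definitions above and meant only as
#  a reading aid for the conversion matrix that follows; the helpers are
#  called positionally as
#
#    _lvcreate  <type> <data_stripes> <total_stripes> <size> <vg> <lv>
#    _lvconvert <requested_type> <expected_type> <data_stripes> <total_stripes> <vg> <lv> [region_size]
#
#  so a call such as "_lvconvert raid5 raid5_n 3 4 $vg $lv1 128k" requests a
#  takeover to raid5 with a 128k region size and then verifies that the result
#  is segtype raid5_n with 3 data stripes spread over 4 devices.)
#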
# aux delay_dev "$dev1" 1 # Create 3-way mirror lvcreate --yes -aey --type mirror -R 64K -m 2 -L8M -n $lv1 $vg check lv_field $vg/$lv1 segtype "mirror" check lv_field $vg/$lv1 stripes 3 check lv_field $vg/$lv1 regionsize "64.00k" mkfs.ext4 "$DM_DEV_DIR/$vg/$lv1" aux wait_for_sync $vg $lv1 fsck -fn "$DM_DEV_DIR/$vg/$lv1" # Convert 3-way to 4-way mirror lvconvert -y -m 3 $vg/$lv1 detect_error_leak_ check lv_field $vg/$lv1 segtype "mirror" check lv_field $vg/$lv1 stripes 4 fsck -fn "$DM_DEV_DIR/$vg/$lv1" aux wait_for_sync $vg $lv1 fsck -fn "$DM_DEV_DIR/$vg/$lv1" # Takeover 4-way mirror to raid1 lvconvert --yes --type raid1 -R 64k $vg/$lv1 detect_error_leak_ check lv_field $vg/$lv1 segtype "raid1" check lv_field $vg/$lv1 stripes 4 check lv_field $vg/$lv1 regionsize "64.00k" fsck -fn "$DM_DEV_DIR/$vg/$lv1" ## Convert 4-way raid1 to 5-way lvconvert -y -m 4 -R 128K $vg/$lv1 detect_error_leak_ check lv_field $vg/$lv1 segtype "raid1" check lv_field $vg/$lv1 stripes 5 check lv_field $vg/$lv1 regionsize "128.00k" fsck -fn "$DM_DEV_DIR/$vg/$lv1" aux wait_for_sync $vg $lv1 fsck -fn "$DM_DEV_DIR/$vg/$lv1" # FIXME: enable once lvconvert rejects early ## Try converting 4-way raid1 to 9-way #not lvconvert --yes -m 8 $vg/$lv1 #check lv_field $vg/$lv1 segtype "raid1" #check lv_field $vg/$lv1 stripes 4 # Convert 5-way raid1 to 2-way lvconvert --yes -m 1 $vg/$lv1 detect_error_leak_ lvs $vg/$lv1 dmsetup status $vg-$lv1 dmsetup table $vg-$lv1 check lv_field $vg/$lv1 segtype "raid1" check lv_field $vg/$lv1 stripes 2 fsck -fn "$DM_DEV_DIR/$vg/$lv1" # Convert 2-way raid1 to mirror lvconvert --yes --type mirror -R 32K $vg/$lv1 detect_error_leak_ check lv_field $vg/$lv1 segtype "mirror" check lv_field $vg/$lv1 stripes 2 check lv_field $vg/$lv1 regionsize "32.00k" aux wait_for_sync $vg $lv1 fsck -fn "$DM_DEV_DIR/$vg/$lv1" aux wait_for_sync $vg $lv1 # Clean up lvremove --yes $vg/$lv1 if [ $correct_raid4_layout -eq 1 ] then # # Start out with raid4 # # Create 3-way striped raid4 (4 legs total) _lvcreate raid4 3 4 8M $vg $lv1 aux wait_for_sync $vg $lv1 # Convert raid4 -> striped not _lvconvert striped striped 3 3 $vg $lv1 512k _lvconvert striped striped 3 3 $vg $lv1 # Convert striped -> raid4 _lvconvert raid4 raid4 3 4 $vg $lv1 64k check lv_field $vg/$lv1 regionsize "64.00k" # Convert raid4 -> raid5_n _lvconvert raid5 raid5_n 3 4 $vg $lv1 128k check lv_field $vg/$lv1 regionsize "128.00k" # Convert raid5_n -> striped _lvconvert striped striped 3 3 $vg $lv1 # Convert striped -> raid5_n _lvconvert raid5_n raid5_n 3 4 $vg $lv1 # Convert raid5_n -> raid4 _lvconvert raid4 raid4 3 4 $vg $lv1 # Convert raid4 -> raid0 _lvconvert raid0 raid0 3 3 $vg $lv1 # Convert raid0 -> raid5_n _lvconvert raid5_n raid5_n 3 4 $vg $lv1 # Convert raid5_n -> raid0_meta _lvconvert raid0_meta raid0_meta 3 3 $vg $lv1 # Convert raid0_meta -> raid5_n _lvconvert raid5 raid5_n 3 4 $vg $lv1 # Convert raid4 -> raid0_meta not _lvconvert raid0_meta raid0_meta 3 3 $vg $lv1 256k _lvconvert raid0_meta raid0_meta 3 3 $vg $lv1 # Convert raid0_meta -> raid4 _lvconvert raid4 raid4 3 4 $vg $lv1 # Convert raid4 -> raid0 _lvconvert raid0 raid0 3 3 $vg $lv1 # Convert raid0 -> raid4 _lvconvert raid4 raid4 3 4 $vg $lv1 # Convert raid4 -> striped _lvconvert striped striped 3 3 $vg $lv1 # Convert striped -> raid6_n_6 _lvconvert raid6_n_6 raid6_n_6 3 5 $vg $lv1 # Convert raid6_n_6 -> striped _lvconvert striped striped 3 3 $vg $lv1 # Convert striped -> raid6_n_6 _lvconvert raid6 raid6_n_6 3 5 $vg $lv1 # Convert raid6_n_6 -> raid5_n _lvconvert raid5_n raid5_n 3 4 $vg $lv1 # 
Convert raid5_n -> raid6_n_6 _lvconvert raid6_n_6 raid6_n_6 3 5 $vg $lv1 # Convert raid6_n_6 -> raid4 _lvconvert raid4 raid4 3 4 $vg $lv1 # Convert raid4 -> raid6_n_6 _lvconvert raid6 raid6_n_6 3 5 $vg $lv1 # Convert raid6_n_6 -> raid0 _lvconvert raid0 raid0 3 3 $vg $lv1 # Convert raid0 -> raid6_n_6 _lvconvert raid6_n_6 raid6_n_6 3 5 $vg $lv1 # Convert raid6_n_6 -> raid0_meta _lvconvert raid0_meta raid0_meta 3 3 $vg $lv1 # Convert raid0_meta -> raid6_n_6 _lvconvert raid6 raid6_n_6 3 5 $vg $lv1 # Convert raid6_n_6 -> striped not _lvconvert striped striped 3 3 $vg $lv1 128k _lvconvert striped striped 3 3 $vg $lv1 # Convert striped -> raid10 _lvconvert raid10 raid10 3 6 $vg $lv1 # Convert raid10 -> raid0 not _lvconvert raid0 raid0 3 3 $vg $lv1 64k _lvconvert raid0 raid0 3 3 $vg $lv1 # Convert raid0 -> raid10 _lvconvert raid10 raid10 3 6 $vg $lv1 # Convert raid10 -> raid0_meta _lvconvert raid0_meta raid0_meta 3 3 $vg $lv1 # Convert raid0_meta -> raid5 _lvconvert raid5_n raid5_n 3 4 $vg $lv1 # Convert raid5_n -> raid0_meta _lvconvert raid0_meta raid0_meta 3 3 $vg $lv1 # Convert raid0_meta -> raid10 _lvconvert raid10 raid10 3 6 $vg $lv1 # Convert raid10 -> striped not _lvconvert striped striped 3 3 $vg $lv1 256k _lvconvert striped striped 3 3 $vg $lv1 # Clean up lvremove -y $vg # Create + convert 4-way raid5 variations _lvcreate raid5 4 5 8M $vg $lv1 aux wait_for_sync $vg $lv1 _invalid_raid5_conversions $vg $lv1 not _lvconvert raid6_rs_6 raid6_rs_6 4 6 $vg $lv1 not _lvconvert raid6_la_6 raid6_la_6 4 6 $vg $lv1 not _lvconvert raid6_ra_6 raid6_ra_6 4 6 $vg $lv1 _lvconvert raid6_ls_6 raid6_ls_6 4 6 $vg $lv1 _lvconvert raid5_ls raid5_ls 4 5 $vg $lv1 lvremove -y $vg _lvcreate raid5_ls 4 5 8M $vg $lv1 aux wait_for_sync $vg $lv1 _invalid_raid5_conversions $vg $lv1 not _lvconvert raid6_rs_6 raid6_rs_6 4 6 $vg $lv1 not _lvconvert raid6_la_6 raid6_la_6 4 6 $vg $lv1 not _lvconvert raid6_ra_6 raid6_ra_6 4 6 $vg $lv1 _lvconvert raid6_ls_6 raid6_ls_6 4 6 $vg $lv1 _lvconvert raid5_ls raid5_ls 4 5 $vg $lv1 lvremove -y $vg _lvcreate raid5_rs 4 5 8M $vg $lv1 aux wait_for_sync $vg $lv1 _invalid_raid5_conversions $vg $lv1 not _lvconvert raid6_ra_6 raid6_ra_6 4 6 $vg $lv1 not _lvconvert raid6_la_6 raid6_la_6 4 6 $vg $lv1 not _lvconvert raid6_ra_6 raid6_ra_6 4 6 $vg $lv1 _lvconvert raid6_rs_6 raid6_rs_6 4 6 $vg $lv1 _lvconvert raid5_rs raid5_rs 4 5 $vg $lv1 lvremove -y $vg _lvcreate raid5_la 4 5 8M $vg $lv1 aux wait_for_sync $vg $lv1 _invalid_raid5_conversions $vg $lv1 not _lvconvert raid6_ls_6 raid6_ls_6 4 6 $vg $lv1 not _lvconvert raid6_rs_6 raid6_rs_6 4 6 $vg $lv1 not _lvconvert raid6_ra_6 raid6_ra_6 4 6 $vg $lv1 _lvconvert raid6_la_6 raid6_la_6 4 6 $vg $lv1 _lvconvert raid5_la raid5_la 4 5 $vg $lv1 lvremove -y $vg _lvcreate raid5_ra 4 5 8M $vg $lv1 aux wait_for_sync $vg $lv1 _invalid_raid5_conversions $vg $lv1 not _lvconvert raid6_ls_6 raid6_ls_6 4 6 $vg $lv1 not _lvconvert raid6_rs_6 raid6_rs_6 4 6 $vg $lv1 not _lvconvert raid6_la_6 raid6_la_6 4 6 $vg $lv1 _lvconvert raid6_ra_6 raid6_ra_6 4 6 $vg $lv1 _lvconvert raid5_ra raid5_ra 4 5 $vg $lv1 lvremove -y $vg else not lvcreate -y -aey --type raid4 -i 3 -L8M -n $lv4 $vg not lvconvert -y --ty raid4 $vg/$lv1 not lvconvert -y --ty raid4 $vg/$lv2 fi vgremove -ff $vg LVM2.2.02.176/test/shell/fsadm-renamed.sh0000644000000000000120000000477513176752421016466 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA test_description='Exercise fsadm operation on renamed device' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 1 80 vg_lv=$vg/$lv1 vg_lv_ren=${vg_lv}_renamed dev_vg_lv="$DM_DEV_DIR/$vg_lv" dev_vg_lv_ren="$DM_DEV_DIR/$vg_lv_ren" mount_dir="mnt" mount_space_dir="mnt space dir" mount_dolar_dir="mnt \$SPACE dir" # for recursive call LVM_BINARY=$(which lvm) export LVM_BINARY test ! -d "$mount_dir" && mkdir "$mount_dir" test ! -d "$mount_space_dir" && mkdir "$mount_space_dir" test ! -d "$mount_dolar_dir" && mkdir "$mount_dolar_dir" cleanup_mounted_and_teardown() { umount "$mount_dir" || true umount "$mount_space_dir" || true umount "$mount_dolar_dir" || true aux teardown } # Test for block sizes != 1024 (rhbz #480022) trap 'cleanup_mounted_and_teardown' EXIT # Iterate over supported filesystems for i in mkfs.ext3 mkfs.xfs mkfs.reiserfs do if not which "$i" ; then echo "Skipping tests for missing $i" continue fi lvcreate -n $lv1 -L20M $vg case "$i" in *ext3) MKFS_ARGS="-b1024 -j" ;; *xfs) MKFS_ARGS="-l internal,size=1000b -f" ;; *reiserfs) MKFS_ARGS="-s 513 -f" ;; esac echo "$i" "$i" $MKFS_ARGS "$dev_vg_lv" # Adding couple udev wait ops as some older systemd # might get confused and was 'randomly/racy' umounting # devices just mounted. # # See for explanation: # https://github.com/systemd/systemd/commit/628c89cc68ab96fce2de7ebba5933725d147aecc # https://github.com/systemd/systemd/pull/2017 aux udev_wait mount "$dev_vg_lv" "$mount_dir" aux udev_wait lvrename $vg_lv $vg_lv_ren mount | tee out grep $vg out # fails on renamed LV fail lvresize -L+10M -r $vg_lv_ren # fails on unknown mountpoint (FIXME: umount) not umount "$dev_vg_lv" lvcreate -L20 -n $lv1 $vg "$i" $MKFS_ARGS "$dev_vg_lv" aux udev_wait mount "$dev_vg_lv" "$mount_dolar_dir" mount | tee out grep $vg out not lvresize -L+10M -r $vg_lv_ren umount "$mount_dir" # FIXME: lvresize CANNOT handle/propagage '--yes' to fsadm echo y | lvresize -L+10M -r $vg_lv aux udev_wait umount "$mount_dolar_dir" lvremove -ff $vg done vgremove -ff $vg LVM2.2.02.176/test/shell/lvcreate-repair.sh0000644000000000000120000000511613176752421017036 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2011-2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux prepare_vg 3 # fail multiple devices for i in "$dev1" "$dev2" "$dev3" ; do for j in "$dev2" "$dev3" ; do if test "$i" = "$j" ; then continue ; fi vgremove -ff $vg vgcreate $vg "$dev1" "$dev2" "$dev3" # exit 1 lvcreate -l1 -n $lv1 $vg "$dev1" aux disable_dev "$i" "$j" vgreduce --removemissing --force $vg # check if reduced device was removed test "$i" = "$dev1" && dm_table | not grep -E "$vg-$lv1: *[^ ]+" lvcreate -l1 -n $lv2 $vg test "$i" != "$dev1" && check lv_exists $vg $lv1 check lv_exists $vg $lv2 aux enable_dev "$i" "$j" vgscan test "$i" != "$dev1" && check lv_exists $vg $lv1 check lv_exists $vg $lv2 done done vgremove -ff $vg vgcreate $vg "$dev1" "$dev2" "$dev3" # use tricky 'dd' for i in "$dev1" "$dev2" "$dev3" ; do for j in "$dev2" "$dev3" ; do if test "$i" = "$j" ; then continue ; fi dd if="$i" of=backup_i bs=256K count=1 dd if="$j" of=backup_j bs=256K count=1 lvcreate -l1 -n $lv1 $vg "$dev1" dd if=backup_j of="$j" bs=256K count=1 dd if=backup_i of="$i" bs=256K count=1 check lv_exists $vg $lv1 # mda should be now consistent lvremove -f $vg/$lv1 done done # confuse lvm with active LV left behind dd if="$dev1" of=backup_i bs=256K count=1 dd if="$dev2" of=backup_j bs=256K count=1 lvcreate -l1 $vg "$dev1" dd if=backup_j of="$dev2" bs=256K count=1 dd if=backup_i of="$dev1" bs=256K count=1 # CHECKME: following command writes here: # vgreduce --removemissing --force $vg # # WARNING: Inconsistent metadata found for VG LVMTESTvg - updating to use version 2 # Volume group "LVMTESTvg" is already consistent # dirty game dd if=/dev/zero of="$dev3" bs=256K count=1 aux notify_lvmetad "$dev3" # udev be watching you vgreduce --removemissing --force $vg # FIXME: here is LV1 left active - but metadata does not know about it # and lvcreate does not check whether such device exists in the table # so it ends with: # # device-mapper: create ioctl failed: Device or resource busy # Failed to activate new LV. should lvcreate -l1 $vg "$dev1" should not dmsetup remove ${vg}-lvol0 vgremove -ff $vg LVM2.2.02.176/test/shell/thin-volume-list.sh0000644000000000000120000000260613176752421017172 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # test pool behaviour when volume_list masks activation SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . 
lib/inittest # # Main # aux have_thin 1 0 0 || skip aux prepare_vg 2 lvcreate -T -L8M $vg/pool -V10M -n $lv1 # skip $vg from activation aux lvmconf "activation/volume_list = [ \"$vg1\" ]" # We still could pass - since pool is still active lvcreate -V10 -n $lv2 -T $vg/pool # but $lv2 is not active check inactive $vg $lv2 vgchange -an $vg # Pool is not active - so it cannot create thin volume not lvcreate -V10 -T $vg/pool # Cannot create even new pool # check there are not left devices (RHBZ #1140128) not lvcreate -L10 -T $vg/new_pool check lv_not_exists $vg/new_pool aux lvmconf "activation/volume_list = [ \"$vg\" ]" lvcreate -V10 -T $vg/pool lvs -o +transaction_id,thin_id $vg lvremove -ff $vg check vg_field $vg lv_count "0" vgremove -ff $vg LVM2.2.02.176/test/shell/discards-thin.sh0000644000000000000120000000556513176752421016515 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012-2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # test support of thin discards # SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest # # Main # aux have_thin 1 1 0 || skip aux prepare_vg 2 64 get_devs aux extend_filter_LVMTEST # Create named pool only lvcreate -l1 --discards ignore -T $vg/pool check lv_field $vg/pool discards "ignore" check lv_field $vg/pool kernel_discards "ignore" lvcreate -l1 --discards nopassdown -T $vg/pool1 check lv_field $vg/pool1 discards "nopassdown" check lv_field $vg/pool1 kernel_discards "nopassdown" lvcreate -l1 --discards passdown -T $vg/pool2 check lv_field $vg/pool2 discards "passdown" check lv_field $vg/pool2 discards "passdown" lvchange --discards nopassdown $vg/pool2 lvcreate -V1M -n origin -T $vg/pool lvcreate -s $vg/origin -n snap # Cannot convert active nopassdown -> ignore not lvchange --discards nopassdown $vg/pool # Cannot convert active ignore -> passdown not lvchange --discards passdown $vg/pool # Cannot convert active nopassdown -> ignore not lvchange --discards ignore $vg/pool1 # Deactivate pool only lvchange -an $vg/pool $vg/pool1 # Cannot convert, since thin volumes are still active not lvchange --discards passdown $vg/pool # Deactive thin volumes lvchange -an $vg/origin $vg/snap lvchange --discards passdown $vg/pool check lv_field $vg/pool discards "passdown" lvchange --discards ignore $vg/pool1 check lv_field $vg/pool1 discards "ignore" vgremove -ff $vg # Create thin pool with discards set to "ignore". # If we create a thin volume which we use for a PV # which we use to create another thin pool on top # with discards set to "passdown", the discards value # in metadata is still "passdown", but because the # device below does not support it, the kernel value # of discards actually used will be "nopassdown". # This is why we have "-o discards" and "-o kernel_discards". 
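#
# (Editor's note, a hedged aside rather than part of the original test: the
#  difference described above can be listed side by side with the standard
#  lvs report fields already used by the checks in this file; the pool name
#  here is the one created just below:
#
#    lvs -o lv_name,discards,kernel_discards ${vg}_2/pool
#
#  "discards" shows the value recorded in the metadata, while
#  "kernel_discards" shows what the kernel thin-pool target actually uses,
#  which drops to "nopassdown" when the device underneath ignores discards.)
#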
vgcreate -s 1m "${vg}_1" "${DEVICES[@]}" lvcreate -l 10 -T ${vg}_1/pool --discards ignore lvcreate -V 9m -T ${vg}_1/pool -n device_with_ignored_discards vgcreate -s 1m ${vg}_2 "$DM_DEV_DIR/${vg}_1/device_with_ignored_discards" lvcreate -l 1 -T ${vg}_2/pool --discards passdown lvcreate -V 1 -T ${vg}_2/pool check lv_field ${vg}_1/pool discards "ignore" check lv_field ${vg}_1/pool kernel_discards "ignore" check lv_field ${vg}_2/pool discards "passdown" check lv_field ${vg}_2/pool kernel_discards "nopassdown" vgremove -ff ${vg}_2 vgremove -ff ${vg}_1 LVM2.2.02.176/test/shell/pvmove-resume-2.sh0000644000000000000120000001301313176752421016715 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Check whether all available pvmove resume methods works as expected. # lvchange is able to resume pvmoves in progress. # Moving 2 LVs in VG variant SKIP_WITH_LVMLOCKD=1 SKIP_WITH_CLVMD=1 . lib/inittest aux prepare_pvs 2 30 vgcreate -s 128k $vg "$dev1" pvcreate --metadatacopies 0 "$dev2" vgextend $vg "$dev2" test_pvmove_resume() { # 2 LVs on same device lvcreate -an -Zn -l15 -n $lv1 $vg "$dev1" lvcreate -an -Zn -l15 -n $lv2 $vg "$dev1" aux delay_dev "$dev2" 0 1000 "$(get first_extent_sector "$dev2"):" pvmove -i5 "$dev1" & PVMOVE=$! aux wait_pvmove_lv_ready "$vg-pvmove0" 300 kill -9 $PVMOVE if test -e LOCAL_LVMPOLLD ; then aux prepare_lvmpolld fi wait local finished for i in {1..100}; do finished=1 for d in "$vg-$lv1" "$vg-$lv2" "$vg-pvmove0" ; do dmsetup status "$d" 2>/dev/null && { dmsetup remove "$d" || finished=0 } done test "$finished" -eq 0 || break done test "$finished" -eq 0 && die "Can't remove device" check lv_attr_bit type $vg/pvmove0 "p" if test -e LOCAL_CLVMD ; then # giveup all clvmd locks (faster then restarting clvmd) # no deactivation happen, nodes are already removed #vgchange -an $vg # FIXME: However above solution has one big problem # as clvmd starts to abort on internal errors on various # errors, based on the fact pvmove is killed -9 # Restart clvmd kill "$(< LOCAL_CLVMD)" for i in {1 100} ; do test $i -eq 100 && die "Shutdown of clvmd is too slow." test -e "$CLVMD_PIDFILE" || break sleep .1 done # wait for the pid removal aux prepare_clvmd fi aux notify_lvmetad "$dev1" "$dev2" # call resume function (see below) # with expected number of spawned # bg polling as parameter $1 1 aux enable_dev "$dev2" i=0 while get lv_field $vg name -a | grep -E "^\[?pvmove"; do # wait for 30 secs at max test $i -ge 300 && die "Pvmove is too slow or does not progress." 
sleep .1 i=$((i + 1)) done aux kill_tagged_processes lvremove -ff $vg } lvchange_single() { LVM_TEST_TAG="kill_me_$PREFIX" lvchange -aey $vg/$lv1 LVM_TEST_TAG="kill_me_$PREFIX" lvchange -aey $vg/$lv2 } lvchange_all() { LVM_TEST_TAG="kill_me_$PREFIX" lvchange -aey $vg/$lv1 $vg/$lv2 # we don't want to spawn more than $1 background pollings if test -e LOCAL_LVMPOLLD; then aux lvmpolld_dump | tee lvmpolld_dump.txt aux check_lvmpolld_init_rq_count 1 "$vg/pvmove0" || should false elif test -e HAVE_DM_DELAY; then test "$(aux count_processes_with_tag)" -eq "$1" || should false fi } vgchange_single() { LVM_TEST_TAG="kill_me_$PREFIX" vgchange -aey $vg if test -e LOCAL_LVMPOLLD; then aux lvmpolld_dump | tee lvmpolld_dump.txt aux check_lvmpolld_init_rq_count 1 "$vg/pvmove0" elif test -e HAVE_DM_DELAY; then test "$(aux count_processes_with_tag)" -eq "$1" fi } pvmove_fg() { # pvmove resume requires LVs active... LVM_TEST_TAG="kill_me_$PREFIX" vgchange --config 'activation{polling_interval=10}' -aey --poll n $vg # ...also vgchange --poll n must not spawn any bg processes if test -e LOCAL_LVMPOLLD; then aux lvmpolld_dump | tee lvmpolld_dump.txt aux check_lvmpolld_init_rq_count 0 "$vg/pvmove0" else test "$(aux count_processes_with_tag)" -eq 0 fi # ...thus finish polling get lv_field $vg name -a | grep -E "^\[?pvmove0" aux enable_dev "$dev2" pvmove } pvmove_bg() { # pvmove resume requires LVs active... LVM_TEST_TAG="kill_me_$PREFIX" vgchange --config 'activation{polling_interval=10}' -aey --poll n $vg # ...also vgchange --poll n must not spawn any bg processes if test -e LOCAL_LVMPOLLD; then aux lvmpolld_dump | tee lvmpolld_dump.txt aux check_lvmpolld_init_rq_count 0 "$vg/pvmove0" else test "$(aux count_processes_with_tag)" -eq 0 fi # ...thus finish polling get lv_field $vg name -a | grep -E "^\[?pvmove0" LVM_TEST_TAG="kill_me_$PREFIX" pvmove -b } pvmove_fg_single() { # pvmove resume requires LVs active... LVM_TEST_TAG="kill_me_$PREFIX" vgchange --config 'activation{polling_interval=10}' -aey --poll n $vg # ...also vgchange --poll n must not spawn any bg processes if test -e LOCAL_LVMPOLLD; then aux lvmpolld_dump | tee lvmpolld_dump.txt aux check_lvmpolld_init_rq_count 0 "$vg/pvmove0" else test "$(aux count_processes_with_tag)" -eq 0 fi # ...thus finish polling get lv_field $vg name -a | grep -E "^\[?pvmove0" aux enable_dev "$dev2" pvmove "$dev1" } pvmove_bg_single() { # pvmove resume requires LVs active... LVM_TEST_TAG="kill_me_$PREFIX" vgchange --config 'activation{polling_interval=10}' -aey --poll n $vg # ...also vgchange --poll n must not spawn any bg processes... if test -e LOCAL_LVMPOLLD; then aux lvmpolld_dump | tee lvmpolld_dump.txt aux check_lvmpolld_init_rq_count 0 "$vg/pvmove0" else test "$(aux count_processes_with_tag)" -eq 0 fi # ...thus finish polling get lv_field $vg name -a | grep -E "^\[?pvmove0" LVM_TEST_TAG="kill_me_$PREFIX" pvmove -b "$dev1" } test_pvmove_resume lvchange_single test_pvmove_resume lvchange_all test_pvmove_resume vgchange_single test_pvmove_resume pvmove_fg test_pvmove_resume pvmove_fg_single test_pvmove_resume pvmove_bg test_pvmove_resume pvmove_bg_single vgremove -ff $vg LVM2.2.02.176/test/shell/lvcreate-thin-cache.sh0000644000000000000120000000210613176752421017553 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Exercise caching thin-pool's data LV SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest # # Main # aux have_thin 1 0 0 || skip aux have_cache 1 3 0 || skip which mkfs.ext4 || skip aux prepare_pvs 2 64 get_devs vgcreate -s 64K "$vg" "${DEVICES[@]}" lvcreate -L10M -V10M -T $vg/pool --name $lv1 lvcreate -H -L10 $vg/pool mkfs.ext4 "$DM_DEV_DIR/$vg/$lv1" lvconvert --uncache $vg/pool fsck -n "$DM_DEV_DIR/$vg/$lv1" lvcreate -H -L10 $vg/pool_tdata fsck -n "$DM_DEV_DIR/$vg/$lv1" lvconvert --uncache $vg/pool_tdata vgremove -ff $vg LVM2.2.02.176/test/shell/pv-ext-update.sh0000644000000000000120000002435213176752421016457 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # lvmetad does not handle pool labels so skip test. SKIP_WITH_LVMPOLLD=1 . lib/inittest env printf "" || skip # skip if printf is not available # create the PV with PV ext vsn 1 and a vg create_pv_with_ext_vsn1_and_vg() { VG_NAME="LVMTEST12345pvextupdatevg" LV_NAME="lvol0" # FIXME # echo -e is bashism, dash builtin sh doesn't do \xNN in printf either # printf comes from coreutils, and is probably not posix either # PV header with PV extension version 1 env printf \ "\x4c\x41\x42\x45\x4c\x4f\x4e\x45\x01\x00\x00\x00\x00\x00\x00\x00"\ "\x78\x1c\x12\x43\x20\x00\x00\x00\x4c\x56\x4d\x32\x20\x30\x30\x31"\ "\x64\x35\x56\x33\x38\x5a\x57\x49\x63\x7a\x64\x63\x34\x38\x37\x67"\ "\x4d\x79\x46\x4b\x6c\x6d\x68\x39\x4e\x73\x34\x6f\x78\x61\x6b\x51"\ "\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00"\ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"\ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00"\ "\x00\xf0\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"\ "\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00"\ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | dd of="$1" bs=512 seek=1 conv=notrunc # MDA header env printf \ "\xd8\x36\x2c\xf6\x20\x4c\x56\x4d\x32\x20\x78\x5b\x35\x41\x25\x72"\ "\x30\x4e\x2a\x3e\x01\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00"\ "\x00\xf0\x0f\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00"\ "\x06\x04\x00\x00\x00\x00\x00\x00\x07\x3d\x06\x28\x00\x00\x00\x00"\ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | dd of="$1" bs=4096 seek=1 conv=notrunc # VG metadata env printf \ "\x4c\x56\x4d\x54\x45\x53\x54\x31\x32\x33\x34\x35\x70\x76\x65\x78"\ "\x74\x75\x70\x64\x61\x74\x65\x76\x67\x20\x7b\x0a\x69\x64\x20\x3d"\ "\x20\x22\x42\x72\x47\x6f\x34\x33\x2d\x36\x35\x62\x48\x2d\x41\x55"\ "\x6d\x43\x2d\x77\x56\x74\x71\x2d\x51\x53\x63\x66\x2d\x6b\x5a\x51"\ "\x45\x2d\x58\x51\x6e\x79\x31\x44\x22\x0a\x73\x65\x71\x6e\x6f\x20"\ "\x3d\x20\x31\x0a\x66\x6f\x72\x6d\x61\x74\x20\x3d\x20\x22\x6c\x76"\ "\x6d\x32\x22\x0a\x73\x74\x61\x74\x75\x73\x20\x3d\x20\x5b\x22\x52"\ 
"\x45\x53\x49\x5a\x45\x41\x42\x4c\x45\x22\x2c\x20\x22\x52\x45\x41"\ "\x44\x22\x2c\x20\x22\x57\x52\x49\x54\x45\x22\x5d\x0a\x66\x6c\x61"\ "\x67\x73\x20\x3d\x20\x5b\x5d\x0a\x65\x78\x74\x65\x6e\x74\x5f\x73"\ "\x69\x7a\x65\x20\x3d\x20\x38\x31\x39\x32\x0a\x6d\x61\x78\x5f\x6c"\ "\x76\x20\x3d\x20\x30\x0a\x6d\x61\x78\x5f\x70\x76\x20\x3d\x20\x30"\ "\x0a\x6d\x65\x74\x61\x64\x61\x74\x61\x5f\x63\x6f\x70\x69\x65\x73"\ "\x20\x3d\x20\x30\x0a\x0a\x70\x68\x79\x73\x69\x63\x61\x6c\x5f\x76"\ "\x6f\x6c\x75\x6d\x65\x73\x20\x7b\x0a\x0a\x70\x76\x30\x20\x7b\x0a"\ "\x69\x64\x20\x3d\x20\x22\x64\x35\x56\x33\x38\x5a\x2d\x57\x49\x63"\ "\x7a\x2d\x64\x63\x34\x38\x2d\x37\x67\x4d\x79\x2d\x46\x4b\x6c\x6d"\ "\x2d\x68\x39\x4e\x73\x2d\x34\x6f\x78\x61\x6b\x51\x22\x0a\x64\x65"\ "\x76\x69\x63\x65\x20\x3d\x20\x22\x2f\x64\x65\x76\x2f\x6c\x6f\x6f"\ "\x70\x30\x22\x0a\x0a\x73\x74\x61\x74\x75\x73\x20\x3d\x20\x5b\x22"\ "\x41\x4c\x4c\x4f\x43\x41\x54\x41\x42\x4c\x45\x22\x5d\x0a\x66\x6c"\ "\x61\x67\x73\x20\x3d\x20\x5b\x5d\x0a\x64\x65\x76\x5f\x73\x69\x7a"\ "\x65\x20\x3d\x20\x31\x36\x33\x38\x34\x0a\x70\x65\x5f\x73\x74\x61"\ "\x72\x74\x20\x3d\x20\x32\x30\x34\x38\x0a\x70\x65\x5f\x63\x6f\x75"\ "\x6e\x74\x20\x3d\x20\x31\x0a\x7d\x0a\x7d\x0a\x0a\x7d\x0a\x23\x20"\ "\x47\x65\x6e\x65\x72\x61\x74\x65\x64\x20\x62\x79\x20\x4c\x56\x4d"\ "\x32\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x32\x2e\x30\x32\x2e\x31"\ "\x34\x32\x28\x32\x29\x2d\x67\x69\x74\x20\x28\x32\x30\x31\x36\x2d"\ "\x30\x32\x2d\x31\x35\x29\x3a\x20\x57\x65\x64\x20\x4a\x75\x6c\x20"\ "\x32\x37\x20\x31\x31\x3a\x32\x35\x3a\x30\x37\x20\x32\x30\x31\x36"\ "\x0a\x0a\x63\x6f\x6e\x74\x65\x6e\x74\x73\x20\x3d\x20\x22\x54\x65"\ "\x78\x74\x20\x46\x6f\x72\x6d\x61\x74\x20\x56\x6f\x6c\x75\x6d\x65"\ "\x20\x47\x72\x6f\x75\x70\x22\x0a\x76\x65\x72\x73\x69\x6f\x6e\x20"\ "\x3d\x20\x31\x0a\x0a\x64\x65\x73\x63\x72\x69\x70\x74\x69\x6f\x6e"\ "\x20\x3d\x20\x22\x22\x0a\x0a\x63\x72\x65\x61\x74\x69\x6f\x6e\x5f"\ "\x68\x6f\x73\x74\x20\x3d\x20\x22\x66\x65\x64\x6f\x72\x61\x2e\x76"\ "\x69\x72\x74\x22\x09\x23\x20\x4c\x69\x6e\x75\x78\x20\x66\x65\x64"\ "\x6f\x72\x61\x2e\x76\x69\x72\x74\x20\x34\x2e\x36\x2e\x34\x2d\x33"\ "\x30\x31\x2e\x66\x63\x32\x34\x2e\x78\x38\x36\x5f\x36\x34\x20\x23"\ "\x31\x20\x53\x4d\x50\x20\x54\x75\x65\x20\x4a\x75\x6c\x20\x31\x32"\ "\x20\x31\x31\x3a\x35\x30\x3a\x30\x30\x20\x55\x54\x43\x20\x32\x30"\ "\x31\x36\x20\x78\x38\x36\x5f\x36\x34\x0a\x63\x72\x65\x61\x74\x69"\ "\x6f\x6e\x5f\x74\x69\x6d\x65\x20\x3d\x20\x31\x34\x36\x39\x36\x31"\ "\x31\x35\x30\x37\x09\x23\x20\x57\x65\x64\x20\x4a\x75\x6c\x20\x32"\ "\x37\x20\x31\x31\x3a\x32\x35\x3a\x30\x37\x20\x32\x30\x31\x36\x0a"\ "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"\ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | dd of="$1" bs=4608 seek=1 conv=notrunc env printf \ "\x4c\x56\x4d\x54\x45\x53\x54\x31\x32\x33\x34\x35\x70\x76\x65\x78"\ "\x74\x75\x70\x64\x61\x74\x65\x76\x67\x20\x7b\x0a\x69\x64\x20\x3d"\ "\x20\x22\x42\x72\x47\x6f\x34\x33\x2d\x36\x35\x62\x48\x2d\x41\x55"\ "\x6d\x43\x2d\x77\x56\x74\x71\x2d\x51\x53\x63\x66\x2d\x6b\x5a\x51"\ "\x45\x2d\x58\x51\x6e\x79\x31\x44\x22\x0a\x73\x65\x71\x6e\x6f\x20"\ "\x3d\x20\x32\x0a\x66\x6f\x72\x6d\x61\x74\x20\x3d\x20\x22\x6c\x76"\ "\x6d\x32\x22\x0a\x73\x74\x61\x74\x75\x73\x20\x3d\x20\x5b\x22\x52"\ "\x45\x53\x49\x5a\x45\x41\x42\x4c\x45\x22\x2c\x20\x22\x52\x45\x41"\ "\x44\x22\x2c\x20\x22\x57\x52\x49\x54\x45\x22\x5d\x0a\x66\x6c\x61"\ "\x67\x73\x20\x3d\x20\x5b\x5d\x0a\x65\x78\x74\x65\x6e\x74\x5f\x73"\ "\x69\x7a\x65\x20\x3d\x20\x38\x31\x39\x32\x0a\x6d\x61\x78\x5f\x6c"\ 
"\x76\x20\x3d\x20\x30\x0a\x6d\x61\x78\x5f\x70\x76\x20\x3d\x20\x30"\ "\x0a\x6d\x65\x74\x61\x64\x61\x74\x61\x5f\x63\x6f\x70\x69\x65\x73"\ "\x20\x3d\x20\x30\x0a\x0a\x70\x68\x79\x73\x69\x63\x61\x6c\x5f\x76"\ "\x6f\x6c\x75\x6d\x65\x73\x20\x7b\x0a\x0a\x70\x76\x30\x20\x7b\x0a"\ "\x69\x64\x20\x3d\x20\x22\x64\x35\x56\x33\x38\x5a\x2d\x57\x49\x63"\ "\x7a\x2d\x64\x63\x34\x38\x2d\x37\x67\x4d\x79\x2d\x46\x4b\x6c\x6d"\ "\x2d\x68\x39\x4e\x73\x2d\x34\x6f\x78\x61\x6b\x51\x22\x0a\x64\x65"\ "\x76\x69\x63\x65\x20\x3d\x20\x22\x2f\x64\x65\x76\x2f\x6c\x6f\x6f"\ "\x70\x30\x22\x0a\x0a\x73\x74\x61\x74\x75\x73\x20\x3d\x20\x5b\x22"\ "\x41\x4c\x4c\x4f\x43\x41\x54\x41\x42\x4c\x45\x22\x5d\x0a\x66\x6c"\ "\x61\x67\x73\x20\x3d\x20\x5b\x5d\x0a\x64\x65\x76\x5f\x73\x69\x7a"\ "\x65\x20\x3d\x20\x31\x36\x33\x38\x34\x0a\x70\x65\x5f\x73\x74\x61"\ "\x72\x74\x20\x3d\x20\x32\x30\x34\x38\x0a\x70\x65\x5f\x63\x6f\x75"\ "\x6e\x74\x20\x3d\x20\x31\x0a\x7d\x0a\x7d\x0a\x0a\x6c\x6f\x67\x69"\ "\x63\x61\x6c\x5f\x76\x6f\x6c\x75\x6d\x65\x73\x20\x7b\x0a\x0a\x6c"\ "\x76\x6f\x6c\x30\x20\x7b\x0a\x69\x64\x20\x3d\x20\x22\x46\x73\x36"\ "\x6c\x6a\x6b\x2d\x4a\x65\x5a\x35\x2d\x55\x4e\x75\x37\x2d\x32\x41"\ "\x33\x50\x2d\x76\x30\x41\x43\x2d\x64\x63\x64\x36\x2d\x32\x33\x38"\ "\x39\x4d\x76\x22\x0a\x73\x74\x61\x74\x75\x73\x20\x3d\x20\x5b\x22"\ "\x52\x45\x41\x44\x22\x2c\x20\x22\x57\x52\x49\x54\x45\x22\x2c\x20"\ "\x22\x56\x49\x53\x49\x42\x4c\x45\x22\x5d\x0a\x66\x6c\x61\x67\x73"\ "\x20\x3d\x20\x5b\x5d\x0a\x63\x72\x65\x61\x74\x69\x6f\x6e\x5f\x68"\ "\x6f\x73\x74\x20\x3d\x20\x22\x66\x65\x64\x6f\x72\x61\x2e\x76\x69"\ "\x72\x74\x22\x0a\x63\x72\x65\x61\x74\x69\x6f\x6e\x5f\x74\x69\x6d"\ "\x65\x20\x3d\x20\x31\x34\x36\x39\x36\x31\x31\x35\x31\x30\x0a\x73"\ "\x65\x67\x6d\x65\x6e\x74\x5f\x63\x6f\x75\x6e\x74\x20\x3d\x20\x31"\ "\x0a\x0a\x73\x65\x67\x6d\x65\x6e\x74\x31\x20\x7b\x0a\x73\x74\x61"\ "\x72\x74\x5f\x65\x78\x74\x65\x6e\x74\x20\x3d\x20\x30\x0a\x65\x78"\ "\x74\x65\x6e\x74\x5f\x63\x6f\x75\x6e\x74\x20\x3d\x20\x31\x0a\x0a"\ "\x74\x79\x70\x65\x20\x3d\x20\x22\x73\x74\x72\x69\x70\x65\x64\x22"\ "\x0a\x73\x74\x72\x69\x70\x65\x5f\x63\x6f\x75\x6e\x74\x20\x3d\x20"\ "\x31\x0a\x0a\x73\x74\x72\x69\x70\x65\x73\x20\x3d\x20\x5b\x0a\x22"\ "\x70\x76\x30\x22\x2c\x20\x30\x0a\x5d\x0a\x7d\x0a\x7d\x0a\x7d\x0a"\ "\x7d\x0a\x23\x20\x47\x65\x6e\x65\x72\x61\x74\x65\x64\x20\x62\x79"\ "\x20\x4c\x56\x4d\x32\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x32\x2e"\ "\x30\x32\x2e\x31\x34\x32\x28\x32\x29\x2d\x67\x69\x74\x20\x28\x32"\ "\x30\x31\x36\x2d\x30\x32\x2d\x31\x35\x29\x3a\x20\x57\x65\x64\x20"\ "\x4a\x75\x6c\x20\x32\x37\x20\x31\x31\x3a\x32\x35\x3a\x31\x30\x20"\ "\x32\x30\x31\x36\x0a\x0a\x63\x6f\x6e\x74\x65\x6e\x74\x73\x20\x3d"\ "\x20\x22\x54\x65\x78\x74\x20\x46\x6f\x72\x6d\x61\x74\x20\x56\x6f"\ "\x6c\x75\x6d\x65\x20\x47\x72\x6f\x75\x70\x22\x0a\x76\x65\x72\x73"\ "\x69\x6f\x6e\x20\x3d\x20\x31\x0a\x0a\x64\x65\x73\x63\x72\x69\x70"\ "\x74\x69\x6f\x6e\x20\x3d\x20\x22\x22\x0a\x0a\x63\x72\x65\x61\x74"\ "\x69\x6f\x6e\x5f\x68\x6f\x73\x74\x20\x3d\x20\x22\x66\x65\x64\x6f"\ "\x72\x61\x2e\x76\x69\x72\x74\x22\x09\x23\x20\x4c\x69\x6e\x75\x78"\ "\x20\x66\x65\x64\x6f\x72\x61\x2e\x76\x69\x72\x74\x20\x34\x2e\x36"\ "\x2e\x34\x2d\x33\x30\x31\x2e\x66\x63\x32\x34\x2e\x78\x38\x36\x5f"\ "\x36\x34\x20\x23\x31\x20\x53\x4d\x50\x20\x54\x75\x65\x20\x4a\x75"\ "\x6c\x20\x31\x32\x20\x31\x31\x3a\x35\x30\x3a\x30\x30\x20\x55\x54"\ "\x43\x20\x32\x30\x31\x36\x20\x78\x38\x36\x5f\x36\x34\x0a\x63\x72"\ "\x65\x61\x74\x69\x6f\x6e\x5f\x74\x69\x6d\x65\x20\x3d\x20\x31\x34"\ "\x36\x39\x36\x31\x31\x35\x31\x30\x09\x23\x20\x57\x65\x64\x20\x4a"\ 
"\x75\x6c\x20\x32\x37\x20\x31\x31\x3a\x32\x35\x3a\x31\x30\x20\x32"\ "\x30\x31\x36\x0a\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"\ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | dd of="$1" bs=5632 seek=1 conv=notrunc aux notify_lvmetad "$1" } aux prepare_devs 1 8 create_pv_with_ext_vsn1_and_vg "$dev1" # pvs doesn't update PV header because it holds only VG read lock check pv_field "$dev1" pv_ext_vsn 1 check pv_field "$dev1" pv_in_use "used" check pv_field "$dev1" vg_name "$VG_NAME" lvs "$VG_NAME"/"$LV_NAME" # an LVM command taking VG write lock will also cause PV header update to recent version vgchange --addtag test $VG_NAME check pv_field "$dev1" pv_ext_vsn 2 check pv_field "$dev1" pv_in_use "used" check pv_field "$dev1" vg_name "$VG_NAME" lvs "$VG_NAME"/"$LV_NAME" LVM2.2.02.176/test/shell/pvcreate-usage.sh0000644000000000000120000001543413176752421016670 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Test pvcreate option values' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 PAGESIZE=$(getconf PAGESIZE) # MDA_SIZE_MIN defined in lib/format_text/layout.h MDA_SIZE_MIN=$(( 8 * PAGESIZE )) . lib/inittest aux prepare_devs 4 #COMM 'pvcreate rejects negative setphysicalvolumesize' not pvcreate --setphysicalvolumesize -1024 "$dev1" #COMM 'pvcreate rejects negative metadatasize' not pvcreate --metadatasize -1024 "$dev1" #COMM 'pvcreate rejects metadatasize that is less than minimum size' not pvcreate --dataalignment $(( MDA_SIZE_MIN / 2 ))b --metadatasize $(( MDA_SIZE_MIN / 2 ))b "$dev1" 2>err grep "Metadata area size too small" err #COMM 'pvcreate accepts metadatasize that is at least the minimum size' pvcreate --dataalignment ${MDA_SIZE_MIN}b --metadatasize ${MDA_SIZE_MIN}b "$dev1" # x. metadatasize 0, defaults to 255 # FIXME: unable to check default value, not in reporting cmds # should default to 255 according to code # check pv_field pv_mda_size 255 #COMM 'pvcreate accepts metadatasize 0' pvcreate --metadatasize 0 "$dev1" pvremove "$dev1" #Verify vg_mda_size is smaller pv_mda_size pvcreate --metadatasize 512k "$dev1" pvcreate --metadatasize 96k "$dev2" vgcreate $vg "$dev1" "$dev2" pvs -o +pv_mda_size check compare_fields vgs $vg vg_mda_size pvs "$dev2" pv_mda_size vgremove $vg # x. metadatasize too large # For some reason we allow this, even though there's no room for data? 
##COMM 'pvcreate rejects metadatasize too large'
#not pvcreate --metadatasize 100000000000000 "$dev1"

#COMM 'pvcreate rejects metadatacopies < 0'
not pvcreate --metadatacopies -1 "$dev1"

#COMM 'pvcreate accepts metadatacopies = 0, 1, 2'
for j in metadatacopies pvmetadatacopies
do
    pvcreate --$j 0 "$dev1"
    pvcreate --$j 1 "$dev2"
    pvcreate --$j 2 "$dev3"
    check pv_field "$dev1" pv_mda_count 0
    check pv_field "$dev2" pv_mda_count 1
    check pv_field "$dev3" pv_mda_count 2
    pvremove "$dev1" "$dev2" "$dev3"
done

#COMM 'pvcreate rejects metadatacopies > 2'
not pvcreate --metadatacopies 3 "$dev1"

#COMM 'pvcreate rejects invalid device'
not pvcreate "$dev1"bogus

#COMM 'pvcreate rejects labelsector < 0'
not pvcreate --labelsector -1 "$dev1"

#COMM 'pvcreate rejects labelsector > 1000000000000'
not pvcreate --labelsector 1000000000000 "$dev1"

# other possibilities based on code inspection (not sure how hard)
# x. device too small (min of 512 * 1024 KB)
# x. device filtered out
# x. unable to open /dev/urandom RDONLY
# x. device too large (pe_count > UINT32_MAX)
# x. device read-only
# x. unable to open device readonly
# x. BLKGETSIZE64 fails
# x. set size to value inconsistent with device / PE size

#COMM 'pvcreate basic dataalignment sanity checks'
not pvcreate --dataalignment -1 "$dev1"
not pvcreate --dataalignment 1e "$dev1"
if test -n "$LVM_TEST_LVM1" ; then
    not pvcreate -M1 --dataalignment 1 "$dev1"
fi

#COMM 'pvcreate always rounded up to page size for start of device'
#pvcreate --metadatacopies 0 --dataalignment 1 "$dev1"
# amuse shell experts
#check pv_field "$dev1" pe_start $(($(getconf PAGESIZE)/1024))".00k"

#COMM 'pvcreate sets data offset directly'
pvcreate --dataalignment 512k "$dev1"
check pv_field "$dev1" pe_start "512.00k"

#COMM 'vgcreate/vgremove do not modify data offset of existing PV'
vgcreate $vg "$dev1" --config 'devices { data_alignment = 1024 }'
check pv_field "$dev1" pe_start "512.00k"
vgremove $vg --config 'devices { data_alignment = 1024 }'
check pv_field "$dev1" pe_start "512.00k"

#COMM 'pvcreate sets data offset next to mda area'
pvcreate --metadatasize 100k --dataalignment 100k "$dev1"
check pv_field "$dev1" pe_start "200.00k"

# metadata area start is aligned according to pagesize
case "$PAGESIZE" in
 65536) pv_align="192.50k" ;;
 8192) pv_align="136.50k" ;;
 *) pv_align="133.00k" ;;
esac
pvcreate --metadatasize 128k --dataalignment 3.5k "$dev1"
check pv_field "$dev1" pe_start $pv_align

pvcreate --metadatasize 128k --metadatacopies 2 --dataalignment 3.5k "$dev1"
check pv_field "$dev1" pe_start $pv_align

# data area is aligned to 1M by default,
# data area start is shifted by the specified alignment_offset
pv_align=1052160B # 1048576 + (7*512)
pvcreate --metadatasize 128k --dataalignmentoffset 7s "$dev1"
check pv_field "$dev1" pe_start $pv_align --units b

# 2nd metadata area is created without problems when
# data area start is shifted by the specified alignment_offset
pvcreate --metadatasize 128k --metadatacopies 2 --dataalignmentoffset 7s "$dev1"
check pv_field "$dev1" pv_mda_count 2

# FIXME: compare start of 2nd mda with and without --dataalignmentoffset

#COMM 'pv with LVM1 compatible data alignment can be converted'
#compatible == LVM1_PE_ALIGN == 64k
if test -n "$LVM_TEST_LVM1" ; then
    pvcreate --dataalignment 256k "$dev1"
    vgcreate -s 1m $vg "$dev1"
    vgconvert -M1 $vg
    vgconvert -M2 $vg
    check pv_field "$dev1" pe_start 256.00k
    vgremove $vg
fi

#COMM 'pv with LVM1 incompatible data alignment cannot be converted'
if test -n "$LVM_TEST_LVM1" ; then
    pvcreate --dataalignment 10k "$dev1"
vgcreate -s 1m $vg "$dev1" not vgconvert -M1 $vg vgremove $vg fi #COMM 'vgcfgrestore allows pe_start=0' #basically it produces nonsense, but it tests vgcfgrestore, #not that final cfg is usable... pvcreate --metadatacopies 0 "$dev1" pvcreate "$dev2" vgcreate $vg "$dev1" "$dev2" vgcfgbackup -f backup.$$ $vg sed 's/pe_start = [0-9]*/pe_start = 0/' backup.$$ > backup.$$1 vgcfgrestore -f backup.$$1 $vg check pv_field "$dev1" pe_start "0" check pv_field "$dev2" pe_start "0" vgremove $vg echo "test pvcreate --metadataignore" for pv_in_vg in 1 0; do for mdacp in 1 2; do for ignore in y n; do echo "pvcreate --metadataignore has proper mda_count and mda_used_count" pvcreate --metadatacopies $mdacp --metadataignore $ignore "$dev1" "$dev2" check pv_field "$dev1" pv_mda_count "$mdacp" check pv_field "$dev2" pv_mda_count "$mdacp" if [ $ignore = y ]; then check pv_field "$dev1" pv_mda_used_count "0" check pv_field "$dev2" pv_mda_used_count "0" else check pv_field "$dev1" pv_mda_used_count "$mdacp" check pv_field "$dev2" pv_mda_used_count "$mdacp" fi echo "vgcreate has proper vg_mda_count and vg_mda_used_count" if [ $pv_in_vg = 1 ]; then vgcreate $vg "$dev1" "$dev2" check vg_field $vg vg_mda_count $(( mdacp * 2 )) if [ $ignore = y ]; then check vg_field $vg vg_mda_used_count "1" else check vg_field $vg vg_mda_used_count "$(( mdacp * 2 ))" fi check vg_field $vg vg_mda_copies "unmanaged" vgremove $vg fi done done done LVM2.2.02.176/test/shell/aa-lvmlockd-dlm-prepare.sh0000644000000000000120000000117113176752421020346 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Set up things to run tests with dlm' . lib/inittest [ -z "$LVM_TEST_LOCK_TYPE_DLM" ] && skip; aux prepare_dlm aux prepare_lvmlockd LVM2.2.02.176/test/shell/listings.sh0000644000000000000120000001274713176752421015615 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # tests functionality of lvs, pvs, vgs, *display tools # SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux prepare_devs 5 get_devs # Check there is no PV pvscan | tee out grep "No matching" out pvcreate --uuid BADBEE-BAAD-BAAD-BAAD-BAAD-BAAD-BADBEE --norestorefile "$dev1" pvcreate --metadatacopies 0 "$dev2" pvcreate --metadatacopies 0 "$dev3" pvcreate "$dev4" pvcreate --metadatacopies 0 "$dev5" #COMM bz195276 -- pvs doesn't show PVs until a VG is created pvs --noheadings "${DEVICES[@]}" test "$(pvs --noheadings "${DEVICES[@]}" | wc -l)" -eq 5 pvdisplay #COMM pvs with segment attributes works even for orphans test "$(pvs --noheadings -o seg_all,pv_all,lv_all,vg_all "${DEVICES[@]}" | wc -l)" -eq 5 vgcreate $vg "${DEVICES[@]}" check pv_field "$dev1" pv_uuid BADBEE-BAAD-BAAD-BAAD-BAAD-BAAD-BADBEE #COMM pvs and vgs report mda_count, mda_free (bz202886, bz247444) pvs -o +pv_mda_count,pv_mda_free "${DEVICES[@]}" for I in "$dev2" "$dev3" "$dev5"; do check pv_field "$I" pv_mda_count 0 check pv_field "$I" pv_mda_free 0 done vgs -o +vg_mda_count,vg_mda_free $vg check vg_field $vg vg_mda_count 2 #COMM pvs doesn't display --metadatacopies 0 PVs as orphans (bz409061) pvdisplay "$dev2"|grep "VG Name.*$vg" check pv_field "$dev2" vg_name $vg #COMM lvs displays snapshots (bz171215) lvcreate -aey -l4 -n $lv1 $vg lvcreate -l4 -s -n $lv2 $vg/$lv1 test "$(lvs --noheadings $vg | wc -l)" -eq 2 # should lvs -a display cow && real devices? (it doesn't) test "$(lvs -a --noheadings $vg | wc -l)" -eq 2 dmsetup ls | grep "$PREFIX" | grep -v "LVMTEST.*pv." lvremove -f $vg/$lv2 #COMM lvs -a displays mirror legs and log lvcreate -aey -l2 --type mirror -m2 -n $lv3 $vg test "$(lvs --noheadings $vg | wc -l)" -eq 2 test "$(lvs -a --noheadings $vg | wc -l)" -eq 6 dmsetup ls | grep "$PREFIX" | grep -v "LVMTEST.*pv." # Check we parse /dev/mapper/vg-lv lvdisplay "$DM_DEV_DIR/mapper/$vg-$lv3" # Check we parse /dev/vg/lv lvdisplay "$DM_DEV_DIR/$vg/$lv3" lvcreate -l2 -s $vg/$lv3 lvcreate -l1 -s -n inval $vg/$lv3 lvcreate -l4 -I4 -i2 -n stripe $vg # Invalidate snapshot not dd if=/dev/zero of="$DM_DEV_DIR/$vg/inval" bs=4K invalid lvscan "$dev1" lvdisplay --maps lvscan --all #COMM vgs with options from pvs still treats arguments as VGs (bz193543) vgs -o pv_name,vg_name $vg # would complain if not vgs -o all $vg #COMM pvdisplay --maps feature (bz149814) pvdisplay "${DEVICES[@]}" >out pvdisplay --maps "${DEVICES[@]}" >out2 not diff out out2 aux disable_dev "$dev1" pvs -o +pv_uuid | grep BADBEE-BAAD-BAAD-BAAD-BAAD-BAAD-BADBEE aux enable_dev "$dev1" pvscan --uuid pvscan -e pvscan -s pvscan --novolumegroup vgscan --mknodes vgmknodes --refresh lvscan lvmdiskscan invalid pvscan "$dev1" invalid pvscan -aay invalid pvscan --major 254 invalid pvscan --minor 0 invalid pvscan --novolumegroup -e invalid vgscan $vg invalid lvscan $vg if aux have_readline; then cat <&1 | tee err grep "not supported" err # Cannot create snapshot of pool's data not lvcreate -s -L1 $vg/pool_tdata 2>&1 | tee err grep "not supported" err # Cannot use thin-type as COW not lvconvert --yes --type snapshot $vg/cow $vg/th 2>&1 | tee err grep "not accept" err not lvconvert --yes --type snapshot $vg/cow $vg/pool 2>&1 | tee err grep "not accept" err not lvconvert --yes --type snapshot $vg/cow $vg/$LVM2_LV_NAME 2>&1 | tee err grep "lv_is_visible" err not lvconvert --yes --type snapshot $vg/cow $vg/pool_tdata 2>&1 | tee err grep "lv_is_visible" err not lvconvert --yes --type snapshot $vg/cow $vg/pool_tmeta 2>&1 | tee err grep "lv_is_visible" err # Cannot use thin-pool, _tdata, _tmeta as origin not lvconvert --yes --type snapshot $vg/pool $vg/cow 2>&1 | tee err grep "not 
supported" err not lvconvert --yes --type snapshot $vg/$LVM2_LV_NAME $vg/cow 2>&1 | tee err grep "not supported" err not lvconvert --yes --type snapshot $vg/pool_tdata $vg/cow 2>&1 | tee err grep "not supported" err not lvconvert --yes --type snapshot $vg/pool_tmeta $vg/cow 2>&1 | tee err grep "not supported" err lvconvert --yes -s $vg/th $vg/cow check lv_field $vg/th segtype thin check lv_field $vg/cow segtype linear check lv_attr_bit type $vg/cow "s" check lv_attr_bit type $vg/th "o" lvs -a -o+lv_role,lv_layout $vg vgremove -f $vg LVM2.2.02.176/test/shell/lvextend-thin-cache.sh0000644000000000000120000000164313176752421017604 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Exercise resize of cached thin pool data volumes SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest aux have_thin 1 0 0 || skip aux have_cache 1 3 0 || skip aux prepare_vg 2 lvcreate -l2 -T $vg/pool # Caching of thin-pool's dataLV lvcreate -H -L10 $vg/pool # Resize is unsupported not lvextend -l+2 $vg/pool 2>&1 | tee out grep "Unable to resize" out vgremove -ff $vg LVM2.2.02.176/test/shell/dmstats-report.sh0000644000000000000120000000153313176752421016740 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMPOLLD=1 . lib/inittest # Don't attempt to test stats with driver < 4.33.00 aux driver_at_least 4 33 || skip # ensure we can create devices (uses dmsetup, etc) aux prepare_devs 1 # prepare a stats region with a histogram dmstats create --bounds 10ms,20ms,30ms "$dev1" # basic dmstats report commands dmstats report dmstats report --count 1 dmstats report --histogram LVM2.2.02.176/test/shell/lvmetad-override.sh0000644000000000000120000000272213176752421017222 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITHOUT_LVMETAD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest

aux prepare_pvs 2

vgcreate $vg1 "$dev1" "$dev2"
lvcreate -an -l1 --zero n -n $lv1 $vg1

lvchange -ay $vg1 2>&1 | tee out
not grep "WARNING: Failed to connect" out
check active $vg1 $lv1
lvchange -an $vg1
check inactive $vg1 $lv1

kill "$(< LOCAL_LVMETAD)"

lvchange -ay $vg1 2>&1 | tee out
grep "WARNING: Failed to connect" out
check active $vg1 $lv1
lvchange -an $vg1
check inactive $vg1 $lv1

lvchange -ay --config global/use_lvmetad=0 $vg1 2>&1 | tee out
# FIXME: this warning appears when the command tries to connect to
# lvmetad during refresh at the end after the --config is cleared.
should not grep "WARNING: Failed to connect" out
check active $vg1 $lv1
lvchange -an $vg1
check inactive $vg1 $lv1

aux lvmconf "global/use_lvmetad = 0"

lvchange -ay --config global/use_lvmetad=1 $vg1 2>&1 | tee out
grep "WARNING: Failed to connect" out
check active $vg1 $lv1
lvchange -an $vg1
check inactive $vg1 $lv1

vgremove -ff $vg1
LVM2.2.02.176/test/shell/profiles-thin.sh0000644000000000000120000000527313176752421016540 0ustar rootwheel#!/usr/bin/env bash
# Copyright (C) 2014 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

#
# test thin profile functionality
#

SKIP_WITH_LVMLOCKD=1
SKIP_WITH_LVMPOLLD=1

export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false}

. lib/inittest

DEV_SIZE=32

# check we have thinp support compiled in
aux have_thin 1 0 0 || skip

aux prepare_profiles "thin-performance"

# Create scsi debug dev with sector size of 4096B and 1MiB optimal_io_size
aux prepare_scsi_debug_dev $DEV_SIZE sector_size=4096 opt_blks=256 || skip
EXPECT=1048576
check sysfs "$(< SCSI_DEBUG_DEV)" queue/optimal_io_size "$EXPECT"
aux prepare_pvs 1 "$DEV_SIZE"

# Check we are not running on buggy kernel (broken lcm())
# If so, turn chunk_size test into 'should'
SHOULD=""
check sysfs "$dev1" queue/optimal_io_size "$EXPECT" || SHOULD=should

vgcreate $vg "$dev1"

# By default, "generic" policy is used to
# calculate chunk size which is 64KiB by default
# or minimum_io_size if it's higher. Also, zeroing is used
# under default operation.
lvcreate -L8m -T $vg/pool_generic
check lv_field $vg/pool_generic profile ""
check lv_field $vg/pool_generic chunk_size 64.00k
check lv_field $vg/pool_generic zero "zero"

# If "thin-performance" profile is used, the "performance"
# policy is used to calculate chunk size which is 512KiB
# or optimal_io_size if it's higher. Our test device has
# 1MiB, so that should be used. Also, zeroing is not used
# under "thin-performance" profile.
lvcreate --profile thin-performance -L8m -T $vg/pool_performance
check lv_field $vg/pool_performance profile "thin-performance"
$SHOULD check lv_field $vg/pool_performance chunk_size 1.00m
check lv_field $vg/pool_performance zero ""

vgremove -ff $vg

if test -d "$DM_DEV_DIR/$vg" ; then
    should not echo "Udev has left \"$DM_DEV_DIR/$vg\"!"
    rm -rf "${DM_DEV_DIR:?/dev}/$vg"
fi

# The profile must also be applied if using the profile
# for the whole VG - any LVs inherit this profile then.
vgcreate --profile thin-performance $vg "$dev1"
lvcreate -L8m -T $vg/pool_performance_inherited
# ...the LV does not have the profile attached, but VG does!
check vg_field $vg profile "thin-performance" check lv_field $vg/pool_performance_inherited profile "" $SHOULD check lv_field $vg/pool_performance_inherited chunk_size 1.00m check lv_field $vg/pool_performance_inherited zero "" vgremove -ff $vg LVM2.2.02.176/test/shell/lvcreate-cache-raid.sh0000644000000000000120000000220013176752421017523 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Exercise creation of cache and raids SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux have_cache 1 3 0 || skip aux have_raid 1 0 0 || skip # FIXME: parallel cache metadata allocator is crashing when used value 8000! aux prepare_vg 5 80000 aux lvmconf 'global/cache_disabled_features = [ "policy_smq" ]' # Bug 1110026 & Bug 1095843 # Create RAID1 origin, then cache pool and cache lvcreate -aey -l 2 --type raid1 -m1 -n $lv2 $vg lvcreate --cache -l 1 $vg/$lv2 check lv_exists $vg/${lv2}_corig_rimage_0 # ensure images are properly renamed check active $vg ${lv2}_corig dmsetup table ${vg}-$lv2 | grep cache # ensure it is loaded in kernel vgremove -ff $vg LVM2.2.02.176/test/shell/pvresize-mdas.sh0000644000000000000120000000151113176752421016535 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 1 8 pvcreate --setphysicalvolumesize 8m --metadatacopies 2 "$dev1" check pv_field "$dev1" pv_size 8.00m check pv_field "$dev1" pv_mda_count 2 pvs "$dev1" pvresize --setphysicalvolumesize 4m -y "$dev1" check pv_field "$dev1" pv_size 4.00m check pv_field "$dev1" pv_mda_count 2 pvs "$dev1" LVM2.2.02.176/test/shell/lvconvert-snapshot-cache.sh0000644000000000000120000000420513176752421020667 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test various supported conversion of snapshot with cache SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . 
lib/inittest aux have_cache 1 3 0 || skip aux prepare_vg 1 vgchange -s 16k $vg lvcreate -L1 -n cow $vg # Thin and snapshot conversion lvcreate -aey -L1 -n ch $vg lvcreate -H -L1 -n cpool $vg/ch # Cannot create snapshot of cpool not lvcreate -s -L1 $vg/cpool 2>&1 | tee err grep "not supported" err # Cannot create snapshot of cpool's meta not lvcreate -s -L1 $vg/cpool_cmeta 2>&1 | tee err grep "not supported" err # Cannot create snapshot of cpool's data not lvcreate -s -L1 $vg/cpool_cdata 2>&1 | tee err grep "not supported" err # Cannot use cache-type as COW not lvconvert --yes --type snapshot $vg/cow $vg/ch 2>&1 | tee err grep "not accept" err not lvconvert --yes --type snapshot $vg/cow $vg/cpool 2>&1 | tee err grep "not accept" err not lvconvert --yes --type snapshot $vg/cow $vg/cpool_cdata 2>&1 | tee err grep "lv_is_visible" err not lvconvert --yes --type snapshot $vg/cow $vg/cpool_cmeta 2>&1 | tee err grep "lv_is_visible" err # Cannot use thin-pool, _tdata, _tmeta as origin not lvconvert --yes --type snapshot $vg/cpool $vg/cow 2>&1 | tee err grep "not supported" err not lvconvert --yes --type snapshot $vg/cpool_cdata $vg/cow 2>&1 | tee err grep "not supported" err not lvconvert --yes --type snapshot $vg/cpool_cmeta $vg/cow 2>&1 | tee err grep "not supported" err lvconvert --yes -s $vg/ch $vg/cow check lv_field $vg/ch segtype cache check lv_field $vg/cow segtype linear check lv_attr_bit type $vg/cow "s" check lv_attr_bit type $vg/ch "o" lvs -a -o+lv_role,lv_layout $vg vgremove -f $vg LVM2.2.02.176/test/shell/report-fields.sh0000644000000000000120000000462013176752421016527 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMETAD=1 SKIP_WITH_LVMPOLLD=1 SKIP_WITH_CLVMD=1 . lib/inittest # Test only that there's correct set of fields displayed on output. aux prepare_pvs 1 OPTS="--nameprefixes --noheadings --rows" aux lvmconf 'report/pvs_cols="pv_name,pv_size"' aux lvmconf 'report/compact_output=0' aux lvmconf 'report/compact_output_cols=""' pvs $OPTS > out grep LVM2_PV_NAME out grep LVM2_PV_SIZE out pvs $OPTS -o pv_attr > out grep LVM2_PV_ATTR out not grep -v LVM2_PV_ATTR out pvs $OPTS -o+pv_attr > out grep LVM2_PV_NAME out grep LVM2_PV_SIZE out grep LVM2_PV_ATTR out pvs $OPTS -o-pv_name > out not grep LVM2_PV_NAME out grep LVM2_PV_SIZE out pvs $OPTS -o+pv_attr -o-pv_attr > out grep LVM2_PV_NAME out grep LVM2_PV_SIZE out not grep LVM2_PV_ATTR out pvs $OPTS -o-pv_attr -o+pv_attr > out grep LVM2_PV_NAME out grep LVM2_PV_SIZE out grep LVM2_PV_ATTR out pvs $OPTS -o+pv_attr -o-pv_attr -o pv_attr > out grep LVM2_PV_ATTR out not grep -v LVM2_PV_ATTR out # -o-size is the same as -o-pv_size - the prefix is recognized pvs $OPTS -o-size > out not grep LVM2_PV_SIZE out # PV does not have tags nor is it exported if we haven't done that explicitly. # Check compaction per field is done correctly. 
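# Quick reference for the -o modifiers exercised below (illustrative only):
#   -o <fields>   report exactly this field list
#   -o+<field>    append a field to the current list
#   -o-<field>    remove a field from the current list
#   -o#<field>    compact (hide) the field when it carries no data
# e.g. "pvs --nameprefixes -o pv_name,pv_tags -o#pv_tags" would drop the
# empty tags column from the output.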
pvs $OPTS -o pv_name,pv_exported,pv_tags -o#pv_tags > out grep LVM2_PV_NAME out grep LVM2_PV_EXPORTED out not grep LVM2_PV_TAGS out aux lvmconf 'report/compact_output_cols="pv_tags"' pvs $OPTS -o pv_name,pv_exported,pv_tags > out grep LVM2_PV_NAME out grep LVM2_PV_EXPORTED out not grep LVM2_PV_TAGS out pvs $OPTS -o pv_name,pv_exported,pv_tags -o#pv_exported > out grep LVM2_PV_NAME out not grep LVM2_PV_EXPORTED out grep LVM2_PV_TAGS out aux lvmconf 'report/compact_output=1' pvs $OPTS -o pv_name,pv_exported,pv_tags > out grep LVM2_PV_NAME out not grep LVM2_PV_EXPORTED out not grep LVM2_PV_TAGS out pvs $OPTS -o pv_name,pv_exported,pv_tags -o#pv_exported > out grep LVM2_PV_NAME out not grep LVM2_PV_EXPORTED out not grep LVM2_PV_TAGS out LVM2.2.02.176/test/shell/lvconvert-repair-snapshot.sh0000644000000000000120000000207013176752421021104 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2011 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 5 aux lvmconf 'allocation/maximise_cling = 0' \ 'allocation/mirror_logs_require_separate_pvs = 1' lvcreate -aey --type mirror -m 3 --ignoremonitoring -L 2M -n 4way $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5":0 lvcreate -s $vg/4way -L 2M -n snap aux disable_dev "$dev2" "$dev4" echo n | lvconvert --repair $vg/4way 2>&1 | tee 4way.out lvs -a -o +devices $vg | not grep unknown vgreduce --removemissing $vg aux enable_dev "$dev2" "$dev4" lvs -a -o +devices $vg check mirror $vg 4way "$dev5" vgchange -an $vg vgremove -ff $vg LVM2.2.02.176/test/shell/lvchange-raid10.sh0000644000000000000120000000116113176752421016612 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA TEST_RAID=raid10 . shell/lvchange-raid.sh aux have_raid 1 5 2 || skip run_types raid10 -m 1 -i 2 "$dev1" "$dev2" "$dev3" "$dev4" vgremove -ff $vg LVM2.2.02.176/test/shell/lock-blocking.sh0000644000000000000120000000306313176752421016466 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='test some blocking / non-blocking multi-vg operations' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_CLVMD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux prepare_devs 3 pvcreate "$dev1" "$dev2" vgcreate $vg "$dev1" "$dev2" # if wait_for_locks set, vgremove should wait for orphan lock # flock process should have exited by the time first vgremove completes flock -w 5 "$TESTDIR/var/lock/lvm/P_orphans" sleep 10 & while ! test -f "$TESTDIR/var/lock/lvm/P_orphans" ; do sleep .1 ; done vgremove --config 'global { wait_for_locks = 1 }' $vg not vgremove --config 'global { wait_for_locks = 1 }' $vg test ! -f "$TESTDIR/var/lock/lvm/P_orphans" # if wait_for_locks not set, vgremove should fail on non-blocking lock # we must wait for flock process at the end - vgremove won't wait vgcreate $vg "$dev1" "$dev2" flock -w 5 "$TESTDIR/var/lock/lvm/P_orphans" sleep 10 & while ! test -f "$TESTDIR/var/lock/lvm/P_orphans" ; do sleep .1 ; done flock_pid=$(jobs -p) not vgremove --config 'global { wait_for_locks = 0 }' $vg test -f "$TESTDIR/var/lock/lvm/P_orphans" # still running kill "$flock_pid" vgremove -ff $vg LVM2.2.02.176/test/shell/lvresize-mirror.sh0000644000000000000120000000237713176752421017132 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 5 for deactivate in true false; do # extend 2-way mirror lvcreate -aye -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev3":0-1 test $deactivate && lvchange -an $vg/$lv1 lvextend -l+2 $vg/$lv1 check mirror $vg $lv1 "$dev3" check mirror_images_contiguous $vg $lv1 # reduce 2-way mirror lvreduce -f -l-2 $vg/$lv1 check mirror $vg $lv1 "$dev3" # extend 2-way mirror (cling if not contiguous) lvcreate -aye -l2 --type mirror -m1 -n $lv2 $vg "$dev1" "$dev2" "$dev3":0-1 lvcreate -l1 -n $lv3 $vg "$dev1" lvcreate -l1 -n $lv4 $vg "$dev2" test $deactivate && lvchange -an $vg/$lv2 lvextend -l+2 $vg/$lv2 check mirror $vg $lv2 "$dev3" check mirror_images_clung $vg $lv2 lvremove -ff $vg done vgremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-cache-snapshot.sh0000644000000000000120000000332113176752421020665 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test various supported conversion of snapshot of cached volume SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux have_cache 1 3 0 || skip aux prepare_vg 1 # Prepare cached LV lvcreate -aey -L1 -n $lv1 $vg lvcreate -H -L2 -n cpool $vg/$lv1 # Prepare snapshot 'cow' LV lvcreate -L3 -n cow $vg # Can't use 'cached' cow volume not lvconvert -s cow $vg/$lv1 # Use cached LV with 'striped' cow volume lvconvert -y -s $vg/$lv1 cow check lv_field $vg/cow segtype linear check lv_field $vg/$lv1 segtype cache # Drop cache while being in-use origin lvconvert --splitcache $vg/$lv1 check lv_field $vg/$lv1 segtype linear # Cache existing origin lvconvert -y --cache $vg/$lv1 --cachepool $vg/cpool check lv_field $vg/$lv1 segtype cache # Cannot split from 'origin' (being cached LV) not lvconvert -y --splitsnapshot $vg/$lv1 lvchange --cachemode writeback $vg/$lv1 check lv_field $vg/$lv1 cache_mode "writeback" check grep_dmsetup status ${vg}-${lv1}-real "writeback" lvchange --cachemode writethrough $vg/$lv1 check lv_field $vg/$lv1 cache_mode "writethrough" check grep_dmsetup status ${vg}-${lv1}-real "writethrough" # Split 'cow' from cached origin lvconvert -y --splitsnapshot $vg/cow get lv_field $vg/cow attr | grep "^-wi" vgremove -f $vg LVM2.2.02.176/test/shell/pvcreate-ff.sh0000644000000000000120000000120013176752421016141 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 2 pvcreate "$dev1" vgcreate foo "$dev1" pvcreate -ff -y "$dev1" vgs vgcreate foo "$dev1" LVM2.2.02.176/test/shell/vgchange-maxlv.sh0000644000000000000120000000174713176752421016666 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_dmeventd aux prepare_pvs 3 get_devs vgcreate -l 2 "$vg" "${DEVICES[@]}" lvcreate -aey -n one -l 1 $vg lvcreate -n two -l 1 $vg not lvcreate -n three -l 1 $vg vgremove -ff $vg vgcreate -l 3 "$vg" "${DEVICES[@]}" lvcreate -aey -n one -l 1 $vg lvcreate -n snap -s -l 1 $vg/one lvcreate -n two -l 1 $vg not lvcreate -n three -l 1 $vg vgchange --monitor y $vg vgchange -an $vg 2>&1 | tee vgchange.out not grep "event server" vgchange.out vgremove -ff $vg LVM2.2.02.176/test/shell/lvchange-raid-transient-failures.sh0000644000000000000120000000341013176752421022265 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux have_raid 1 10 1 || skip aux prepare_vg 6 # # FIXME: add multi-segment leg tests # function _check_raid { local vg=$1 shift local lv=$1 shift local fail=$1 shift local good=$1 shift local devs=( "$@" ) aux wait_for_sync $vg $lv aux disable_dev --error --silent "${devs[@]}" mkfs.ext4 "$DM_DEV_DIR/$vg/$lv" fsck.ext4 -fn "$DM_DEV_DIR/$vg/$lv" check raid_leg_status $vg $lv "$fail" aux enable_dev --silent "${devs[@]}" lvs -a -o +devices $vg | tee out not grep unknown out lvchange --refresh $vg/$lv fsck.ext4 -fn "$DM_DEV_DIR/$vg/$lv" aux wait_for_sync $vg $lv fsck.ext4 -fn "$DM_DEV_DIR/$vg/$lv" check raid_leg_status $vg $lv "$good" } # raid1 with transiently failing devices lv=4way lvcreate -aey --type raid1 -m 3 --ignoremonitoring -L 1 -n $lv $vg _check_raid $vg $lv "ADAD" "AAAA" "$dev2" "$dev4" lvremove -y $vg/$lv # raid6 with transiently failing devices lv=6way lvcreate -aey --type raid6 -i 4 --ignoremonitoring -L 1 -n $lv $vg _check_raid $vg $lv "ADADAA" "AAAAAA" "$dev2" "$dev4" lvremove -y $vg/$lv # raid10 with transiently failing devices lv=6way lvcreate -aey --type raid10 -i 3 -m 1 --ignoremonitoring -L 1 -n $lv $vg _check_raid $vg $lv "ADADDA" "AAAAAA" "$dev2" "$dev4" "$dev5" lvremove -y $vg/$lv vgremove -f $vg LVM2.2.02.176/test/shell/lvmetad-pvscan-filter.sh0000644000000000000120000000330613176752421020157 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITHOUT_LVMETAD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 2 maj=$(($(stat -L --printf=0x%t "$dev2"))) min=$(($(stat -L --printf=0x%T "$dev2"))) # Filter out device, pvscan should trigger # clearing of the device from lvmetad cache. # We can't use aux hide_dev here because that # changes the global_filter which triggers a # token mismatch rescan by subsequent pvscan # commands instead of the single-dev scans # that are testing here. mv "$dev2" "$dev2-HIDDEN" pvscan --cache "$dev2" 2>&1 | tee out || true grep "not found" out # pvscan with --major/--minor does not fail: lvmetad needs to # be notified about device removal on REMOVE uevent, hence # this should not fail so udev does not grab a "failed" state # incorrectly. We notify device addition and removal with # exactly the same command "pvscan --cache" - in case of removal, # this is detected by nonexistence of the device itself. pvscan --cache --major $maj --minor $min 2>&1 | tee out || true grep "not found" out # aux unhide_dev "$dev2" mv "$dev2-HIDDEN" "$dev2" pvscan --cache "$dev2" 2>&1 | tee out || true not grep "not found" out pvscan --cache --major $maj --minor $min 2>&1 | tee out || true not grep "not found" out pvs | grep "$dev2" LVM2.2.02.176/test/shell/pvcreate-bootloaderarea.sh0000644000000000000120000000452213176752421020543 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Test pvcreate bootloader area support' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 1 aux lvmconf 'global/suffix=0' 'global/units="b"' #COMM 'pvcreate sets/aligns bootloader area correctly' pvcreate --dataalignment 262144b --bootloaderareasize 614400b "$dev1" # ba_start must be aligned based on dataalignment # pe_start starts at next dataalignment multiple # ba_size is the whole space in between ba_start and pe_start check pv_field "$dev1" ba_start "262144" check pv_field "$dev1" ba_size "786432" check pv_field "$dev1" pe_start "1048576" #COMM 'pvcreate with booloader area size - test corner cases' dev_size=$(pvs -o pv_size --noheadings "$dev1") pv_size=$(( dev_size - 1048576 )) # device size - 1m pe_start = area for data # try to use the whole data area for bootloader area, remaining data area is zero then (pe_start = pv_size) pvcreate --bootloaderareasize ${pv_size}b --dataalignment 1048576b "$dev1" check pv_field "$dev1" pe_start $dev_size check pv_field "$dev1" ba_start 1048576 check pv_field "$dev1" ba_size $pv_size # try to use the whole data area for bootloader area only and add one more byte - this must error out not pvcreate --bootloaderareasize $(( pv_size + 1 )) --dataalignment 1048576b "$dev1" 2>err grep "Bootloader area with data-aligned start must not exceed device size" err # restoring the PV should also restore the bootloader area correctly pvremove -ff "$dev1" pvcreate --dataalignment 256k --bootloaderareasize 600k "$dev1" vgcreate $vg "$dev1" vgcfgbackup -f "$TESTDIR/vg_with_ba_backup" "$vg" pv_uuid=$(get pv_field "$dev1" pv_uuid) vgremove -ff $vg pvremove -ff "$dev1" pvcreate --dataalignment 256k --restorefile "$TESTDIR/vg_with_ba_backup" --uuid "$pv_uuid" "$dev1" check pv_field "$dev1" ba_start "262144" check pv_field "$dev1" ba_size "786432" check pv_field "$dev1" pe_start "1048576" pvremove -ff "$dev1" LVM2.2.02.176/test/shell/lvconvert-thin-external-cache.sh0000644000000000000120000000504113176752421021611 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test conversion cached LV to thin with cached external origin SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . 
lib/inittest which mkfs.ext2 || skip which fsck || skip # # Main # aux have_thin 1 5 0 || skip aux have_cache 1 7 0 || skip aux prepare_vg 2 64 # Test will use thin-pool lvcreate -L10 -T $vg/tpool lvcreate -aey -L20 -n $lv1 $vg mkfs.ext2 "$DM_DEV_DIR/$vg/$lv1" mkdir mnt mount "$DM_DEV_DIR/$vg/$lv1" mnt touch mnt/test # Prepared cached LV - first in 'writeback' mode lvcreate -H --cachemode writeback -L10 -n cpool $vg/$lv1 # Can't convert 'writeback' cache not lvconvert --thin --thinpool $vg/tpool $vg/$lv1 # Switch to 'writethrough' - this should be supported lvchange --cachemode writethrough $vg/$lv1 # Check $lv1 remains mounted (so it's not been unmounted by systemd) not mount "$DM_DEV_DIR/$vg/$lv1" mnt lvconvert --thin $vg/$lv1 --originname extorg --thinpool $vg/tpool # check cache exist as extorg-real check grep_dmsetup table ${vg}-extorg-real "cache" # Split cache from external origin (while in-use) lvconvert --splitcache $vg/extorg # check linear exist as extorg-real check grep_dmsetup table ${vg}-extorg-real "linear" check lv_field $vg/extorg segtype linear # Cache external origin in-use again lvconvert -y -H $vg/extorg --cachepool $vg/cpool get lv_field $vg/extorg attr | grep "^ori" umount mnt # Is filesystem still ok ? fsck -n "$DM_DEV_DIR/$vg/$lv1" lvchange -an $vg lvchange -ay $vg # Remove thin, external origin remains lvremove -f $vg/$lv1 #lvchange -prw $vg/extorg lvconvert --uncache $vg/extorg lvremove -f $vg # # Check some more API variants # lvcreate -L10 -n pool $vg lvcreate -aey -L2 -n $lv1 $vg lvcreate -H -L2 $vg/$lv1 # Converts $vg/pool to thin-pool AND $vg/$lv1 to thin lvconvert -y --type thin $vg/$lv1 --originname extorg --thinpool $vg/pool check lv_field $vg/$lv1 segtype thin check lv_field $vg/pool segtype thin-pool check lv_field $vg/extorg segtype cache lvconvert --uncache $vg/extorg check lv_field $vg/extorg segtype linear vgremove -ff $vg LVM2.2.02.176/test/shell/pvmove-basic.sh0000644000000000000120000002505313176752421016346 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved. # Copyright (C) 2007 NEC Corporation # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description="ensure that pvmove works with basic options" SKIP_WITH_LVMLOCKD=1 # disable lvmetad logging as it bogs down test systems export LVM_TEST_LVMETAD_DEBUG_OPTS=${LVM_TEST_LVMETAD_DEBUG_OPTS-} . 
lib/inittest which md5sum || skip # --------------------------------------------------------------------- # Utilities create_vg_() { vgcreate -c n -s 128k "$vg" "${DEVICES[@]}" } # --------------------------------------------------------------------- # Common environment setup/cleanup for each sub testcases prepare_lvs_() { lvcreate -l2 -n $lv1 $vg "$dev1" check lv_on $vg $lv1 "$dev1" lvcreate -l9 -i3 -n $lv2 $vg "$dev2" "$dev3" "$dev4" check lv_on $vg $lv2 "$dev2" "$dev3" "$dev4" lvextend -l+2 $vg/$lv1 "$dev2" check lv_on $vg $lv1 "$dev1" "$dev2" lvextend -l+2 $vg/$lv1 "$dev3" lvextend -l+2 $vg/$lv1 "$dev1" check lv_on $vg $lv1 "$dev1" "$dev2" "$dev3" lvcreate -l1 -n $lv3 $vg "$dev2" check lv_on $vg $lv3 "$dev2" aux mkdev_md5sum $vg $lv1 aux mkdev_md5sum $vg $lv2 aux mkdev_md5sum $vg $lv3 get lv_devices "$vg/$lv1" > "${lv1}_devs" get lv_devices "$vg/$lv2" > "${lv2}_devs" get lv_devices "$vg/$lv3" > "${lv3}_devs" lvs -a -o name,size,seg_pe_ranges $vg vgcfgbackup -f bak-$$ $vg } # Restore metadata content, since data are pvmove-ed # original content should be preserved restore_lvs_() { vgcfgrestore -f bak-$$ $vg vgchange -ay $vg } lvs_not_changed_() { for i in "${@}"; do get lv_devices "$vg/$i" | tee out diff "${i}_devs" out || \ (cat "${i}_devs"; die "Devices for LV $vg/$i differs!") done } check_and_cleanup_lvs_() { check dev_md5sum $vg $lv1 check dev_md5sum $vg $lv2 check dev_md5sum $vg $lv3 get lv_field $vg name -a >out not grep "^\[pvmove" out vgchange -an $vg lvremove -ff $vg (dm_table | not grep $vg) || \ die "ERROR: lvremove did leave some mappings in DM behind!" } # --------------------------------------------------------------------- # Initialize PVs and VGs aux prepare_pvs 5 5 get_devs create_vg_ for mode in "--atomic" "" do #COMM "check environment setup/cleanup" prepare_lvs_ check_and_cleanup_lvs_ # --------------------------------------------------------------------- # pvmove tests # --- # filter by LV #COMM "only specified LV is moved: from pv2 to pv5 only for lv1" restore_lvs_ pvmove $mode -i1 -n $vg/$lv1 "$dev2" "$dev5" check lv_on $vg $lv1 "$dev1" "$dev5" "$dev3" lvs_not_changed_ $lv2 $lv3 check_and_cleanup_lvs_ # --- # segments in a LV #COMM "the 1st seg of 3-segs LV is moved: from pv1 of lv1 to pv4" restore_lvs_ pvmove $mode -i0 -n $vg/$lv1 "$dev1" "$dev4" check lv_on $vg $lv1 "$dev4" "$dev2" "$dev3" lvs_not_changed_ $lv2 $lv3 check_and_cleanup_lvs_ #COMM "the 2nd seg of 3-segs LV is moved: from pv2 of lv1 to pv4" restore_lvs_ pvmove $mode -i0 -n $vg/$lv1 "$dev2" "$dev4" check lv_on $vg $lv1 "$dev1" "$dev4" "$dev3" lvs_not_changed_ $lv2 $lv3 check_and_cleanup_lvs_ #COMM "the 3rd seg of 3-segs LV is moved: from pv3 of lv1 to pv4" restore_lvs_ pvmove $mode -i0 -n $vg/$lv1 "$dev3" "$dev4" check lv_on $vg $lv1 "$dev1" "$dev2" "$dev4" lvs_not_changed_ $lv2 $lv3 check_and_cleanup_lvs_ # --- # multiple LVs matching #COMM "1 out of 3 LVs is moved: from pv4 to pv5" restore_lvs_ pvmove $mode -i0 "$dev4" "$dev5" check lv_on $vg $lv2 "$dev2" "$dev3" "$dev5" lvs_not_changed_ $lv1 $lv3 check_and_cleanup_lvs_ #COMM "2 out of 3 LVs are moved: from pv3 to pv5" restore_lvs_ pvmove $mode -i0 "$dev3" "$dev5" check lv_on $vg $lv1 "$dev1" "$dev2" "$dev5" check lv_on $vg $lv2 "$dev2" "$dev5" "$dev4" lvs_not_changed_ $lv3 check_and_cleanup_lvs_ #COMM "3 out of 3 LVs are moved: from pv2 to pv5" restore_lvs_ pvmove $mode -i0 "$dev2" "$dev5" check lv_on $vg $lv1 "$dev1" "$dev5" "$dev3" check lv_on $vg $lv2 "$dev5" "$dev3" "$dev4" check lv_on $vg $lv3 "$dev5" check_and_cleanup_lvs_ # --- # areas of 
striping #COMM "move the 1st stripe: from pv2 of lv2 to pv1" restore_lvs_ pvmove $mode -i0 -n $vg/$lv2 "$dev2" "$dev1" check lv_on $vg $lv2 "$dev1" "$dev3" "$dev4" lvs_not_changed_ $lv1 $lv3 check_and_cleanup_lvs_ #COMM "move the 2nd stripe: from pv3 of lv2 to pv1" restore_lvs_ pvmove $mode -i0 -n $vg/$lv2 "$dev3" "$dev1" check lv_on $vg $lv2 "$dev2" "$dev1" "$dev4" lvs_not_changed_ $lv1 $lv3 check_and_cleanup_lvs_ #COMM "move the 3rd stripe: from pv4 of lv2 to pv1" restore_lvs_ pvmove $mode -i0 -n $vg/$lv2 "$dev4" "$dev1" check lv_on $vg $lv2 "$dev2" "$dev3" "$dev1" lvs_not_changed_ $lv1 $lv3 check_and_cleanup_lvs_ # --- # partial segment match (source segment splitted) #COMM "match to the start of segment:from pv2:0-0 to pv5" restore_lvs_ pvmove $mode -i0 "$dev2":0-0 "$dev5" check lv_on $vg $lv2 "$dev5" "$dev2" "$dev3" "$dev4" lvs_not_changed_ $lv1 $lv3 check_and_cleanup_lvs_ #exit 0 #COMM "match to the middle of segment: from pv2:1-1 to pv5" restore_lvs_ pvmove $mode -i0 "$dev2":1-1 "$dev5" check lv_on $vg $lv2 "$dev2" "$dev3" "$dev4" "$dev5" lvs_not_changed_ $lv1 $lv3 check_and_cleanup_lvs_ #COMM "match to the end of segment: from pv2:2-2 to pv5" restore_lvs_ pvmove $mode -i0 "$dev2":2-2 "$dev5" check lv_on $vg $lv2 "$dev2" "$dev5" "$dev3" "$dev4" lvs_not_changed_ $lv1 $lv3 check_and_cleanup_lvs_ # --- # destination segment splitted #COMM "no destination split: from pv2:0-2 to pv5" restore_lvs_ pvmove $mode -i0 "$dev2":0-2 "$dev5" check lv_on $vg $lv2 "$dev5" "$dev3" "$dev4" lvs_not_changed_ $lv1 $lv3 check_and_cleanup_lvs_ #COMM "destination split into 2: from pv2:0-2 to pv5:5-5 and pv4:5-6" restore_lvs_ pvmove $mode -i0 --alloc anywhere "$dev2":0-2 "$dev5":5-5 "$dev4":5-6 check lv_on $vg $lv2 "$dev5" "$dev4" "$dev3" lvs_not_changed_ $lv1 $lv3 check_and_cleanup_lvs_ #COMM "destination split into 3: from pv2:0-2 to {pv3,4,5}:5-5" restore_lvs_ pvmove $mode -i0 --alloc anywhere "$dev2":0-2 "$dev3":5-5 "$dev4":5-5 "$dev5":5-5 check lv_on $vg $lv2 "$dev3" "$dev4" "$dev5" lvs_not_changed_ $lv1 $lv3 check_and_cleanup_lvs_ # --- # alloc policy (anywhere, contiguous) with both success and failure cases #COMM "alloc normal on same PV for source and destination: from pv3:0-2 to pv3:5-7" restore_lvs_ not pvmove $mode -i0 "$dev3":0-2 "$dev3":5-7 # "(cleanup previous test)" lvs_not_changed_ $lv1 $lv2 $lv3 check_and_cleanup_lvs_ #COMM "alloc anywhere on same PV for source and destination: from pv3:0-2 to pv3:5-7" restore_lvs_ pvmove $mode -i0 --alloc anywhere "$dev3":0-2 "$dev3":5-7 check lv_on $vg $lv2 "$dev2" "$dev3" "$dev4" lvs_not_changed_ $lv1 $lv3 check_and_cleanup_lvs_ #COMM "alloc anywhere but better area available: from pv3:0-2 to pv3:5-7 or pv5:5-6,pv4:5-5" restore_lvs_ #lvs -a -o name,size,seg_pe_ranges $vg #LV2 1.12m @TESTDIR@/dev/mapper/@PREFIX@pv2:0-2 @TESTDIR@/dev/mapper/@PREFIX@pv3:0-2 @TESTDIR@/dev/mapper/@PREFIX@pv4:0-2 pvmove $mode -i0 --alloc anywhere "$dev3":0-2 "$dev3":5-7 "$dev5":5-6 "$dev4":5-5 #lvs -a -o name,size,seg_pe_ranges $vg # Hmm is this correct ? 
- why pv2 is split #LV2 1.12m @TESTDIR@/dev/mapper/@PREFIX@pv2:0-1 @TESTDIR@/dev/mapper/@PREFIX@pv5:5-6 @TESTDIR@/dev/mapper/@PREFIX@pv4:0-1 #LV2 1.12m @TESTDIR@/dev/mapper/@PREFIX@pv2:2-2 @TESTDIR@/dev/mapper/@PREFIX@pv3:5-5 @TESTDIR@/dev/mapper/@PREFIX@pv4:2-2 check lv_on $vg $lv2 "$dev2" "$dev3" "$dev4" "$dev5" lvs_not_changed_ $lv1 $lv3 check_and_cleanup_lvs_ #COMM "alloc contiguous but area not available: from pv2:0-2 to pv5:5-5 and pv4:5-6" restore_lvs_ not pvmove $mode -i0 --alloc contiguous "$dev2":0-2 "$dev5":5-5 "$dev4":5-6 # "(cleanup previous test)" lvs_not_changed_ $lv1 $lv2 $lv3 check_and_cleanup_lvs_ #COMM "alloc contiguous and contiguous area available: from pv2:0-2 to pv5:0-0,pv5:3-5 and pv4:5-6" restore_lvs_ pvmove $mode -i0 --alloc contiguous "$dev2":0-2 "$dev5":0-0 "$dev5":3-5 "$dev4":5-6 check lv_on $vg $lv2 "$dev5" "$dev3" "$dev4" lvs_not_changed_ $lv1 $lv3 check_and_cleanup_lvs_ # --- # multiple segments in a LV #COMM "multiple source LVs: from pv3 to pv5" restore_lvs_ pvmove $mode -i0 "$dev3" "$dev5" check lv_on $vg $lv1 "$dev1" "$dev2" "$dev5" check lv_on $vg $lv2 "$dev2" "$dev5" "$dev4" lvs_not_changed_ $lv3 check_and_cleanup_lvs_ # --- # move inactive LV #COMM "move inactive LV: from pv2 to pv5" restore_lvs_ lvchange -an $vg/$lv1 lvchange -an $vg/$lv3 pvmove $mode -i0 "$dev2" "$dev5" check lv_on $vg $lv1 "$dev1" "$dev5" "$dev3" check lv_on $vg $lv2 "$dev5" "$dev3" "$dev4" check lv_on $vg $lv3 "$dev5" check_and_cleanup_lvs_ # --- # other failure cases #COMM "no PEs to move: from pv3 to pv1" restore_lvs_ pvmove $mode -i0 "$dev3" "$dev1" not pvmove $mode -i0 "$dev3" "$dev1" # "(cleanup previous test)" check lv_on $vg $lv1 "$dev1" "$dev2" "$dev1" check lv_on $vg $lv2 "$dev2" "$dev1" "$dev4" lvs_not_changed_ $lv3 check_and_cleanup_lvs_ #COMM "no space available: from pv2:0-0 to pv1:0-0" restore_lvs_ not pvmove $mode -i0 "$dev2":0-0 "$dev1":0-0 # "(cleanup previous test)" lvs_not_changed_ $lv1 $lv2 $lv3 check_and_cleanup_lvs_ #COMM 'same source and destination: from pv1 to pv1' restore_lvs_ not pvmove $mode -i0 "$dev1" "$dev1" #"(cleanup previous test)" lvs_not_changed_ $lv1 $lv2 $lv3 check_and_cleanup_lvs_ #COMM "sum of specified destination PEs is large enough, but it includes source PEs and the free PEs are not enough" restore_lvs_ not pvmove $mode --alloc anywhere "$dev1":0-2 "$dev1":0-2 "$dev5":0-0 2> err #"(cleanup previous test)" grep "Insufficient free space" err lvs_not_changed_ $lv1 $lv2 $lv3 check_and_cleanup_lvs_ # --------------------------------------------------------------------- #COMM "pvmove abort" restore_lvs_ LVM_TEST_TAG="kill_me_$PREFIX" pvmove $mode -i100 -b "$dev1" "$dev3" pvmove --abort check_and_cleanup_lvs_ #COMM "pvmove out of --metadatacopies 0 PV (bz252150)" vgremove -ff $vg pvcreate "${DEVICES[@]}" pvcreate --metadatacopies 0 "$dev1" "$dev2" create_vg_ lvcreate -l4 -n $lv1 $vg "$dev1" pvmove $mode "$dev1" #COMM "pvmove fails activating mirror, properly restores state before pvmove" dmsetup create $vg-pvmove0 --notable not pvmove $mode -i 1 "$dev2" dmsetup info --noheadings -c -o suspended $vg-$lv1 test "$(dmsetup info --noheadings -c -o suspended "$vg-$lv1")" = "Active" if dmsetup info $vg-pvmove0_mimage_0 > /dev/null; then dmsetup remove $vg-pvmove0 $vg-pvmove0_mimage_0 $vg-pvmove0_mimage_1 else dmsetup remove $vg-pvmove0 fi lvremove -ff $vg done LVM2.2.02.176/test/shell/lvcreate-cache.sh0000644000000000000120000002321313176752421016615 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Exercise creation of cache and cache pool volumes # Full CLI uses --type # Shorthand CLI uses -H | --cache SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux have_cache 1 3 0 || skip # FIXME: parallel cache metadata allocator is crashing when used value 8000! aux prepare_vg 5 80000 aux lvmconf 'global/cache_disabled_features = [ "policy_smq" ]' ####################### # Cache_Pool creation # ####################### # TODO: Unsupported yet creation of cache pool and cached volume at once # TODO: Introduce --pooldatasize # TODO: Policy to determine cache pool size and cache pool name invalid lvcreate -H -l 1 $vg invalid lvcreate -H -l 1 --name $lv1 $vg invalid lvcreate -l 1 --cache $vg # Only cached volume could be created invalid lvcreate -l 1 --type cache $vg # Striping is not supported with cache-pool creation invalid lvcreate -l 1 -i 2 --type cache-pool $vg # Fails as it needs to see VG content fail lvcreate -l 1 --type cache --cachepool $vg/pool1 fail lvcreate -l 1 --type cache --cachepool pool2 $vg fail lvcreate -l 1 --cache $vg/pool3 fail lvcreate -l 1 -H --cachepool pool4 $vg fail lvcreate -l 1 -H --name $lv2 $vg/pool5 fail lvcreate -l 1 -H --name $lv3 --cachepool $vg/pool6 fail lvcreate -l 1 -H --name $vg/$lv4 --cachepool pool7 # Unlike in thin pool case - cache pool and cache volume both need size arg. # So we require cache pool to exist and need to fail when it's missing. 
# # --cachepool gives implicit --cache fail lvcreate -l 1 --cachepool pool8 $vg # no size specified invalid lvcreate --cachepool pool $vg 2>&1 | tee err #grep "specify either size or extents" err grep "No command with matching syntax recognised" err # Check nothing has been created yet check vg_field $vg lv_count 0 # Checks that argument passed with --cachepool is really a cache-pool lvcreate -an -l 1 -n $lv1 $vg # Hint: nice way to 'tee' only stderr.log so we can check it's log_error() fail lvcreate -L10 --cachepool $vg/$lv1 2> >(tee -a stderr.log >&2) grep "not a cache pool" stderr.log # With --type cache-pool we are clear which segtype has to be created lvcreate -l 1 --type cache-pool $vg/pool1 check lv_field $vg/pool1 segtype "cache-pool" lvcreate -l 1 --type cache-pool --name $vg/pool2 $vg check lv_field $vg/pool2 segtype "cache-pool" lvcreate -l 1 --type cache-pool --cachepool $vg/pool3 $vg check lv_field $vg/pool3 segtype "cache-pool" lvcreate -l 1 --type cache-pool --cachepool $vg/pool4 check lv_field $vg/pool4 segtype "cache-pool" lvcreate -l 1 --type cache-pool --cachepool pool5 $vg check lv_field $vg/pool5 segtype "cache-pool" lvcreate -l 1 --type cache-pool --name pool6 $vg check lv_field $vg/pool6 segtype "cache-pool" lvcreate -l 1 --type cache-pool --name $vg/pool7 check lv_field $vg/pool7 segtype "cache-pool" lvremove -f $vg # Check the percentage values are reported for both cache and cache-pool lvcreate --type cache-pool -L1 $vg/cpool lvcreate -H -L4 -n $lv1 $vg/cpool check lv_field $vg/$lv1 origin "[${lv1}_corig]" check lv_field $vg/$lv1 copy_percent "0.00" # there should be something present (value differs per policy version) test -n "$(get lv_field $vg/$lv1 data_percent)" test -n "$(get lv_field $vg/$lv1 metadata_percent)" check lv_field $vg/cpool copy_percent "0.00" test -n "$(get lv_field $vg/cpool data_percent)" test -n "$(get lv_field $vg/cpool metadata_percent)" # check we also display percent value for segmented output (-o+devices) lvs -a -o+devices $vg/cpool | tee out grep "0.00" out lvremove -f $vg # Validate ambiguous pool name is detected invalid lvcreate -l 1 --type cache-pool --cachepool pool1 $vg/pool2 invalid lvcreate -l 1 --type cache-pool --name pool3 --cachepool pool4 $vg invalid lvcreate -l 1 --type cache-pool --name pool6 --cachepool pool6 $vg/pool7 invalid lvcreate -l 1 --type cache-pool --name pool8 $vg/pool9 # Unsupported with cache & cache pool invalid lvcreate --type cache-pool --discards passdown -l1 $vg invalid lvcreate -H --discards passdown -l1 $vg invalid lvcreate --type cache-pool --virtualsize 1T -l1 $vg invalid lvcreate -H --virtualsize 1T -l1 $vg check vg_field $vg lv_count 0 for mode in "" "--cachemode writethrough" do ################ # Cache creation # Creating a cache is a two phase process # - first, cache_pool (or origin) # - then, the cache LV (lvcreate distinguishes supplied origin vs cache_pool) ################ lvcreate --type cache-pool -l 1 -n pool $vg $mode # Select automatic name for cached LV lvcreate --type cache -l1 $vg/pool lvcreate --type cache-pool -l 1 -n pool1 $vg $mode lvcreate --cache -l1 -n $lv1 --cachepool $vg/pool1 dmsetup table ${vg}-$lv1 | grep cache # ensure it is loaded in kernel lvcreate --type cache-pool -l 1 -n pool2 $vg $mode lvcreate -H -l1 -n $lv2 --cachepool pool2 $vg # # Now check removals # # Removal of cached LV removes every related LV check lv_field $vg/$lv1 segtype "cache" lvremove -f $vg/$lv1 check lv_not_exists $vg $lv1 pool1 pool1_cdata pool1_cmeta # to preserve cachepool use lvconvert 
--splitcache $vg/$lv1 # Removal of cache pool leaves origin uncached check lv_field $vg/$lv2 segtype "cache" lvremove -f $vg/pool2 check lv_not_exists $vg pool2 pool2_cdata pool2_cmeta check lv_field $vg/$lv2 segtype "linear" lvremove -f $vg done # Conversion through lvcreate case # Bug 1110026 # Create origin, then cache pool and cache the origin lvcreate -aey -l 2 -n $lv1 $vg lvcreate --type cache -l 1 $vg/$lv1 dmsetup table ${vg}-$lv1 | grep cache # ensure it is loaded in kernel lvremove -f $vg # Check minimum cache pool metadata size lvcreate -l 1 --type cache-pool --poolmetadatasize 1 $vg 2>out grep "WARNING: Minimum" out # FIXME: This test is failing in allocator with smaller VG sizes lvcreate -l 1 --type cache-pool --poolmetadatasize 17G $vg 2>out grep "WARNING: Maximum" out lvremove -f $vg ######################################## # Cache conversion and r/w permissions # ######################################## # writeable origin and 'default' => writable cache + origin lvcreate -an -l1 -n $vg/$lv1 # do not allow stripping for cache-pool fail lvcreate -H -i 2 -l1 -n cpool1 $vg/$lv1 lvcreate -H -l1 -n cpool1 $vg/$lv1 check lv_attr_bit perm $vg/cpool1 "w" check lv_attr_bit perm $vg/${lv1}_corig "w" check lv_attr_bit perm $vg/$lv1 "w" # writeable origin and -pr => conversion is not supported lvcreate -an -l1 -n $vg/$lv2 fail lvcreate -H -l1 -pr -n cpool2 $vg/$lv2 # read-only origin and -pr => read-only cache + origin lvcreate -an -pr -l1 -n $vg/$lv3 lvcreate -an -H -l1 -pr -n cpool3 $vg/$lv3 check lv_attr_bit perm $vg/cpool3 "w" check lv_attr_bit perm $vg/${lv3}_corig "r" check lv_attr_bit perm $vg/$lv3 "r" check inactive $vg $lv3 check inactive $vg cpool3 # read-only origin and 'default' => read-only cache + origin lvcreate -an -pr -l1 -n $vg/$lv4 lvcreate -H -l1 -n cpool4 $vg/$lv4 check lv_attr_bit perm $vg/cpool4 "w" check lv_attr_bit perm $vg/${lv4}_corig "r" check lv_attr_bit perm $vg/$lv4 "r" # read-only origin and -prw => conversion unsupported lvcreate -an -pr -l1 -n $vg/$lv5 fail lvcreate -H -l1 -prw -n cpool5 $vg/$lv5 # cached volume respects permissions lvcreate --type cache-pool -l1 -n $vg/cpool lvcreate -H -l1 -pr -n $lv6 $vg/cpool check lv_attr_bit perm $vg/cpool "w" check lv_attr_bit perm $vg/$lv6 "r" lvremove -f $vg ######################################## # Validate args are properly preserved # ######################################## lvcreate --type cache-pool -L10 --chunksize 256 --cachemode writeback $vg/cpool1 check lv_field $vg/cpool1 chunksize "256.00k" check lv_field $vg/cpool1 cachemode "writeback" # check striping is supported when creating a cached LV lvcreate -H -L10 -i 2 -n $lv1 $vg/cpool1 check lv_field $vg/${lv1}_corig stripes "2" -a check lv_field $vg/$lv1 chunksize "256.00k" check lv_field $vg/$lv1 cachemode "writeback" lvcreate --type cache-pool -L10 --chunksize 256 --cachemode writethrough $vg/cpool2 lvcreate -H -L10 --chunksize 512 --cachemode writeback -n $lv2 $vg/cpool2 check lv_field $vg/$lv2 chunksize "512.00k" check lv_field $vg/$lv2 cachemode "writeback" # Chunk bigger then pool size fail lvcreate --type cache-pool -l1 --chunksize 1G $vg/cpool3 lvcreate --type cache-pool -L10 $vg/cpool4 fail lvcreate -H -L10 --chunksize 16M $vg/cpool4 lvdisplay --maps $vg lvremove -f $vg lvcreate --type cache-pool -L10 $vg/cpool lvcreate --type cache -l 1 --cachepool $vg/cpool -n corigin $vg --cachesettings migration_threshold=233 dmsetup status | grep $vg dmsetup status | grep $vg-corigin | grep 'migration_threshold 233' lvchange -an $vg lvchange -ay $vg 
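# For reference, what the repeated dmsetup check below demonstrates: a value
# passed via --cachesettings is stored in the LVM metadata and re-applied to
# the kernel table on every activation, so it survives the -an/-ay cycle above.
# A rough manual spot-check of the same behaviour (hedged sketch; the
# cache_settings report field may not be present in older builds) could be:
#   lvs -o name,cache_settings $vg/corigin
#   dmsetup status ${vg}-corigin | grep -o 'migration_threshold [0-9]*'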
dmsetup status | grep $vg-corigin | grep 'migration_threshold 233' lvremove -f $vg lvcreate --type cache-pool -L10 --cachepolicy mq --cachesettings migration_threshold=233 $vg/cpool lvcreate --type cache -l 1 --cachepool $vg/cpool -n corigin $vg dmsetup status | grep $vg dmsetup status | grep $vg-corigin | grep 'migration_threshold 233' lvremove -f $vg ############################## # Test things that should fail ############################## # Creation of read-only cache pool is not supported invalid lvcreate -pr --type cache-pool -l1 -n $vg/cpool # Atempt to use bigger chunk size then cache pool data size fail lvcreate -l 1 --type cache-pool --chunksize 16M $vg 2>out grep "chunk size" out # Option testing # --chunksize # --cachepolicy # --poolmetadatasize # --poolmetadataspare vgremove -ff $vg LVM2.2.02.176/test/shell/activate-missing-segment.sh0000644000000000000120000000200013176752421020645 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test activation behaviour with devices missing. # - snapshots and their origins are only activated together; if one fails, both # fail # - partial mirrors are not activated (but maybe they should? maybe we should # instead lvconvert --repair them?) # - linear LVs with bits missing are not activated SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 2 lvcreate -l100%FREE -n span $vg vgchange -a n $vg aux disable_dev "$dev1" not vgchange -a y $vg vgchange -a y --partial $vg check active $vg span vgremove -ff $vg LVM2.2.02.176/test/shell/pvcreate-metadata0.sh0000644000000000000120000000164213176752421017420 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Testcase for bugzilla #450651 # also checks that vgremove properly removes all lv devices in the right order # # 'Test pvcreate without metadata on all pvs' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 2 128 #lv_snap=$lv2 pvcreate "$dev1" pvcreate --metadatacopies 0 "$dev2" # "check lv snapshot" vgcreate $vg "$dev1" "$dev2" lvcreate -aey -n $lv -l 60%FREE $vg lvcreate -s -n $lv2 -l 10%FREE $vg/$lv vgremove -f $vg LVM2.2.02.176/test/shell/lvconvert-cache-raid.sh0000644000000000000120000000753613176752421017761 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014-2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Exercise usage of stacked cache volume using raid volume SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux have_cache 1 3 0 || skip aux have_raid 1 0 0 || skip aux lvmconf 'global/cache_disabled_features = [ "policy_smq" ]' aux prepare_vg 5 80 # Bug 1095843 # lvcreate RAID1 origin, lvcreate cache-pool, and lvconvert to cache lvcreate --type raid1 -m 1 --nosync -l 2 -n $lv1 $vg lvcreate --type cache-pool -l 1 -n ${lv1}_cachepool $vg lvconvert --cache -Zy --cachepool $vg/${lv1}_cachepool $vg/$lv1 check lv_exists $vg/${lv1}_corig_rimage_0 # ensure images are properly renamed dmsetup table ${vg}-$lv1 | grep cache # ensure it is loaded in kernel lvremove -f $vg # lvcreate RAID1 origin, lvcreate RAID1 cache-pool, and lvconvert to cache lvcreate --type raid1 -m 1 --nosync -l 2 -n $lv1 $vg lvcreate --type raid1 -m 1 --nosync -l 2 -n ${lv1}_cachepool $vg #should lvs -a $vg/${lv1}_cdata_rimage_0 # ensure images are properly renamed lvconvert --yes --type cache --cachemode writeback --cachepool $vg/${lv1}_cachepool $vg/$lv1 2>&1 | tee out grep "WARNING: Data redundancy is lost" out check lv_exists $vg/${lv1}_corig_rimage_0 # ensure images are properly renamed dmsetup table ${vg}-$lv1 | grep cache # ensure it is loaded in kernel lvremove -f $vg lvcreate -n corigin -m 1 --type raid1 --nosync -l 10 $vg lvcreate -n cpool --type cache $vg/corigin --cachemode writeback -l 10 2>&1 | tee out grep "WARNING: Data redundancy is lost" out not lvconvert --splitmirrors 1 --name split $vg/corigin "$dev1" lvconvert --yes --splitmirrors 1 --name split $vg/corigin "$dev1" lvremove -f $vg lvcreate -n cpool_meta -m 1 --type raid1 -l 10 $vg lvcreate -n cpool -m 1 --type raid1 -l 10 $vg aux wait_for_sync $vg cpool_meta aux wait_for_sync $vg cpool lvs -a -o+seg_pe_ranges $vg lvconvert --yes --type cache-pool --poolmetadata $vg/cpool_meta $vg/cpool lvcreate -n corigin --type cache --cachepool $vg/cpool -l 10 lvchange --syncaction repair $vg/cpool_cmeta aux wait_for_sync $vg cpool_cmeta lvchange --syncaction repair $vg/cpool_cdata aux wait_for_sync $vg cpool_cdata lvconvert -y --repair $vg/cpool_cmeta lvconvert -y --repair $vg/cpool_cdata # do not allow reserved names for *new* LVs not lvconvert --splitmirrors 1 --name split_cmeta $vg/cpool_cmeta "$dev1" not lvconvert --splitmirrors 1 --name split_cdata $vg/cpool_cdata "$dev1" # but allow manipulating existing LVs with reserved names aux wait_for_sync $vg cpool_cmeta aux wait_for_sync $vg cpool_cdata lvconvert --yes --splitmirrors 1 --name split_meta $vg/cpool_cmeta "$dev1" lvconvert --yes --splitmirrors 1 --name split_data $vg/cpool_cdata "$dev1" not lvconvert --splitmirrors 1 --name split_data $vg/cpool_cdata "$dev1" lvremove -f $vg # Test up/down raid conversion of cache pool data and metadata lvcreate --type cache-pool $vg/cpool -l 10 lvcreate -n corigin -H $vg/cpool -l 20 lvconvert -y -m +1 --type raid1 $vg/cpool_cmeta check lv_field $vg/cpool_cmeta layout "raid,raid1" check lv_field $vg/cpool_cmeta role "private,cache,pool,metadata" lvconvert -y -m +1 --type raid1 $vg/cpool_cdata check lv_field $vg/cpool_cdata layout "raid,raid1" check lv_field $vg/cpool_cdata role "private,cache,pool,data" not lvconvert -m -1 $vg/cpool_cmeta lvconvert -y -m -1 $vg/cpool_cmeta check lv_field $vg/cpool_cmeta layout "linear" lvconvert -y -m -1 $vg/cpool_cdata check 
lv_field $vg/cpool_cdata layout "linear" lvremove -f $vg vgremove -f $vg LVM2.2.02.176/test/shell/pvmove-abort-all.sh0000644000000000000120000000456113176752421017143 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Check pvmove --abort behaviour for all VGs and PVs SKIP_WITH_LVMLOCKD=1 # Ignore known failure when clvmd is processing sequences of commands for two VGs in parallel - 2015/07/17 agk # CLVMD: ioctl/libdm-iface.c:1940 Internal error: Performing unsafe table load while 3 device(s) are known to be suspended: (253:19) export DM_ABORT_ON_INTERNAL_ERRORS=0 . lib/inittest aux prepare_pvs 6 60 vgcreate -s 128k $vg "$dev1" "$dev2" pvcreate --metadatacopies 0 "$dev3" vgextend $vg "$dev3" vgcreate -s 128k $vg1 "$dev4" "$dev5" pvcreate --metadatacopies 0 "$dev6" vgextend $vg1 "$dev6" # Slowdown writes aux delay_dev "$dev3" 0 800 "$(get first_extent_sector "$dev3"):" aux delay_dev "$dev6" 0 800 "$(get first_extent_sector "$dev6"):" for mode in "--atomic" "" ; do for backgroundarg in "-b" "" ; do # Create multisegment LV lvcreate -an -Zn -l30 -n $lv1 $vg "$dev1" lvcreate -an -Zn -l30 -n $lv2 $vg "$dev2" lvcreate -an -Zn -l30 -n $lv1 $vg1 "$dev4" lvextend -l+30 -n $vg1/$lv1 "$dev5" cmd1=(pvmove -i1 $backgroundarg $mode "$dev1" "$dev3") cmd2=(pvmove -i1 $backgroundarg $mode "$dev2" "$dev3") cmd3=(pvmove -i1 $backgroundarg $mode -n $vg1/$lv1 "$dev4" "$dev6") if test -e HAVE_DM_DELAY; then if test -z "$backgroundarg" ; then "${cmd1[@]}" & aux wait_pvmove_lv_ready "$vg-pvmove0" "${cmd2[@]}" & aux wait_pvmove_lv_ready "$vg-pvmove1" "${cmd3[@]}" & aux wait_pvmove_lv_ready "$vg1-pvmove0" lvs -a $vg $vg1 else LVM_TEST_TAG="kill_me_$PREFIX" "${cmd1[@]}" LVM_TEST_TAG="kill_me_$PREFIX" "${cmd2[@]}" LVM_TEST_TAG="kill_me_$PREFIX" "${cmd3[@]}" fi fi # test removal of all pvmove LVs pvmove --abort # check if proper pvmove was canceled get lv_field $vg name -a | tee out not grep "^\[pvmove" out get lv_field $vg1 name -a | tee out not grep "^\[pvmove" out lvremove -ff $vg $vg1 wait aux kill_tagged_processes done done # Restore delayed device back aux enable_dev "$dev3" "$dev6" vgremove -ff $vg $vg1 LVM2.2.02.176/test/shell/dmsetup-keyring.sh0000644000000000000120000000551113176752421017077 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # unrelated to lvm2 daemons SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 SKIP_WITH_CLVMD=1 SKIP_WITH_LVMETAD=1 . 
lib/inittest CIPHER=aes-xts-plain64 HEXKEY_32=0102030405060708090a0102030405060102030405060708090a010203040506 HIDENKEY_32=0000000000000000000000000000000000000000000000000000000000000000 KEY_NAME="$PREFIX:keydesc" function _teardown() { keyctl unlink "%:$PREFIX-keyring" aux teardown_devs_prefixed "$PREFIX" } aux target_at_least dm-zero 1 0 0 || skip "missing dm-zero target" aux target_at_least dm-crypt 1 15 0 || skip "dm-crypt doesn't support keys in kernel keyring service" which keyctl || skip "test requires keyctl utility" keyctl new_session || true # fails with 'su', works with 'su -' keyctl newring "$PREFIX-keyring" @s keyctl timeout "%:$PREFIX-keyring" 60 trap '_teardown' EXIT keyctl add logon "$KEY_NAME" "${HEXKEY_32:0:32}" "%:$PREFIX-keyring" dmsetup create "$PREFIX-zero" --table "0 1 zero" # put key in kernel keyring for active table dmsetup create "$PREFIX-crypt" --table "0 1 crypt $CIPHER :32:logon:$KEY_NAME 0 $DM_DEV_DIR/mapper/$PREFIX-zero 0" # put hexbyte key in dm-crypt directly in inactive table dmsetup load "$PREFIX-crypt" --table "0 1 crypt $CIPHER $HEXKEY_32 0 $DM_DEV_DIR/mapper/$PREFIX-zero 0" # test dmsetup doesn't hide key descriptions... str=$(dmsetup table "$PREFIX-crypt" | cut -d ' ' -f 5) test "$str" = ":32:logon:$KEY_NAME" str=$(dmsetup table --showkeys "$PREFIX-crypt" | cut -d ' ' -f 5) test "$str" = ":32:logon:$KEY_NAME" # ...but it hides hexbyte representation of keys... str=$(dmsetup table --inactive "$PREFIX-crypt" | cut -d ' ' -f 5) test "$str" = "$HIDENKEY_32" #...unless --showkeys explictly requested str=$(dmsetup table --showkeys --inactive "$PREFIX-crypt" | cut -d ' ' -f 5) test "$str" = "$HEXKEY_32" # let's swap the tables dmsetup resume "$PREFIX-crypt" dmsetup load "$PREFIX-crypt" --table "0 1 crypt $CIPHER :32:logon:$KEY_NAME 0 $DM_DEV_DIR/mapper/$PREFIX-zero 0" str=$(dmsetup table --inactive "$PREFIX-crypt" | cut -d ' ' -f 5) test "$str" = ":32:logon:$KEY_NAME" str=$(dmsetup table --showkeys --inactive "$PREFIX-crypt" | cut -d ' ' -f 5) test "$str" = ":32:logon:$KEY_NAME" str=$(dmsetup table "$PREFIX-crypt" | cut -d ' ' -f 5) test "$str" = "$HIDENKEY_32" str=$(dmsetup table --showkeys "$PREFIX-crypt" | cut -d ' ' -f 5) test "$str" = "$HEXKEY_32" dmsetup remove "$PREFIX-crypt" dmsetup remove "$PREFIX-zero" LVM2.2.02.176/test/shell/lvchange-partial.sh0000644000000000000120000000301113176752421017162 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux prepare_vg 4 TYPE=raid1 aux have_raid 1 3 0 || TYPE=mirror lvcreate -aey --type $TYPE -m 1 -l 2 -n $lv1 $vg lvchange -an $vg/$lv1 aux disable_dev "$dev1" # # Test for allowable metadata changes # # contiguous_ARG lvchange -C y $vg/$lv1 lvchange -C n $vg/$lv1 # permission_ARG lvchange -p r $vg/$lv1 lvchange -p rw $vg/$lv1 # readahead_ARG lvchange -r none $vg/$lv1 lvchange -r auto $vg/$lv1 # persistent_ARG lvchange -M y --minor 56 --major 253 $vg/$lv1 lvchange -M n $vg/$lv1 # addtag_ARG # deltag_ARG lvchange --addtag foo $vg/$lv1 lvchange --deltag foo $vg/$lv1 # # Test for disallowed metadata changes # # resync_ARG not lvchange --resync $vg/$lv1 # alloc_ARG not lvchange --alloc anywhere $vg/$lv1 # discards_ARG not lvchange --discards ignore $vg/$lv1 # zero_ARG not lvchange --zero y $vg/$lv1 # # Ensure that allowed args don't cause disallowed args to get through # not lvchange --resync -ay $vg/$lv1 not lvchange --resync --addtag foo $vg/$lv1 aux enable_dev "$dev1" vgremove -ff $vg LVM2.2.02.176/test/shell/orphan-ondisk.sh0000644000000000000120000000113513176752421016522 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 2 vgreduce $vg "$dev1" 2>&1 | not grep -i 'parse error' LVM2.2.02.176/test/shell/lvmetad-pvscan-nomda.sh0000644000000000000120000000302113176752421017762 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITHOUT_LVMETAD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest kill "$(< LOCAL_LVMETAD)" rm LOCAL_LVMETAD aux prepare_devs 2 pvcreate --metadatacopies 0 "$dev1" pvcreate --metadatacopies 1 "$dev2" vgcreate $vg1 "$dev1" "$dev2" lvcreate -n foo -l 1 -an --zero n $vg1 # start lvmetad but make sure it doesn't know about $dev1 or $dev2 aux disable_dev "$dev1" "$dev2" aux prepare_lvmetad lvs mv LOCAL_LVMETAD XXX aux enable_dev "$dev2" "$dev1" mv XXX LOCAL_LVMETAD aux lvmconf 'global/use_lvmetad = 0' check inactive $vg1 foo aux lvmconf 'global/use_lvmetad = 1' # Tell lvmetad about dev2, but the VG is not complete with # only dev2, so the -aay should not yet activate the LV. pvscan --cache -aay "$dev2" aux lvmconf 'global/use_lvmetad = 0' check inactive $vg1 foo aux lvmconf 'global/use_lvmetad = 1' # Tell lvmetad about dev1, now the VG is complete with # both devs, so the -aay should activate the LV. pvscan --cache -aay "$dev1" aux lvmconf 'global/use_lvmetad = 0' check active $vg1 foo aux lvmconf 'global/use_lvmetad = 1' vgremove -ff $vg1 LVM2.2.02.176/test/shell/lvextend-snapshot-policy.sh0000644000000000000120000000223313176752421020731 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. 
All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest which mkfs.ext2 || skip extend() { lvextend --use-policies --config "activation { snapshot_autoextend_threshold = $1 }" $vg/snap } write() { mount "$DM_DEV_DIR/$vg/snap" mnt dd if=/dev/zero of="mnt/file$1" bs=1k count=$2 umount mnt } percent() { get lv_field $vg/snap snap_percent | cut -d. -f1 } # no dmeventd running in this test, testing --use-policies aux prepare_vg 2 lvcreate -aey -L24 -n base $vg mkfs.ext2 "$DM_DEV_DIR/$vg/base" lvcreate -s -L16 -n snap $vg/base mkdir mnt write 1 4096 pre=$(percent) extend 50 test "$pre" -eq "$(percent)" write 2 4096 pre=$(percent) extend 50 test "$pre" -gt "$(percent)" vgremove -f $vg LVM2.2.02.176/test/shell/process-each-pv-nomda.sh0000644000000000000120000000141013176752421020035 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Test process_each_pv with zero mda' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 2 pvcreate "$dev1" --metadatacopies 0 pvcreate "$dev2" vgcreate $SHARED $vg1 "$dev1" "$dev2" pvdisplay -a -C | tee err grep "$dev1" err grep "$dev2" err vgremove $vg1 LVM2.2.02.176/test/shell/vgchange-usage.sh0000644000000000000120000000772113176752421016641 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Exercise some vgchange diagnostics' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 4 pvcreate --metadatacopies 0 "$dev1" vgcreate -s 4M $vg "$dev1" "$dev2" "$dev3" # cannot change anything in exported vg vgexport $vg fail vgchange -ay $vg fail vgchange -p 8 $vg fail vgchange -x n $vg fail vgchange --addtag tag $vg fail vgchange --deltag tag $vg fail vgchange -s 4k $vg fail vgchange --uuid $vg fail vgchange --alloc anywhere $vg fail vgchange -c y $vg vgimport $vg # unsupported combinations of options... 
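# (Background, roughly per vgchange(8): --sysinit is a shortcut for
# --ignorelockingfailure --ignoremonitoring --poll n intended for early boot,
# so pairing it with --poll y or with allocation/metadata changes is
# contradictory; --poll and --monitor only apply while activating, and
# --refresh is itself an activation-style operation. Hence the combinations
# below are expected to be rejected as invalid command lines, not at runtime.)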
invalid vgchange --ignorelockingfailure --uuid $vg invalid vgchange --sysinit --alloc normal $vg invalid vgchange --sysinit --poll y $vg invalid vgchange -an --poll y $vg invalid vgchange -an --monitor y $vg invalid vgchange -ay --refresh $vg vgdisplay $vg # vgchange -p MaxPhysicalVolumes (bz202232) check vg_field $vg max_pv 0 vgchange -p 128 $vg check vg_field $vg max_pv 128 pv_count=$(get vg_field $vg pv_count) not vgchange -p 2 $vg 2>err grep "MaxPhysicalVolumes is less than the current number $pv_count of PVs for" err check vg_field $vg max_pv 128 # try some numbers around MAX limit (uint32) vgchange -p 4294967295 $vg invalid vgchange -p 4294967296 $vg invalid vgchange -p 18446744073709551615 $vg invalid vgchange -p 18446744073709551616 $vg check vg_field $vg max_pv 4294967295 # vgchange -l MaxLogicalVolumes check vg_field $vg max_lv 0 invalid vgchange -l -128 $vg vgchange -l 4294967295 $vg invalid vgchange -l 4294967296 $vg invalid vgchange -l 18446744073709551615 $vg invalid vgchange -l 18446744073709551616 $vg check vg_field $vg max_lv 4294967295 vgchange -l 128 $vg check vg_field $vg max_lv 128 # vgchange -s lvcreate -l4 -n $lv1 $vg lvcreate -l4 -n $lv2 $vg SIZELV2=$(get lv_field $vg/$lv2 size) check lv_field $vg/$lv2 seg_size_pe "4" vgchange -s 4K $vg check vg_field $vg vg_extent_size "4.00k" check lv_field $vg/$lv2 size "$SIZELV2" check lv_field $vg/$lv2 seg_size_pe "4096" lv_count=$(get vg_field $vg lv_count) not vgchange -l 1 $vg 2>err grep "MaxLogicalVolume is less than the current number $lv_count of LVs for" err check vg_field $vg max_lv 128 # check non-resizebility fail vgchange -x y $vg check vg_attr_bit resizeable $vg "z" vgchange -x n $vg check vg_attr_bit resizeable $vg "-" fail vgchange -x n $vg fail vgextend $vg "$dev4" vgremove -ff $vg # set cluster bit vgcreate -cn $vg "$dev1" "$dev2" "$dev3" # check prompt to change cluster bit without giving explicit vg name fail vgchange -cy 2>&1 | tee out grep "y/n" out check vg_attr_bit cluster $vg "-" lvcreate -l1 -n $lv1 $vg # check on cluster # either skipped as clustered (non-cluster), or already clustered (on cluster) if test -e LOCAL_CLVMD ; then # can switch with active LV vgchange -cy $vg fail vgchange -cy $vg # check volume is active locally exclusively check lv_field $vg/$lv1 lv_active "local exclusive" check vg_attr_bit cluster $vg "c" # check we do not support conversion of just locally active LVs lvchange -an $vg lvchange -ay $vg not vgchange -cn $vg lvchange -an $vg lvchange -aey $vg vgchange -cn $vg else # no clvmd is running fail vgchange -cy $vg # can't switch with active LV vgchange --yes -cy $vg fail vgchange --yes -cy $vg fail vgs $vg 2>&1 | tee out grep "Skipping clustered volume group" out vgs --ignoreskippedcluster $vg 2>&1 | tee out not grep "Skipping clustered volume group" out # reset back to non-clustered VG with disabled locking vgchange -cn $vg --config 'global{locking_type=0}' $vg fi check vg_attr_bit cluster $vg "-" vgremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-thin.sh0000644000000000000120000001157213176752421016736 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest prepare_lvs() { lvremove -f $vg lvcreate -L10M -n $lv1 $vg lvcreate -L8M -n $lv2 $vg } # # Main # aux have_thin 1 0 0 || skip aux prepare_pvs 4 64 get_devs # build one large PV vgcreate $vg1 "$dev1" "$dev2" "$dev3" # 32bit linux kernels are fragille with device size >= 16T # maybe uname -m [ x86_64 | i686 ] TSIZE=64T aux can_use_16T || TSIZE=15T lvcreate --type snapshot -l 100%FREE -n $lv $vg1 --virtualsize $TSIZE aux extend_filter_LVMTEST pvcreate "$DM_DEV_DIR/$vg1/$lv" vgcreate $vg -s 64K "$dev4" "$DM_DEV_DIR/$vg1/$lv" lvcreate -L1T -n $lv1 $vg invalid lvconvert --yes -c 8M --type thin --poolmetadatasize 1G $vg/$lv1 # needs some --cachepool or --thinpool invalid lvconvert --yes --poolmetadatasize 1G $vg/$lv1 lvremove -f $vg # create mirrored LVs for data and metadata volumes lvcreate -aey -L10M --type mirror -m1 --mirrorlog core -n $lv1 $vg lvcreate -aey -L10M -n $lv2 $vg lvchange -an $vg/$lv1 # conversion fails for mirror segment type fail lvconvert --thinpool $vg/$lv1 # FIXME: temporarily we return error code 5 INVALID=not # cannot use same LV $INVALID lvconvert --yes --thinpool $vg/$lv2 --poolmetadata $vg/$lv2 prepare_lvs # conversion fails for internal volumes # can't use --readahead with --poolmetadata invalid lvconvert --thinpool $vg/$lv1 --poolmetadata $vg/$lv2 --readahead 512 lvconvert --yes --thinpool $vg/$lv1 --poolmetadata $vg/$lv2 prepare_lvs lvconvert --yes -c 64 --stripes 2 --thinpool $vg/$lv1 --readahead 48 lvremove -f $vg # Swaping of metadata volume lvcreate -L1T -n $lv1 $vg lvcreate -L32 -n $lv2 $vg lvconvert --yes -c 8M --type thin-pool $vg/$lv1 2>&1 | tee err # Check there is a warning for large chunk size and zeroing enabled grep "WARNING: Pool zeroing and" err UUID=$(get lv_field $vg/$lv2 uuid) # Fail is pool is active # TODO maybe detect inactive pool and deactivate fail lvconvert --yes --thinpool $vg/$lv1 --poolmetadata $lv2 lvchange -an $vg lvconvert --yes --thinpool $vg/$lv1 --poolmetadata $lv2 check lv_field $vg/${lv1}_tmeta uuid "$UUID" # and swap again with new command --swapmetadata lvconvert --yes --swapmetadata $vg/$lv1 --poolmetadata $lv2 check lv_field $vg/$lv2 uuid "$UUID" lvremove -f $vg # test with bigger sizes lvcreate -L1T -n $lv1 $vg lvcreate -L8M -n $lv2 $vg lvcreate -L1M -n $lv3 $vg # chunk size is bigger then size of thin pool data fail lvconvert --yes -c 1G --thinpool $vg/$lv3 # stripes can't be used with poolmetadata invalid lvconvert --stripes 2 --thinpool $vg/$lv1 --poolmetadata $vg/$lv2 # too small metadata (<2M) fail lvconvert --yes -c 64 --thinpool $vg/$lv1 --poolmetadata $vg/$lv3 # too small chunk size fails $INVALID lvconvert -c 4 --thinpool $vg/$lv1 --poolmetadata $vg/$lv2 # too big chunk size fails $INVALID lvconvert -c 2G --thinpool $vg/$lv1 --poolmetadata $vg/$lv2 # negative chunk size fails $INVALID lvconvert -c -256 --thinpool $vg/$lv1 --poolmetadata $vg/$lv2 # non multiple of 64KiB fails $INVALID lvconvert -c 88 --thinpool $vg/$lv1 --poolmetadata $vg/$lv2 # cannot use same LV for pool and convertion $INVALID lvconvert --yes --thinpool $vg/$lv3 -T $vg/$lv3 # Warning about smaller then suggested lvconvert --yes -c 256 --thinpool $vg/$lv1 --poolmetadata $vg/$lv2 2>&1 | tee err grep "WARNING: Chunk 
size is smaller" err lvremove -f $vg lvcreate -L1T -n $lv1 $vg lvcreate -L32G -n $lv2 $vg # Warning about bigger then needed lvconvert --yes --thinpool $vg/$lv1 --poolmetadata $vg/$lv2 2>&1 | tee err grep "WARNING: Maximum" err lvremove -f $vg if test "$TSIZE" = 64T; then lvcreate -L24T -n $lv1 $vg # Warning about bigger then needed (24T data and 16G -> 128K chunk) fail lvconvert --yes -c 64 --thinpool $vg/$lv1 2>&1 | tee err grep "WARNING: Chunk size is too small" err lvremove -f $vg fi #lvs -a -o+chunk_size,stripe_size,seg_pe_ranges #################################### # Prohibites thin pool conversions # #################################### lvcreate -L32 -n $lv1 $vg lvcreate -L16 -n $lv2 $vg lvconvert --yes --thinpool $vg/$lv1 not aux have_cache 1 3 0 || fail lvconvert --yes --type cache-pool $vg/$lv1 fail lvconvert --yes --type mirror -m1 $vg/$lv1 not aux have_raid 1 0 0 || fail lvconvert --yes --type raid1 -m1 $vg/$lv1 fail lvconvert --yes --type snapshot $vg/$lv1 $vg/$lv2 fail lvconvert --yes --type snapshot $vg/$lv2 $vg/$lv1 fail lvconvert --yes --type thin-pool $vg/$lv1 vgremove -ff $vg vgremove -ff $vg1 LVM2.2.02.176/test/shell/pvmove-resume-1.sh0000644000000000000120000001655613176752421016733 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Check whether all available pvmove resume methods works as expected. # lvchange is able to resume pvmoves in progress. # 2 pvmove LVs in 2 VGs (1 per VG) SKIP_WITH_LVMLOCKD=1 SKIP_WITH_CLVMD=1 . lib/inittest aux prepare_pvs 4 30 vgcreate -s 128k $vg "$dev1" vgcreate -s 128k $vg1 "$dev2" pvcreate --metadatacopies 0 "$dev3" pvcreate --metadatacopies 0 "$dev4" vgextend $vg "$dev3" vgextend $vg1 "$dev4" # $1 resume fn test_pvmove_resume() { lvcreate -an -Zn -l30 -n $lv1 $vg lvcreate -an -Zn -l30 -n $lv1 $vg1 aux delay_dev "$dev3" 0 1000 "$(get first_extent_sector "$dev3"):" test -e HAVE_DM_DELAY || { lvremove -f $vg $vg1; return 0; } aux delay_dev "$dev4" 0 1000 "$(get first_extent_sector "$dev4"):" pvmove -i5 "$dev1" & PVMOVE=$! aux wait_pvmove_lv_ready "$vg-pvmove0" 300 kill -9 $PVMOVE pvmove -i5 "$dev2" & PVMOVE=$! aux wait_pvmove_lv_ready "$vg1-pvmove0" 300 kill -9 $PVMOVE if test -e LOCAL_LVMPOLLD ; then aux prepare_lvmpolld fi wait local finished for i in {1..100}; do finished=1 for d in "$vg-$lv1" "$vg1-$lv1" "$vg-pvmove0" "$vg1-pvmove0" ; do dmsetup status "$d" 2>/dev/null && { dmsetup remove "$d" || finished=0 } done test "$finished" -eq 0 || break done test "$finished" -eq 0 && die "Can't remove device" check lv_attr_bit type $vg/pvmove0 "p" check lv_attr_bit type $vg1/pvmove0 "p" if test -e LOCAL_CLVMD ; then # giveup all clvmd locks (faster then restarting clvmd) # no deactivation happen, nodes are already removed #vgchange -an $vg # FIXME: However above solution has one big problem # as clvmd starts to abort on internal errors on various # errors, based on the fact pvmove is killed -9 # Restart clvmd kill "$(< LOCAL_CLVMD)" for i in $(seq 1 100) ; do test $i -eq 100 && die "Shutdown of clvmd is too slow." 
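# This polling loop gives clvmd up to ~10s (100 x 0.1s) to drop its pidfile
# before giving up. A reusable form of the same wait-with-timeout idiom could
# look like the following hypothetical helper (illustrative only, unused here):
#   wait_until_() { # wait_until_ <tries> <cmd...>  -- polls every 0.1s
#     local t
#     for t in $(seq 1 "$1"); do "${@:2}" && return 0; sleep .1; done
#     return 1
#   }
#   # wait_until_ 100 test ! -e "$CLVMD_PIDFILE" || die "Shutdown of clvmd is too slow."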
test -e "$CLVMD_PIDFILE" || break sleep .1 done # wait for the pid removal aux prepare_clvmd fi aux notify_lvmetad "$dev1" "$dev2" "$dev3" "$dev4" # call resume function (see below) # with expected number of spawned # bg polling as parameter $1 2 aux enable_dev "$dev3" aux enable_dev "$dev4" i=0 while get lv_field $vg name -a | grep -E "^\[?pvmove"; do # wait for 30 secs at max test $i -ge 300 && die "Pvmove is too slow or does not progress." sleep .1 i=$((i + 1)) done while get lv_field $vg1 name -a | grep -E "^\[?pvmove"; do # wait for 30 secs at max test $i -ge 300 && die "Pvmove is too slow or does not progress." sleep .1 i=$((i + 1)) done aux kill_tagged_processes lvremove -ff $vg $vg1 } lvchange_single() { LVM_TEST_TAG="kill_me_$PREFIX" lvchange -aey $vg/$lv1 LVM_TEST_TAG="kill_me_$PREFIX" lvchange -aey $vg1/$lv1 if test -e LOCAL_LVMPOLLD; then aux lvmpolld_dump | tee lvmpolld_dump.txt aux check_lvmpolld_init_rq_count 1 "$vg/pvmove0" aux check_lvmpolld_init_rq_count 1 "$vg1/pvmove0" else test "$(aux count_processes_with_tag)" -eq $1 fi } lvchange_all() { LVM_TEST_TAG="kill_me_$PREFIX" lvchange -aey $vg/$lv1 $vg1/$lv1 if test -e LOCAL_LVMPOLLD; then aux lvmpolld_dump | tee lvmpolld_dump.txt aux check_lvmpolld_init_rq_count 1 "$vg/pvmove0" aux check_lvmpolld_init_rq_count 1 "$vg1/pvmove0" else test "$(aux count_processes_with_tag)" -eq $1 fi } vgchange_single() { LVM_TEST_TAG="kill_me_$PREFIX" vgchange -aey $vg LVM_TEST_TAG="kill_me_$PREFIX" vgchange -aey $vg1 if test -e LOCAL_LVMPOLLD; then aux lvmpolld_dump | tee lvmpolld_dump.txt aux check_lvmpolld_init_rq_count 1 "$vg/pvmove0" aux check_lvmpolld_init_rq_count 1 "$vg1/pvmove0" else test "$(aux count_processes_with_tag)" -eq "$1" fi } vgchange_all() { LVM_TEST_TAG="kill_me_$PREFIX" vgchange -aey $vg $vg1 if test -e LOCAL_LVMPOLLD; then aux lvmpolld_dump | tee lvmpolld_dump.txt aux check_lvmpolld_init_rq_count 1 "$vg/pvmove0" aux check_lvmpolld_init_rq_count 1 "$vg1/pvmove0" else test "$(aux count_processes_with_tag)" -eq "$1" fi } pvmove_fg() { # pvmove resume requires LVs active... LVM_TEST_TAG="kill_me_$PREFIX" vgchange --config 'activation{polling_interval=10}' -aey --poll n $vg $vg1 # ...also vgchange --poll n must not spawn any bg processes... if test -e LOCAL_LVMPOLLD; then aux lvmpolld_dump | tee lvmpolld_dump.txt aux check_lvmpolld_init_rq_count 0 "$vg/pvmove0" aux check_lvmpolld_init_rq_count 0 "$vg1/pvmove0" else test "$(aux count_processes_with_tag)" -eq 0 fi # ...thus finish polling get lv_field $vg name -a | grep -E "^\[?pvmove0" get lv_field $vg1 name -a | grep -E "^\[?pvmove0" # disable delay device # fg pvmove would take ages to complete otherwise aux enable_dev "$dev3" aux enable_dev "$dev4" pvmove } pvmove_bg() { # pvmove resume requires LVs active... LVM_TEST_TAG="kill_me_$PREFIX" vgchange --config 'activation{polling_interval=10}' -aey --poll n $vg $vg1 # ...also vgchange --poll n must not spawn any bg processes... if test -e LOCAL_LVMPOLLD; then aux lvmpolld_dump | tee lvmpolld_dump.txt aux check_lvmpolld_init_rq_count 0 "$vg/pvmove0" aux check_lvmpolld_init_rq_count 0 "$vg1/pvmove0" else test "$(aux count_processes_with_tag)" -eq 0 fi # ...thus finish polling get lv_field $vg name -a | grep -E "^\[?pvmove0" get lv_field $vg1 name -a | grep -E "^\[?pvmove0" LVM_TEST_TAG="kill_me_$PREFIX" pvmove -b -i0 } pvmove_fg_single() { # pvmove resume requires LVs active... 
LVM_TEST_TAG="kill_me_$PREFIX" vgchange --config 'activation{polling_interval=10}' -aey --poll n $vg # ...also vgchange --poll n must not spawn any bg processes... if test -e LOCAL_LVMPOLLD; then aux lvmpolld_dump | tee lvmpolld_dump.txt aux check_lvmpolld_init_rq_count 0 "$vg/pvmove0" aux check_lvmpolld_init_rq_count 0 "$vg1/pvmove0" else test "$(aux count_processes_with_tag)" -eq 0 fi # ...thus finish polling get lv_field $vg name -a | grep -E "^\[?pvmove0" get lv_field $vg1 name -a | grep -E "^\[?pvmove0" # disable delay device # fg pvmove would take ages to complete otherwise aux enable_dev "$dev3" aux enable_dev "$dev4" pvmove "$dev1" pvmove "$dev2" } pvmove_bg_single() { # pvmove resume requires LVs active... LVM_TEST_TAG="kill_me_$PREFIX" vgchange --config 'activation{polling_interval=10}' -aey --poll n $vg # ...also vgchange --poll n must not spawn any bg processes... if test -e LOCAL_LVMPOLLD; then aux lvmpolld_dump | tee lvmpolld_dump.txt aux check_lvmpolld_init_rq_count 0 "$vg/pvmove0" aux check_lvmpolld_init_rq_count 0 "$vg1/pvmove0" else test "$(aux count_processes_with_tag)" -eq 0 fi # ...thus finish polling get lv_field $vg name -a | grep -E "^\[?pvmove0" get lv_field $vg1 name -a | grep -E "^\[?pvmove0" LVM_TEST_TAG="kill_me_$PREFIX" pvmove -b "$dev1" LVM_TEST_TAG="kill_me_$PREFIX" pvmove -b "$dev2" } test_pvmove_resume lvchange_single test_pvmove_resume lvchange_all test_pvmove_resume vgchange_single test_pvmove_resume vgchange_all test_pvmove_resume pvmove_fg test_pvmove_resume pvmove_fg_single test_pvmove_resume pvmove_bg test_pvmove_resume pvmove_bg_single vgremove -ff $vg $vg1 LVM2.2.02.176/test/shell/metadata-balance.sh0000644000000000000120000002303713176752421017116 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux prepare_devs 6 echo Make sure we can ignore / un-ignore mdas on a per-PV basis for pv_in_vg in 1 0; do for mdacp in 1 2; do pvcreate --metadatacopies $mdacp "$dev1" "$dev2" pvcreate --metadatacopies 0 "$dev3" if [ $pv_in_vg = 1 ]; then vgcreate $vg "$dev1" "$dev2" "$dev3" fi pvchange --metadataignore y "$dev1" check pv_field "$dev1" pv_mda_count $mdacp check pv_field "$dev1" pv_mda_used_count 0 check pv_field "$dev2" pv_mda_count $mdacp check pv_field "$dev2" pv_mda_used_count $mdacp if [ $pv_in_vg = 1 ]; then check vg_field $vg vg_mda_count $(( mdacp * 2 )) check vg_field $vg vg_mda_used_count $mdacp check vg_field $vg vg_mda_copies unmanaged fi pvchange --metadataignore n "$dev1" check pv_field "$dev1" pv_mda_count $mdacp check pv_field "$dev1" pv_mda_used_count $mdacp if [ $pv_in_vg = 1 ]; then check vg_field $vg vg_mda_count $(( mdacp * 2 )) check vg_field $vg vg_mda_used_count $(( mdacp * 2 )) check vg_field $vg vg_mda_copies unmanaged vgremove -f $vg fi done done # Check if a PV has unignored (used) mdas, and if so, ignore pvignore_ () { pv_mda_used_count=$(get pv_field "$1" pv_mda_used_count) if [ $pv_mda_used_count -ne 0 ]; then pvchange --metadataignore y $1 fi } # Check if a PV has ignored mdas, and if so, unignore (make used) pvunignore_ () { pv_mda_count=$(get pv_field "$1" pv_mda_count) pv_mda_used_count=$(get pv_field "$1" pv_mda_used_count) if [ $pv_mda_count -gt $pv_mda_used_count ]; then pvchange --metadataignore n $1 fi } echo Test of vgmetadatacopies with vgcreate and vgchange for mdacp in 1 2; do pvcreate --metadatacopies $mdacp "$dev1" "$dev2" "$dev4" "$dev5" check pv_field "$dev1" pv_mda_used_count $mdacp check pv_field "$dev2" pv_mda_used_count $mdacp check pv_field "$dev4" pv_mda_used_count $mdacp check pv_field "$dev5" pv_mda_used_count $mdacp pvcreate --metadatacopies 0 "$dev3" vgcreate $vg "$dev1" "$dev2" "$dev3" check vg_field $vg vg_mda_copies unmanaged echo ensure both --vgmetadatacopies and --metadatacopies accepted vgchange --metadatacopies $(( mdacp * 1 )) $vg echo --vgmetadatacopies is persistent on disk echo --vgmetadatacopies affects underlying pv mda ignore check vg_field $vg vg_mda_copies $(( mdacp * 1 )) check vg_field $vg vg_mda_used_count $(( mdacp * 1 )) vgchange --vgmetadatacopies $(( mdacp * 2 )) $vg check vg_field $vg vg_mda_copies $(( mdacp * 2 )) check vg_field $vg vg_mda_used_count $(( mdacp * 2 )) echo allow setting metadatacopies larger than number of PVs vgchange --vgmetadatacopies $(( mdacp * 5 )) $vg check vg_field $vg vg_mda_copies $(( mdacp * 5 )) check vg_field $vg vg_mda_used_count $(( mdacp * 2 )) echo setting to 0 disables automatic balancing vgchange --vgmetadatacopies unmanaged $vg check vg_field $vg vg_mda_copies unmanaged vgremove -f $vg echo vgcreate succeeds even when creating a VG w/all ignored mdas pvchange --metadataignore y "$dev1" "$dev2" check pv_field "$dev1" pv_mda_count $mdacp check pv_field "$dev2" pv_mda_used_count 0 vgcreate $vg "$dev1" "$dev2" check vg_field $vg vg_mda_copies unmanaged vgremove -f $vg echo vgcreate succeeds with a specific number of metadata copies vgcreate --vgmetadatacopies $(( mdacp * 2 )) $vg "$dev1" "$dev2" check vg_field $vg vg_mda_copies $(( mdacp * 2 )) vgremove -f $vg vgcreate --vgmetadatacopies $(( mdacp * 1 )) $vg "$dev1" "$dev2" check vg_field $vg vg_mda_copies $(( mdacp * 1 )) vgremove -f $vg echo vgcreate succeeds with a larger value than total metadatacopies vgcreate --vgmetadatacopies $(( mdacp * 5 )) $vg "$dev1" "$dev2" check vg_field $vg vg_mda_copies $(( mdacp 
* 5 )) vgremove -f $vg echo vgcreate succeeds with --vgmetadatacopies unmanaged vgcreate --vgmetadatacopies unmanaged $vg "$dev1" "$dev2" check vg_field $vg vg_mda_copies unmanaged vgremove -f $vg pvunignore_ "$dev1" pvunignore_ "$dev2" pvunignore_ "$dev4" pvunignore_ "$dev5" echo vgcreate succeds with small value of --metadatacopies, ignores mdas vgcreate --vgmetadatacopies 1 $vg "$dev1" "$dev2" "$dev4" "$dev5" check vg_field $vg vg_mda_copies 1 check vg_field $vg vg_mda_count $(( mdacp * 4 )) check vg_field $vg vg_mda_used_count 1 echo Setting a larger value should trigger non-ignore of mdas vgchange --metadatacopies 3 $vg check vg_field $vg vg_mda_copies 3 check vg_field $vg vg_mda_used_count 3 echo Setting all should trigger unignore of all mdas vgchange --vgmetadatacopies all $vg check vg_field $vg vg_mda_count $(( mdacp * 4 )) check vg_field $vg vg_mda_copies unmanaged check vg_field $vg vg_mda_used_count $(( mdacp * 4 )) echo --vgmetadatacopies 0 should be unmanaged for vgchange and vgcreate vgchange --vgmetadatacopies 0 $vg check vg_field $vg vg_mda_copies unmanaged vgremove -f $vg vgcreate --vgmetadatacopies 0 $vg "$dev1" "$dev2" "$dev4" "$dev5" check vg_field $vg vg_mda_copies unmanaged vgremove -f $vg done echo Test vgextend / vgreduce with vgmetadatacopies for mdacp in 1 2; do pvcreate --metadatacopies $mdacp "$dev1" "$dev2" "$dev4" "$dev5" pvcreate --metadatacopies 0 "$dev3" echo Set a large value of vgmetadatacopies vgcreate --vgmetadatacopies $(( mdacp * 5 )) $vg "$dev1" "$dev2" "$dev3" check vg_field $vg vg_mda_copies $(( mdacp * 5 )) echo Ignore mdas on devices to be used for vgextend echo Large value of vgetadatacopies should automatically un-ignore mdas pvchange --metadataignore y "$dev4" "$dev5" check pv_field "$dev4" pv_mda_used_count 0 vgextend $vg "$dev4" "$dev5" check pv_field "$dev4" pv_mda_used_count $mdacp check pv_field "$dev5" pv_mda_used_count $mdacp vgremove -f $vg echo Set a small value of vgmetadatacopies vgcreate --vgmetadatacopies $(( mdacp * 1 )) $vg "$dev1" "$dev2" "$dev3" check vg_field $vg vg_mda_copies $(( mdacp * 1 )) echo Ignore mdas on devices to be used for vgextend echo Small value of vgetadatacopies should leave mdas as ignored pvchange --metadataignore y "$dev4" "$dev5" check pv_field "$dev4" pv_mda_used_count 0 vgextend $vg "$dev4" "$dev5" check pv_field "$dev4" pv_mda_used_count 0 check pv_field "$dev5" pv_mda_used_count 0 echo vgreduce of ignored pv w/mda should not trigger any change to ignore bits vgreduce $vg "$dev4" check pv_field "$dev4" pv_mda_used_count 0 check pv_field "$dev5" pv_mda_used_count 0 echo vgreduce of un-ignored pv w/mda should trigger un-ignore on an mda vgreduce $vg "$dev1" "$dev2" "$dev3" check pv_field "$dev5" pv_mda_used_count $mdacp check vg_field $vg vg_mda_copies $(( mdacp * 1 )) pvunignore_ "$dev1" pvunignore_ "$dev2" echo setting vgmetadatacopies to unmanaged should allow vgextend to add w/out balancing vgchange --vgmetadatacopies unmanaged $vg vgextend $vg "$dev1" "$dev2" check vg_field $vg vg_mda_copies unmanaged check vg_field $vg vg_mda_count $(( mdacp * 3 )) check vg_field $vg vg_mda_used_count $(( mdacp * 3 )) check pv_field "$dev1" pv_mda_used_count $mdacp check pv_field "$dev2" pv_mda_used_count $mdacp vgremove -f $vg done echo Test special situations, vgsplit, vgmerge, etc for mdacp in 1 2; do pvcreate --metadatacopies $mdacp "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" vgcreate --vgmetadatacopies 2 $vg1 "$dev1" "$dev2" "$dev3" vgcreate --vgmetadatacopies $(( mdacp * 1 )) $vg2 "$dev4" "$dev5" echo 
vgsplit/vgmerge preserves value of metadata copies check vg_field $vg1 vg_mda_copies 2 check vg_field $vg2 vg_mda_copies $(( mdacp * 1 )) vgsplit $vg1 $vg2 "$dev1" check vg_field $vg2 vg_mda_copies $(( mdacp * 1 )) vgmerge $vg1 $vg2 check vg_field $vg1 vg_mda_copies 2 check vg_field $vg1 vg_mda_count $(( mdacp * 5 )) echo vgsplit into new vg sets proper value of vgmetadatacopies vgsplit --vgmetadatacopies $(( mdacp * 2 )) $vg1 $vg2 "$dev1" "$dev2" check vg_field $vg2 vg_mda_copies $(( mdacp * 2 )) echo vgchange fails if given both vgmetadatacopies and metadatacopies not vgchange --vgmetadatacopies 5 --metadatacopies 7 $vg2 vgremove -f $vg1 $vg2 done echo Test combination of --vgmetadatacopies and pvchange --metadataignore for mdacp in 1 2; do pvcreate --metadatacopies $mdacp "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" vgcreate --vgmetadatacopies $(( mdacp * 1 )) $vg1 "$dev1" "$dev2" check vg_field $vg1 vg_mda_copies $(( mdacp * 1 )) check vg_field $vg1 vg_mda_used_count $(( mdacp * 1 )) pvignore_ "$dev3" echo Ensure vgextend of PVs with ignored MDAs does not add to vg_mda_used_count vgextend $vg1 "$dev3" check vg_field $vg1 vg_mda_used_count $(( mdacp * 1 )) echo Using pvchange to unignore should update vg_mda_used_count pvchange -f --metadataignore n "$dev3" check pv_field "$dev3" pv_mda_used_count $mdacp check vg_field $vg1 vg_mda_used_count $(( mdacp * 2 )) echo Set unmanaged on the vg should keep ignore bits the same during vgextend vgchange --vgmetadatacopies unmanaged $vg1 check vg_field $vg1 vg_mda_used_count $(( mdacp * 2 )) pvunignore_ "$dev4" vgextend $vg1 "$dev4" check pv_field "$dev4" pv_mda_used_count $mdacp check vg_field $vg1 vg_mda_used_count $(( mdacp * 3 )) echo Using pvchange to ignore should update vg_mda_used_count pvchange -f --metadataignore y "$dev4" check pv_field "$dev4" pv_mda_used_count 0 check vg_field $vg1 vg_mda_used_count $(( mdacp * 2 )) vgremove -f $vg1 done LVM2.2.02.176/test/shell/lvchange-raid456.sh0000644000000000000120000000142213176752421016710 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA TEST_RAID=raid456 . shell/lvchange-raid.sh aux raid456_replace_works || skip aux have_raid 1 5 2 || skip aux have_raid4 && run_types raid4 -i 2 "$dev1" "$dev2" "$dev3" "$dev4" run_types raid5 -i 2 "$dev1" "$dev2" "$dev3" "$dev4" run_types raid6 -i 3 "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" vgremove -ff $vg LVM2.2.02.176/test/shell/thin-defaults.sh0000644000000000000120000000202413176752421016513 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # test defaults entered through lvm.conf SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . 
lib/inittest # # Main # aux have_thin 1 0 0 || skip aux prepare_vg 2 lvcreate -T -L8M $vg/pool0 aux lvmconf "allocation/thin_pool_chunk_size = 128" \ "allocation/thin_pool_discards = \"ignore\"" \ "allocation/thin_pool_zero = 0" lvcreate -T -L8M $vg/pool1 check lv_field $vg/pool1 chunksize "128.00k" check lv_field $vg/pool1 discards "ignore" check lv_field $vg/pool1 zero "" vgremove -f $vg LVM2.2.02.176/test/shell/unlost-pv.sh0000644000000000000120000000312113176752421015712 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest check_() { local cache="" # vgscan needs --cache option for direct scan if lvmetad is used test -e LOCAL_LVMETAD && cache="--cache" vgscan $cache 2>&1 | tee vgscan.out "$@" grep "Inconsistent metadata found for VG $vg" vgscan.out } aux prepare_vg 3 lvcreate -an -Zn --type mirror -m 1 -l 1 -n mirror $vg #lvchange -a n $vg # try orphaning a missing PV (bz45867) aux disable_dev "$dev1" vgreduce --removemissing --force $vg aux enable_dev "$dev1" check_ test -e LOCAL_LVMETAD && pvcreate -f "$dev1" check_ not # try to just change metadata; we expect the new version (with MISSING_PV set # on the reappeared volume) to be written out to the previously missing PV vgextend $vg "$dev1" lvcreate -l 1 -n boo -a n --zero n $vg aux disable_dev "$dev1" lvremove $vg/mirror aux enable_dev "$dev1" check_ test -e LOCAL_LVMETAD && lvremove $vg/boo # FIXME trigger a write :-( check_ not aux disable_dev "$dev1" vgreduce --removemissing --force $vg aux enable_dev "$dev1" vgscan 2>&1 | tee out grep 'Removing PV' out vgs 2>&1 | tee out not grep 'Removing PV' out vgremove -ff $vg LVM2.2.02.176/test/shell/pvmove-all-segtypes.sh0000644000000000000120000000610413176752421017672 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description="ensure pvmove works with all common segment types" SKIP_WITH_LVMLOCKD=1 . lib/inittest which md5sum || skip aux prepare_vg 5 20 # Each of the following tests does: # 1) Create two LVs - one linear and one other segment type # The two LVs will share a PV. 
# 2) Move both LVs together # 3) Move only the second LV by name # Testing pvmove of linear LV lvcreate -aey -l 2 -n ${lv1}_foo $vg "$dev1" lvcreate -aey -l 2 -n $lv1 $vg "$dev1" check lv_tree_on $vg ${lv1}_foo "$dev1" check lv_tree_on $vg $lv1 "$dev1" aux mkdev_md5sum $vg $lv1 pvmove "$dev1" "$dev5" check lv_tree_on $vg ${lv1}_foo "$dev5" check lv_tree_on $vg $lv1 "$dev5" check dev_md5sum $vg $lv1 pvmove -n $lv1 "$dev5" "$dev4" check lv_tree_on $vg $lv1 "$dev4" check lv_tree_on $vg ${lv1}_foo "$dev5" check dev_md5sum $vg $lv1 lvremove -ff $vg # Testing pvmove of stripe LV lvcreate -aey -l 2 -n ${lv1}_foo $vg "$dev1" lvcreate -aey -l 4 -i 2 -n $lv1 $vg "$dev1" "$dev2" check lv_tree_on $vg ${lv1}_foo "$dev1" check lv_tree_on $vg $lv1 "$dev1" "$dev2" aux mkdev_md5sum $vg $lv1 pvmove "$dev1" "$dev5" check lv_tree_on $vg ${lv1}_foo "$dev5" check lv_tree_on $vg $lv1 "$dev2" "$dev5" check dev_md5sum $vg $lv1 pvmove -n $lv1 "$dev5" "$dev4" check lv_tree_on $vg $lv1 "$dev2" "$dev4" check lv_tree_on $vg ${lv1}_foo "$dev5" check dev_md5sum $vg $lv1 lvremove -ff $vg if test -e LOCAL_CLVMD ; then #FIXME these tests currently fail end require cmirrord echo "$(should false)FIXME!!! pvmove in clustered VG not fully supported!" else # Testing pvmove of mirror LV lvcreate -aey -l 2 -n ${lv1}_foo $vg "$dev1" lvcreate -aey -l 2 --type mirror -m 1 -n $lv1 $vg "$dev1" "$dev2" check lv_tree_on $vg ${lv1}_foo "$dev1" check lv_tree_on $vg $lv1 "$dev1" "$dev2" aux mkdev_md5sum $vg $lv1 pvmove "$dev1" "$dev5" check lv_tree_on $vg ${lv1}_foo "$dev5" check lv_tree_on $vg $lv1 "$dev2" "$dev5" check dev_md5sum $vg $lv1 pvmove -n $lv1 "$dev5" "$dev4" check lv_tree_on $vg $lv1 "$dev2" "$dev4" check lv_tree_on $vg ${lv1}_foo "$dev5" check dev_md5sum $vg $lv1 lvremove -ff $vg # Dummy LV and snap share dev1, while origin is on dev2 # Testing pvmove of snapshot LV lvcreate -aey -l 2 -n ${lv1}_foo $vg "$dev1" lvcreate -aey -l 2 -n $lv1 $vg "$dev2" lvcreate -s $vg/$lv1 -l 2 -n snap "$dev1" check lv_tree_on $vg ${lv1}_foo "$dev1" check lv_tree_on $vg snap "$dev1" aux mkdev_md5sum $vg snap pvmove "$dev1" "$dev5" check lv_tree_on $vg ${lv1}_foo "$dev5" check lv_tree_on $vg snap "$dev5" check dev_md5sum $vg snap pvmove -n snap "$dev5" "$dev4" check lv_tree_on $vg snap "$dev4" check lv_tree_on $vg ${lv1}_foo "$dev5" check dev_md5sum $vg snap lvremove -ff $vg fi vgremove -ff $vg LVM2.2.02.176/test/shell/select-tools.sh0000644000000000000120000002243513176752421016371 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux prepare_pvs 4 12 vgcreate -s 4m $vg1 "$dev1" "$dev2" vgcreate -s 4m $vg2 "$dev3" "$dev4" # vg1/lv1 mapped onto dev1 lvcreate -l1 -n "lv1" $vg1 "$dev1" # vg1/lv2 mapped onto dev1 and dev2 (2 segments) lvcreate -l3 -n "lv2" $vg1 "$dev1" "$dev2" # vg2/lv3 mapped onto dev3 lvcreate -l1 -n "lv3" $vg2 "$dev3" # vg2/lv4 mapped onto dev3 lvcreate -l1 -n "lv4" $vg2 "$dev3" "$dev4" # vg2/lv1 mapped onto "$dev4" (same LV name as vg1/lv1) lvcreate -l1 -n "lv1" $vg2 "$dev4" ########################################### # exercise process_each_vg with selection # ########################################### # select contains VGS field # direct vg name match vgchange --addtag 101 -S "vg_name=$vg1" check vg_field $vg1 vg_tags 101 not check vg_field $vg2 vg_tags 101 vgchange --deltag 101 # select contains LVS fiels vgchange --addtag 102 -S "lv_name=lv2" check vg_field $vg1 vg_tags 102 not check vg_field $vg2 vg_tags 102 vgchange --deltag 102 vgchange --addtag 103 -S "lv_name=lv1" check vg_field $vg1 vg_tags 103 check vg_field $vg2 vg_tags 103 vgchange --deltag 103 # select contains SEGS field vgchange --addtag 104 -S 'seg_start=8m' check vg_field $vg1 vg_tags 104 not check vg_field $vg2 vg_tags 104 vgchange --deltag 104 vgchange --addtag 105 -S "seg_start=0m" check vg_field $vg1 vg_tags 105 check vg_field $vg2 vg_tags 105 vgchange --deltag 105 # select contains PVS field vgchange --addtag 106 -S pv_name="$dev1" check vg_field $vg1 vg_tags 106 not check vg_field $vg2 vg_tags 106 vgchange --deltag 106 vgchange --addtag 107 -S "pv_size>0m" check vg_field $vg1 vg_tags 107 check vg_field $vg2 vg_tags 107 vgchange --deltag 107 # select contains PVSEGS field vgchange --addtag 108 -S "pvseg_size=2" check vg_field $vg1 vg_tags 108 not check vg_field $vg2 vg_tags 108 vgchange --deltag 108 vgchange --addtag 109 -S "pvseg_size=1" check vg_field $vg1 vg_tags 109 check vg_field $vg2 vg_tags 109 vgchange --deltag 109 # if VG name or tag is supplied together with the # selection, the result is an intersection of both vgchange --addtag 110 -S "vg_name=$vg1" $vg2 not check vg_field $vg1 vg_tags 110 not check vg_field $vg2 vg_tags 110 vgchange --deltag 110 vgchange --addtag 111 -S "vg_name=$vg1" $vg1 check vg_field $vg1 vg_tags 111 not check vg_field $vg2 vg_tags 111 vgchange --deltag 111 vgchange --addtag "tag" $vg1 vgchange --addtag 112 -S "vg_name=$vg2" @tag not check vg_field $vg1 vg_tags "tag,112" not check vg_field $vg2 vg_tags "tag,112" vgchange --deltag 112 vgchange --addtag 113 -S "vg_name=$vg1" @tag check vg_field $vg1 vg_tags "113,tag" not check vg_field $vg2 vg_tags "113,tag" vgchange --deltag 113 --deltag tag ########################################### # exercise process_each_lv with selection # ########################################### # select contains VGS field lvchange --addtag 201 -S "vg_name=$vg1" check lv_field $vg1/lv1 lv_tags 201 check lv_field $vg1/lv2 lv_tags 201 not check lv_field $vg2/lv3 lv_tags 201 not check lv_field $vg2/lv4 lv_tags 201 not check lv_field $vg2/lv1 lv_tags 201 lvchange --deltag 201 $vg1 $vg2 # select contains LVS fiels lvchange --addtag 202 -S "lv_name=lv2" not check lv_field $vg1/lv1 lv_tags 202 check lv_field $vg1/lv2 lv_tags 202 not check lv_field $vg2/lv3 lv_tags 202 not check lv_field $vg2/lv4 lv_tags 202 not check lv_field $vg2/lv1 lv_tags 202 lvchange --deltag 202 $vg1 $vg2 lvchange --addtag 203 -S "lv_name=lv1" check lv_field $vg1/lv1 lv_tags 203 not check lv_field $vg1/lv2 lv_tags 203 not check lv_field $vg2/lv3 lv_tags 203 not check lv_field $vg2/lv4 
lv_tags 203 check lv_field $vg2/lv1 lv_tags 203 lvchange --deltag 203 $vg1 $vg2 # select contains SEGS field lvchange --addtag 204 -S "seg_start=8m" not check lv_field $vg1/lv1 lv_tags 204 check lv_field $vg1/lv2 lv_tags 204 not check lv_field $vg2/lv3 lv_tags 204 not check lv_field $vg2/lv4 lv_tags 204 not check lv_field $vg2/lv1 lv_tags 204 lvchange --deltag 204 $vg1 $vg2 # select contains PVS field - COMBINATION NOT ALLOWED! lvchange --addtag 205 -S pv_name="$dev1" 2>err grep "Can't report LV and PV fields at the same time" err grep "Selection failed for LV" err not check lv_field $vg1/lv1 lv_tags 205 not check lv_field $vg1/lv2 lv_tags 205 not check lv_field $vg2/lv3 lv_tags 205 not check lv_field $vg2/lv4 lv_tags 205 not check lv_field $vg2/lv1 lv_tags 205 # select contains PVSEGS field - COMBINATION NOT ALLOWED! lvchange --addtag 206 -S "pvseg_start>=0" 2>err grep "Can't report LV and PV fields at the same time" err grep "Selection failed for LV" err not check lv_field $vg1/lv1 lv_tags 206 not check lv_field $vg1/lv2 lv_tags 206 not check lv_field $vg2/lv3 lv_tags 206 not check lv_field $vg2/lv4 lv_tags 206 not check lv_field $vg2/lv1 lv_tags 206 # if LV name or tag is supplied together with the # selection, the result is an intersection of both lvchange --addtag 207 -S "lv_name=lv2" $vg1/lv1 not check lv_field $vg1/lv1 lv_tags 207 not check lv_field $vg1/lv2 lv_tags 207 not check lv_field $vg2/lv3 lv_tags 207 not check lv_field $vg2/lv4 lv_tags 207 not check lv_field $vg2/lv1 lv_tags 207 lvchange --deltag 207 $vg1 $vg2 lvchange --addtag 208 -S "lv_name=lv2" $vg1/lv2 not check lv_field $vg1/lv1 lv_tags 208 check lv_field $vg1/lv2 lv_tags 208 not check lv_field $vg2/lv3 lv_tags 208 not check lv_field $vg2/lv4 lv_tags 208 not check lv_field $vg2/lv1 lv_tags 208 lvchange --deltag 208 $vg1 $vg2 lvchange --addtag "tag" $vg1/lv2 lvchange --addtag 209 -S "lv_name=lv3" @tag not check lv_field $vg1/lv1 lv_tags "209,tag" not check lv_field $vg1/lv2 lv_tags "209,tag" not check lv_field $vg2/lv3 lv_tags "209,tag" not check lv_field $vg2/lv4 lv_tags "209,tag" not check lv_field $vg2/lv1 lv_tags "209,tag" lvchange --deltag 209 $vg1 $vg2 lvchange --addtag 210 -S "lv_name=lv2" @tag not check lv_field $vg1/lv1 lv_tags "210,tag" check lv_field $vg1/lv2 lv_tags "210,tag" not check lv_field $vg2/lv3 lv_tags "210,tag" not check lv_field $vg2/lv4 lv_tags "210,tag" not check lv_field $vg2/lv1 lv_tags "210,tag" lvchange --deltag 210 --deltag tag $vg1 $vg2 ########################################### # exercise process_each_pv with selection # ########################################### # select contains VGS field pvchange --addtag 301 -S "vg_name=$vg1" check pv_field "$dev1" pv_tags 301 check pv_field "$dev2" pv_tags 301 not check pv_field "$dev3" pv_tags 301 not check pv_field "$dev4" pv_tags 301 pvchange -a --deltag 301 # select contains LVS field pvchange --addtag 302 -S "lv_name=lv2" check pv_field "$dev1" pv_tags 302 check pv_field "$dev2" pv_tags 302 not check pv_field "$dev3" pv_tags 302 not check pv_field "$dev4" pv_tags 302 pvchange -a --deltag 302 # select contains SEGS field pvchange --addtag 303 -S "seg_start=8m" check pv_field "$dev1" pv_tags 303 not check pv_field "$dev2" pv_tags 303 not check pv_field "$dev3" pv_tags 303 not check pv_field "$dev4" pv_tags 303 pvchange -a --deltag 303 # select contains PVS field pvchange --addtag 304 -S pv_name="$dev1" check pv_field "$dev1" pv_tags 304 not check pv_field "$dev2" pv_tags 304 not check pv_field "$dev3" pv_tags 304 not check pv_field "$dev4" 
pv_tags 304 pvchange -a --deltag 304 # select contains PVSEGS field pvchange --addtag 305 -S "pvseg_size=2" not check pv_field "$dev1" pv_tags 305 check pv_field "$dev2" pv_tags 305 not check pv_field "$dev3" pv_tags 305 not check pv_field "$dev4" pv_tags 305 pvchange -a --deltag 305 # if PV name or tag is supplied together with the # selection, the result is an intersection of both pvchange --addtag 306 -S pv_name="$dev1" "$dev2" not check pv_field "$dev1" pv_tags 306 not check pv_field "$dev2" pv_tags 306 not check pv_field "$dev3" pv_tags 306 not check pv_field "$dev4" pv_tags 306 pvchange -a --deltag 306 pvchange --addtag 307 -S pv_name="$dev1" "$dev1" check pv_field "$dev1" pv_tags 307 not check pv_field "$dev2" pv_tags 307 not check pv_field "$dev3" pv_tags 307 not check pv_field "$dev4" pv_tags 307 pvchange -a --deltag 307 pvchange --addtag "tag" "$dev1" pvchange --addtag 308 -S pv_name="$dev2" @tag not check pv_field "$dev1" pv_tags "308,tag" not check pv_field "$dev2" pv_tags "308,tag" not check pv_field "$dev3" pv_tags "308,tag" not check pv_field "$dev4" pv_tags "308,tag" pvchange --deltag 308 "$dev1" pvchange --addtag 309 -S pv_name="$dev1" @tag check pv_field "$dev1" pv_tags "309,tag" not check pv_field "$dev2" pv_tags "309,tag" not check pv_field "$dev3" pv_tags "309,tag" not check pv_field "$dev4" pv_tags "309,tag" pvchange -a --deltag 309 --deltag tag ######################### # special cases to test # ######################### # if calling vgremove, make sure we're doing selection per-VG, not per-LV # (vgremove calls process_each_vg with vgremove_single which itself # iterates over LVs with process_each_lv_in_vg - so internally it actually # operates per-LV, but we still need the selection to be done per-VG) vgremove --yes -S 'lv_name=lv2' # should remove whole vg1, not just the lv2 vgremove --yes $vg2 LVM2.2.02.176/test/shell/thin-dmeventd-warns.sh0000644000000000000120000000357513176752421017656 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # test if dmeventd produces multiple warnings when pools runs above 80% SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 SKIP_WITH_CLVMD=1 SKIP_WITH_LVMETAD=1 . lib/inittest which blkdiscard || skip percent_() { get lv_field $vg/pool data_percent | cut -d. 
-f1 } wait_warn_() { for i in $(seq 1 7) do test "$(grep -E -c "WARNING: Thin pool.*is now" debug.log_DMEVENTD_out)" -eq "$1" && return 0 sleep 2 done die "Waiting too long for dmeventd log warning" } # # Main # aux have_thin 1 0 0 || skip aux prepare_dmeventd aux prepare_vg lvcreate -L8 -V8 -T $vg/pool -n $lv1 dd if=/dev/zero of="$DM_DEV_DIR/$vg/$lv1" bs=256K count=26 test "$(percent_)" -gt 80 # Give dmeventd some time to log the WARNING wait_warn_ 1 dd if=/dev/zero of="$DM_DEV_DIR/$vg/$lv1" bs=256K count=30 test "$(percent_)" -gt 90 # Give dmeventd some time to log the WARNING wait_warn_ 2 dd if=/dev/zero of="$DM_DEV_DIR/$vg/$lv1" bs=1M count=8 test "$(percent_)" -eq 100 wait_warn_ 3 blkdiscard "$DM_DEV_DIR/$vg/$lv1" # FIXME: Enforce thin-pool metadata commit with flushing status dmsetup status ${vg}-pool-tpool # Wait for thin-pool monitoring to notice lower values sleep 11 # ATM dmeventd is not logging an event for the thin-pool getting # below the 'WARNED' threshold. dd if=/dev/zero of="$DM_DEV_DIR/$vg/$lv1" bs=256K count=30 test "$(percent_)" -gt 90 lvs -a $vg dmsetup status ${vg}-pool-tpool # Check the pool warns again wait_warn_ 4 vgremove -f $vg LVM2.2.02.176/test/shell/fsadm.sh0000644000000000000120000001023113176752421015035 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Exercise fsadm filesystem resize' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 1 100 # set to "skip" to avoid testing given fs and test warning result # i.e. check_reiserfs=skip check_ext2= check_ext3= check_xfs= check_reiserfs= which mkfs.ext2 || check_ext2=${check_ext2:-mkfs.ext2} which mkfs.ext3 || check_ext3=${check_ext3:-mkfs.ext3} which fsck.ext3 || check_ext3=${check_ext3:-fsck.ext3} which mkfs.xfs || check_xfs=${check_xfs:-mkfs.xfs} which xfs_check || { which xfs_repair || check_xfs=${check_xfs:-xfs_repair} } grep xfs /proc/filesystems || check_xfs=${check_xfs:-no_xfs} which mkfs.reiserfs || check_reiserfs=${check_reiserfs:-mkfs.reiserfs} which reiserfsck || check_reiserfs=${check_reiserfs:-reiserfsck} modprobe reiserfs || true grep reiserfs /proc/filesystems || check_reiserfs=${check_reiserfs:-no_reiserfs} vg_lv=$vg/$lv1 vg_lv2=$vg/${lv1}bar dev_vg_lv="$DM_DEV_DIR/$vg_lv" dev_vg_lv2="$DM_DEV_DIR/$vg_lv2" mount_dir="mnt" mount_space_dir="mnt space dir" # for recursive call LVM_BINARY=$(which lvm) export LVM_BINARY test ! -d "$mount_dir" && mkdir "$mount_dir" test !
-d "$mount_space_dir" && mkdir "$mount_space_dir" cleanup_mounted_and_teardown() { umount "$mount_dir" || true umount "$mount_space_dir" || true aux teardown } fscheck_ext3() { fsck.ext3 -p -F -f "$dev_vg_lv" } fscheck_xfs() { if which xfs_repair ; then xfs_repair -n "$dev_vg_lv" else xfs_check "$dev_vg_lv" fi } fscheck_reiserfs() { reiserfsck --check -p -f "$dev_vg_lv" ' echo ' ' echo ' ' echo ' ' echo ' ' for i in $(seq 10 $1) do echo ' ' echo ' ' echo ' ' set +x done echo "" set -x } test -n "$LVM_TEST_THIN_RESTORE_CMD" || LVM_TEST_THIN_RESTORE_CMD=$(which thin_restore) || skip "$LVM_TEST_THIN_RESTORE_CMD" -V || skip aux have_thin 1 10 0 || skip aux prepare_dmeventd aux prepare_pvs 3 256 get_devs vgcreate -s 1M "$vg" "${DEVICES[@]}" # Testing dmeventd does NOT autoresize when default threshold 100% is left lvcreate -L200M -V50M -n thin -T $vg/pool lvcreate -V2M -n thin2 $vg/pool lvcreate -L2M -n $lv1 $vg lvcreate -L32M -n $lv2 $vg lvcreate -L32M -n $lv3 $vg lvchange -an $vg/thin $vg/thin2 $vg/pool # Filling 2M metadata volume # (Test for less then 25% free space in metadata) fake_metadata_ 400 2 >data "$LVM_TEST_THIN_RESTORE_CMD" -i data -o "$DM_DEV_DIR/mapper/$vg-$lv1" # Swap volume with restored fake metadata lvconvert -y --chunksize 64k --thinpool $vg/pool --poolmetadata $vg/$lv1 # Not alllowed when thin-pool metadata free space is <75% for 2M meta fail lvcreate -V20 $vg/pool lvchange -an $vg/pool # Consume more then (100% - 4MiB) out of 32MiB metadata volume (>87.5%) # (Test for less then 4MiB free space in metadata, which is less then 25%) fake_metadata_ 7400 2 >data "$LVM_TEST_THIN_RESTORE_CMD" -i data -o "$DM_DEV_DIR/mapper/$vg-$lv2" # Swap volume with restored fake metadata lvconvert -y --chunksize 64k --thinpool $vg/pool --poolmetadata $vg/$lv2 lvchange -ay $vg/pool # Check generated metadata consume more then 88% test "$(meta_percent_)" -gt "88" lvchange -an $vg/pool # Creation of thin LV is prohibited when metadata are above this value fail lvcreate -V20 $vg/pool 2>&1 | tee out grep "free space" out lvs -a $vg # Check that even with 99% threshold policy - metadata will go below 88% lvextend --use-policies --config "\ activation/thin_pool_autoextend_percent=1 \ activation/thin_pool_autoextend_threshold=99" $vg/pool test "$(meta_percent_)" -lt "88" # After such operatoin creation of thin LV has to pass lvcreate -V20 $vg/pool # Let's revalidate pool metadata (thin_check upon deactivation/activation) lvchange -an $vg lvchange -ay $vg/pool lvremove -f $vg ######################################################### # Test automatic resize with help of dmeventd DOES work # ######################################################### aux lvmconf "activation/thin_pool_autoextend_percent = 10" \ "activation/thin_pool_autoextend_threshold = 70" # Testing dmeventd autoresize lvcreate -L200M -V500M -n thin -T $vg/pool 2>&1 | tee out not grep "WARNING: Sum" out lvcreate -V2M -n thin2 $vg/pool lvcreate -L2M -n $lv1 $vg lvchange -an $vg/thin $vg/thin2 $vg/pool # Prepare some fake metadata with unmatching id # Transaction_id is lower by 1 and there are no messages -> ERROR fake_metadata_ 10 0 >data "$LVM_TEST_THIN_RESTORE_CMD" -i data -o "$DM_DEV_DIR/mapper/$vg-$lv1" lvconvert -y --thinpool $vg/pool --poolmetadata $vg/$lv1 not vgchange -ay $vg 2>&1 | tee out grep expected out check inactive $vg pool_tmeta # Transaction_id is higher by 1 fake_metadata_ 10 3 >data "$LVM_TEST_THIN_RESTORE_CMD" -i data -o "$DM_DEV_DIR/mapper/$vg-$lv1" lvconvert -y --thinpool $vg/pool --poolmetadata $vg/$lv1 not 
vgchange -ay $vg 2>&1 | tee out grep expected out check inactive $vg pool_tmeta # Prepare some fake metadata prefilled to ~81% (>70%) fake_metadata_ 400 2 >data "$LVM_TEST_THIN_RESTORE_CMD" -i data -o "$DM_DEV_DIR/mapper/$vg-$lv1" # Swap volume with restored fake metadata lvconvert -y --chunksize 64k --thinpool $vg/pool --poolmetadata $vg/$lv1 vgchange -ay $vg # Check dmeventd resizes metadata via timeout (nothing is written to pool) pre=$(meta_percent_) wait_for_change_ $pre lvchange -an $vg # fake_metadata_ 350 2 >data lvchange -ay $vg/$lv1 "$LVM_TEST_THIN_RESTORE_CMD" -i data -o "$DM_DEV_DIR/mapper/$vg-$lv1" lvconvert -y --chunksize 64k --thinpool $vg/pool --poolmetadata $vg/$lv1 lvchange -ay $vg/pool $vg/$lv1 lvs -a $vg lvcreate -s -Ky -n $lv2 $vg/thin pre=$(meta_percent_) # go over thin metadata threshold echo 2 >"$DM_DEV_DIR/mapper/$vg-$lv2" wait_for_change_ $pre lvs -a $vg vgremove -f $vg LVM2.2.02.176/test/shell/lvconvert-twostep.sh0000644000000000000120000000206613176752421017477 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 . lib/inittest aux prepare_vg 4 lvcreate -aey --type mirror -m 1 --mirrorlog disk --ignoremonitoring -L 1 -n mirror $vg not lvconvert -m 2 --mirrorlog core $vg/mirror "$dev3" 2>&1 | tee errs grep "two steps" errs lvconvert -m 2 $vg/mirror "$dev3" lvconvert --mirrorlog core $vg/mirror not lvconvert -m 1 --mirrorlog disk $vg/mirror "$dev3" 2>&1 | tee errs grep "two steps" errs if test ! -e LOCAL_CLVMD ; then # FIXME mirrored unsupported in cluster not lvconvert -m 1 --mirrorlog mirrored $vg/mirror "$dev3" "$dev4" 2>&1 | tee errs grep "two steps" errs fi vgremove -ff $vg LVM2.2.02.176/test/shell/lvmetad-pvscan-md.sh0000644000000000000120000000201713176752421017270 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014-2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITHOUT_LVMETAD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest test -f /proc/mdstat && grep -q raid0 /proc/mdstat || \ modprobe raid0 || skip aux prepare_devs 2 # create 2 disk MD raid0 array (stripe_width=128K) aux prepare_md_dev 0 64 2 "$dev1" "$dev2" aux lvmconf 'devices/md_component_detection = 1' aux extend_filter_LVMTEST aux extend_filter "a|/dev/md.*|" pvdev=$(< MD_DEV_PV) pvcreate "$pvdev" # ensure that lvmetad can only see the toplevel MD device pvs | tee out grep "$pvdev" out not grep "$dev1" out not grep "$dev2" out LVM2.2.02.176/test/shell/snapshots-of-mirrors.sh0000644000000000000120000000244713176752421020074 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 . lib/inittest aux prepare_vg 4 lvcreate -aey --type mirror -m 1 -L 10M --nosync -n lv $vg # Create snapshot of a mirror origin lvcreate -s $vg/lv -L 10M -n snap # Down-convert (mirror -> linear) under a snapshot lvconvert -m0 $vg/lv # Up-convert (linear -> mirror) lvconvert --type mirror -m2 $vg/lv # Down-convert (mirror -> mirror) lvconvert -m 1 $vg/lv # Up-convert (mirror -> mirror) -- Not supported! not lvconvert -m2 $vg/lv # Log conversion (disk -> core) lvconvert --mirrorlog core $vg/lv # Log conversion (core -> mirrored) # FIXME on cluster SHOULD="" test -e LOCAL_CLVMD && SHOULD=should $SHOULD lvconvert --mirrorlog mirrored $vg/lv # Log conversion (mirrored -> core) lvconvert --mirrorlog core $vg/lv # Log conversion (core -> disk) lvconvert --mirrorlog disk $vg/lv ## Clean-up vgremove -f $vg LVM2.2.02.176/test/shell/pool-labels.sh0000644000000000000120000000326113176752421016161 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2007 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # lvmetad does not handle pool labels so skip test. SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMETAD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest env printf "" || skip # skip if printf is not available # create the old GFS pool labeled linear devices create_pool_label_() { # FIXME # echo -e is bashism, dash builtin sh doesn't do \xNN in printf either # printf comes from coreutils, and is probably not posix either env printf "\x01\x16\x70\x06\x5f\xcf\xff\xb9\xf8\x24\x8apool1" | dd of="$2" bs=5 seek=1 conv=notrunc env printf "\x04\x01\x03\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x0$1\x68\x01\x16\x70\x00\x00\x00\x00\x00\x06\x5f\xd0" | dd of=$2 bs=273 seek=1 conv=notrunc aux notify_lvmetad "$2" } aux prepare_devs 2 create_pool_label_ 0 "$dev1" create_pool_label_ 1 "$dev2" # check that pvcreate fails without -ff on the pool device not pvcreate "$dev1" # check that vgdisplay and pvcreate -ff works with the pool device vgdisplay --config 'global { locking_type = 0 }' aux disable_dev "$dev2" # FIXME! since pool1 cannot be opened, vgdisplay gives error... should we say # "not" there instead, checking that it indeed does fail? vgdisplay --config 'global { locking_type = 0 }' || true pvcreate -ff -y "$dev1" LVM2.2.02.176/test/shell/metadata.sh0000644000000000000120000000450713176752421015534 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 5 get_devs pvcreate "$dev1" pvcreate --metadatacopies 0 "$dev2" pvcreate --metadatacopies 0 "$dev3" pvcreate "$dev4" pvcreate --metadatacopies 0 "$dev5" vgcreate "$vg" "${DEVICES[@]}" lvcreate -n $lv -l 1 -i5 -I256 $vg pvchange -x n "$dev1" pvchange -x y "$dev1" vgchange -a n $vg pvchange --uuid "$dev1" pvchange --uuid "$dev2" vgremove -f $vg # check that PVs without metadata don't cause too many full device rescans (bz452606) for mdacp in 1 0; do pvcreate --metadatacopies "$mdacp" "${DEVICES[@]}" pvcreate "$dev1" vgcreate "$vg" "${DEVICES[@]}" lvcreate -n $lv1 -l 2 -i5 -I256 $vg lvcreate -aey -n $lv2 --type mirror -m2 -l 2 $vg lvchange -an $vg/$lv1 $vg/$lv2 vgchange -aey $vg lvchange -an $vg/$lv1 $vg/$lv2 vgremove -f $vg done not grep "Cached VG .* incorrect PV list" out0 # begin M1 metadata tests if test -n "$LVM_TEST_LVM1" ; then pvcreate -M1 "$dev1" "$dev2" "$dev3" pv3_uuid=$(get pv_field "$dev3" pv_uuid) vgcreate -M1 $vg "$dev1" "$dev2" "$dev3" pvchange --uuid "$dev1" # verify pe_start of all M1 PVs pv_align="128.00k" check pv_field "$dev1" pe_start $pv_align check pv_field "$dev2" pe_start $pv_align check pv_field "$dev3" pe_start $pv_align pvs --units k -o name,pe_start,vg_mda_size,vg_name "${DEVICES[@]}" # upgrade from v1 to v2 metadata vgconvert -M2 $vg # verify pe_start of all M2 PVs check pv_field "$dev1" pe_start $pv_align check pv_field "$dev2" pe_start $pv_align check pv_field "$dev3" pe_start $pv_align pvs --units k -o name,pe_start,vg_mda_size,vg_name "${DEVICES[@]}" # create backup and then restore $dev3 vgcfgbackup -f "$TESTDIR/bak-%s" "$vg" pvcreate -ff -y --restorefile "$TESTDIR/bak-$vg" --uuid "$pv3_uuid" "$dev3" vgcfgrestore -f "$TESTDIR/bak-$vg" "$vg" # verify pe_start of $dev3 check pv_field "$dev3" pe_start $pv_align fi # end M1 metadata tests LVM2.2.02.176/test/shell/lvconvert-repair-thin.sh0000644000000000000120000000562013176752421020213 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013-2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test repairing of broken thin pool metadata SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest which mkfs.ext2 || skip # # Main # aux have_thin 1 0 0 || skip aux prepare_vg 4 # Create LV # TODO: investigate problem with --zero n and my repairable damage trick #lvcreate -T -L20 -V10 -n $lv1 $vg/pool --discards ignore --zero n --chunksize 128 "$dev1" "$dev2" lvcreate -T -L20 -V10 -n $lv1 $vg/pool --chunksize 128 --discards ignore "$dev1" "$dev2" lvcreate -T -V10 -n $lv2 $vg/pool mkfs.ext2 "$DM_DEV_DIR/$vg/$lv1" mkfs.ext2 "$DM_DEV_DIR/$vg/$lv2" lvcreate -L20 -n repair $vg lvcreate -L2 -n fixed $vg lvs -a -o+seg_pe_ranges $vg #aux error_dev "$dev2" 2050:1 lvchange -an $vg/$lv2 $vg/$lv1 $vg/pool $vg/repair # Manual repair steps: # Test swapping - swap out thin-pool's metadata with our repair volume lvconvert -y -f --poolmetadata $vg/repair --thinpool $vg/pool lvchange -ay $vg/repair # # To continue this test - we need real tools available # When they are not present mark test as skipped, but still # let proceed initial part which should work even without tools # aux have_tool_at_least "$LVM_TEST_THIN_CHECK_CMD" 0 3 1 || skip aux have_tool_at_least "$LVM_TEST_THIN_DUMP_CMD" 0 3 1 || skip aux have_tool_at_least "$LVM_TEST_THIN_REPAIR_CMD" 0 3 1 || skip # Make some 'repairable' damage?? dd if=/dev/zero of="$DM_DEV_DIR/$vg/repair" bs=1 seek=40960 count=1 not "$LVM_TEST_THIN_CHECK_CMD" "$DM_DEV_DIR/$vg/repair" not "$LVM_TEST_THIN_DUMP_CMD" "$DM_DEV_DIR/$vg/repair" | tee dump "$LVM_TEST_THIN_REPAIR_CMD" -i "$DM_DEV_DIR/$vg/repair" -o "$DM_DEV_DIR/$vg/fixed" "$LVM_TEST_THIN_DUMP_CMD" --repair "$DM_DEV_DIR/$vg/repair" | tee repaired_xml "$LVM_TEST_THIN_CHECK_CMD" "$DM_DEV_DIR/$vg/fixed" lvchange -an $vg # Swap repaired metadata back lvconvert -y -f --poolmetadata $vg/fixed --thinpool $vg/pool # Check pool still preserves its original settings check lv_field $vg/pool chunksize "128.00k" check lv_field $vg/pool discards "ignore" check lv_field $vg/pool zero "zero" # Activate pool - this should now work vgchange -ay $vg vgchange -an $vg # Put back 'broken' metadata lvconvert -y -f --poolmetadata $vg/repair --thinpool $vg/pool # Check --repair usage lvconvert -v --repair $vg/pool # Check repaired pool could be activated lvchange -ay $vg/pool vgchange -an $vg # Restore damaged metadata lvconvert -y -f --poolmetadata $vg/pool_meta0 --thinpool $vg/pool # Check lvremove -ff works even with damaged pool lvremove -ff $vg LVM2.2.02.176/test/shell/lvmetad-test.sh0000644000000000000120000000161213176752421016357 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 2 vgcreate $vg1 "$dev1" "$dev2" --test vgs | not grep $vg1 vgcreate $vg1 "$dev1" "$dev2" vgs | grep $vg1 lvcreate -n bar -l 1 $vg1 --test lvs | not grep bar lvcreate -n bar -l 1 $vg1 lvs | grep bar lvremove $vg1/bar -f --test lvs | grep bar lvremove $vg1/bar -f lvs | not grep bar vgremove $vg1 --test vgs | grep $vg1 vgremove $vg1 vgs | not grep $vg1 LVM2.2.02.176/test/shell/lvmetad-ambiguous.sh0000644000000000000120000000351013176752421017372 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITHOUT_LVMETAD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 2 # flip the devices around init_udev_transaction dmsetup remove "$dev1" dmsetup remove "$dev2" dmsetup create -u TEST-${PREFIX}pv1 ${PREFIX}pv2 ${PREFIX}pv2.table dmsetup create -u TEST-${PREFIX}pv2 ${PREFIX}pv1 ${PREFIX}pv1.table finish_udev_transaction dmsetup info -c # re-scan them pvscan --cache "$dev1" || true pvscan --cache "$dev2" || true # expect both to be there pvs -a -o name | tee out grep "$dev1" out grep "$dev2" out aux lvmetad_dump # flip the devices 2nd. time around init_udev_transaction dmsetup remove "$dev1" dmsetup remove "$dev2" dmsetup create -u TEST-${PREFIX}pv2 ${PREFIX}pv2 ${PREFIX}pv2.table dmsetup create -u TEST-${PREFIX}pv1 ${PREFIX}pv1 ${PREFIX}pv1.table finish_udev_transaction # re-scan them pvscan --cache "$dev1" || true pvscan --cache "$dev2" || true # expect both to be there pvs -a -o name | tee out grep "$dev1" out grep "$dev2" out aux lvmetad_dump # flip the devices 2nd. time around dmsetup remove -f "$dev1" dmsetup remove -f "$dev2" dmsetup create -u TEST-${PREFIX}pv1 ${PREFIX}pv2 ${PREFIX}pv2.table dmsetup create -u TEST-${PREFIX}pv2 ${PREFIX}pv1 ${PREFIX}pv1.table # re-scan them pvscan --cache "$dev1" || true pvscan --cache "$dev2" || true # expect both to be there pvs -a -o name | tee out grep "$dev1" out grep "$dev2" out LVM2.2.02.176/test/shell/activate-minor.sh0000644000000000000120000000155613176752421016677 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest # Just skip this test if minor is already in use... dmsetup info | tee info grep -E "^Major, minor: *[0-9]+, 123" info && skip aux prepare_vg 2 lvcreate -a n --zero n -l 1 -n foo $vg lvchange $vg/foo -My --major=255 --minor=123 lvchange $vg/foo -a y dmsetup info $vg-foo | tee info grep -E "^Major, minor: *[0-9]+, 123" info vgremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-raid10.sh0000644000000000000120000000352213176752421017050 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest get_image_pvs() { local d local images images=$(dmsetup ls | grep "${1}-${2}_.image_.*" | cut -f1 | sed -e s:-:/:) lvs --noheadings -a -o devices $images | sed s/\(.\)// } ######################################################## # MAIN ######################################################## # RAID10: Can replace 'copies - 1' devices from each stripe # Tests are run on 2-way mirror, 3-way stripe RAID10 aux have_raid 1 3 1 || skip # 9 PVs needed for RAID10 testing (3-stripes/2-mirror - replacing 3 devs) aux prepare_pvs 9 80 get_devs vgcreate -s 256k "$vg" "${DEVICES[@]}" lvcreate --type raid10 -m 1 -i 3 -l 3 -n $lv1 $vg aux wait_for_sync $vg $lv1 # Can replace any single device for i in $(get_image_pvs $vg $lv1); do lvconvert --replace $i $vg/$lv1 aux wait_for_sync $vg $lv1 done # Can't replace adjacent devices devices=( $(get_image_pvs $vg $lv1) ) not lvconvert --replace "${devices[0]}" --replace "${devices[1]}" $vg/$lv1 not lvconvert --replace "${devices[2]}" --replace "${devices[3]}" $vg/$lv1 not lvconvert --replace "${devices[4]}" --replace "${devices[5]}" $vg/$lv1 # Can replace non-adjacent devices for i in 0 1; do lvconvert \ --replace "${devices[$i]}" \ --replace "${devices[$(( i + 2 ))]}" \ --replace "${devices[$(( i + 4 ))]}" \ $vg/$lv1 aux wait_for_sync $vg $lv1 done vgremove -ff $vg LVM2.2.02.176/test/shell/lvresize-raid.sh0000644000000000000120000000336613176752421016536 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012,2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux have_raid 1 3 0 || skip levels="5 6 10" aux have_raid4 && levels="4 $levels" aux have_raid 1 7 0 && levels="0 0_meta $levels" aux prepare_pvs 6 get_devs vgcreate -s 256K "$vg" "${DEVICES[@]}" for deactivate in true false; do # Extend and reduce a 2-way RAID1 lvcreate --type raid1 -m 1 -l 2 -n $lv1 $vg test $deactivate && { aux wait_for_sync $vg $lv1 lvchange -an $vg/$lv1 } lvresize -l +2 $vg/$lv1 should lvresize -y -l -2 $vg/$lv1 #check raid_images_contiguous $vg $lv1 # Extend and reduce 3-striped RAID 4/5/6/10 for i in $levels ; do lvcreate --type raid$i -i 3 -l 3 -n $lv2 $vg check lv_field $vg/$lv2 "seg_size" "768.00k" test $deactivate && { aux wait_for_sync $vg $lv2 lvchange -an $vg/$lv2 } lvresize -l +3 $vg/$lv2 check lv_field $vg/$lv2 "seg_size" "1.50m" #check raid_images_contiguous $vg $lv1 should lvresize -y -l -3 $vg/$lv2 should check lv_field $vg/$lv2 "seg_size" "768.00k" #check raid_images_contiguous $vg $lv1 lvremove -ff $vg done done # Bug 1005434 # Ensure extend is contiguous lvcreate --type raid5 -l 2 -i 2 -n $lv1 $vg "$dev4" "$dev5" "$dev6" lvextend -l +2 --alloc contiguous $vg/$lv1 check lv_tree_on $vg $lv1 "$dev4" "$dev5" "$dev6" vgremove -f $vg LVM2.2.02.176/test/shell/lvmlockd-lv-types.sh0000644000000000000120000000746413176752421017355 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Check lvmlockd lock_args for different LV types' . lib/inittest [ -z "$LVM_TEST_LVMLOCKD" ] && skip; if test -n "$LVM_TEST_LOCK_TYPE_SANLOCK" ; then LOCKARGS1="1.0.0:70254592" LOCKARGS2="1.0.0:71303168" LOCKARGS3="1.0.0:72351744" fi if test -n "$LVM_TEST_LOCK_TYPE_DLM" ; then LOCKARGS1="dlm" LOCKARGS2="dlm" LOCKARGS3="dlm" fi if test -n "$LVM_TEST_LVMLOCKD_TEST" ; then LOCKARGS1="dlm" LOCKARGS2="dlm" LOCKARGS3="dlm" fi aux prepare_devs 5 vgcreate --shared $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" # # thin pool, thin lv, thin snap # lvcreate -L 8M -n pool1 $vg check lva_field $vg/pool1 lockargs $LOCKARGS1 lvcreate -L 8M -n pool1_meta $vg check lva_field $vg/pool1_meta lockargs $LOCKARGS2 lvconvert -y --type thin-pool --poolmetadata $vg/pool1_meta $vg/pool1 check lva_field $vg/pool1 lockargs $LOCKARGS3 check lva_field $vg/pool1_tdata lockargs "" check lva_field $vg/pool1_tmeta lockargs "" lvcreate -n thin1 -V 1G --thinpool $vg/pool1 check lva_field $vg/thin1 lockargs "" lvcreate -s -n snap1 $vg/thin1 check lva_field $vg/snap1 lockargs "" lvchange -ay -K $vg/snap1 lvchange -an $vg/snap1 lvchange -an $vg/thin1 lvchange -an $vg/pool1 lvremove $vg/snap1 lvremove $vg/thin1 lvremove $vg/pool1 # the first sanlock lock should be found and reused lvcreate -L 8M -n lv1 $vg check lva_field $vg/lv1 lockargs $LOCKARGS1 lvchange -an $vg/lv1 lvremove $vg/lv1 # # with automatic metadata lv # lvcreate -L 8M -n pool2 $vg check lva_field $vg/pool2 lockargs $LOCKARGS1 lvconvert -y --type thin-pool $vg/pool2 check lva_field $vg/pool2 lockargs $LOCKARGS2 check lva_field $vg/pool2_tdata lockargs "" check lva_field $vg/pool2_tmeta lockargs "" lvcreate -n thin2 -V 1G --thinpool $vg/pool2 check lva_field $vg/thin2 lockargs "" lvchange -an $vg/thin2 lvchange -an $vg/pool2 lvremove $vg/thin2 lvremove $vg/pool2 # # cache pool, cache lv # lvcreate -L 8M -n cache1 $vg check lva_field $vg/cache1 lockargs $LOCKARGS1 lvcreate -L 8M -n cache1_meta $vg check lva_field $vg/cache1_meta lockargs $LOCKARGS2 lvconvert -y --type cache-pool --poolmetadata $vg/cache1_meta $vg/cache1 check lva_field $vg/cache1 lockargs "" check lva_field $vg/cache1_cdata lockargs "" check lva_field $vg/cache1_cmeta lockargs "" lvcreate -n lv1 -L 8M $vg check lva_field $vg/lv1 lockargs $LOCKARGS1 lvconvert -y --type cache --cachepool $vg/cache1 $vg/lv1 check lva_field $vg/lv1 lockargs $LOCKARGS1 check lva_field $vg/cache1 lockargs "" check lva_field $vg/cache1_cdata lockargs "" check lva_field $vg/cache1_cmeta lockargs "" lvconvert --splitcache $vg/lv1 check lva_field $vg/lv1 lockargs $LOCKARGS1 check lva_field $vg/cache1 lockargs "" check lva_field $vg/cache1_cdata lockargs "" check lva_field $vg/cache1_cmeta lockargs "" lvchange -an $vg/cache1 lvchange -an $vg/lv1 lvremove $vg/cache1 lvremove $vg/lv1 # # cow snap # lvcreate -n lv2 -L 8M $vg check lva_field $vg/lv2 lockargs $LOCKARGS1 lvcreate -s -n lv2snap -L 8M $vg/lv2 check lva_field $vg/lv2 lockargs $LOCKARGS1 check lva_field $vg/lv2snap lockargs "" lvchange -y -an $vg/lv2 lvremove $vg/lv2snap lvremove $vg/lv2 # # mirror # lvcreate --type mirror -m 1 -n lv3 -L 8M $vg check lva_field $vg/lv3 lockargs $LOCKARGS1 lvchange -an $vg/lv3 lvremove $vg/lv3 # # raid1 # lvcreate --type raid1 -m 1 -n lv4 -L 8M $vg check lva_field $vg/lv4 lockargs $LOCKARGS1 lvchange -an $vg/lv4 
lvremove $vg/lv4 vgremove $vg LVM2.2.02.176/test/shell/lvconvert-mirror-basic-1.sh0000644000000000000120000000103613176752421020515 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA . ./shell/lvconvert-mirror-basic.sh test_many 1 vgremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-mirror.sh0000644000000000000120000003003613176752421017302 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010-2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 # disable lvmetad logging as it bogs down test systems export LVM_TEST_LVMETAD_DEBUG_OPTS=${LVM_TEST_LVMETAD_DEBUG_OPTS-} . lib/inittest aux prepare_pvs 5 20 get_devs # proper DEVRANGE needs to be set according to extent size DEVRANGE="0-32" vgcreate -s 32k "$vg" "${DEVICES[@]}" # convert from linear to 2-way mirror ("mirror" default type) lvcreate -aey -l2 -n $lv1 $vg "$dev1" lvconvert -i1 -m+1 -R32k $vg/$lv1 "$dev2" "$dev3:0-1" \ --config 'global { mirror_segtype_default = "mirror" }' lvs --noheadings -o attr $vg/$lv1 | grep '^[[:space:]]*m' lvremove -ff $vg # convert from linear to 2-way mirror (override "raid1" default type) lvcreate -aey -l2 -n $lv1 $vg "$dev1" lvconvert -i1 --type mirror -m+1 $vg/$lv1 "$dev2" "$dev3:0-1" \ --config 'global { mirror_segtype_default = "raid1" }' lvs --noheadings -o attr $vg/$lv1 | grep '^[[:space:]]*m' lvremove -ff $vg # convert from linear to 2-way mirror - with tags and volume_list (bz683270) lvcreate -aey -l2 -n $lv1 $vg --addtag hello lvconvert -i1 --type mirror -m+1 $vg/$lv1 \ --config 'activation { volume_list = [ "@hello" ] }' lvremove -ff $vg # convert from 2-way to 3-way mirror - with tags and volume_list (bz683270) lvcreate -aey -l2 --type mirror -m1 -n $lv1 $vg --addtag hello lvconvert -i1 -m+1 $vg/$lv1 \ --config 'activation { volume_list = [ "@hello" ] }' lvremove -ff $vg # convert from 2-way mirror to linear lvcreate -aey -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev3:0-1" lvconvert -m-1 $vg/$lv1 check linear $vg $lv1 lvremove -ff $vg # and now try removing a specific leg (bz453643) lvcreate -aey -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev3:0-1" lvconvert -m0 $vg/$lv1 "$dev2" check lv_on $vg $lv1 "$dev1" lvremove -ff $vg # convert from disklog to corelog, active lvcreate -aey -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev3:0-1" lvconvert -f --mirrorlog core $vg/$lv1 check mirror $vg $lv1 core lvremove -ff $vg # convert from corelog to disklog, active lvcreate -aey -l2 --type mirror -m1 --mirrorlog core -n $lv1 $vg "$dev1" "$dev2" lvconvert --mirrorlog disk $vg/$lv1 "$dev3:0-1" check mirror $vg $lv1 "$dev3" lvremove -ff $vg # convert linear to 2-way mirror with 1 PV lvcreate -aey -l2 -n $lv1 $vg "$dev1" not lvconvert -m+1 --mirrorlog core 
$vg/$lv1 "$dev1" lvremove -ff $vg # Start w/ 3-way mirror # Test pulling primary image before mirror in-sync (should fail) # Test pulling primary image after mirror in-sync (should work) # Test that the correct devices remain in the mirror offset=$(get first_extent_sector "$dev2") offset=$(( offset + 2 )) # put 1 single slowing delayed sector # update in case mirror ever gets faster and allows parallel read aux delay_dev "$dev2" 0 2000 ${offset}:1 lvcreate -aey -l5 -Zn -Wn --type mirror --regionsize 16K -m2 -n $lv1 $vg "$dev1" "$dev2" "$dev4" "$dev3:$DEVRANGE" should not lvconvert -m-1 $vg/$lv1 "$dev1" aux enable_dev "$dev2" should lvconvert $vg/$lv1 # wait lvconvert -m2 $vg/$lv1 "$dev1" "$dev2" "$dev4" "$dev3:0" # If the above "should" failed... aux wait_for_sync $vg $lv1 lvconvert -m-1 $vg/$lv1 "$dev1" check mirror_images_on $vg $lv1 "$dev2" "$dev4" lvconvert -m-1 $vg/$lv1 "$dev2" check linear $vg $lv1 check lv_on $vg $lv1 "$dev4" lvremove -ff $vg # FIXME: lots of unneeded extents here for log - it needs to be at least region_size in size # No parallel lvconverts on a single LV please lvcreate -aey -Zn -Wn -l8 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev3:0-8" check mirror $vg $lv1 check mirror_legs $vg $lv1 2 offset=$(get first_extent_sector "$dev4") offset=$(( offset + 2 )) aux delay_dev "$dev4" 0 2000 ${offset}: LVM_TEST_TAG="kill_me_$PREFIX" lvconvert -m+1 -b $vg/$lv1 "$dev4" # Next convert should fail b/c we can't have 2 at once should not lvconvert -m+1 $vg/$lv1 "$dev5" aux enable_dev "$dev4" should lvconvert $vg/$lv1 # wait lvconvert -m2 $vg/$lv1 # In case the above "should" actually failed check mirror $vg $lv1 "$dev3" check mirror_no_temporaries $vg $lv1 check mirror_legs $vg $lv1 3 lvremove -ff $vg # add 1 mirror to core log mirror, but # implicitly keep log as 'core' lvcreate -aey -l2 --type mirror -m1 --mirrorlog core -n $lv1 $vg "$dev1" "$dev2" lvconvert -m +1 -i1 $vg/$lv1 check mirror $vg $lv1 core check mirror_no_temporaries $vg $lv1 check mirror_legs $vg $lv1 3 lvremove -ff $vg # remove 1 mirror from corelog'ed mirror; should retain 'core' log type lvcreate -aey -l2 --type mirror -m2 --corelog -n $lv1 $vg lvconvert -m -1 -i1 $vg/$lv1 check mirror $vg $lv1 core check mirror_no_temporaries $vg $lv1 check mirror_legs $vg $lv1 2 lvremove -ff $vg # add 1 mirror then add 1 more mirror during conversion # FIXME this has been explicitly forbidden? 
#lvcreate -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev3":0 #lvconvert -m+1 -b $vg/$lv1 "$dev4" #lvconvert -m+1 $vg/$lv1 "$dev5" # #check mirror $vg $lv1 "$dev3" #check mirror_no_temporaries $vg $lv1 #check mirror_legs $vg $lv1 4 #lvremove -ff $vg # convert inactive mirror and start polling lvcreate -aey -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev3:$DEVRANGE" lvchange -an $vg/$lv1 lvconvert -m+1 $vg/$lv1 "$dev4" lvchange -aey $vg/$lv1 should lvconvert $vg/$lv1 # wait check mirror $vg $lv1 "$dev3" check mirror_no_temporaries $vg $lv1 lvremove -ff $vg # --------------------------------------------------------------------- # removal during conversion # "remove newly added mirror" lvcreate -aey -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev3:$DEVRANGE" LVM_TEST_TAG="kill_me_$PREFIX" lvconvert -m+1 -b $vg/$lv1 "$dev4" lvconvert -m-1 $vg/$lv1 "$dev4" should lvconvert $vg/$lv1 # wait check mirror $vg $lv1 "$dev3" check mirror_no_temporaries $vg $lv1 check mirror_legs $vg $lv1 2 lvremove -ff $vg # "remove one of newly added mirrors" lvcreate -aey -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev3:$DEVRANGE" LVM_TEST_TAG="kill_me_$PREFIX" lvconvert -m+2 -b $vg/$lv1 "$dev4" "$dev5" lvconvert -m-1 $vg/$lv1 "$dev4" should lvconvert $vg/$lv1 # wait check mirror $vg $lv1 "$dev3" check mirror_no_temporaries $vg $lv1 check mirror_legs $vg $lv1 3 lvremove -ff $vg # "remove from original mirror (the original is still mirror)" lvcreate -aey -l2 --type mirror -m2 -n $lv1 $vg "$dev1" "$dev2" "$dev5" "$dev3:$DEVRANGE" LVM_TEST_TAG="kill_me_$PREFIX" lvconvert -m+1 -b $vg/$lv1 "$dev4" # FIXME: Extra wait here for mirror upconvert synchronization # otherwise we may fail here on parallel upconvert and downconvert # lvconvert-mirror-updown.sh tests this erroneous case separately should lvconvert $vg/$lv1 lvconvert -m-1 $vg/$lv1 "$dev2" should lvconvert $vg/$lv1 check mirror $vg $lv1 "$dev3" check mirror_no_temporaries $vg $lv1 check mirror_legs $vg $lv1 3 lvremove -ff $vg # "remove from original mirror (the original becomes linear)" lvcreate -aey -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev3:$DEVRANGE" LVM_TEST_TAG="kill_me_$PREFIX" lvconvert -m+1 -b $vg/$lv1 "$dev4" # FIXME: Extra wait here for mirror upconvert synchronization # otherwise we may fail here on parallel upconvert and downconvert # lvconvert-mirror-updown.sh tests this erroneous case separately should lvconvert $vg/$lv1 lvconvert -m-1 $vg/$lv1 "$dev2" should lvconvert $vg/$lv1 check mirror $vg $lv1 "$dev3" check mirror_no_temporaries $vg $lv1 check mirror_legs $vg $lv1 2 lvremove -ff $vg # Check the same with new --startpoll lvconvert command option lvcreate -aey -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev3:$DEVRANGE" LVM_TEST_TAG="kill_me_$PREFIX" lvconvert -m+1 -b $vg/$lv1 "$dev4" # FIXME: Extra wait here for mirror upconvert synchronization # otherwise we may fail here on parallel upconvert and downconvert # lvconvert-mirror-updown.sh tests this erroneous case separately should lvconvert --startpoll $vg/$lv1 lvconvert -m-1 $vg/$lv1 "$dev2" should lvconvert --startpoll $vg/$lv1 check mirror $vg $lv1 "$dev3" check mirror_no_temporaries $vg $lv1 check mirror_legs $vg $lv1 2 lvremove -ff $vg # --------------------------------------------------------------------- # "rhbz440405: lvconvert -m0 incorrectly fails if all PEs allocated" lvcreate -aey -l "$(get pv_field "$dev1" pe_count)" --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev3:$DEVRANGE" aux wait_for_sync $vg $lv1 lvconvert -m0 $vg/$lv1
"$dev1" check linear $vg $lv1 lvremove -ff $vg # "rhbz264241: lvm mirror doesn't lose it's "M" --nosync attribute # after being down and the up converted" lvcreate -aey -l2 --type mirror -m1 -n $lv1 --nosync $vg lvconvert -m0 $vg/$lv1 lvconvert --type mirror -m1 $vg/$lv1 lvs --noheadings -o attr $vg/$lv1 | grep '^[[:space:]]*m' lvremove -ff $vg # lvconvert from linear (on multiple PVs) to mirror lvcreate -aey -l 8 -n $lv1 $vg "$dev1:0-3" "$dev2:0-3" lvconvert --type mirror -m1 $vg/$lv1 should check mirror $vg $lv1 check mirror_legs $vg $lv1 2 lvremove -ff $vg # BZ 463272: disk log mirror convert option is lost if downconvert option is also given lvcreate -aey -l1 --type mirror -m2 --corelog -n $lv1 $vg "$dev1" "$dev2" "$dev3" aux wait_for_sync $vg $lv1 lvconvert --type mirror -m1 --mirrorlog disk $vg/$lv1 check mirror $vg $lv1 not check mirror $vg $lv1 core lvremove -ff $vg # --- # add mirror and disk log # "add 1 mirror and disk log" lvcreate -aey -l2 --type mirror -m1 --mirrorlog core -n $lv1 $vg "$dev1" "$dev2" # FIXME on next line, specifying $dev3:0 $dev4 (i.e log device first) fails (!) lvconvert -m+1 --mirrorlog disk -i1 $vg/$lv1 "$dev4" "$dev3:$DEVRANGE" check mirror $vg $lv1 "$dev3" check mirror_no_temporaries $vg $lv1 check mirror_legs $vg $lv1 3 lvremove -ff $vg # simple mirrored stripe lvcreate -aey -i2 -l10 -n $lv1 $vg lvconvert --type mirror -m1 -i1 $vg/$lv1 lvreduce -f -l1 $vg/$lv1 lvextend -f -l10 $vg/$lv1 lvremove -ff $vg/$lv1 # extents must be divisible lvcreate -aey -l15 -n $lv1 $vg not lvconvert --type mirror -m1 --corelog --stripes 2 $vg/$lv1 lvremove -ff $vg test -e LOCAL_CLVMD && exit 0 # FIXME - cases which needs to be fixed to work in cluster # Linear to mirror with mirrored log using --alloc anywhere lvcreate -aey -l2 -n $lv1 $vg "$dev1" lvconvert --type mirror -m +1 --mirrorlog mirrored --alloc anywhere $vg/$lv1 "$dev1" "$dev2" should check mirror $vg $lv1 lvremove -ff $vg # Should not be able to add images to --nosync mirror # but should be able to after 'lvchange --resync' lvcreate -aey --type mirror -m 1 -l1 -n $lv1 $vg --nosync not lvconvert -m +1 $vg/$lv1 lvchange -aey --resync -y $vg/$lv1 lvconvert -m +1 $vg/$lv1 lvremove -ff $vg lvcreate -aey --type mirror -m 1 --corelog -l1 -n $lv1 $vg --nosync not lvconvert -m +1 $vg/$lv1 lvchange -aey --resync -y $vg/$lv1 lvconvert -m +1 $vg/$lv1 lvremove -ff $vg # FIXME: Cluster exclusive activation does not work here # unsure why lib/metadata/mirror.c # has this code: # # } else if (vg_is_clustered(vg)) { # log_error("Unable to convert the log of an inactive " # "cluster mirror, %s", lv->name); # return 0; # disabling this in the code passes this test # bz192865: lvconvert log of an inactive mirror lv # convert from disklog to corelog, inactive lvcreate -aey -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev3:0-1" lvchange -an $vg/$lv1 lvconvert -y -f --mirrorlog core $vg/$lv1 check mirror $vg $lv1 core lvremove -ff $vg # convert from corelog to disklog, inactive lvcreate -aey -l2 --type mirror -m1 --mirrorlog core -n $lv1 $vg "$dev1" "$dev2" lvchange -an $vg/$lv1 lvconvert --mirrorlog disk $vg/$lv1 "$dev3:0-1" check mirror $vg $lv1 "$dev3" lvremove -ff $vg # bz1272175: check lvconvert reports progress while waiting for mirror # to get synced lvcreate -l2 -n $lv1 $vg lvconvert --type mirror -i1 -m1 $vg/$lv1 | tee out grep -e "$vg/$lv1: Converted:" out || die "Missing sync info in foreground mode" lvremove -ff $vg vgremove -ff $vg 
LVM2.2.02.176/test/shell/lvconvert-raid-takeover-alloc-failure.sh0000644000000000000120000000524513176752421023246 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux have_raid 1 9 1 || skip aux prepare_vg 6 function check_sub_lvs { local vg=$1 local lv=$2 local end=$3 for s in $(seq 0 "$end") do check lv_exists $vg ${lv}_rmeta_$s check lv_exists $vg ${lv}_rimage_$s done } function check_no_sub_lvs { local vg=$1 local lv=$2 local start=$3 local end=$4 for s in $(seq "$start" "$end") do check lv_not_exists $vg ${lv}_rmeta_$s check lv_not_exists $vg ${lv}_rimage_$s done } # Check takeover upconversion fails nicely on allocation errors without leaving image pair remnants behind # 6-way striped: neither conversion to raid5 nor raid6 possible lvcreate -aey --yes --stripes 6 --size 4M --name $lv1 $vg not lvconvert --yes --type raid4 $vg/$lv1 check lv_field $vg/$lv1 segtype "striped" check_no_sub_lvs $vg $lv1 0 5 not lvconvert --yes --type raid5 $vg/$lv1 check lv_field $vg/$lv1 segtype "striped" check_no_sub_lvs $vg $lv1 0 5 not lvconvert --yes --type raid6 $vg/$lv1 check lv_field $vg/$lv1 segtype "striped" check_no_sub_lvs $vg $lv1 0 5 # raid0_meta conversion is possible lvconvert --yes --type raid0_meta $vg/$lv1 check lv_field $vg/$lv1 segtype "raid0_meta" check_sub_lvs $vg $lv1 0 5 lvremove -y $vg # 5-way striped: conversion to raid5 possible but not to raid6 lvcreate -aey --stripes 5 --size 4M --name $lv1 $vg not lvconvert --yes --type raid6 $vg/$lv1 check lv_field $vg/$lv1 segtype "striped" check_no_sub_lvs $vg $lv1 0 5 lvconvert --yes --type raid5 $vg/$lv1 check lv_field $vg/$lv1 segtype "raid5_n" check lv_field $vg/$lv1 stripes 6 check lv_field $vg/$lv1 datastripes 5 check_sub_lvs $vg $lv1 0 5 lvremove -y $vg # 4-way striped: conversion to raid5 and raid6 possible lvcreate -aey --stripes 4 --size 4M --name $lv1 $vg lvconvert --yes --type raid5 $vg/$lv1 check lv_field $vg/$lv1 segtype "raid5_n" check lv_field $vg/$lv1 stripes 5 check lv_field $vg/$lv1 datastripes 4 check_sub_lvs $vg $lv1 0 4 check_no_sub_lvs $vg $lv1 5 5 lvremove -y $vg lvcreate -aey --stripes 4 --size 4M --name $lv1 $vg lvconvert --yes --type raid6 $vg/$lv1 check lv_field $vg/$lv1 segtype "raid6_n_6" check lv_field $vg/$lv1 stripes 6 check lv_field $vg/$lv1 datastripes 4 check_sub_lvs $vg $lv1 0 5 vgremove -ff $vg LVM2.2.02.176/test/shell/tags.sh0000644000000000000120000000546613176752421014717 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 .
lib/inittest aux prepare_pvs 4 # vgcreate with --addtag vgcreate --addtag firstvg $vg1 "$dev1" "$dev2" vgcreate --addtag secondvg $vg2 "$dev3" "$dev4" check vg_field $vg1 tags "firstvg" check vg_field $vg2 tags "secondvg" vgremove -f $vg1 $vg2 # vgchange with --addtag and --deltag vgcreate $vg1 "$dev1" "$dev2" vgcreate $vg2 "$dev3" "$dev4" vgchange --addtag firstvgtag1 $vg1 # adding a tag multiple times is not an error vgchange --addtag firstvgtag2 $vg1 vgchange --addtag firstvgtag2 $vg1 vgchange --addtag firstvgtag3 $vg1 vgchange --addtag secondvgtag1 $vg2 vgchange --addtag secondvgtag2 $vg2 vgchange --addtag secondvgtag3 $vg2 check vg_field @firstvgtag2 tags "firstvgtag1,firstvgtag2,firstvgtag3" check vg_field @secondvgtag1 tags "secondvgtag1,secondvgtag2,secondvgtag3" vgchange --deltag firstvgtag2 $vg1 check vg_field @firstvgtag1 tags "firstvgtag1,firstvgtag3" # deleting a tag multiple times is not an error vgchange --deltag firstvgtag2 $vg1 vgchange --deltag firstvgtag1 $vg2 vgremove -f $vg1 $vg2 # lvcreate with --addtag vgcreate $vg1 "$dev1" "$dev2" lvcreate --addtag firstlvtag1 -l 4 -n $lv1 $vg1 lvcreate --addtag secondlvtag1 -l 4 -n $lv2 $vg1 check lv_field @firstlvtag1 tags "firstlvtag1" not check lv_field @secondlvtag1 tags "firstlvtag1" check lv_field $vg1/$lv2 tags "secondlvtag1" not check lv_field $vg1/$lv1 tags "secondlvtag1" vgremove -f $vg1 # lvchange with --addtag and --deltag vgcreate $vg1 "$dev1" "$dev2" lvcreate -l 4 -n $lv1 $vg1 lvcreate -l 4 -n $lv2 $vg1 lvchange --addtag firstlvtag1 $vg1/$lv1 # adding a tag multiple times is not an error lvchange --addtag firstlvtag2 $vg1/$lv1 lvchange --addtag firstlvtag2 $vg1/$lv1 lvchange --addtag firstlvtag3 $vg1/$lv1 lvchange --addtag secondlvtag1 $vg1/$lv2 lvchange --addtag secondlvtag2 $vg1/$lv2 lvchange --addtag secondlvtag3 $vg1/$lv2 check lv_field $vg1/$lv1 tags "firstlvtag1,firstlvtag2,firstlvtag3" not check lv_field $vg1/$lv1 tags "secondlvtag1" check lv_field $vg1/$lv2 tags "secondlvtag1,secondlvtag2,secondlvtag3" not check lv_field $vg1/$lv1 tags "secondlvtag1" # deleting a tag multiple times is not an error lvchange --deltag firstlvtag2 $vg1/$lv1 lvchange --deltag firstlvtag2 $vg1/$lv1 check lv_field $vg1/$lv1 tags "firstlvtag1,firstlvtag3" check lv_field $vg1/$lv2 tags "secondlvtag1,secondlvtag2,secondlvtag3" vgremove -ff $vg1 LVM2.2.02.176/test/shell/thin-vglock.sh0000644000000000000120000000317713176752421016203 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test locking works and doesn't update metadata # RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1063542 SKIP_WITH_LVMLOCKD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . 
lib/inittest MKFS=mkfs.ext2 which $MKFS || skip aux have_thin 1 0 0 || skip aux prepare_vg lvcreate -L10 -T -V5 -n $lv1 $vg/pool lvcreate -an -V10 -T $vg/pool $MKFS "$DM_DEV_DIR/$vg/$lv1" mkdir mnt mount "$DM_DEV_DIR/$vg/$lv1" mnt lvcreate -s -n snap $vg/$lv1 check lv_field $vg/snap thin_id "3" lvconvert --merge $vg/snap umount mnt check lv_field $vg/$lv1 thin_id "1" check lv_field $vg/pool transaction_id "3" vgchange -an $vg # Check reboot case vgchange -ay --sysinit $vg # Check correct thin_id is shown after activation # even when metadata were not yet physically modified. # Merge take its place during activation, # but pool transaction_id still needs metadata update. check lv_field $vg/$lv1 thin_id "3" check lv_field $vg/pool transaction_id "3" # Check the metadata are updated after refresh # vgchange --refresh $vg check lv_field $vg/$lv1 thin_id "3" check lv_field $vg/pool transaction_id "4" #lvs -a -o+transaction_id,thin_id $vg vgremove -f $vg LVM2.2.02.176/test/shell/vgsplit-raid.sh0000644000000000000120000000261213176752421016354 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test vgsplit operation, including different LV types SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest COMM() { LAST_TEST="$*" } create_vg_() { vgcreate -s 64k "$@" } aux have_raid 1 3 0 || skip aux prepare_pvs 5 10 # # vgsplit can be done into a new or existing VG # for i in new existing do # # We can have PVs or LVs on the cmdline # for j in PV LV do COMM "vgsplit correctly splits RAID LV into $i VG ($j args)" create_vg_ $vg1 "$dev1" "$dev2" "$dev3" test $i = existing && create_vg_ $vg2 "$dev5" lvcreate -an -Zn -l 64 --type raid5 -i 2 -n $lv1 $vg1 if [ $j = PV ]; then not vgsplit $vg1 $vg2 "$dev1" not vgsplit $vg1 $vg2 "$dev2" not vgsplit $vg1 $vg2 "$dev1" "$dev2" vgsplit $vg1 $vg2 "$dev1" "$dev2" "$dev3" else vgsplit -n $lv1 $vg1 $vg2 fi if [ $i = existing ]; then check pvlv_counts $vg2 4 1 0 else check pvlv_counts $vg2 3 1 0 fi vgremove -f $vg2 done done LVM2.2.02.176/test/shell/lvconvert-cache-abort.sh0000644000000000000120000000407213176752421020141 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Exercise cache flushing is abortable SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux have_cache 1 3 0 || skip aux prepare_vg 2 # Data device on later delayed dev1 lvcreate -L4 -n cpool $vg "$dev1" lvconvert -y --type cache-pool $vg/cpool "$dev2" lvcreate -H -L 4 -n $lv1 --chunksize 32k --cachemode writeback --cachepool $vg/cpool $vg "$dev2" # # Ensure cache gets promoted blocks # for i in $(seq 1 10) ; do echo 3 >/proc/sys/vm/drop_caches dd if=/dev/zero of="$DM_DEV_DIR/$vg/$lv1" bs=64K count=20 conv=fdatasync || true echo 3 >/proc/sys/vm/drop_caches dd if="$DM_DEV_DIR/$vg/$lv1" of=/dev/null bs=64K count=20 || true done # Delay dev to ensure we have some time to 'capture' interrupt in flush aux delay_dev "$dev1" 100 0 "$(get first_extent_sector "$dev1"):" # TODO, how to make writeback cache dirty test "$(get lv_field $vg/$lv1 cache_dirty_blocks)" -gt 0 || { lvdisplay --maps $vg skip "Cannot make a dirty writeback cache LV." } sync dd if=/dev/zero of="$DM_DEV_DIR/$vg/$lv1" bs=4k count=100 conv=fdatasync LVM_TEST_TAG="kill_me_$PREFIX" lvconvert -v --splitcache $vg/$lv1 >logconvert 2>&1 & PID_CONVERT=$! for i in {1..50}; do dmsetup table "$vg-$lv1" | grep cleaner && break test "$i" -ge 50 && die "Waited for cleaner policy on $vg/$lv1 too long!" echo "Waiting for cleaner policy on $vg/$lv1" sleep .05 done kill -INT $PID_CONVERT aux enable_dev "$dev1" wait grep -E "Flushing.*aborted" logconvert || { cat logconvert || true vgremove -f $vg die "Flushing of $vg/$lv1 not aborted ?" } # check the table got restored check grep_dmsetup table $vg-$lv1 "writeback" vgremove -f $vg LVM2.2.02.176/test/shell/lvconvert-raid-takeover-thin.sh0000644000000000000120000000330413176752421021463 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # check we may convert thin-pool to raid1/raid10 and back # RHBZ#1365286 SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux have_thin 1 0 0 || skip aux have_raid 1 9 0 || skip aux prepare_vg 6 lvcreate -L4 -i3 -T $vg/pool -V10 for i in 1 2 ; do lvconvert --type raid10 -y $vg/pool_tdata check grep_dmsetup table $vg-pool_tdata "raid10" aux wait_for_sync $vg pool_tdata lvconvert --type striped -y $vg/pool_tdata check grep_dmsetup table $vg-pool_tdata "striped" done lvremove -f $vg lvcreate -L4 -T $vg/pool -V10 -n $lv1 for j in data meta ; do LV=pool_t${j} for i in 1 2 ; do lvconvert --type raid1 -m1 -y $vg/$LV check grep_dmsetup table $vg-${LV} "raid1" aux wait_for_sync $vg $LV lvconvert --type raid1 -m0 -y $vg/$LV check grep_dmsetup table ${vg}-${LV} "linear" done done # # Now same test again, when the lock holding LV is not a thin-pool # but thinLV $lv1 # lvchange -an $vg lvchange -ay $vg/$lv1 for j in data meta ; do LV=pool_t${j} for i in 1 2 ; do lvconvert --type raid1 -m1 -y $vg/$LV check grep_dmsetup table $vg-${LV} "raid1" aux wait_for_sync $vg $LV lvconvert --type raid1 -m0 -y $vg/$LV check grep_dmsetup table ${vg}-${LV} "linear" done done vgremove -ff $vg LVM2.2.02.176/test/shell/lvresize-rounding.sh0000644000000000000120000000410413176752421017433 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2007-2012 Red Hat, Inc. All rights reserved.
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 3 22 get_devs vgcreate -s 32K "$vg" "${DEVICES[@]}" lvcreate -an -Zn -l4 -i3 -I64 $vg lvcreate -an -Zn -l8 -i2 -I64 $vg lvcreate -an -Zn -l16 $vg lvcreate -an -Zn -l32 -i3 -I64 -n $lv1 $vg lvresize -l+64 -i3 -I64 $vg/$lv1 lvresize -l+64 -i3 -I128 $vg/$lv1 #lvcreate -l100%FREE -i3 -I64 --alloc anywhere $vg vgremove -f $vg # 15 extents LVM_TEST_AUX_TRACE=yes aux prepare_vg 3 22 unset LVM_TEST_AUX_TRACE # Block some extents lvcreate -an -Zn -l4 -i3 $vg lvcreate -an -Zn -l1 $vg lvcreate -an -Zn -l100%FREE -n $lv1 -i3 $vg check vg_field $vg vg_free_count 2 lvremove -f $vg/$lv1 lvcreate -an -Zn -l1 -n $lv1 -i3 $vg lvextend -l+100%FREE -i3 $vg/$lv1 check vg_field $vg vg_free_count 2 lvreduce -f -l50%LV $vg/$lv1 vgremove -f $vg vgcreate -s 4M $vg "$dev1" "$dev2" "$dev3" # Expect to play with 15 extents check vg_field $vg vg_free_count 15 # Should be rounded to 12 extents lvcreate -an -Zn -l10 -n lv -i3 $vg check vg_field $vg vg_free_count 3 # Should want 16 extents not lvextend -l+4 $vg/lv # Round up to whole free space lvextend -l+100%FREE $vg/lv check vg_field $vg vg_free_count 0 # Rounds up and should reduce just by 3 extents lvreduce -f -l-4 $vg/lv check vg_field $vg vg_free_count 3 # Should round up to 15 extents lvextend -f -l+1 $vg/lv check vg_field $vg vg_free_count 0 lvreduce -f -l-4 $vg/lv check vg_field $vg vg_free_count 3 lvextend -l90%VG $vg/lv check vg_field $vg vg_free_count 0 not lvreduce -f -l-10%LV $vg/lv check vg_field $vg vg_free_count 0 lvreduce -f -l-20%LV $vg/lv check vg_field $vg vg_free_count 3 LVM2.2.02.176/test/shell/lvcreate-thin-big.sh0000644000000000000120000000613613176752421017260 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # test currently needs to drop # 'return NULL' in _lv_create_an_lv after log_error("Can't create %s without using " SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest aux have_thin 1 0 0 || skip # Test --poolmetadatasize range # allocating large devices for testing aux prepare_pvs 10 16500 get_devs vgcreate -s 64K "$vg" "${DEVICES[@]}" # Size 0 is not valid invalid lvcreate -L4M --chunksize 128 --poolmetadatasize 0 -T $vg/pool1 2>out lvcreate -Zn -L4M --chunksize 128 --poolmetadatasize 16k -T $vg/pool1 2>out grep "WARNING: Minimum" out # FIXME: metadata allocation fails, if PV doesn't have at least 16GB # i.e. 
pool metadata device cannot be multisegment lvcreate -Zn -L4M --chunksize 64k --poolmetadatasize 17G -T $vg/pool2 2>out grep "WARNING: Maximum" out check lv_field $vg/pool1_tmeta size "2.00m" check lv_field $vg/pool2_tmeta size "15.81g" # Check we do report correct percent values. lvcreate --type zero -L3G $vg -n pool3 lvconvert -y --thinpool $vg/pool3 lvchange --errorwhenfull y $vg/pool3 lvchange --zero n $vg/pool3 lvcreate -V10G $vg/pool3 -n $lv1 lvcreate -V2G $vg/pool3 -n $lv2 dd if=/dev/zero of="$DM_DEV_DIR/$vg/$lv1" bs=512b count=1 conv=fdatasync # ...excercise write speed to 'zero' device ;) dd if=/dev/zero of="$DM_DEV_DIR/$vg/$lv2" bs=64K count=32767 conv=fdatasync lvs -a $vg # Check the percentage is not shown as 0.00 check lv_field $vg/$lv1 data_percent "0.01" # Check the percentage is not shown as 100.00 check lv_field $vg/$lv2 data_percent "99.99" # Check can start and see thinpool with metadata size above kernel limit lvcreate -L4M --poolmetadatasize 16G -T $vg/poolM check lv_field $vg/poolM data_percent "0.00" lvremove -ff $vg # Test automatic calculation of pool metadata size lvcreate -L160G -T $vg/pool check lv_field $vg/pool lv_metadata_size "80.00m" check lv_field $vg/pool chunksize "128.00k" lvremove -ff $vg/pool lvcreate -L10G --chunksize 256 -T $vg/pool1 lvcreate -L60G --chunksize 1024 -T $vg/pool2 check lv_field $vg/pool1_tmeta size "2.50m" check lv_field $vg/pool2_tmeta size "3.75m" lvremove -ff $vg # Block size of multiple 64KB needs >= 1.4 if aux have_thin 1 4 0 ; then # Test chunk size is rounded to 64KB boundary lvcreate -L10G --poolmetadatasize 4M -T $vg/pool check lv_field $vg/pool chunk_size "192.00k" fi # Old thinpool target required rounding to power of 2 aux lvmconf "global/thin_disabled_features = [ \"block_size\" ]" lvcreate -L10G --poolmetadatasize 4M -T $vg/pool_old check lv_field $vg/pool_old chunk_size "256.00k" lvremove -ff $vg # reset #aux lvmconf "global/thin_disabled_features = []" vgremove -ff $vg LVM2.2.02.176/test/shell/aa-lvmlockd-sanlock-prepare.sh0000644000000000000120000000313613176752421021227 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Set up things to run tests with sanlock' . lib/inittest [ -z "$LVM_TEST_LOCK_TYPE_SANLOCK" ] && skip; # Create a device and a VG that are both outside the scope of # the standard lvm test suite so that they will not be removed # and will remain in place while all the tests are run. # # Use this VG to hold the sanlock global lock which will be used # by lvmlockd during other tests. # # This script will be run before any standard tests are run. # After all the tests are run, another script will be run # to remove this VG and device. 
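# Editor's note -- illustrative sketch only, not part of this script: the
# removal script mentioned above would roughly undo the setup that follows.
# The names ("glvg", GL_DEV, gl_file.img) match what is created below; the
# exact cleanup commands are an assumption, not a copy of the real removal
# script, and this helper is never called here.
remove_gl_dev_example() {
	# Drop the sanlock global-lock VG, its dm device and the backing loop file.
	vgremove -ff --config 'devices { global_filter=["a|GL_DEV|", "r|.*|"] filter=["a|GL_DEV|", "r|.*|"]}' glvg
	dmsetup remove GL_DEV
	losetup -d "$(losetup -j "$PWD/gl_file.img" | cut -d: -f1)"
	rm -f "$PWD/gl_file.img"
}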
GL_DEV="/dev/mapper/GL_DEV" GL_FILE="$PWD/gl_file.img" dmsetup remove GL_DEV || true rm -f "$GL_FILE" dd if=/dev/zero of="$GL_FILE" bs=$((1024*1024)) count=1024 2> /dev/null GL_LOOP=$(losetup -f "$GL_FILE" --show) echo "0 $(blockdev --getsize $GL_LOOP linear $GL_LOOP 0)" | dmsetup create GL_DEV aux prepare_sanlock aux prepare_lvmlockd vgcreate --config 'devices { global_filter=["a|GL_DEV|", "r|.*|"] filter=["a|GL_DEV|", "r|.*|"]}' --lock-type sanlock glvg $GL_DEV vgs --config 'devices { global_filter=["a|GL_DEV|", "r|.*|"] filter=["a|GL_DEV|", "r|.*|"]}' -o+locktype,lockargs glvg LVM2.2.02.176/test/shell/unknown-segment.sh0000644000000000000120000000272113176752421017107 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2009 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 4 lvcreate -an -Zn -l 1 -n $lv1 $vg lvcreate -an -Zn -l 2 --type mirror -m 1 -n $lv2 $vg lvcreate -an -Zn --type zero -l 1 -n $lv3 $vg vgcfgbackup -f bak0 $vg sed -e 's,striped,unstriped,;s,mirror,unmirror,;s,zero,zero+NEWFLAG,' -i.orig bak0 vgcfgrestore -f bak0 $vg # we have on-disk metadata with unknown segments now not lvchange -aey $vg/$lv1 # check that activation is refused # try once more to catch invalid memory access with valgrind # when clvmd flushes cmd mem pool not lvchange -aey $vg/$lv2 # check that activation is refused not lvchange -aey $vg/$lv3 # check that activation is refused vgcfgbackup -f bak1 $vg cat bak1 sed -e 's,unstriped,striped,;s,unmirror,mirror,;s,zero+NEWFLAG,zero,' -i.orig bak1 vgcfgrestore -f bak1 $vg vgcfgbackup -f bak2 $vg grep -v -E 'description|seqno|creation_time|Generated' < bak0.orig > a grep -v -E 'description|seqno|creation_time|Generated' < bak2 > b diff -u a b vgremove -ff $vg LVM2.2.02.176/test/shell/000-basic.sh0000644000000000000120000000202513176752421015323 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2009-2011 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest lvm version lvm pvmove --version|sed -n "1s/.*: *\([0-9][^ ]*\) .*/\1/p" | tee version # ensure they are the same diff -u version lib/version-expected dmstats version |sed -n "1s/.*: *\([0-9][^ ]*\) .*/\1/p" | tee dmstats-version # ensure dmstats version matches build diff -u dmstats-version lib/dm-version-expected # ensure we can create devices (uses dmsetup, etc) aux prepare_devs 5 get_devs # ensure we do not crash on a bug in config file aux lvmconf 'log/prefix = 1""' not lvs "${DEVICES[@]}" LVM2.2.02.176/test/shell/lvconvert-cache.sh0000644000000000000120000001460613176752421017040 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014-2016 Red Hat, Inc. All rights reserved. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Exercise conversion of cache and cache pool SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux have_cache 1 3 0 || skip aux prepare_vg 5 80 lvcreate --type cache-pool -an -v -L 2 -n cpool $vg lvcreate -H -L 4 -n corigin --cachepool $vg/cpool fail lvcreate -s -L2 $vg/cpool fail lvcreate -s -L2 $vg/cpool_cdata fail lvcreate -s -L2 $vg/cpool_cmeta ########################### # Check regular converion # ########################### # lvcreate origin, lvcreate cache-pool, and lvconvert to cache lvcreate -an -Zn -L 2 -n $lv1 $vg lvcreate -L 8 -n $lv2 $vg lvcreate -an -Zn -L 8 -n $lv3 $vg lvcreate -an -Zn -L 8 -n $lv4 $vg lvcreate -an -Zn -L 16 -n $lv5 $vg # check validation of cachemode arg works invalid lvconvert --yes --type cache-pool --cachemode writethroughX --cachepool $vg/$lv1 # by default no cache settings are attached to converted cache-pool lvconvert --yes --type cache-pool --chunksize 256 $vg/$lv1 check inactive $vg ${lv1}_cdata check lv_field $vg/$lv1 cache_mode "" check lv_field $vg/$lv1 cache_policy "" check lv_field $vg/$lv1 cache_settings "" check lv_field $vg/$lv1 chunk_size "256.00k" # but allow to set them when specified explicitely on command line lvconvert --yes --type cache-pool --cachemode writeback --cachepolicy mq \ --cachesettings sequential_threshold=1234 --cachesettings random_threshold=56 \ --cachepool $vg/$lv2 check inactive $vg ${lv2}_cdata check lv_field $vg/$lv2 cache_mode "writeback" check lv_field $vg/$lv2 cache_policy "mq" check lv_field $vg/$lv2 cache_settings "random_threshold=56,sequential_threshold=1234" # Check swap of cache pool metadata lvconvert --yes --type cache-pool --poolmetadata $lv4 $vg/$lv3 UUID=$(get lv_field $vg/$lv5 uuid) lvconvert --yes --cachepool $vg/$lv3 --poolmetadata $lv5 check lv_field $vg/${lv3}_cmeta uuid "$UUID" # Check swap of cache pool metadata with --swapmetadata # (should swap back to lv5) lvconvert --yes --swapmetadata $vg/$lv3 --poolmetadata $lv5 check lv_field $vg/$lv5 uuid "$UUID" #fail lvconvert --cachepool $vg/$lv1 --poolmetadata $vg/$lv2 #lvconvert --yes --type cache-pool --poolmetadata $vg/$lv2 $vg/$lv1 #lvconvert --yes --poolmetadata $vg/$lv2 --cachepool $vg/$lv1 lvremove -ff $vg lvcreate -L 2 -n $lv1 $vg lvcreate --type cache-pool -l 1 -n ${lv1}_cachepool $vg lvconvert --cache --cachepool $vg/${lv1}_cachepool --cachemode writeback -Zy $vg/$lv1 check lv_field $vg/$lv1 cache_mode "writeback" dmsetup table ${vg}-$lv1 | grep cache # ensure it is loaded in kernel #lvconvert --cachepool $vg/${lv1}_cachepool $vg/$lv1 #lvconvert --cachepool $vg/${lv1}_cachepool --poolmetadatasize 20 "$dev3" fail lvconvert --type cache --cachepool $vg/${lv1}_cachepool -Zy $vg/$lv1 # Test --splitcache leaves both cache origin and cache pool lvconvert --splitcache $vg/$lv1 check lv_exists $vg $lv1 ${lv1}_cachepool lvremove -f $vg lvcreate -L 2 -n $lv1 $vg lvcreate --type cache-pool -l 1 -n ${lv1}_cachepool "$DM_DEV_DIR/$vg" lvconvert --cache --cachepool "$DM_DEV_DIR/$vg/${lv1}_cachepool" --cachemode writeback -Zy "$DM_DEV_DIR/$vg/$lv1" lvremove -f $vg lvcreate -n corigin -l 10 $vg lvcreate -n pool -l 10 $vg lvs -a -o +devices fail lvconvert 
--type cache --cachepool $vg/pool $vg/corigin lvconvert --yes --cache --cachepool $vg/pool $vg/corigin lvremove -ff $vg ####################### # Invalid conversions # ####################### lvcreate -an -Zn -L 2 -n $lv1 $vg lvcreate -an -Zn -L 8 -n $lv2 $vg lvcreate -an -Zn -L 8 -n $lv3 $vg lvcreate -an -Zn -L 8 -n $lv4 $vg # Undefined cachepool invalid lvconvert --type cache --poolmetadata $vg/$lv2 $vg/$lv1 # Cannot mix with thins invalid lvconvert --type cache --poolmetadata $vg/$lv2 --thinpool $vg/$lv1 invalid lvconvert --type cache --thin --poolmetadata $vg/$lv2 $vg/$lv1 # Undefined cached volume invalid lvconvert --type cache --cachepool $vg/$lv1 invalid lvconvert --cache --cachepool $vg/$lv1 # FIXME: temporarily we return error code 5 INVALID=not # Single vg is required $INVALID lvconvert --type cache --cachepool $vg/$lv1 --poolmetadata $vg1/$lv2 $vg/$lv3 $INVALID lvconvert --type cache --cachepool "$DM_DEV_DIR/$vg/$lv1" --poolmetadata "$DM_DEV_DIR/$vg1/$lv2" $vg/$lv3 $INVALID lvconvert --type cache --cachepool $vg/$lv1 --poolmetadata $lv2 $vg1/$lv3 $INVALID lvconvert --type cache --cachepool $vg1/$lv1 --poolmetadata $vg2/$lv2 $vg/$lv3 $INVALID lvconvert --type cache --cachepool $vg1/$lv1 --poolmetadata $vg2/$lv2 "$DM_DEV_DIR/$vg/$lv3" $INVALID lvconvert --type cache-pool --poolmetadata $vg2/$lv2 $vg1/$lv1 $INVALID lvconvert --cachepool $vg1/$lv1 --poolmetadata $vg2/$lv2 # Invalid syntax, vg is unknown $INVALID lvconvert --yes --cachepool $lv3 --poolmetadata $lv4 # Invalid chunk size is <32KiB >1GiB $INVALID lvconvert --type cache-pool --chunksize 16 --poolmetadata $lv2 $vg/$lv1 $INVALID lvconvert --type cache-pool --chunksize 2G --poolmetadata $lv2 $vg/$lv1 # Invalid chunk size is bigger then data size, needs to open VG fail lvconvert --yes --type cache-pool --chunksize 16M --poolmetadata $lv2 $vg/$lv1 lvremove -f $vg ######################## # Repair of cache pool # ######################## lvcreate --type cache-pool -an -v -L 2 -n cpool $vg lvcreate -H -L 4 -n corigin --cachepool $vg/cpool # unsupported yet fail lvconvert --repair $vg/cpool 2>&1 | tee out #grep "Cannot convert internal LV" out lvremove -f $vg ########################## # Prohibited conversions # ########################## lvcreate --type cache-pool -L10 $vg/$lv1 lvcreate --cache -L20 $vg/$lv1 lvcreate -L10 -n $lv2 $vg fail lvconvert --yes --type cache $vg/$lv2 --cachepool $vg/$lv1 fail lvconvert --yes --type cache $vg/$lv1 --cachepool $vg/$lv2 fail lvconvert --yes --type cache-pool $vg/$lv1 fail lvconvert --yes --type mirror -m1 $vg/$lv1 not aux have_raid 1 0 0 || fail lvconvert --yes --type raid1 -m1 $vg/$lv1 fail lvconvert --yes --type snapshot $vg/$lv1 $vg/$lv2 fail lvconvert --yes --type snapshot $vg/$lv2 $vg/$lv1 not aux have_thin 1 0 0 || fail lvconvert --yes -T --thinpool $vg/$lv2 $vg/$lv1 lvremove -f $vg vgremove -f $vg LVM2.2.02.176/test/shell/lvconvert-snapshot-mirror.sh0000644000000000000120000000316113176752421021136 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test various supported conversion of snapshot with mirrors SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux prepare_vg 3 vgchange -s 16k $vg lvcreate -L1 -n cow $vg # Mirror and snapshot conversion lvcreate -aye --type mirror -L1 -m1 -n mir $vg # Cannot create snapshot of mirror leg not lvcreate -s -L1 $vg/mir_mimage_0 2>&1 | tee err grep "not supported" err # cannot use 'mirror' as COW not lvconvert --yes --type snapshot $vg/cow $vg/mir 2>&1 | tee err grep "not accept" err not lvconvert --yes --type snapshot $vg/cow $vg/mir_mimage_0 2>&1 | tee err grep "lv_is_visible" err not lvconvert --yes --type snapshot $vg/cow $vg/mir_mlog 2>&1 | tee err grep "lv_is_visible" err # cannot use _mimage not lvconvert --yes --type snapshot $vg/mir_mimage_0 $vg/cow 2>&1 | tee err grep "not supported" err # cannot use _mlog not lvconvert --yes --type snapshot $vg/mir_mlog $vg/cow 2>&1 | tee err grep "not supported" err lvconvert --yes -s $vg/mir $vg/cow check lv_field $vg/mir segtype mirror check lv_field $vg/cow segtype linear check lv_attr_bit type $vg/cow "s" check lv_attr_bit type $vg/mir "o" lvs -a -o+lv_role,lv_layout $vg vgremove -f $vg LVM2.2.02.176/test/shell/vg-name-from-env.sh0000644000000000000120000000341613176752421017033 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. test_description='Test the vg name for an lv from env var' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 2 pvcreate "$dev1" pvcreate "$dev2" vgcreate $vg1 "$dev1" vgcreate $vg2 "$dev2" export LVM_VG_NAME=$vg1 # should use env lvcreate -n $lv1 -l 2 lvcreate -n $lv3 -l 2 lvcreate -n $lv2 -l 2 $vg2 lvcreate -n $lv4 -l 2 $vg2 lvs >err grep $lv1 err grep $lv3 err grep $lv2 err grep $lv4 err not lvs $vg1 >err not grep $lv1 err not grep $lv3 err not grep $lv2 err not grep $lv4 err not lvs $vg2 >err not grep $lv1 err not grep $lv3 err not grep $lv2 err not grep $lv4 err lvs $lv1 >err grep $lv1 err not grep $lv3 err not grep $lv2 err not grep $lv4 err lvs $lv1 $lv3 >err grep $lv1 err grep $lv3 err not grep $lv2 err not grep $lv4 err # should use env and fail to fine lv4 in vg1 not lvs $lv4 >err not grep $lv1 err not grep $lv3 err not grep $lv2 err not grep $lv4 err lvs $vg2/$lv4 >err not grep $lv1 err not grep $lv3 err not grep $lv2 err grep $lv4 err lvs $vg2/$lv2 $vg2/$lv4 >err not grep $lv1 err not grep $lv3 err grep $lv2 err grep $lv4 err # should use env lvchange -an $lv3 lvremove $lv3 not lvremove $lv4 lvs >err grep $lv1 err not grep $lv3 err grep $lv2 err grep $lv4 err # should use env lvcreate -n $lv3 -l 2 lvchange --addtag foo $lv3 lvchange -an $lv3 # lvremove by tag should apply to all vgs, not env vg lvchange --addtag foo $vg2/$lv4 lvchange -an $vg2/$lv4 lvremove @foo lvs >err grep $lv1 err not grep $lv3 err grep $lv2 err not grep $lv4 err vgremove -ff $vg1 $vg2 LVM2.2.02.176/test/shell/mirror-vgreduce-removemissing.sh0000644000000000000120000003045713176752421021760 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2017 Red Hat, Inc. All rights reserved. # Copyright (C) 2007 NEC Corporation # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description="ensure that 'vgreduce --removemissing' works on mirrored LV" SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 # disable lvmetad logging as it bogs down test systems export LVM_TEST_LVMETAD_DEBUG_OPTS=${LVM_TEST_LVMETAD_DEBUG_OPTS-} . lib/inittest list_pvs=() lv_is_on_ () { local lv=$vg/$1 shift local list_pvs=( "$@" ) echo "Check if $lv is exactly on PVs" "${list_pvs[@]}" rm -f out1 out2 printf "%s\n" "${list_pvs[@]}" | sort | uniq > out1 lvs -a -o+devices $lv get lv_devices "$lv" | sort | uniq > out2 diff --ignore-blank-lines out1 out2 } mimages_are_on_ () { local lv=$1 shift local list_pvs=( "$@" ) local mimages=() local i echo "Check if mirror images of $lv are on PVs" "${list_pvs[@]}" printf "%s\n" "${list_pvs[@]}" | sort | uniq > out1 get lv_field_lv_ "$vg" lv_name -a | grep "${lv}_mimage_" | tee lvs_log test -s lvs_log || return 1 while IFS= read -r i ; do mimages+=( "$i" ) done < lvs_log for i in "${mimages[@]}"; do echo "Checking $vg/$i" lvs -a -o+devices "$vg/$i" done for i in "${mimages[@]}"; do get lv_devices "$vg/$i" done | sort | uniq > out2 diff --ignore-blank-lines out1 out2 } mirrorlog_is_on_() { local lv=${1}_mlog shift lv_is_on_ $lv "$@" } lv_is_linear_() { echo "Check if $1 is linear LV (i.e. not a mirror)" get lv_field $vg/$1 "stripes,attr" | tee out grep "^1 -" out >/dev/null } rest_pvs_() { local index=$1 local num=$2 local rem=() local n local dev for n in $(seq 1 $(( index - 1 )) ) $(seq $(( index + 1 )) $num); do eval "dev=\$dev$n" rem+=( "$dev" ) done printf "%s\n" "${rem[@]}" } # --------------------------------------------------------------------- # Initialize PVs and VGs aux prepare_pvs 5 80 get_devs vgcreate -s 64k "$vg" "${DEVICES[@]}" BLOCKS=0-7 BLOCKS1=8-15 # --------------------------------------------------------------------- # Common environment setup/cleanup for each sub testcases prepare_lvs_() { lvremove -ff $vg (dm_table | not grep $vg) || \ die "ERROR: lvremove did leave some some mappings in DM behind!" 
} check_and_cleanup_lvs_() { lvs -a -o+devices $vg prepare_lvs_ } recover_vg_() { aux enable_dev "$@" pvcreate -ff "$@" vgextend $vg "$@" check_and_cleanup_lvs_ } #COMM "check environment setup/cleanup" prepare_lvs_ check_and_cleanup_lvs_ # --------------------------------------------------------------------- # one of mirror images has failed #COMM "basic: fail the 2nd mirror image of 2-way mirrored LV" prepare_lvs_ lvcreate -an -Zn -l2 --type mirror -m1 --nosync -n $lv1 $vg "$dev1" "$dev2" "$dev3":$BLOCKS mimages_are_on_ $lv1 "$dev1" "$dev2" mirrorlog_is_on_ $lv1 "$dev3" aux disable_dev "$dev2" vgreduce --removemissing --force $vg lv_is_linear_ $lv1 lv_is_on_ $lv1 "$dev1" # "cleanup" recover_vg_ "$dev2" # --------------------------------------------------------------------- # LV has 3 images in flat, # 1 out of 3 images fails #COMM test_3way_mirror_fail_1_ test_3way_mirror_fail_1_() { local index=$1 lvcreate -an -Zn -l2 --type mirror -m2 --nosync -n $lv1 $vg "$dev1" "$dev2" "$dev3" "$dev4":$BLOCKS mimages_are_on_ $lv1 "$dev1" "$dev2" "$dev3" mirrorlog_is_on_ $lv1 "$dev4" eval aux disable_dev "\$dev$index" vgreduce --removemissing --force $vg list_pvs=(); while IFS= read -r line ; do list_pvs+=( "$line" ) done < <( rest_pvs_ "$index" 3 ) mimages_are_on_ "$lv1" "${list_pvs[@]}" mirrorlog_is_on_ $lv1 "$dev4" } for n in $(seq 1 3); do #COMM fail mirror image $(($n - 1)) of 3-way mirrored LV" prepare_lvs_ test_3way_mirror_fail_1_ $n eval recover_vg_ "\$dev$n" done # --------------------------------------------------------------------- # LV has 3 images in flat, # 2 out of 3 images fail #COMM test_3way_mirror_fail_2_ test_3way_mirror_fail_2_() { local index=$1 lvcreate -an -Zn -l2 --type mirror -m2 --nosync -n $lv1 $vg "$dev1" "$dev2" "$dev3" "$dev4":$BLOCKS mimages_are_on_ $lv1 "$dev1" "$dev2" "$dev3" mirrorlog_is_on_ $lv1 "$dev4" list_pvs=(); while IFS= read -r line ; do list_pvs+=( "$line" ) done < <( rest_pvs_ "$index" 3 ) aux disable_dev "${list_pvs[@]}" vgreduce --force --removemissing $vg lv_is_linear_ $lv1 eval lv_is_on_ $lv1 "\$dev$n" } for n in $(seq 1 3); do #COMM fail mirror images other than mirror image $(($n - 1)) of 3-way mirrored LV prepare_lvs_ test_3way_mirror_fail_2_ $n list_pvs=(); while IFS= read -r line ; do list_pvs+=( "$line" ) done < <( rest_pvs_ "$n" 3 ) recover_vg_ "${list_pvs[@]}" done # --------------------------------------------------------------------- # LV has 4 images, 1 of them is in the temporary mirror for syncing. # 1 out of 4 images fails #COMM test_3way_mirror_plus_1_fail_1_ test_3way_mirror_plus_1_fail_1_() { local index=$1 lvcreate -an -Zn -l2 --type mirror -m2 -n $lv1 $vg "$dev1" "$dev2" "$dev3" "$dev5":$BLOCKS lvconvert -m+1 $vg/$lv1 "$dev4" check mirror_images_on $vg $lv1 "$dev1" "$dev2" "$dev3" "$dev4" check mirror_log_on $vg $lv1 "$dev5" eval aux disable_dev \$dev$index vgreduce --removemissing --force $vg list_pvs=(); while IFS= read -r line ; do list_pvs+=( "$line" ) done < <( rest_pvs_ "$index" 4 ) check mirror_images_on $vg $lv1 "${list_pvs[@]}" check mirror_log_on $vg $lv1 "$dev5" } for n in $(seq 1 4); do #COMM "fail mirror image $(($n - 1)) of 4-way (1 converting) mirrored LV" prepare_lvs_ test_3way_mirror_plus_1_fail_1_ $n eval recover_vg_ \$dev$n done # --------------------------------------------------------------------- # LV has 4 images, 1 of them is in the temporary mirror for syncing. 
# 3 out of 4 images fail #COMM test_3way_mirror_plus_1_fail_3_ test_3way_mirror_plus_1_fail_3_() { local index=$1 local dev lvcreate -an -Zn -l2 --type mirror -m2 -n $lv1 $vg "$dev1" "$dev2" "$dev3" "$dev5":$BLOCKS lvconvert -m+1 $vg/$lv1 "$dev4" check mirror_images_on $vg $lv1 "$dev1" "$dev2" "$dev3" "$dev4" check mirror_log_on $vg $lv1 "$dev5" list_pvs=(); while IFS= read -r line ; do list_pvs+=( "$line" ) done < <( rest_pvs_ "$index" 4 ) aux disable_dev "${list_pvs[@]}" vgreduce --removemissing --force $vg lvs -a -o+devices $vg eval dev=\$dev$n check linear $vg $lv1 check lv_on $vg $lv1 "$dev" } for n in $(seq 1 4); do #COMM "fail mirror images other than mirror image $(($n - 1)) of 4-way (1 converting) mirrored LV" prepare_lvs_ test_3way_mirror_plus_1_fail_3_ $n list_pvs=(); while IFS= read -r line ; do list_pvs+=( "$line" ) done < <( rest_pvs_ "$n" 4 ) recover_vg_ "${list_pvs[@]}" done # --------------------------------------------------------------------- # LV has 4 images, 2 of them are in the temporary mirror for syncing. # 1 out of 4 images fail # test_2way_mirror_plus_2_fail_1_ test_2way_mirror_plus_2_fail_1_() { local index=$1 lvcreate -an -Zn -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev5":$BLOCKS lvconvert -m+2 $vg/$lv1 "$dev3" "$dev4" mimages_are_on_ $lv1 "$dev1" "$dev2" "$dev3" "$dev4" mirrorlog_is_on_ $lv1 "$dev5" eval aux disable_dev \$dev$n vgreduce --removemissing --force $vg list_pvs=(); while IFS= read -r line ; do list_pvs+=( "$line" ) done < <( rest_pvs_ "$index" 4 ) mimages_are_on_ "$lv1" "${list_pvs[@]}" mirrorlog_is_on_ $lv1 "$dev5" } for n in $(seq 1 4); do #COMM "fail mirror image $(($n - 1)) of 4-way (2 converting) mirrored LV" prepare_lvs_ test_2way_mirror_plus_2_fail_1_ $n eval recover_vg_ "\$dev$n" done # --------------------------------------------------------------------- # LV has 4 images, 2 of them are in the temporary mirror for syncing. 
# 3 out of 4 images fail # test_2way_mirror_plus_2_fail_3_ test_2way_mirror_plus_2_fail_3_() { local index=$1 local dev lvcreate -an -Zn -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev5":$BLOCKS lvconvert -m+2 $vg/$lv1 "$dev3" "$dev4" mimages_are_on_ $lv1 "$dev1" "$dev2" "$dev3" "$dev4" mirrorlog_is_on_ $lv1 "$dev5" list_pvs=(); while IFS= read -r line ; do list_pvs+=( "$line" ) done < <( rest_pvs_ "$index" 4 ) aux disable_dev "${list_pvs[@]}" vgreduce --removemissing --force $vg lvs -a -o+devices $vg eval dev=\$dev$n not mimages_are_on_ $lv1 "$dev" lv_is_on_ $lv1 "$dev" not mirrorlog_is_on_ $lv1 "$dev5" } for n in $(seq 1 4); do #COMM "fail mirror images other than mirror image $(($n - 1)) of 4-way (2 converting) mirrored LV" prepare_lvs_ test_2way_mirror_plus_2_fail_3_ $n list_pvs=(); while IFS= read -r line ; do list_pvs+=( "$line" ) done < <( rest_pvs_ "$n" 4 ) recover_vg_ "${list_pvs[@]}" done # --------------------------------------------------------------------- # log device is gone (flat mirror and stacked mirror) #COMM "fail mirror log of 2-way mirrored LV" prepare_lvs_ lvcreate -aey -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev5":$BLOCKS mimages_are_on_ $lv1 "$dev1" "$dev2" mirrorlog_is_on_ $lv1 "$dev5" aux disable_dev "$dev5" vgreduce --removemissing --force $vg mimages_are_on_ $lv1 "$dev1" "$dev2" not mirrorlog_is_on_ $lv1 "$dev5" recover_vg_ "$dev5" #COMM "fail mirror log of 3-way (1 converting) mirrored LV" prepare_lvs_ lvcreate -aey -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev5":$BLOCKS lvconvert -m+1 $vg/$lv1 "$dev3" mimages_are_on_ $lv1 "$dev1" "$dev2" "$dev3" mirrorlog_is_on_ $lv1 "$dev5" aux disable_dev "$dev5" vgreduce --removemissing --force $vg mimages_are_on_ $lv1 "$dev1" "$dev2" "$dev3" not mirrorlog_is_on_ $lv1 "$dev5" recover_vg_ "$dev5" # --------------------------------------------------------------------- # all images are gone (flat mirror and stacked mirror) #COMM "fail all mirror images of 2-way mirrored LV" prepare_lvs_ lvcreate -an -Zn -l2 --type mirror -m1 --nosync -n $lv1 $vg "$dev1" "$dev2" "$dev5":$BLOCKS mimages_are_on_ $lv1 "$dev1" "$dev2" mirrorlog_is_on_ $lv1 "$dev5" aux disable_dev "$dev1" "$dev2" vgreduce --removemissing --force $vg not lvs $vg/$lv1 recover_vg_ "$dev1" "$dev2" #COMM "fail all mirror images of 3-way (1 converting) mirrored LV" prepare_lvs_ lvcreate -an -Zn -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev5":$BLOCKS lvconvert -m+1 $vg/$lv1 "$dev3" mimages_are_on_ $lv1 "$dev1" "$dev2" "$dev3" mirrorlog_is_on_ $lv1 "$dev5" aux disable_dev "$dev1" "$dev2" "$dev3" vgreduce --removemissing --force $vg not lvs $vg/$lv1 recover_vg_ "$dev1" "$dev2" "$dev3" # --------------------------------------------------------------------- # Multiple LVs #COMM "fail a mirror image of one of mirrored LV" prepare_lvs_ lvcreate -an -Zn -l2 --type mirror -m1 --nosync -n $lv1 $vg "$dev1" "$dev2" "$dev5":$BLOCKS lvcreate -an -Zn -l2 --type mirror -m1 --nosync -n $lv2 $vg "$dev3" "$dev4" "$dev5":$BLOCKS1 mimages_are_on_ $lv1 "$dev1" "$dev2" mimages_are_on_ $lv2 "$dev3" "$dev4" mirrorlog_is_on_ $lv1 "$dev5" mirrorlog_is_on_ $lv2 "$dev5" aux disable_dev "$dev2" vgreduce --removemissing --force $vg mimages_are_on_ $lv2 "$dev3" "$dev4" mirrorlog_is_on_ $lv2 "$dev5" lv_is_linear_ $lv1 lv_is_on_ $lv1 "$dev1" recover_vg_ "$dev2" #COMM "fail mirror images, one for each mirrored LV" prepare_lvs_ lvcreate -an -Zn -l2 --type mirror -m1 --nosync -n $lv1 $vg "$dev1" "$dev2" "$dev5":$BLOCKS lvcreate -an -Zn -l2 --type mirror -m1 --nosync 
-n $lv2 $vg "$dev3" "$dev4" "$dev5":$BLOCKS1 mimages_are_on_ $lv1 "$dev1" "$dev2" mimages_are_on_ $lv2 "$dev3" "$dev4" mirrorlog_is_on_ $lv1 "$dev5" mirrorlog_is_on_ $lv2 "$dev5" aux disable_dev "$dev2" aux disable_dev "$dev4" vgreduce --removemissing --force $vg lv_is_linear_ $lv1 lv_is_on_ $lv1 "$dev1" lv_is_linear_ $lv2 lv_is_on_ $lv2 "$dev3" recover_vg_ "$dev2" "$dev4" # --------------------------------------------------------------------- # no failure #COMM "no failures" prepare_lvs_ lvcreate -an -Zn -l2 --type mirror -m1 --nosync -n $lv1 $vg "$dev1" "$dev2" "$dev5":$BLOCKS mimages_are_on_ $lv1 "$dev1" "$dev2" mirrorlog_is_on_ $lv1 "$dev5" vgreduce --removemissing --force $vg mimages_are_on_ $lv1 "$dev1" "$dev2" mirrorlog_is_on_ $lv1 "$dev5" check_and_cleanup_lvs_ # --------------------------------------------------------------------- LVM2.2.02.176/test/shell/pvchange-usage.sh0000644000000000000120000000640213176752421016645 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # 'Test pvchange option values' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest check_changed_uuid_() { test "$1" != "$(get pv_field "$2" uuid)" || die "UUID has not changed!" } aux prepare_pvs 4 # check 'allocatable' pv attribute pvcreate "$dev1" check pv_field "$dev1" pv_attr --- vgcreate $vg1 "$dev1" check pv_field "$dev1" pv_attr a-- pvchange --allocatable n "$dev1" check pv_field "$dev1" pv_attr u-- vgremove -ff $vg1 not pvchange --allocatable y "$dev1" pvremove -ff "$dev1" for mda in 0 1 2 do # "setup pv with metadatacopies = $mda" pvcreate --metadatacopies $mda "$dev1" # cannot change allocatability for orphan PVs fail pvchange "$dev1" -x y fail pvchange "$dev1" -x n vgcreate $vg1 "$dev4" "$dev1" # "pvchange adds/dels tag to pvs with metadatacopies = $mda " pvchange "$dev1" --addtag test$mda check pv_field "$dev1" pv_tags test$mda pvchange "$dev1" --deltag test$mda check pv_field "$dev1" pv_tags "" # "vgchange disable/enable allocation for pvs with metadatacopies = $mda (bz452982)" pvchange "$dev1" -x n pvchange "$dev1" -x n # already disabled check pv_field "$dev1" pv_attr u-- pvchange "$dev1" -x y pvchange "$dev1" -x y # already enabled check pv_field "$dev1" pv_attr a-- # check we are able to change number of managed metadata areas if test $mda -gt 0 ; then pvchange --force --metadataignore y "$dev1" else # already ignored fail pvchange --metadataignore y "$dev1" fi # 'remove pv' vgremove $vg1 pvremove "$dev1" done # "pvchange uuid" pvcreate --metadatacopies 0 "$dev1" pvcreate --metadatacopies 2 "$dev2" vgcreate $vg1 "$dev1" "$dev2" # Checking for different UUID after pvchange UUID1=$(get pv_field "$dev1" uuid) pvchange -u "$dev1" check_changed_uuid_ "$UUID1" "$dev1" UUID2=$(get pv_field "$dev2" uuid) pvchange -u "$dev2" check_changed_uuid_ "$UUID2" "$dev2" UUID1=$(get pv_field "$dev1" uuid) UUID2=$(get pv_field "$dev2" uuid) pvchange -u --all check_changed_uuid_ "$UUID1" "$dev1" check_changed_uuid_ "$UUID2" "$dev2" check pvlv_counts $vg1 2 0 0 # some args are needed invalid pvchange # some PV needed invalid pvchange --addtag tag invalid pvchange --deltag tag # some 
--all & PV can go together invalid pvchange -a "$dev1" --addtag tag # '-a' needs more params invalid pvchange -a # '-a' is searching for devs, so specifying device is invalid invalid pvchange -a "$dev1" fail pvchange -u "$dev1-notfound" # pvchange rejects uuid change under an active lv lvcreate -l 16 -i 2 -n $lv --alloc anywhere $vg1 check pvlv_counts $vg1 2 1 0 not pvchange -u "$dev1" vgremove -f $vg1 # cannot change PV tag to PV that is not in VG" fail pvchange "$dev1" --addtag test fail pvchange "$dev1" --deltag test if test -n "$LVM_TEST_LVM1" ; then # cannot add PV tag to lvm1 format pvcreate -M1 "$dev1" vgcreate -M1 $vg1 "$dev1" fail pvchange "$dev1" --addtag test fi LVM2.2.02.176/test/shell/lvcreate-large-raid.sh0000644000000000000120000000617113176752421017565 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012,2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # 'Exercise some lvcreate diagnostics' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest # FIXME update test to make something useful on <16T aux can_use_16T || skip aux have_raid 1 3 0 || skip segtypes="raid5" aux have_raid4 && segtypes="raid4 raid5" # Prepare 5x ~1P sized devices aux prepare_pvs 5 1000000000 get_devs vgcreate "$vg1" "${DEVICES[@]}" aux lvmconf 'devices/issue_discards = 1' # Delay PVs so that resynchronization doesn't fill too much space for device in "${DEVICES[@]}" do aux delay_dev "$device" 0 10 "$(get first_extent_sector "$device")" done # bz837927 START # # Create large RAID LVs # # 200 TiB raid1 lvcreate --type raid1 -m 1 -L 200T -n $lv1 $vg1 --nosync check lv_field $vg1/$lv1 size "200.00t" check raid_leg_status $vg1 $lv1 "AA" lvremove -ff $vg1 # 1 PiB raid1 lvcreate --type raid1 -m 1 -L 1P -n $lv1 $vg1 --nosync check lv_field $vg1/$lv1 size "1.00p" check raid_leg_status $vg1 $lv1 "AA" lvremove -ff $vg1 # 750 TiB raid4/5 for segtype in $segtypes; do lvcreate --type $segtype -i 3 -L 750T -n $lv1 $vg1 --nosync check lv_field $vg1/$lv1 size "750.00t" check raid_leg_status $vg1 $lv1 "AAAA" lvremove -ff $vg1 done # # Extending large 200 TiB RAID LV to 400 TiB (belong in different script?) # lvcreate --type raid1 -m 1 -L 200T -n $lv1 $vg1 --nosync check lv_field $vg1/$lv1 size "200.00t" check raid_leg_status $vg1 $lv1 "AA" lvextend -L +200T $vg1/$lv1 check lv_field $vg1/$lv1 size "400.00t" check raid_leg_status $vg1 $lv1 "AA" lvremove -ff $vg1 # Check --nosync is rejected for raid6 if aux have_raid 1 9 0 ; then not lvcreate --type raid6 -i 3 -L 750T -n $lv1 $vg1 --nosync fi # 750 TiB raid6 lvcreate --type raid6 -i 3 -L 750T -n $lv1 $vg1 check lv_field $vg1/$lv1 size "750.00t" check raid_leg_status $vg1 $lv1 "aaaaa" lvremove -ff $vg1 # 1 PiB raid6, then extend up to 2 PiB lvcreate --type raid6 -i 3 -L 1P -n $lv1 $vg1 check lv_field $vg1/$lv1 size "1.00p" check raid_leg_status $vg1 $lv1 "aaaaa" lvextend -L +1P $vg1/$lv1 check lv_field $vg1/$lv1 size "2.00p" check raid_leg_status $vg1 $lv1 "aaaaa" lvremove -ff $vg1 # # Convert large 200 TiB linear to RAID1 (belong in different test script?) 
# lvcreate -aey -L 200T -n $lv1 $vg1 lvconvert -y --type raid1 -m 1 $vg1/$lv1 check lv_field $vg1/$lv1 size "200.00t" if aux have_raid 1 9 0; then # The 1.9.0 version of dm-raid is capable of performing # linear -> RAID1 upconverts as "recover" not "resync" # The LVM code now checks the dm-raid version when # upconverting and if 1.9.0+ is found, it uses "recover" check raid_leg_status $vg1 $lv1 "Aa" else check raid_leg_status $vg1 $lv1 "aa" fi lvremove -ff $vg1 # bz837927 END vgremove -ff $vg1 LVM2.2.02.176/test/shell/lvconvert-repair-mirror.sh0000644000000000000120000000404113176752421020557 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest MOUNT_DIR=mnt MKFS=$(which mkfs.ext3) || skip cleanup_mounted_and_teardown() { umount "$MOUNT_DIR" || true aux teardown } aux lvmconf 'allocation/mirror_logs_require_separate_pvs = 1' aux prepare_vg 5 ################### Check lost mirror leg ################# # # NOTE: using --regionsize 1M has major impact on my box # on read performance while mirror is synchronized # with the default 512K - my C2D T61 reads just a couple of MB/s! # lvcreate -aey --type mirror -L10 --regionsize 1M -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev3" "$MKFS" "$DM_DEV_DIR/$vg/$lv1" mkdir "$MOUNT_DIR" aux delay_dev "$dev2" 0 500 "$(get first_extent_sector "$dev2"):" aux delay_dev "$dev4" 0 500 "$(get first_extent_sector "$dev4"):" # # Enforce synchronization # ATM requires unmounted/unused LV?? # lvchange --yes --resync $vg/$lv1 trap 'cleanup_mounted_and_teardown' EXIT mount "$DM_DEV_DIR/$vg/$lv1" "$MOUNT_DIR" # run 'dd' operation during failure of 'mlog/mimage' device dd if=/dev/zero of=mnt/zero bs=4K count=100 conv=fdatasync 2>err & PERCENT=$(get lv_field $vg/$lv1 copy_percent) PERCENT=${PERCENT%%\.*} # cut decimal # and check less than 50% of the mirror is in sync (could be unusable delay_dev ?) test "$PERCENT" -lt 50 || skip #lvs -a -o+devices $vg #aux disable_dev "$dev3" aux disable_dev "$dev2" lvconvert --yes --repair $vg/$lv1 lvs -a $vg aux enable_dev "$dev2" wait # dd MAY NOT HAVE produced any error message not grep error err lvs -a -o+devices $vg umount "$MOUNT_DIR" fsck -n "$DM_DEV_DIR/$vg/$lv1" aux enable_dev "$dev4" lvremove -ff $vg LVM2.2.02.176/test/shell/thin-restore.sh0000644000000000000120000000165013176752421016373 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # test restore operation of thin pool metadata SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} .
lib/inittest # # Main # aux have_thin 1 0 0 || skip aux prepare_vg 2 lvcreate -T -L8M $vg/pool -V10M -n $lv1 vgcfgbackup -f backup $vg # use of --force is mandatory not vgcfgrestore -f backup $vg vgcfgrestore -f backup --force $vg check lv_field $vg/pool transaction_id 1 vgremove -f $vg LVM2.2.02.176/test/shell/lvcreate-raid10.sh0000644000000000000120000000557413176752421016644 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest lv_devices() { test "$3" -eq "$(get lv_devices "$1/$2" | wc -w)" } ######################################################## # MAIN ######################################################## aux have_raid 1 3 0 || skip aux prepare_vg 6 20 # 6 devices for RAID10 (2-mirror,3-stripe) test # # Create RAID10: # # Should not allow more than 2-way mirror not lvcreate --type raid10 -m 2 -i 2 -l 2 -n $lv1 $vg # 2-way mirror, 2-stripes lvcreate --type raid10 -m 1 -i 2 -l 2 -n $lv1 $vg aux wait_for_sync $vg $lv1 lvremove -ff $vg/$lv1 # 2-way mirror, 2-stripes - Set min/max recovery rate lvcreate --type raid10 -m 1 -i 2 -l 2 \ --minrecoveryrate 50 --maxrecoveryrate 1M \ -n $lv1 $vg check lv_field $vg/$lv1 raid_min_recovery_rate 50 check lv_field $vg/$lv1 raid_max_recovery_rate 1024 aux wait_for_sync $vg $lv1 # 2-way mirror, 3-stripes lvcreate --type raid10 -m 1 -i 3 -l 3 -n $lv2 $vg aux wait_for_sync $vg $lv2 lvremove -ff $vg # Test 100%FREE option # 38 extents / device # 1 image = 37 extents (1 for meta) # 3 images = 111 extents = 55.50m lvcreate --type raid10 -i 3 -l 100%FREE -an -Zn -n raid10 $vg check lv_field $vg/raid10 size "55.50m" lvremove -ff $vg # Create RAID (implicit stripe count based on PV count) ####################################################### # Not enough drives not lvcreate --type raid10 -l2 $vg "$dev1" "$dev2" "$dev3" # Implicit count comes from #PVs given (always 2-way mirror) # Defaults -i2, which works with 4 PVs listed lvcreate --type raid10 -l2 -an -Zn -n raid10 $vg "$dev1" "$dev2" "$dev3" "$dev4" lv_devices $vg raid10 4 # Defaults -i2 even though more PVs listed lvcreate --type raid10 -l2 -an -Zn -n raid10_6 $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" lv_devices $vg raid10_6 4 lvremove -ff $vg # # FIXME: Add tests that specify particular PVs to use for creation # ######################################################## # Try again with backward compatible old logic applied # ######################################################## aux lvmconf 'allocation/raid_stripe_all_devices = 1' # Implicit count comes from #PVs given (always 2-way mirror) lvcreate --type raid10 -l2 -an -Zn -n raid10 $vg "$dev1" "$dev2" "$dev3" "$dev4" lv_devices $vg raid10 4 # Implicit count comes from total #PVs in VG (always 2 for mirror though) lvcreate --type raid10 -l2 -an -Zn -n raid10_vg $vg lv_devices $vg raid10_vg 6 vgremove -ff $vg LVM2.2.02.176/test/shell/thin-merge.sh0000644000000000000120000000552013176752421016007 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013 Red Hat, Inc. All rights reserved. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # test merge of thin snapshot SKIP_WITH_LVMLOCKD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest MKFS=mkfs.ext2 which $MKFS || skip which fsck || skip # # Main # aux have_thin 1 0 0 || skip aux prepare_vg 2 lvcreate -T -L8M $vg/pool -V10M -n $lv1 lvchange --addtag tagL $vg/$lv1 mkdir mnt $MKFS "$DM_DEV_DIR/$vg/$lv1" mount "$DM_DEV_DIR/$vg/$lv1" mnt touch mnt/test lvcreate -K -s -n snap --addtag tagS $vg/$lv1 mkdir mntsnap $MKFS "$DM_DEV_DIR/$vg/snap" mount "$DM_DEV_DIR/$vg/snap" mntsnap touch mntsnap/test_snap lvs -o+tags,thin_id $vg lvconvert --merge $vg/snap &>out grep "Merging of thin snapshot $vg/snap will occur on next activation of $vg/${lv1}." out umount mnt # Merge cannot happen lvchange --refresh $vg/$lv1 check lv_field $vg/$lv1 thin_id "1" # Fails since it cannot deactivate both not lvchange -an $vg/$lv1 # But test $lv1 is not active check inactive $vg $lv1 # Also still cannot reactivate $lv1 not lvchange -ay $vg/$lv1 umount mntsnap lvdisplay -a $vg | tee out grep "merged with" out grep "merging to" out # Check there is no support for manipulation with hidden 'snap' not lvchange --refresh $vg/snap not lvchange -an $vg/snap not lvremove $vg/snap # Finally deactivate 'snap' again via $lv1 lvchange -an $vg/$lv1 # Still must not be activable not lvchange -K -ay $vg/snap lvs -a -o +tags,thin_id $vg # Test if merge happens lvchange -ay $vg/$lv1 check lv_exists $vg $lv1 check lv_field $vg/$lv1 thin_id "2" check lv_field $vg/$lv1 tags "tagL" check lv_not_exists $vg snap fsck -n "$DM_DEV_DIR/$vg/$lv1" mount "$DM_DEV_DIR/$vg/$lv1" mnt test -e mnt/test_snap umount mnt # test if thin snapshot has also 'old-snapshot' lvcreate -s -n snap $vg/$lv1 # Also add old snapshot to thin origin lvcreate -s -L10 -n oldsnapof_${lv1} $vg/$lv1 not lvconvert --merge $vg/snap $MKFS "$DM_DEV_DIR/$vg/oldsnapof_${lv1}" lvconvert --merge $vg/oldsnapof_${lv1} fsck -n "$DM_DEV_DIR/$vg/$lv1" check lv_not_exists $vg oldsnapof_${lv1} # Add old snapshot to thin snapshot lvcreate -s -L10 -n oldsnapof_snap $vg/snap lvconvert --merge $vg/snap lvremove -f $vg/oldsnapof_snap check lv_field $vg/$lv1 thin_id "3" # Check --mergethin lvcreate -s -n snap $vg/$lv1 check lv_field $vg/snap thin_id "4" lvconvert --mergethin $vg/snap &>out grep "Volume $vg/snap replaced origin $vg/${lv1}." out check lv_field $vg/$lv1 thin_id "4" vgremove -ff $vg LVM2.2.02.176/test/shell/vgsplit-operation.sh0000644000000000000120000002414613176752421017443 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2007 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test vgsplit operation, including different LV types # disable lvmetad logging as it bogs down test systems SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_LVMETAD_DEBUG_OPTS=${LVM_TEST_LVMETAD_DEBUG_OPTS-} . lib/inittest COMM() { LAST_TEST="$*" } create_vg_() { vgcreate -s 64k "$@" } aux prepare_pvs 5 10 # FIXME: paramaterize lvm1 vs lvm2 metadata; most of these tests should run # fine with lvm1 metadata as well; for now, just add disks 5 and 6 as lvm1 # metadata # # vgsplit can be done into a new or existing VG # for i in new existing do # # We can have PVs or LVs on the cmdline # for j in PV LV do COMM "vgsplit correctly splits single linear LV into $i VG ($j args)" create_vg_ $vg1 "$dev1" "$dev2" test $i = existing && create_vg_ $vg2 "$dev3" "$dev4" lvcreate -an -Zn -l 4 -n $lv1 $vg1 "$dev1" if [ $j = PV ]; then vgsplit $vg1 $vg2 "$dev1" else vgsplit -n $lv1 $vg1 $vg2 fi check pvlv_counts $vg1 1 0 0 if [ $i = existing ]; then check pvlv_counts $vg2 3 1 0 else check pvlv_counts $vg2 1 1 0 fi lvremove -f $vg2/$lv1 vgremove -f $vg2 $vg1 COMM "vgsplit correctly splits single striped LV into $i VG ($j args)" create_vg_ $vg1 "$dev1" "$dev2" test $i = existing && create_vg_ $vg2 "$dev3" "$dev4" lvcreate -an -Zn -l 4 -i 2 -n $lv1 $vg1 "$dev1" "$dev2" if [ $j = PV ]; then vgsplit $vg1 $vg2 "$dev1" "$dev2" else vgsplit -n $lv1 $vg1 $vg2 fi if [ $i = existing ]; then check pvlv_counts $vg2 4 1 0 else check pvlv_counts $vg2 2 1 0 fi lvremove -f $vg2/$lv1 vgremove -f $vg2 COMM "vgsplit correctly splits mirror LV into $i VG ($j args)" create_vg_ $vg1 "$dev1" "$dev2" "$dev3" test $i = existing && create_vg_ $vg2 "$dev4" lvcreate -an -Zn -l 64 --type mirror -m1 -n $lv1 $vg1 "$dev1" "$dev2" "$dev3" if [ $j = PV ]; then # FIXME: Not an exhaustive check of possible bad combinations not vgsplit $vg1 $vg2 "$dev1" "$dev2" not vgsplit $vg1 $vg2 "$dev1" "$dev3" not vgsplit $vg1 $vg2 "$dev2" "$dev3" vgsplit $vg1 $vg2 "$dev1" "$dev2" "$dev3" else vgsplit -n $lv1 $vg1 $vg2 fi if [ $i = existing ]; then check pvlv_counts $vg2 4 1 0 else check pvlv_counts $vg2 3 1 0 fi lvremove -f $vg2/$lv1 vgremove -f $vg2 # RHBZ 875903 COMM "vgsplit correctly splits mirror (log+leg on same dev) into $i VG ($j args)" create_vg_ $vg1 "$dev1" "$dev2" "$dev3" test $i = existing && create_vg_ $vg2 "$dev4" lvcreate -an -Zn -l 64 --type mirror -m1 -n $lv1 $vg1 "$dev1" "$dev2" if [ $j = PV ]; then not vgsplit $vg1 $vg2 "$dev1" not vgsplit $vg1 $vg2 "$dev2" vgsplit $vg1 $vg2 "$dev1" "$dev2" else vgsplit -n $lv1 $vg1 $vg2 fi if [ $i = existing ]; then check pvlv_counts $vg2 3 1 0 else check pvlv_counts $vg2 2 1 0 fi lvremove -f $vg2/$lv1 vgremove -f $vg1 $vg2 # Can't use mirrored log without cmirrord # TODO: Should work for inactive device, needs some fixes.... if test ! 
-e LOCAL_CLVMD ; then COMM "vgsplit correctly splits mirror LV with mirrored log into $i VG ($j args)" create_vg_ $vg1 "$dev1" "$dev2" "$dev3" "$dev4" test $i = existing && create_vg_ $vg2 "$dev5" lvcreate -an -Zn -l 64 --mirrorlog mirrored --type mirror -m1 -n $lv1 $vg1 \ "$dev1" "$dev2" "$dev3" "$dev4" if [ $j = PV ]; then # FIXME: Not an exhaustive check of possible bad combinations not vgsplit $vg1 $vg2 "$dev1" "$dev2" not vgsplit $vg1 $vg2 "$dev3" "$dev4" not vgsplit $vg1 $vg2 "$dev1" "$dev3" not vgsplit $vg1 $vg2 "$dev2" "$dev4" vgsplit $vg1 $vg2 "$dev1" "$dev2" "$dev3" "$dev4" else vgsplit -n $lv1 $vg1 $vg2 fi if [ $i = existing ]; then check pvlv_counts $vg2 5 1 0 else check pvlv_counts $vg2 4 1 0 fi lvremove -f $vg2/$lv1 vgremove -f $vg2 # RHBZ 875903 COMM "vgsplit correctly splits mirror LV with mirrored log on same devs into $i VG ($j args)" create_vg_ $vg1 "$dev1" "$dev2" "$dev3" "$dev4" test $i = existing && create_vg_ $vg2 "$dev5" lvcreate -an -Zn -l 64 --mirrorlog mirrored --type mirror -m1 -n $lv1 $vg1 \ "$dev1" "$dev2" if [ $j = PV ]; then not vgsplit $vg1 $vg2 "$dev1" not vgsplit $vg1 $vg2 "$dev2" vgsplit $vg1 $vg2 "$dev1" "$dev2" else vgsplit -n $lv1 $vg1 $vg2 fi if [ $i = existing ]; then check pvlv_counts $vg2 3 1 0 else check pvlv_counts $vg2 2 1 0 fi lvremove -f $vg2/$lv1 vgremove -f $vg1 $vg2 fi COMM "vgsplit correctly splits origin and snapshot LV into $i VG ($j args)" create_vg_ $vg1 "$dev1" "$dev2" test $i = existing && create_vg_ $vg2 "$dev3" "$dev4" lvcreate -aey -l 64 -i 2 -n $lv1 $vg1 "$dev1" "$dev2" lvcreate -l 4 -i 2 -s -n $lv2 $vg1/$lv1 vgchange -an $vg1 if [ $j = PV ]; then vgsplit $vg1 $vg2 "$dev1" "$dev2" else vgsplit -n $lv1 $vg1 $vg2 fi if [ $i = existing ]; then check pvlv_counts $vg2 4 2 1 else check pvlv_counts $vg2 2 2 1 fi lvremove -f $vg2/$lv2 lvremove -f $vg2/$lv1 vgremove -f $vg2 COMM "vgsplit correctly splits linear LV but not snap+origin LV into $i VG ($j args)" create_vg_ $vg1 "$dev1" "$dev2" test $i = existing && create_vg_ $vg2 "$dev3" lvcreate -aey -l 64 -i 2 -n $lv1 $vg1 lvcreate -l 4 -i 2 -s -n $lv2 $vg1/$lv1 vgextend $vg1 "$dev4" lvcreate -l 64 -n $lv3 $vg1 "$dev4" vgchange -an $vg1 if [ $j = PV ]; then vgsplit $vg1 $vg2 "$dev4" else vgsplit -n $lv3 $vg1 $vg2 fi if [ $i = existing ]; then check pvlv_counts $vg2 2 1 0 check pvlv_counts $vg1 2 2 1 else check pvlv_counts $vg2 1 1 0 check pvlv_counts $vg1 2 2 1 fi lvremove -f $vg1/$lv2 lvremove -f $vg1/$lv1 $vg2/$lv3 vgremove -f $vg1 $vg2 COMM "vgsplit correctly splits linear LV but not mirror LV into $i VG ($j args)" create_vg_ $vg1 "$dev1" "$dev2" "$dev3" test $i = existing && create_vg_ $vg2 "$dev5" lvcreate -an -Zn -l 64 --type mirror -m1 -n $lv1 $vg1 "$dev1" "$dev2" "$dev3" vgextend $vg1 "$dev4" lvcreate -an -Zn -l 64 -n $lv2 $vg1 "$dev4" if [ $j = PV ]; then vgsplit $vg1 $vg2 "$dev4" else vgsplit -n $lv2 $vg1 $vg2 fi if [ $i = existing ]; then check pvlv_counts $vg1 3 1 0 check pvlv_counts $vg2 2 1 0 else check pvlv_counts $vg1 3 1 0 check pvlv_counts $vg2 1 1 0 fi vgremove -f $vg1 $vg2 done done # # Test more complex setups where the code has to find associated PVs and # LVs to split the VG correctly # COMM "vgsplit fails splitting 3 striped LVs into VG when only 1 LV specified" create_vg_ $vg1 "$dev1" "$dev2" "$dev3" "$dev4" lvcreate -an -Zn -l 4 -n $lv1 -i 2 $vg1 "$dev1" "$dev2" lvcreate -an -Zn -l 4 -n $lv2 -i 2 $vg1 "$dev2" "$dev3" lvcreate -an -Zn -l 4 -n $lv3 -i 2 $vg1 "$dev3" "$dev4" not vgsplit -n $lv1 $vg1 $vg2 vgremove -f $vg1 COMM "vgsplit fails splitting one LV with 2 
snapshots, only origin LV specified" create_vg_ $vg1 "$dev1" "$dev2" "$dev3" "$dev4" lvcreate -aey -l 16 -n $lv1 $vg1 "$dev1" "$dev2" lvcreate -l 4 -n $lv2 -s $vg1/$lv1 "$dev3" lvcreate -l 4 -n $lv3 -s $vg1/$lv1 "$dev4" check pvlv_counts $vg1 4 3 2 vgchange -an $vg1 not vgsplit -n $lv1 $vg1 $vg2; lvremove -f $vg1/$lv2 $vg1/$lv3 lvremove -f $vg1/$lv1 vgremove -f $vg1 COMM "vgsplit fails splitting one LV with 2 snapshots, only snapshot LV specified" create_vg_ $vg1 "$dev1" "$dev2" "$dev3" "$dev4" lvcreate -aey -l 16 -n $lv1 $vg1 "$dev1" "$dev2" lvcreate -l 4 -n $lv2 -s $vg1/$lv1 "$dev3" lvcreate -l 4 -n $lv3 -s $vg1/$lv1 "$dev4" check pvlv_counts $vg1 4 3 2 vgchange -an $vg1 not vgsplit -n $lv2 $vg1 $vg2 lvremove -f $vg1/$lv2 $vg1/$lv3 lvremove -f $vg1/$lv1 vgremove -f $vg1 COMM "vgsplit fails splitting one mirror LV, only one PV specified" create_vg_ $vg1 "$dev1" "$dev2" "$dev3" "$dev4" lvcreate -an -Zn -l 16 -n $lv1 --type mirror -m1 $vg1 "$dev1" "$dev2" "$dev3" check pvlv_counts $vg1 4 1 0 not vgsplit $vg1 $vg2 "$dev2" vgremove -ff $vg1 COMM "vgsplit fails splitting 1 mirror + 1 striped LV, only striped LV specified" create_vg_ $vg1 "$dev1" "$dev2" "$dev3" "$dev4" lvcreate -an -Zn -l 16 -n $lv1 --type mirror --nosync -m1 $vg1 "$dev1" "$dev2" "$dev3" lvcreate -an -Zn -l 16 -n $lv2 -i 2 $vg1 "$dev3" "$dev4" check pvlv_counts $vg1 4 2 0 not vgsplit -n $lv2 $vg1 $vg2 2>err vgremove -f $vg1 # # Verify vgsplit rejects active LVs only when active LVs involved in split # COMM "vgsplit fails, active mirror involved in split" create_vg_ $vg1 "$dev1" "$dev2" "$dev3" "$dev4" lvcreate -aey -l 16 -n $lv1 --type mirror --nosync -m1 $vg1 "$dev1" "$dev2" "$dev3" lvcreate -l 16 -n $lv2 $vg1 "$dev4" lvchange -an $vg1/$lv2 check pvlv_counts $vg1 4 2 0 not vgsplit -n $lv1 $vg1 $vg2; check pvlv_counts $vg1 4 2 0 vgremove -f $vg1 COMM "vgsplit succeeds, active mirror not involved in split" create_vg_ $vg1 "$dev1" "$dev2" "$dev3" "$dev4" lvcreate -aey -l 16 -n $lv1 --type mirror --nosync -m1 $vg1 "$dev1" "$dev2" "$dev3" lvcreate -l 16 -n $lv2 $vg1 "$dev4" lvchange -an $vg1/$lv2 check pvlv_counts $vg1 4 2 0 vgsplit -n $lv2 $vg1 $vg2 check pvlv_counts $vg1 3 1 0 check pvlv_counts $vg2 1 1 0 vgremove -f $vg1 $vg2 COMM "vgsplit fails, active snapshot involved in split" create_vg_ $vg1 "$dev1" "$dev2" "$dev3" "$dev4" lvcreate -aey -l 64 -i 2 -n $lv1 $vg1 "$dev1" "$dev2" lvcreate -l 4 -i 2 -s -n $lv2 $vg1/$lv1 lvcreate -an -Zn -l 64 -i 2 -n $lv3 $vg1 "$dev3" "$dev4" check pvlv_counts $vg1 4 3 1 not vgsplit -n $lv2 $vg1 $vg2; check pvlv_counts $vg1 4 3 1 lvremove -f $vg1/$lv2 vgremove -f $vg1 COMM "vgsplit succeeds, active snapshot not involved in split" create_vg_ $vg1 "$dev1" "$dev2" "$dev3" lvcreate -aey -l 64 -i 2 -n $lv1 $vg1 "$dev1" "$dev2" lvcreate -l 4 -s -n $lv2 $vg1/$lv1 vgextend $vg1 "$dev4" lvcreate -an -Zn -l 64 -n $lv3 $vg1 "$dev4" check pvlv_counts $vg1 4 3 1 vgsplit -n $lv3 $vg1 $vg2 check pvlv_counts $vg1 3 2 1 check pvlv_counts $vg2 1 1 0 vgchange -an $vg1 lvremove -f $vg1/$lv2 vgremove -f $vg1 $vg2 LVM2.2.02.176/test/shell/vgck.sh0000644000000000000120000000200513176752421014675 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 3 lvcreate -n blabla -L 1 $vg dd if=/dev/urandom bs=512 seek=2 count=32 of="$dev2" # TODO: aux lvmconf "global/locking_type = 4" vgscan 2>&1 | tee vgscan.out if test -e LOCAL_LVMETAD; then not grep "Inconsistent metadata found for VG $vg" vgscan.out else grep "Inconsistent metadata found for VG $vg" vgscan.out fi dd if=/dev/urandom bs=512 seek=2 count=32 of="$dev2" aux notify_lvmetad "$dev2" vgck $vg 2>&1 | tee vgck.out grep Incorrect vgck.out vgremove -ff $vg LVM2.2.02.176/test/shell/pvcreate-operation-md.sh0000644000000000000120000001027313176752421020156 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2009-2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest # skip this test if mdadm or sfdisk (or others) aren't available which sfdisk || skip test -f /proc/mdstat && grep -q raid0 /proc/mdstat || \ modprobe raid0 || skip aux lvmconf 'devices/md_component_detection = 1' aux extend_filter_LVMTEST aux extend_filter "a|/dev/md.*|" aux prepare_devs 2 # create 2 disk MD raid0 array (stripe_width=128K) aux prepare_md_dev 0 64 2 "$dev1" "$dev2" mddev=$(< MD_DEV) pvdev=$(< MD_DEV_PV) # Test alignment of PV on MD without any MD-aware or topology-aware detection # - should treat $mddev just like any other block device pvcreate --metadatasize 128k \ --config 'devices {md_chunk_alignment=0 data_alignment_detection=0 data_alignment_offset_detection=0}' \ "$pvdev" check pv_field "$pvdev" pe_start "1.00m" # Test md_chunk_alignment independent of topology-aware detection pvcreate --metadatasize 128k \ --config 'devices {data_alignment_detection=0 data_alignment_offset_detection=0}' \ "$pvdev" check pv_field "$pvdev" pe_start "1.00m" # Test newer topology-aware alignment detection # - first added to 2.6.31 but not "reliable" until 2.6.33 if aux kernel_at_least 2 6 33 ; then # optimal_io_size=131072, minimum_io_size=65536 pvcreate --metadatasize 128k \ --config 'devices { md_chunk_alignment=0 }' "$pvdev" check pv_field "$pvdev" pe_start "1.00m" pvremove "$pvdev" fi # partition MD array directly, depends on blkext in Linux >= 2.6.28 if aux kernel_at_least 2 6 28 ; then # create one partition sfdisk "$mddev" < parent lookup via sysfs paths not pvcreate --metadatasize 128k "$pvdev" # verify alignment_offset is accounted for in pe_start # - topology infrastructure is available in Linux >= 2.6.31 # - also tests partition -> parent lookup via sysfs paths # Checking for 'alignment_offset' in sysfs implies Linux >= 2.6.31 # but reliable alignment_offset support requires kernel.org Linux >= 2.6.33 if aux kernel_at_least 2 6 33 ; then # in case the system is running without devtmpfs /dev # wait here for created device node on tmpfs test "$DM_DEV_DIR" != "/dev" && cp -LR "${mddev}p1" "$DM_DEV_DIR" pvcreate --metadatasize 128k "${pvdev}p1" maj=$(($(stat -L --printf=0x%t "${mddev}p1"))) min=$(($(stat -L 
--printf=0x%T "${mddev}p1"))) sysfs_alignment_offset="/sys/dev/block/$maj:$min/alignment_offset" [ -f "$sysfs_alignment_offset" ] && \ alignment_offset=$(< "$sysfs_alignment_offset") || \ alignment_offset=0 # default alignment is 1M, add alignment_offset pv_align=$(( 1048576 + alignment_offset )) check pv_field "${pvdev}p1" pe_start $pv_align --units b --nosuffix pvremove "${pvdev}p1" test "$DM_DEV_DIR" != "/dev" && rm -f "$DM_DEV_DIR/${mddev}p1" fi fi # Test newer topology-aware alignment detection w/ --dataalignment override if aux kernel_at_least 2 6 33 ; then # make sure we're clean for another test dd if=/dev/zero of="$mddev" bs=512 count=1 aux prepare_md_dev 0 1024 2 "$dev1" "$dev2" pvdev=$(< MD_DEV_PV) # optimal_io_size=2097152, minimum_io_size=1048576 pvcreate --metadatasize 128k \ --config 'devices { md_chunk_alignment=0 }' "$pvdev" check pv_field "$pvdev" pe_start "2.00m" # now verify pe_start alignment override using --dataalignment pvcreate --dataalignment 64k --metadatasize 128k \ --config 'devices { md_chunk_alignment=0 }' "$pvdev" check pv_field "$pvdev" pe_start "192.00k" fi LVM2.2.02.176/test/shell/mdata-strings.sh0000644000000000000120000000266513176752421016534 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # 'Test for proper escaping of strings in metadata (bz431474)' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest # For udev impossible to create test "$LVM_TEST_DEVDIR" = "/dev" && skip aux prepare_devs 2 aux extend_filter_LVMTEST # Setup mangling to 'none' globaly for all libdm users export DM_DEFAULT_NAME_MANGLING_MODE=none pv_ugly="__\"!@#\$%^&*,()|@||'\\\"__pv1" # 'set up temp files, loopback devices' name=$(basename "$dev1") dmsetup rename "$name" "$PREFIX$pv_ugly" dev1=$(dirname "$dev1")/"$PREFIX$pv_ugly" dm_table | grep -F "$pv_ugly" # 'pvcreate, vgcreate on filename with backslashed chars' created="$dev1" # when used with real udev without fallback, it will fail here pvcreate "$dev1" || created="$dev2" pvdisplay 2>&1 | tee err should grep -F "$pv_ugly" err should check pv_field "$dev1" pv_name "$dev1" vgcreate $vg "$created" # 'no parse errors and VG really exists' vgs $vg 2>err not grep "Parse error" err dmsetup remove "${PREFIX}${pv_ugly}" LVM2.2.02.176/test/shell/dumpconfig.sh0000644000000000000120000000317213176752421016104 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2011 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest flatten() { cat > flatten.config for s in $(grep -E '^[a-z]+ {$' flatten.config | sed -e 's,{$,,'); do sed -e "/^$s/,/^}/p;d" flatten.config | sed -e '1d;$d' | sed -e "s,^[ \t]*,$s/,"; done } # clvmd might not be started fast enough and # lvm still activates locking for all commands. # FIXME: Either make longer start delay, # or even better do not initialize # locking for commands like 'dumpconfig' #aux lvmconf "global/locking_type=0" lvm dumpconfig -f lvmdumpconfig flatten < lvmdumpconfig | sort > config.dump flatten < etc/lvm.conf | sort > config.input # check that dumpconfig output corresponds to the lvm.conf input diff -wu config.input config.dump # and that merging multiple config files (through tags) works lvm dumpconfig -f lvmdumpconfig flatten < lvmdumpconfig | not grep 'log/verbose=1' lvm dumpconfig -f lvmdumpconfig flatten < lvmdumpconfig | grep 'log/indent=1' aux lvmconf 'tags/@foo {}' echo 'log { verbose = 1 }' > etc/lvm_foo.conf lvm dumpconfig -f lvmdumpconfig flatten < lvmdumpconfig | grep 'log/verbose=1' lvm dumpconfig -f lvmdumpconfig flatten < lvmdumpconfig | grep 'log/indent=1' LVM2.2.02.176/test/shell/vgimportclone.sh0000644000000000000120000000531313176752421016640 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010-2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 2 vgcreate --metadatasize 128k $vg1 "$dev1" lvcreate -l100%FREE -n $lv1 $vg1 # Test plain vgexport vgimport tools # Argument is needed invalid vgexport invalid vgimport # Cannot combine -a and VG name invalid vgexport -a $vg invalid vgimport -a $vg1 # Cannot export unknonw VG fail vgexport ${vg1}-non fail vgimport ${vg1}-non # Cannot export VG with active volumes fail vgexport $vg1 vgchange -an $vg1 vgexport $vg1 # Already exported fail vgexport $vg1 vgimport $vg1 # Already imported fail vgimport $vg1 vgchange -ay $vg1 # Clone the LUN dd if="$dev1" of="$dev2" bs=256K count=1 aux notify_lvmetad "$dev2" # Verify pvs works on each device to give us vgname aux hide_dev "$dev2" check pv_field "$dev1" vg_name $vg1 aux unhide_dev "$dev2" aux hide_dev "$dev1" check pv_field "$dev2" vg_name $vg1 aux unhide_dev "$dev1" # Import the cloned PV to a new VG vgimportclone --basevgname $vg2 "$dev2" # We need to re-scan *both* $dev1 and $dev2 since a PV, as far as lvmetad is # concerned, can only live on a single device. With the last pvscan, we told it # that PV from $dev1 now lives on $dev2, but in fact this is not true anymore, # since we wrote a different PV over $dev2. rm -f "$TESTDIR/etc/.cache" aux notify_lvmetad "$dev2" aux notify_lvmetad "$dev1" # Verify we can activate / deactivate the LV from both VGs lvchange -ay $vg1/$lv1 $vg2/$lv1 vgchange -an $vg1 $vg2 vgremove -ff $vg1 $vg2 # Verify that if we provide the -n|--basevgname, # the number suffix is not added unnecessarily. 
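# (As exercised below: vgimportclone only appends a numeric suffix when the
#  requested --basevgname would collide with an existing VG; the first two
#  cases keep the name as given, the third gets a "1" appended.)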
vgcreate --metadatasize 128k A${vg1}B "$dev1" # vg1B is not the same as Avg1B - we don't need number suffix dd if="$dev1" of="$dev2" bs=256K count=1 aux notify_lvmetad "$dev2" vgimportclone -n ${vg1}B "$dev2" check pv_field "$dev2" vg_name ${vg1}B # Avg1 is not the same as Avg1B - we don't need number suffix dd if="$dev1" of="$dev2" bs=256K count=1 aux notify_lvmetad "$dev2" vgimportclone -n A${vg1} "$dev2" check pv_field "$dev2" vg_name A${vg1} # Avg1B is the same as Avg1B - we need to add the number suffix dd if="$dev1" of="$dev2" bs=256K count=1 aux notify_lvmetad "$dev2" vgimportclone -n A${vg1}B "$dev2" aux vgs check pv_field "$dev2" vg_name A${vg1}B1 vgremove -ff A${vg1}B A${vg1}B1 LVM2.2.02.176/test/shell/lvcreate-raid.sh0000644000000000000120000001531013176752421016470 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2011-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest lv_devices() { test "$3" -eq "$(get lv_devices "$1/$2" | wc -w)" } ######################################################## # MAIN ######################################################## aux have_raid 1 3 0 || skip RAID4="" aux have_raid4 && RAID4=raid4 aux prepare_pvs 6 20 # 6 devices for RAID10 (2-mirror,3-stripe) test get_devs vgcreate -s 512k "$vg" "${DEVICES[@]}" ########################################### # Create, wait for sync, remove tests ########################################### # Create RAID1 (implicit 2-way) lvcreate --type raid1 -l 2 -n $lv1 $vg aux wait_for_sync $vg $lv1 lvremove -ff $vg # Create RAID1 (explicit 2-way) lvcreate --type raid1 -m 1 -l 2 -n $lv1 $vg aux wait_for_sync $vg $lv1 lvremove -ff $vg # Create RAID1 (explicit 3-way) lvcreate --type raid1 -m 2 -l 2 -n $lv1 $vg aux wait_for_sync $vg $lv1 lvremove -ff $vg # Create RAID1 (explicit 3-way) - Set min/max recovery rate lvcreate --type raid1 -m 2 -l 2 \ --minrecoveryrate 50 --maxrecoveryrate 1M \ -n $lv1 $vg check lv_field $vg/$lv1 raid_min_recovery_rate 50 check lv_field $vg/$lv1 raid_max_recovery_rate 1024 aux wait_for_sync $vg $lv1 lvremove -ff $vg # Create RAID 4/5/6 (explicit 3-stripe + parity devs) for i in $RAID4 \ raid5 raid5_ls raid5_la raid5_rs raid5_ra \ raid6 raid6_zr raid6_nr raid6_nc; do lvcreate --type $i -l 3 -i 3 -n $lv1 $vg aux wait_for_sync $vg $lv1 lvremove -ff $vg done # Create RAID 4/5/6 (explicit 3-stripe + parity devs) - Set min/max recovery for i in $RAID4 \ raid5 raid5_ls raid5_la raid5_rs raid5_ra \ raid6 raid6_zr raid6_nr raid6_nc; do lvcreate --type $i -l 3 -i 3 \ --minrecoveryrate 50 --maxrecoveryrate 1M \ -n $lv1 $vg check lv_field $vg/$lv1 raid_min_recovery_rate 50 check lv_field $vg/$lv1 raid_max_recovery_rate 1024 aux wait_for_sync $vg $lv1 lvremove -ff $vg done # Create RAID using 100%FREE ############################ # 6 PVs with 19m in each PV. 
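# At the 512k extent size used above, each 19m PV provides 38 extents; every
# RAID image also needs a 1-extent metadata sub-LV on its first PV, which is
# why that PV contributes only 37 extents to the image (37+38+38 = 113 extents
# = 56.50m per image for the 2-way raid1 below).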
# 1 metadata LV = 1 extent = .5m # 1 image = 37+38+38 extents = 56.50m = lv_size lvcreate --type raid1 -m 1 -l 100%FREE -an -Zn -n raid1 $vg check lv_field $vg/raid1 size "56.50m" lvremove -ff $vg # 1 metadata LV = 1 extent # 1 image = 37 extents = 18.5m # 5 images = 185 extents = 92.5m = lv_size lvs -a $vg lvcreate --type raid5 -i 5 -l 100%FREE -an -Zn -n raid5 $vg check lv_field $vg/raid5 size "92.50m" lvremove -ff $vg # 1 image = 37+38 extents # 2 images = 150 extents = 75.00m = lv_size lvcreate --type raid5 -i 2 -l 100%FREE -an -Zn -n raid5 $vg check lv_field $vg/raid5 size "75.00m" lvremove -ff $vg # 1 image = 37 extents # 4 images = 148 extents = 74.00m = lv_size lvcreate --type raid6 -i 4 -l 100%FREE -an -Zn -n raid6 $vg check lv_field $vg/raid6 size "74.00m" lvremove -ff $vg ### # For following tests eat 18 of 37 extents from dev1, leaving 19 lvcreate -l 18 -an -Zn -n eat_space $vg "$dev1" EAT_SIZE=$(get lv_field $vg/eat_space size) # Using 100% free should take the rest of dev1 and equal from dev2 # 1 meta takes 1 extent # 1 image = 19 extents = 9.50m = lv_size lvcreate --type raid1 -m 1 -l 100%FREE -an -Zn -n raid1 $vg "$dev1" "$dev2" check lv_field $vg/raid1 size "9.50m" # Ensure image size is the same as the RAID1 size check lv_field $vg/raid1 size "$(get lv_field $vg/raid1_rimage_0 size -a)" # Amount remaining in dev2 should equal the amount taken by 'lv' in dev1 check pv_field "$dev2" pv_free "$EAT_SIZE" lvremove -ff $vg/raid1 # Using 100% free should take the rest of dev1 and equal amount from the rest # 1 meta takes 1 extent # 1 image = 19 extents = 9.50m # 5 images = 95 extents = 47.50m = lv_size lvcreate --type raid5 -i 5 -l 100%FREE -an -Zn -n raid5 $vg check lv_field $vg/raid5 size "47.50m" # Amount remaining in dev6 should equal the amount taken by 'lv' in dev1 check pv_field "$dev6" pv_free "$EAT_SIZE" lvremove -ff $vg/raid5 # Using 100% free should take the rest of dev1, an equal amount # from 2 more devs, and all extents from 3 additional devs # 1 meta takes 1 extent # 1 image = 19+39 extents # 2 images = 114 extents = 57.00m = lv_size lvcreate --type raid5 -i 2 -l 100%FREE -an -Zn -n raid5 $vg check lv_field $vg/raid5 size "57.00m" lvremove -ff $vg/raid5 # Let's do some stripe tests too # Using 100% free should take the rest of dev1 and an equal amount from rest # 1 image = 20 extents # 6 images = 120 extents = 60.00m = lv_size lvcreate -i 6 -l 100%FREE -an -Zn -n stripe $vg check lv_field $vg/stripe size "60.00m" lvremove -ff $vg/stripe # Using 100% free should take the rest of dev1, an equal amount from # one more dev, and all of the remaining 4 # 1 image = 20+38+38 extents # 2 images = 192 extents = 96.00m = lv_size lvcreate -i 2 -l 100%FREE -an -Zn -n stripe $vg check lv_field $vg/stripe size "96.00m" lvremove -ff $vg # end of use of '$vg/eat_space' ### # Create RAID (implicit stripe count based on PV count) ####################################################### # Not enough drives not lvcreate --type raid1 -l1 $vg "$dev1" not lvcreate --type raid5 -l2 $vg "$dev1" "$dev2" not lvcreate --type raid6 -l3 $vg "$dev1" "$dev2" "$dev3" "$dev4" # Implicit count comes from #PVs given (always 2 for mirror though) lvcreate --type raid1 -l1 -an -Zn -n raid1 $vg "$dev1" "$dev2" lv_devices $vg raid1 2 lvcreate --type raid5 -l2 -an -Zn -n raid5 $vg "$dev1" "$dev2" "$dev3" lv_devices $vg raid5 3 lvcreate --type raid6 -l3 -an -Zn -n raid6 $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" lv_devices $vg raid6 5 lvremove -ff $vg # Implicit count comes from total #PVs in VG (always 2 
for mirror though) # Defaults -i2 even though more PVs listed lvcreate --type raid1 -l1 -an -Zn -n raid1 $vg lv_devices $vg raid1 2 lvcreate --type raid5 -l2 -an -Zn -n raid5 $vg lv_devices $vg raid5 3 lvcreate --type raid6 -l3 -an -Zn -n raid6 $vg lv_devices $vg raid6 5 lvremove -ff $vg ######################################################## # Try again with backward compatible old logic applied # ######################################################## aux lvmconf 'allocation/raid_stripe_all_devices = 1' # Implicit count comes from total #PVs in VG (always 2 for mirror though) lvcreate --type raid1 -l1 -an -Zn -n raid1 $vg lv_devices $vg raid1 2 lvcreate --type raid5 -l2 -an -Zn -n raid5 $vg lv_devices $vg raid5 6 lvcreate --type raid6 -l3 -an -Zn -n raid6 $vg lv_devices $vg raid6 6 vgremove -ff $vg LVM2.2.02.176/test/shell/vgchange-many.sh0000644000000000000120000000322113176752421016470 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Check perfomance of activation and deactivation SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest # FIXME: lvmetad fails with i.e. 1500 device on memory failure... # Number of LVs to create TEST_DEVS=1000 # On low-memory boxes let's not stress too much test "$(aux total_mem)" -gt 524288 || TEST_DEVS=256 aux prepare_pvs 1 400 get_devs vgcreate -s 128K "$vg" "${DEVICES[@]}" vgcfgbackup -f data $vg # Generate a lot of devices (size of 1 extent) awk -v TEST_DEVS=$TEST_DEVS '/^\t\}/ { printf("\t}\n\tlogical_volumes {\n"); cnt=0; for (i = 0; i < TEST_DEVS; i++) { printf("\t\tlvol%06d {\n", i); printf("\t\t\tid = \"%06d-1111-2222-3333-2222-1111-%06d\"\n", i, i); print "\t\t\tstatus = [\"READ\", \"WRITE\", \"VISIBLE\"]"; print "\t\t\tsegment_count = 1"; print "\t\t\tsegment1 {"; print "\t\t\t\tstart_extent = 0"; print "\t\t\t\textent_count = 1"; print "\t\t\t\ttype = \"striped\""; print "\t\t\t\tstripe_count = 1"; print "\t\t\t\tstripes = ["; print "\t\t\t\t\t\"pv0\", " cnt++; printf("\t\t\t\t]\n\t\t\t}\n\t\t}\n"); } } {print} ' data >data_new vgcfgrestore -f data_new $vg # Activate and deactivate all of them vgchange -ay $vg vgchange -an $vg LVM2.2.02.176/test/shell/lvcreate-pvtags.sh0000644000000000000120000000275713176752421017070 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux prepare_pvs 3 get_devs aux lvmconf 'allocation/maximise_cling = 0' \ 'allocation/mirror_logs_require_separate_pvs = 1' # not required, just testing aux pvcreate --metadatacopies 0 "$dev1" vgcreate "$vg" "${DEVICES[@]}" pvchange --addtag fast "${DEVICES[@]}" # 3 stripes with 3 PVs (selected by tag, @fast) is fine lvcreate -l3 -i3 $vg @fast # too many stripes(4) for 3 PVs not lvcreate -l4 -i4 $vg @fast # 2 stripes is too many with just one PV not lvcreate -l2 -i2 $vg "$DM_DEV_DIR/mapper/pv1" # lvcreate mirror lvcreate -aey -l1 --type mirror -m1 --nosync $vg @fast # lvcreate mirror w/corelog lvcreate -aey -l1 --type mirror -m2 --corelog --nosync $vg @fast # lvcreate mirror w/no free PVs not lvcreate -aey -l1 --type mirror -m2 $vg @fast # lvcreate mirror (corelog, w/no free PVs) not lvcreate -aey -l1 --type mirror -m3 --corelog $vg @fast # lvcreate mirror with a single PV arg not lvcreate -aey -l1 --type mirror -m1 --corelog $vg "$dev1" vgremove -ff $vg LVM2.2.02.176/test/shell/thin-resize-match.sh0000644000000000000120000000463613176752421017312 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # ensure there is no data loss during thin-pool resize SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest which md5sum || skip aux have_thin 1 0 0 || skip aux prepare_vg 2 20 lvcreate -L1M -V2M -n $lv1 -T $vg/pool # just ensure we check what we need to check check lv_field $vg/pool size "1.00m" check lv_field $vg/$lv1 size "2.00m" # prepare 2097152 file content seq 0 315465 > 2M md5sum 2M | cut -f 1 -d ' ' | tee MD5 dd if=2M of="$DM_DEV_DIR/mapper/$vg-$lv1" bs=512K conv=fdatasync >log 2>&1 & #dd if=2M of="$DM_DEV_DIR/mapper/$vg-$lv1" bs=2M oflag=direct & # give it some time to fill thin-volume # eventually loop to wait for 100% full pool... sleep .1 lvs -a $vg # this must not 'block & wait' on suspending flush # if it waits on thin-pool's target timeout # it will harm queued data lvextend -L+512k $vg/pool lvextend -L+512k $vg/pool # collect 'dd' result wait cat log lvs -a $vg dd if="$DM_DEV_DIR/mapper/$vg-$lv1" of=2M-2 iflag=direct md5sum 2M-2 | cut -f 1 -d ' ' | tee MD5-2 # these 2 are supposed to match diff MD5 MD5-2 # Do not want to see Live & Inactive table entry ( dm_info attr,name | not grep "LI-.*${PREFIX}" ) || { dmsetup table --inactive | grep ${PREFIX} die "Found device with Inactive table" } # Check wrapping active thin-pool linear mapping has matching size POOLSZ=$(dmsetup table ${vg}-pool-tpool | cut -d ' ' -f 2) WRAPSZ=$(dmsetup table ${vg}-pool | cut -d ' ' -f 2) # # FIXME: currently requires to update 2 dependent targets in one 'preload' # lvm2 cannot handle this and would need one extra --refresh pass. # Once resolved - enabled this test. # Maybe other solution without fake linear mapping could be found. # Eventually strictly map just single sector as it has no real use? 
# #should test "${POOLSZ}" = "${WRAPSZ}" || \ # die "Wrapping pool device size does not match real pool size" vgremove -f $vg LVM2.2.02.176/test/shell/pvcreate-restore.sh0000644000000000000120000000225313176752421017242 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 4 lvcreate --type snapshot -s -L10 -n $lv1 $vg --virtualsize 2T lvcreate --type snapshot -s -L10 -n $lv2 $vg --virtualsize 4T lvcreate --type snapshot -s -L10 -n $lv3 $vg --virtualsize 4194300M aux extend_filter_LVMTEST vgcreate $vg1 "$DM_DEV_DIR/$vg/$lv2" vgcfgbackup -f vgback $vg1 UUID=$(get pv_field "$DM_DEV_DIR/$vg/$lv2" uuid) pvremove -ff -y "$DM_DEV_DIR/$vg/$lv2" # too small to fit fail pvcreate --restorefile vgback --uuid $UUID "$DM_DEV_DIR/$vg/$lv1" # still does not fit fail pvcreate --restorefile vgback --uuid $UUID "$DM_DEV_DIR/$vg/$lv3" pvcreate --restorefile vgback --uuid $UUID "$DM_DEV_DIR/$vg/$lv2" vgremove -ff $vg LVM2.2.02.176/test/shell/lvmcache-exercise.sh0000644000000000000120000000271313176752421017340 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 5 get_devs vgcreate $vg1 "$dev1" vgcreate $vg2 "$dev3" "$dev4" "$dev5" UUID1=$(get vg_field $vg1 uuid) aux disable_dev "$dev1" pvscan # dev1 is missing fail pvs "${DEVICES[@]}" # create a new vg1 on dev2, # so dev1 and dev2 have different VGs with the same name vgcreate $vg1 "$dev2" UUID2=$(get vg_field $vg1 uuid) # Once dev1 is visible again, both VGs named "vg1" are visible. aux enable_dev "$dev1" pvs "$dev1" # reappearing device (rhbz 995440) lvcreate -aey -m2 --type mirror -l4 --alloc anywhere --corelog -n $lv1 $vg2 aux disable_dev "$dev3" lvconvert --yes --repair $vg2/$lv1 aux enable_dev "$dev3" # here it should fix any reappeared devices lvs lvs -a $vg2 -o+devices 2>&1 | tee out not grep reappeared out # This removes the first "vg1" using its uuid vgremove -ff -S vg_uuid=$UUID1 # This removes the second "vg1" using its name, # now that there is only one VG with that name. vgremove -ff $vg1 $vg2 LVM2.2.02.176/test/shell/lvconvert-repair-transient-dmeventd.sh0000644000000000000120000000202213176752421023055 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2011 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_dmeventd aux mirror_recovery_works || skip aux prepare_vg 5 lvcreate -aey --type mirror -m 3 --ignoremonitoring -L 1 -n 4way $vg lvchange --monitor y $vg/4way aux disable_dev "$dev2" "$dev4" mkfs.ext3 "$DM_DEV_DIR/$vg/4way" aux enable_dev "$dev2" "$dev4" sleep 3 lvs -a -o +devices $vg | tee out not grep unknown out check mirror $vg 4way check mirror_legs $vg 4way 2 lvs -a -o +devices $vg | tee out not grep mimage_1 out lvs -a -o +devices $vg | tee out not grep mimage_3 out vgremove -f $vg LVM2.2.02.176/test/shell/lvchange-cache-old.sh0000644000000000000120000000216113176752421017352 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Exercise usage of older metadata which are missing some new settings SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux have_cache 1 3 0 || skip # FIXME: parallel cache metadata allocator is crashing when used value 8000! aux prepare_vg 5 80 lvcreate -l 10 --type cache-pool $vg/cpool lvcreate -l 20 -H -n $lv1 $vg/cpool vgcfgbackup -f backup $vg # check metadata without cache policy lvchange -an $vg grep -v "policy =" backup >backup_1 vgcfgrestore -f backup_1 $vg lvchange -ay $vg # check metadata without cache mode lvchange -an $vg grep -v "cache_mode =" backup >backup_2 vgcfgrestore -f backup_2 $vg lvchange -ay $vg vgremove -ff $vg LVM2.2.02.176/test/shell/report-hidden.sh0000644000000000000120000000174013176752421016514 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMETAD=1 SKIP_WITH_CLVMD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 1 lvcreate --type mirror -m1 -l1 --alloc anywhere -n $lv1 $vg aux lvmconf 'log/prefix=""' aux lvmconf "report/mark_hidden_devices = 0" lvs --noheadings -a -o name $vg > out grep "^${lv1}_mimage_0" out not grep "^\[${lv1}_mimage_0\]" out aux lvmconf "report/mark_hidden_devices = 1" lvs --noheadings -a -o name $vg > out grep "^\[${lv1}_mimage_0\]" out not grep "^${lv1}_mimage_0" out vgremove -ff $vg LVM2.2.02.176/test/shell/lvmetad-restart.sh0000644000000000000120000000130113176752421017057 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITHOUT_LVMETAD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 2 vgcreate $vg1 "$dev1" "$dev2" vgs | grep $vg1 kill "$(< LOCAL_LVMETAD)" aux prepare_lvmetad vgs | grep $vg1 vgremove -ff $vg1 LVM2.2.02.176/test/shell/pvcreate-operation.sh0000644000000000000120000001504413176752421017561 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux lvmconf 'devices/md_component_detection = 1' aux prepare_devs 4 if test -n "$LVM_TEST_LVM1" ; then mdatypes='1 2' else mdatypes='2' fi for mdatype in $mdatypes do # pvcreate (lvm$mdatype) refuses to overwrite an mounted filesystem (bz168330) test ! -d mnt && mkdir mnt if mke2fs "$dev1"; then mount "$dev1" mnt not pvcreate -M$mdatype "$dev1" 2>err grep "Can't open $dev1 exclusively. Mounted filesystem?" err umount "$dev1" # wipe the filesystem signature for next # pvcreate to not issue any prompts dd if=/dev/zero of="$dev1" bs=1K count=2 fi # pvcreate (lvm$mdatype) succeeds when run repeatedly (pv not in a vg) (bz178216) pvcreate -M$mdatype "$dev1" pvcreate -M$mdatype "$dev1" pvremove -f "$dev1" # pvcreate (lvm$mdatype) fails when PV belongs to VG # pvcreate -M$mdatype "$dev1" vgcreate -M$mdatype $vg1 "$dev1" not pvcreate -M$mdatype "$dev1" vgremove -f $vg1 pvremove -f "$dev1" # pvcreate (lvm$mdatype) fails when PV1 does and PV2 does not belong to VG pvcreate -M$mdatype "$dev1" pvcreate -M$mdatype "$dev2" vgcreate -M$mdatype $vg1 "$dev1" # pvcreate a second time on $dev2 and $dev1 not pvcreate -M$mdatype "$dev2" "$dev1" vgremove -f $vg1 pvremove -f "$dev2" "$dev1" # NOTE: Force pvcreate after test completion to ensure clean device #test_expect_success # "pvcreate (lvm$mdatype) fails on md component device" # 'mdadm -C -l raid0 -n 2 /dev/md0 "$dev1" "$dev2" && # pvcreate -M$mdatype "$dev1"; # status=$?; echo status=$status; test $status != 0 && # mdadm --stop /dev/md0 && # pvcreate -ff -y -M$mdatype "$dev1" "$dev2" && # pvremove -f "$dev1" "$dev2"' done # pvcreate (lvm2) fails without -ff when PV with metadatacopies=0 belongs to VG pvcreate --metadatacopies 0 "$dev1" pvcreate --metadatacopies 1 "$dev2" vgcreate $vg1 "$dev1" "$dev2" not pvcreate "$dev1" vgremove -f $vg1 pvremove -f "$dev2" "$dev1" # pvcreate (lvm2) succeeds with -ff when PV with metadatacopies=0 belongs to VG pvcreate --metadatacopies 0 "$dev1" pvcreate --metadatacopies 1 "$dev2" vgcreate $vg1 "$dev1" "$dev2" pvcreate -ff -y "$dev1" vgreduce --removemissing $vg1 vgremove -ff $vg1 pvremove -f "$dev2" "$dev1" for i in 0 1 2 3 do # pvcreate (lvm2) succeeds writing LVM label at sector $i pvcreate --labelsector $i "$dev1" dd if="$dev1" bs=512 skip=$i count=1 2>/dev/null | strings | grep LABELONE >/dev/null pvremove -f "$dev1" done # pvcreate (lvm2) fails writing LVM label at sector 4 not pvcreate --labelsector 4 "$dev1" backupfile="$PREFIX.mybackupfile" 
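# LVM UUIDs are 32 characters (displayed in 6-4-4-4-4-4-6 groups); the two
# "freddy" strings below are valid-length UUIDs, while $bogusuuid is
# deliberately too short and must be rejected.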
uuid1=freddy-fred-fred-fred-fred-fred-freddy uuid2=freddy-fred-fred-fred-fred-fred-fredie bogusuuid=fred # pvcreate rejects uuid option with less than 32 characters not pvcreate --norestorefile --uuid $bogusuuid "$dev1" # pvcreate rejects uuid option without restorefile not pvcreate --uuid $uuid1 "$dev1" # pvcreate rejects uuid already in use pvcreate --norestorefile --uuid $uuid1 "$dev1" not pvcreate --norestorefile --uuid $uuid1 "$dev2" # pvcreate rejects non-existent file given with restorefile not pvcreate --uuid $uuid1 --restorefile "$backupfile" "$dev1" # pvcreate rejects restorefile with uuid not found in file pvcreate --norestorefile --uuid $uuid1 "$dev1" vgcfgbackup -f "$backupfile" not pvcreate --uuid $uuid2 --restorefile "$backupfile" "$dev2" # vgcfgrestore of a VG containing a PV with zero PEs (bz #820116) # (use case: one PV in a VG used solely to keep metadata) size_mb=$(($(blockdev --getsz "$dev1") / 2048)) pvcreate --metadatasize $size_mb "$dev1" vgcreate $vg1 "$dev1" vgcfgbackup -f "$backupfile" vgcfgrestore -f "$backupfile" "$vg1" vgremove -f $vg1 pvremove -f "$dev1" # pvcreate --restorefile should handle --dataalignment and --dataalignmentoffset # and check it's compatible with pe_start value being restored # X * dataalignment + dataalignmentoffset == pe_start pvcreate --norestorefile --uuid "$uuid1" --dataalignment 600k --dataalignmentoffset 32k "$dev1" vgcreate $vg1 "$dev1" vgcfgbackup -f "$backupfile" "$vg1" vgremove -ff $vg1 pvremove -ff "$dev1" # the dataalignment and dataalignmentoffset is ignored here since they're incompatible with pe_start pvcreate --restorefile "$backupfile" --uuid "$uuid1" --dataalignment 500k --dataalignmentoffset 10k "$dev1" 2> err grep "incompatible with restored pe_start value" err # 300k is multiple of 600k so this should pass pvcreate --restorefile "$backupfile" --uui "$uuid1" --dataalignment 300k --dataalignmentoffset 32k "$dev1" 2> err not grep "incompatible with restored pe_start value" err # pvcreate rejects non-existent uuid given with restorefile not pvcreate --uuid "$uuid2" --restorefile "$backupfile" "$dev1" 2> err grep "Can't find uuid $uuid2 in backup file $backupfile" err # pvcreate rejects restorefile without uuid not pvcreate --restorefile "$backupfile" "$dev1" 2>err grep -- "--uuid is required with --restorefile" err # pvcreate rejects uuid restore with multiple volumes specified not pvcreate --uuid "$uuid1" --restorefile "$backupfile" "$dev1" "$dev2" 2>err grep "Can only set uuid on one volume at once" err # --bootloaderareasize not allowed with pvcreate --restorefile not pvcreate --uuid "$uuid1" --restorefile "$backupfile" --bootloaderareasize 1m "$dev1" "$dev2" 2>err grep -- "Command does not accept option combination: --bootloaderareasize with --restorefile" err rm -f "$backupfile" pvcreate --norestorefile --uuid $uuid1 "$dev1" vgcreate --physicalextentsize 1m $vg1 "$dev1" vgcfgbackup -f "$backupfile" "$vg1" vgremove -ff "$vg1" pvremove -ff "$dev1" # when 2nd mda requested on pvcreate --restorefile and not enough space for it, pvcreate fails not pvcreate --restorefile "$backupfile" --uuid $uuid1 --metadatacopies 2 "$dev1" 2>err grep "Not enough space available for metadata area with index 1 on PV $dev1" err rm -f "$backupfile" # pvcreate wipes swap signature when forced dd if=/dev/zero of="$dev1" bs=1024 count=64 mkswap "$dev1" blkid -c /dev/null "$dev1" | grep "swap" pvcreate -f "$dev1" # blkid cannot make up its mind whether not finding anything it knows is a failure or not (blkid -c /dev/null "$dev1" || true) | not 
grep "swap" LVM2.2.02.176/test/shell/lvconvert-mirror-basic-3.sh0000644000000000000120000000103613176752421020517 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA . ./shell/lvconvert-mirror-basic.sh test_many 3 vgremove -ff $vg LVM2.2.02.176/test/shell/vgcfgbackup-lvm1.sh0000644000000000000120000000171513176752421017111 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 4 get_devs if test -n "$LVM_TEST_LVM1" ; then pvcreate --metadatacopies 0 "$dev4" # No automatic backup aux lvmconf "backup/backup = 0" # vgcfgbackup correctly stores metadata LVM1 with missing PVs pvcreate -M1 "${DEVICES[@]}" vgcreate -M1 -c n "$vg" "${DEVICES[@]}" lvcreate -l1 -n $lv1 $vg "$dev1" pvremove -ff -y "$dev2" not lvcreate -l1 -n $lv1 $vg "$dev3" lvchange -an $vg vgcfgbackup -f "backup.$$" $vg fi LVM2.2.02.176/test/shell/pv-duplicate-uuid.sh0000644000000000000120000000277113176752421017316 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test 'Found duplicate' is shown SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 3 pvcreate "$dev1" UUID1=$(get pv_field "$dev1" uuid) pvcreate --config "devices{filter=[\"a|$dev2|\",\"r|.*|\"]} global/use_lvmetad=0" -u "$UUID1" --norestorefile "$dev2" pvcreate --config "devices{filter=[\"a|$dev3|\",\"r|.*|\"]} global/use_lvmetad=0" -u "$UUID1" --norestorefile "$dev3" pvscan --cache 2>&1 | tee out if test -e LOCAL_LVMETAD; then grep "was already found" out grep "WARNING: Disabling lvmetad cache which does not support duplicate PVs." out fi pvs -o+uuid 2>&1 | tee out grep WARNING out > warn || true grep -v WARNING out > main || true test "$(grep -c $UUID1 main)" -eq 1 COUNT=$(grep --count "was already found" warn) [ "$COUNT" -eq 2 ] pvs -o+uuid --config "devices{filter=[\"a|$dev2|\",\"r|.*|\"]}" 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true not grep "$dev1" main grep "$dev2" main not grep "$dev3" main not grep "was already found" warn LVM2.2.02.176/test/shell/lvresize-usage.sh0000644000000000000120000000315613176752421016720 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2007-2016 Red Hat, Inc. All rights reserved. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 2 80 lvcreate -L 10M -n lv -i2 $vg lvresize -l +4 $vg/lv not lvextend -L+0 $vg/lv not lvextend -l+0 $vg/lv lvremove -ff $vg lvcreate -L 64M -n $lv -i2 $vg not lvresize -v -l +4 xxx/$lv # Check stripe size is reduced to extent size when it's bigger ESIZE=$(get vg_field $vg vg_extent_size --units b) lvextend -L+64m -i 2 -I$(( ${ESIZE%%B} * 2 ))B $vg/$lv 2>&1 | tee err grep "Reducing stripe size" err lvremove -ff $vg lvcreate -L 10M -n lv $vg "$dev1" lvextend -L +10M $vg/lv "$dev2" lvextend --type striped -m0 -L +10M $vg/lv "$dev2" # Attempt to reduce with lvextend and vice versa: not lvextend -L 16M $vg/lv not lvreduce -L 32M $vg/lv lvremove -ff $vg lvcreate --type mirror -aey -L 4 -n $lv1 $vg # Incorrent name for resized LV not lvextend --type mirror -L 10 -n $lv1 $vg # Same size not lvextend --type mirror -L 4 $vg/$lv1 # Cannot use any '-' or '+' sign for --mirror arg not lvextend --type mirror -L+2 -m-1 $vg/$lv1 not lvextend --type mirror -L+2 -m+1 $vg/$lv1 lvextend --type mirror -L+4 -m1 $vg/$lv1 lvs -a $vg check lv_field $vg/$lv1 size "8.00m" lvremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-raid-allocation.sh0000644000000000000120000000542413176752421021035 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2011-2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux have_raid 1 3 0 || skip aux prepare_pvs 5 get_devs vgcreate -s 256k "$vg" "${DEVICES[@]}" # Start with linear on 2 PV and ensure that converting to # RAID is not allowed to reuse PVs for different images. (Bug 1113180) lvcreate -aey -l 4 -n $lv1 $vg "$dev1:0-1" "$dev2:0-1" not lvconvert -y --type raid1 -m 1 $vg/$lv1 "$dev1" "$dev2" not lvconvert -y --type raid1 -m 1 $vg/$lv1 "$dev1" "$dev3:0-2" lvconvert -y --type raid1 -m 1 $vg/$lv1 "$dev3" not lvconvert -m 0 $vg/$lv1 lvconvert -y -m 0 $vg/$lv1 # RAID conversions are not honoring allocation policy! # lvconvert -y --type raid1 -m 1 --alloc anywhere $vg/$lv1 "$dev1" "$dev2" lvremove -ff $vg # Setup 2-way RAID1 LV, spread across 4 devices. # For each image: # - metadata LV + 1 image extent (2 total extents) on one PV # - 2 image extents on the other PV # Then attempt allocation of another image from 2 extents on # a 5th PV and the remainder of the rest of already used PVs. # # This should fail because there is insufficient space on the # non-parallel PV (i.e. there is not enough space for the image # if it doesn't share a PV with another image). lvcreate --type raid1 -m 1 -l 3 -n $lv1 $vg \ "$dev1:0-1" "$dev2:0-1" "$dev3:0-1" "$dev4:0-1" aux wait_for_sync $vg $lv1 # Should not be enough non-overlapping space. 
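# (The extra image needs its 3 data extents plus a metadata extent on space
#  not shared with the existing images, but "$dev5:0-1" offers only 2 extents,
#  so this upconvert has to fail.)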
not lvconvert -m +1 $vg/$lv1 \ "$dev5:0-1" "$dev1" "$dev2" "$dev3" "$dev4" lvconvert -y -m +1 $vg/$lv1 "$dev5" not lvconvert -m 0 $vg/$lv1 lvconvert -y -m 0 $vg/$lv1 # Should work due to '--alloc anywhere' # RAID conversion not honoring allocation policy! #lvconvert -y -m +1 --alloc anywhere $vg/$lv1 \ # "$dev5:0-1" "$dev1" "$dev2" "$dev3" "$dev4" lvremove -ff $vg # Setup 2-way RAID1 LV, spread across 4 devices # - metadata LV + 1 image extent (2 total extents) on one PV # - 2 image extents on the other PV # Kill one PV. There should be enough space on the remaining # PV for that image to reallocate the entire image there and # still maintain redundancy. lvcreate --type raid1 -m 1 -l 3 -n $lv1 $vg \ "$dev1:0-1" "$dev2:0-1" "$dev3:0-1" "$dev4:0-1" aux wait_for_sync $vg $lv1 aux disable_dev "$dev1" lvconvert -y --repair $vg/$lv1 "$dev2" "$dev3" "$dev4" #FIXME: ensure non-overlapping images (they should not share PVs) aux enable_dev "$dev1" lvremove -ff $vg vgremove -ff $vg LVM2.2.02.176/test/shell/lvresize-thin-external-origin.sh0000644000000000000120000000272613176752421021665 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test resize of thin volume with external origin SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest aux have_thin 1 2 0 || skip # Pretend we miss the external_origin_extend feature aux lvmconf 'global/thin_disabled_features = [ "external_origin_extend" ]' aux prepare_vg 2 lvcreate -L10 -n $lv1 $vg # Prepare thin pool lvcreate -L20 -T $vg/pool # Convert $lv1 into thin LV with external origin lvconvert -T $vg/$lv1 --thinpool $vg/pool --originname ext lvs -a $vg # Bigger size is not supported without feature external_origin_extend not lvresize -L+10 $vg/$lv1 # But reduction works lvresize -L-5 -f $vg/$lv1 check lv_field $vg/$lv1 lv_size "5.00" --units m --nosuffix not lvresize -L+15 -y $vg/$lv1 check lv_field $vg/$lv1 lv_size "5.00" --units m --nosuffix # Try to resize again back up to the size of external origin lvresize -L+5 -f $vg/$lv1 check lv_field $vg/$lv1 lv_size "10.00" --units m --nosuffix vgremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-snapshot-raid.sh0000644000000000000120000000317613176752421020551 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test various supported conversion of snapshot with raid SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux have_raid 1 3 0 || skip aux prepare_vg 3 vgchange -s 16k $vg lvcreate -L1 -n cow $vg # Raid and snapshot conversion lvcreate --type raid1 -L1 -m1 -n rd $vg # Cannot create snapshot of raid leg not lvcreate -s -L1 $vg/rd_rimage_0 2>&1 | tee err grep "not supported" err # Cannot use raid-type as COW not lvconvert --yes --type snapshot $vg/cow $vg/rd 2>&1 | tee err grep "not accept" err not lvconvert --yes --type snapshot $vg/cow $vg/rd_rimage_0 2>&1 | tee err grep "lv_is_visible" err not lvconvert --yes --type snapshot $vg/cow $vg/rd_rmeta_0 2>&1 | tee err grep "lv_is_visible" err # Cannot use _rimage not lvconvert --yes --type snapshot $vg/rd_rimage_0 $vg/cow 2>&1 | tee err grep "not supported" err # Cannot use _rmeta not lvconvert --yes --type snapshot $vg/rd_rmeta_0 $vg/cow 2>&1 | tee err grep "not supported" err lvconvert --yes -s $vg/rd $vg/cow check lv_field $vg/rd segtype raid1 check lv_field $vg/cow segtype linear check lv_attr_bit type $vg/cow "s" check lv_attr_bit type $vg/rd "o" lvs -a -o+lv_role,lv_layout $vg vgremove -f $vg LVM2.2.02.176/test/shell/lvconvert-mirror-updown.sh0000644000000000000120000000177713176752421020626 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Demonstrate problem when upconverting and cutting leg in clvmd SKIP_WITH_LVMLOCKD=1 . lib/inittest aux prepare_pvs 3 get_devs vgcreate -s 64k "$vg" "${DEVICES[@]}" lvcreate -aey -l10 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" # Slow down device so we are able to start next conversion in parallel aux delay_dev "$dev3" 0 200 lvconvert -m+1 -b $vg/$lv1 "$dev3" # To fix - wait helps here.... #lvconvert $vg/$lv1 lvs -a $vg # # It fails so use 'should' and -vvvv for now # should lvconvert -vvvv -m-1 $vg/$lv1 "$dev2" vgremove -f $vg LVM2.2.02.176/test/shell/lvchange-thin.sh0000644000000000000120000001220713176752421016477 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . 
lib/inittest aux have_thin 1 0 0 || skip aux prepare_pvs 3 vgcreate -s 128k $vg "$dev1" "$dev2" vgcreate -s 128k $vg2 "$dev3" lvcreate -L10M -T $vg/pool # When PV does not support discard # tests for checking thin-pool discard passdown are skipped pvmajor=$(get pv_field "$dev1" major) pvminor=$(get pv_field "$dev1" minor) test "$(< "/sys/dev/block/$pvmajor\:$pvminor/queue/discard_granularity")" -ne 0 || \ no_discard=1 # # Check change operations on a thin-pool without any thin LV # # discards_ARG (default is passdown) test -n "$no_discard" || check grep_dmsetup status $vg-pool " discard_passdown" || { # trace device layout grep -r "" /sys/block/* die "Device was expected to support passdown" } lvchange --discards nopassdown $vg/pool check grep_dmsetup table $vg-pool " no_discard_passdown" test -n "$no_discard" || check grep_dmsetup status $vg-pool " no_discard_passdown" lvchange --discards passdown $vg/pool check grep_dmsetup table $vg-pool -v "passdown" test -n "$no_discard" || check grep_dmsetup status $vg-pool " discard_passdown" # zero_ARG (default is 'yes') check grep_dmsetup table $vg-pool -v "zeroing" lvchange --zero n $vg/pool check grep_dmsetup table $vg-pool " skip_block_zeroing" lvchange --zero y $vg/pool check grep_dmsetup table $vg-pool -v "zeroing" # errorwhenfull_ARG (default is 'no') check grep_dmsetup status $vg-pool "queue_if_no_space" lvchange --errorwhenfull y $vg/pool check grep_dmsetup status $vg-pool "error_if_no_space" check grep_dmsetup table $vg-pool "error_if_no_space" lvchange --errorwhenfull n $vg/pool check grep_dmsetup status $vg-pool "queue_if_no_space" check grep_dmsetup table $vg-pool -v "error_if_no_space" # Attach thin volume lvcreate -V10M -n $lv1 $vg/pool lvcreate -L10M -n $lv2 $vg lvchange -an $vg/$lv1 # Test activation lvchange -aly $vg/$lv1 check active $vg $lv1 lvchange -aln $vg/$lv1 check inactive $vg $lv1 # Test for allowable changes # # contiguous_ARG lvchange -C y $vg/$lv1 lvchange -C n $vg/$lv1 # permission_ARG lvchange -p r $vg/$lv1 lvchange -p rw $vg/$lv1 # FIXME #should lvchange -p r $vg/pool #should lvchange -p rw $vg/pool # readahead_ARG lvchange -r none $vg/$lv1 lvchange -r auto $vg/$lv1 # FIXME # Think about more support # minor_ARG lvchange --yes -M y --minor 234 --major 253 $vg/$lv1 lvchange -M n $vg/$lv1 # cannot change major minor for pools not lvchange --yes -M y --minor 235 --major 253 $vg/pool not lvchange -M n $vg/pool # addtag_ARG lvchange --addtag foo $vg/$lv1 lvchange --addtag foo $vg/pool # deltag_ARG lvchange --deltag foo $vg/$lv1 lvchange --deltag foo $vg/pool # discards_ARG lvchange --discards nopassdown $vg/pool check grep_dmsetup table $vg-pool-tpool " no_discard_passdown" test -n "$no_discard" || check grep_dmsetup status $vg-pool-tpool " no_discard_passdown" lvchange --discards passdown $vg/pool check grep_dmsetup table $vg-pool-tpool -v "passdown" test -n "$no_discard" || check grep_dmsetup status $vg-pool-tpool " discard_passdown" # zero_ARG lvchange --zero n $vg/pool check grep_dmsetup table $vg-pool-tpool " skip_block_zeroing" lvchange --zero y $vg/pool check grep_dmsetup table $vg-pool-tpool -v "zeroing" lvchange --errorwhenfull y $vg/pool check grep_dmsetup status $vg-pool-tpool "error_if_no_space" check grep_dmsetup table $vg-pool-tpool "error_if_no_space" lvchange --errorwhenfull n $vg/pool check grep_dmsetup status $vg-pool-tpool "queue_if_no_space" check grep_dmsetup table $vg-pool-tpool -v "error_if_no_space" # # Test for disallowed metadata changes # # resync_ARG not lvchange --resync $vg/$lv1 # alloc_ARG 
#not lvchange --alloc anywhere $vg/$lv1 # discards_ARG not lvchange --discards ignore $vg/$lv1 # zero_ARG not lvchange --zero y $vg/$lv1 # # Ensure that allowed args don't cause disallowed args to get through # not lvchange --resync -ay $vg/$lv1 not lvchange --resync --addtag foo $vg/$lv1 # # Play with tags and activation # TAG=$(uname -n) aux lvmconf "activation/volume_list = [ \"$vg/$lv2\", \"@mytag\" ]" lvchange -ay $vg/$lv1 check inactive $vg $lv1 lvchange --addtag mytag $vg/$lv1 lvchange -ay @mytag_fake check inactive $vg $lv1 lvchange -ay $vg/$lv1 # Volume has matching tag check active $vg $lv1 lvchange -an $vg/$lv1 lvchange -ay @mytag check active $vg $lv1 # Fails here since it cannot clear device header not lvcreate -Zy -L10 -n $lv3 $vg2 # OK when zeroing is disabled lvcreate -Zn -L10 -n $lv3 $vg2 check inactive $vg2 $lv3 aux lvmconf "activation/volume_list = [ \"$vg2\" ]" vgchange -an $vg vgchange -ay $vg $vg2 lvs -a -o+lv_active $vg $vg2 aux lvmconf "activation/volume_list = [ \"$vg\", \"$vg2\" ]" vgremove -ff $vg $vg2 LVM2.2.02.176/test/shell/lvcreate-cache-snapshot.sh0000644000000000000120000000246213176752421020455 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Exercise creation of snapshot of cached LV SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest which mkfs.ext2 || skip which fsck || skip aux have_cache 1 5 0 || skip aux prepare_vg 2 lvcreate --type cache-pool -L1 $vg/cpool lvcreate -H -L4 -n $lv1 $vg/cpool lvcreate -s -L2 -n $lv2 $vg/$lv1 check lv_field $vg/$lv1 segtype cache # Make some 'fs' data in snapshot mkfs.ext2 "$DM_DEV_DIR/$vg/$lv2" mkdir mnt mount "$DM_DEV_DIR/$vg/$lv2" mnt touch mnt/test umount mnt sync aux udev_wait # Merge snap to origin lvconvert --merge $vg/$lv2 # Check cached origin has no valid fs. fsck -n "$DM_DEV_DIR/$vg/$lv1" # Check deactivation lvchange -an $vg # Check activation lvchange -ay $vg lvconvert --uncache $vg/$lv1 check lv_field $vg/$lv1 segtype linear # Uncached origin is fine as well fsck -n "$DM_DEV_DIR/$vg/$lv1" vgremove -ff $vg LVM2.2.02.176/test/shell/process-each-lv.sh0000644000000000000120000003655313176752421016755 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Exercise toollib process_each_lv' SKIP_WITH_LVMPOLLD=1 # disable lvmetad logging as it bogs down test systems export LVM_TEST_LVMETAD_DEBUG_OPTS=${LVM_TEST_LVMETAD_DEBUG_OPTS-} . 
lib/inittest aux prepare_devs 10 # # process_each_lv is used by a number of lv commands: # lvconvert lv (none is error) # lvchange vg|lv (none is error) # lvremove vg|lv (none is error) # lvdisplay [vg|lv] (none is all) # vgmknodes [vg|lv] (none is all) # lvs [vg|lv] (none is all) # lvscan (none is all) # # (lv can also be a tag matching an lv tag, and # vg can also be a tag matching a vg tag.) # # The logic in process_each_vl is mainly related to # selecting which vgs/lvs to process. # # # test lvremove vg|lv names # prepare_vgs_() { # set up vgs/lvs that we will remove vgcreate $SHARED $vg1 "$dev1" "$dev2" vgcreate $SHARED $vg2 "$dev3" "$dev4" vgcreate $SHARED $vg3 "$dev5" "$dev6" vgcreate $SHARED $vg4 "$dev7" "$dev8" vgcreate $SHARED $vg5 "$dev9" "$dev10" lvcreate -Zn -an -l 2 -n $lv1 $vg1 lvcreate -Zn -an -l 2 -n $lv1 $vg2 lvcreate -Zn -an -l 2 -n $lv2 $vg2 lvcreate -Zn -an -l 2 -n $lv1 $vg3 lvcreate -Zn -an -l 2 -n $lv2 $vg3 lvcreate -Zn -an -l 2 -n $lv3 $vg3 lvcreate -Zn -an -l 2 -n $lv1 $vg5 lvcreate -Zn -an -l 2 -n $lv2 $vg5 lvcreate -Zn -an -l 2 -n $lv3 $vg5 lvcreate -Zn -an -l 2 -n $lv4 $vg5 lvcreate -Zn -an -l 2 -n $lv5 $vg5 } # # # prepare_vgs_ not lvremove not lvremove garbage not lvremove $vg1/garbage lvremove $vg1 check lv_exists $vg1 check lv_not_exists $vg1 $lv1 vgremove $vg1 lvremove $vg2 check lv_exists $vg2 check lv_not_exists $vg2 $lv1 $lv2 vgremove $vg2 lvremove $vg3/$lv1 lvremove $vg3/$lv2 $vg3/$lv3 check lv_exists $vg3 check lv_not_exists $vg3 $lv1 $lv2 $lv3 vgremove $vg3 lvremove $vg4 check lv_exists $vg4 vgremove $vg4 lvremove $vg5/$lv1 $vg5 $vg5/$lv3 check lv_not_exists $vg5 $lv1 $lv2 $lv3 $lv4 $lv5 vgremove $vg5 # # test lvremove vg|lv names from multiple vgs # prepare_vgs_ lvremove $vg2 $vg3/$lv3 $vg5/$lv1 check lv_not_exists $vg2 $lv1 $lv2 check lv_not_exists $vg3 $lv3 check lv_not_exists $vg5 $lv1 lvremove $vg2 $vg1 check lv_not_exists $vg1 $lv1 lvremove $vg3/$lv1 $vg3 $vg4 $vg5/$lv2 check lv_not_exists $vg3 $lv1 $lv2 check lv_not_exists $vg5 $lv2 lvremove $vg5 $vg1 $vg5/$lv3 check lv_not_exists $vg5 $lv3 $lv4 $lv5 vgremove $vg1 $vg2 $vg3 $vg4 $vg5 # # test lvremove @lvtags # prepare_vgs_ lvchange --addtag V1L1 $vg1/$lv1 lvchange --addtag V2L1 $vg2/$lv1 lvchange --addtag V2L2 $vg2/$lv2 lvchange --addtag V23 $vg2/$lv1 lvchange --addtag V23 $vg2/$lv2 lvchange --addtag V23 $vg3/$lv1 lvchange --addtag V23 $vg3/$lv2 lvchange --addtag V23 $vg3/$lv3 lvchange --addtag V3L2 $vg3/$lv2 lvchange --addtag V3L3A $vg3/$lv3 lvchange --addtag V3L3B $vg3/$lv3 lvchange --addtag V5L1 $vg5/$lv1 lvchange --addtag V5L234 $vg5/$lv2 lvchange --addtag V5L234 $vg5/$lv3 lvchange --addtag V5L234 $vg5/$lv4 lvchange --addtag V5L5 $vg5/$lv5 vgchange -an $vg1 $vg2 $vg3 $vg4 $vg5 # verify all exist check lv_exists $vg1 $lv1 check lv_exists $vg2 $lv1 $lv2 check lv_exists $vg3 $lv1 $lv2 $lv3 check lv_exists $vg5 $lv1 $lv2 $lv3 $lv4 $lv5 lvremove @garbage lvremove @V3L3A check lv_not_exists $vg3 $lv3 # verify unremoved still exist check lv_exists $vg1 $lv1 check lv_exists $vg2 $lv1 $lv2 check lv_exists $vg3 $lv1 $lv2 check lv_exists $vg5 $lv1 $lv2 $lv3 $lv4 $lv5 lvremove @V5L234 check lv_not_exists $vg5 $lv2 $lv3 $lv4 # verify unremoved still exist check lv_exists $vg1 $lv1 check lv_exists $vg2 $lv1 $lv2 check lv_exists $vg3 $lv1 $lv2 check lv_exists $vg5 $lv1 $lv5 lvremove @V5L1 @V5L5 check lv_not_exists $vg5 $lv1 $lv5 # verify unremoved still exist check lv_exists $vg1 $lv1 check lv_exists $vg2 $lv1 $lv2 check lv_exists $vg3 $lv1 $lv2 lvremove @V23 @V1L1 @V3L2 check lv_not_exists $vg1 $lv1 
check lv_not_exists $vg2 $lv1 $lv2 check lv_not_exists $vg3 $lv1 $lv2 vgremove $vg1 $vg2 $vg3 $vg4 $vg5 # # test lvremove @vgtags # prepare_vgs_ vgchange --addtag V1 $vg1 vgchange --addtag V23 $vg2 vgchange --addtag V23 $vg3 vgchange --addtag V35 $vg3 vgchange --addtag V4 $vg4 vgchange --addtag V35 $vg5 vgchange --addtag V5 $vg5 vgchange -an $vg1 $vg2 $vg3 $vg4 $vg5 lvremove @V4 # verify unremoved exist check lv_exists $vg1 $lv1 check lv_exists $vg2 $lv1 $lv2 check lv_exists $vg3 $lv1 $lv2 $lv3 check lv_exists $vg5 $lv1 $lv2 $lv3 $lv4 $lv5 lvremove @V5 check lv_not_exists $vg5 $lv1 $lv2 $lv3 $lv4 $lv5 # verify unremoved exist check lv_exists $vg1 $lv1 check lv_exists $vg2 $lv1 $lv2 check lv_exists $vg3 $lv1 $lv2 $lv3 lvremove @V1 @V23 check lv_not_exists $vg1 $lv1 check lv_not_exists $vg2 $lv1 $lv2 check lv_not_exists $vg3 $lv1 $lv2 $lv3 vgremove $vg1 $vg2 $vg3 $vg4 $vg5 # # # prepare_vgs_ vgchange --addtag V1 $vg1 vgchange --addtag V23 $vg2 vgchange --addtag V23 $vg3 vgchange --addtag V35 $vg3 vgchange --addtag V4 $vg4 vgchange --addtag V35 $vg5 vgchange --addtag V5 $vg5 lvremove @V35 @V5 check lv_not_exists $vg3 $lv1 $lv2 /$lv3 check lv_not_exists $vg5 $lv1 $lv2 $lv3 $lv4 $lv5 # verify unremoved exist check lv_exists $vg1 $lv1 check lv_exists $vg2 $lv1 $lv2 lvremove @V1 @V23 check lv_not_exists $vg1 $lv1 check lv_not_exists $vg2 $lv1 $lv2 vgremove $vg1 $vg2 $vg3 $vg4 $vg5 # # test lvremove vg|lv names and @lvtags # prepare_vgs_ lvchange --addtag V1L1 $vg1/$lv1 lvchange --addtag V2L1 $vg2/$lv1 lvchange --addtag V2L2 $vg2/$lv2 lvchange --addtag V23 $vg2/$lv1 lvchange --addtag V23 $vg2/$lv2 lvchange --addtag V23 $vg3/$lv1 lvchange --addtag V23 $vg3/$lv2 lvchange --addtag V23 $vg3/$lv3 lvchange --addtag V3L2 $vg3/$lv2 lvchange --addtag V3L3A $vg3/$lv3 lvchange --addtag V3L3B $vg3/$lv3 lvchange --addtag V5L1 $vg5/$lv1 lvchange --addtag V5L234 $vg5/$lv2 lvchange --addtag V5L234 $vg5/$lv3 lvchange --addtag V5L234 $vg5/$lv4 lvchange --addtag V5L5 $vg5/$lv5 vgchange -an $vg1 $vg2 $vg3 $vg4 $vg5 lvremove $vg1/$lv1 @V3L2 @V5L234 check lv_not_exists $vg1 $lv1 check lv_not_exists $vg3 $lv2 check lv_not_exists $vg5 $lv2 $lv3 $lv4 # verify unremoved exist check lv_exists $vg2 $lv1 $lv2 check lv_exists $vg3 $lv1 $lv3 check lv_exists $vg5 $lv1 $lv5 lvremove $vg2/$lv1 @V23 $vg5/$lv1 @V5L5 vgremove $vg1 $vg2 $vg3 $vg4 $vg5 # # test lvremove vg|lv names and @vgtags # prepare_vgs_ vgchange --addtag V1 $vg1 vgchange --addtag V23 $vg2 vgchange --addtag V23 $vg3 vgchange --addtag V35 $vg3 vgchange --addtag V4 $vg4 vgchange --addtag V35 $vg5 vgchange --addtag V5 $vg5 lvremove $vg1/$lv1 @V35 check lv_not_exists $vg1 $lv1 check lv_not_exists $vg3 $lv1 $lv2 $lv3 check lv_not_exists $vg5 $lv1 $lv2 $lv3 $lv4 $lv5 # verify unremoved exist check lv_exists $vg2 $lv1 $lv2 lvremove $vg2/$lv1 @V23 $vg2/$lv2 vgremove $vg1 $vg2 $vg3 $vg4 $vg5 # # test lvremove @lvtags and @vgtags # prepare_vgs_ lvchange --addtag V1L1 $vg1/$lv1 lvchange --addtag V2L1 $vg2/$lv1 lvchange --addtag V2L2 $vg2/$lv2 lvchange --addtag V23 $vg2/$lv1 lvchange --addtag V23 $vg2/$lv2 lvchange --addtag V23 $vg3/$lv1 lvchange --addtag V23 $vg3/$lv2 # to check that vg tag @V23 includes this # lvchange --addtag V23 $vg3/$lv3 lvchange --addtag V3L2 $vg3/$lv2 lvchange --addtag V3L3A $vg3/$lv3 lvchange --addtag V3L3B $vg3/$lv3 lvchange --addtag V5L1 $vg5/$lv1 lvchange --addtag V5L234 $vg5/$lv2 lvchange --addtag V5L234 $vg5/$lv3 lvchange --addtag V5L234 $vg5/$lv4 lvchange --addtag V5L5 $vg5/$lv5 vgchange --addtag V1 $vg1 vgchange --addtag V23 $vg2 vgchange 
--addtag V23 $vg3 vgchange --addtag V35 $vg3 vgchange --addtag V4 $vg4 vgchange --addtag V35 $vg5 vgchange --addtag V5 $vg5 lvremove @V23 @V35 check lv_not_exists $vg2 $lv1 $lv2 check lv_not_exists $vg3 $lv1 $lv2 $lv3 check lv_not_exists $vg5 $lv1 $lv2 $lv3 $lv4 $lv5 # verify unremoved exist check lv_exists $vg1 $lv1 lvremove @V1 @V1L1 check lv_not_exists $vg1 $lv1 vgremove $vg1 $vg2 $vg3 $vg4 $vg5 # # test lvremove vg|lv names and @lvtags and @vgtags # prepare_vgs_ lvchange --addtag V1L1 $vg1/$lv1 lvchange --addtag V2L1 $vg2/$lv1 lvchange --addtag V2L2 $vg2/$lv2 lvchange --addtag V23 $vg2/$lv1 lvchange --addtag V23 $vg2/$lv2 lvchange --addtag V23 $vg3/$lv1 lvchange --addtag V23 $vg3/$lv2 # to check that vg tag @V23 includes this # lvchange --addtag V23 $vg3/$lv3 lvchange --addtag V3L2 $vg3/$lv2 lvchange --addtag V3L3A $vg3/$lv3 lvchange --addtag V3L3B $vg3/$lv3 lvchange --addtag V5L1 $vg5/$lv1 lvchange --addtag V5L234 $vg5/$lv2 lvchange --addtag V5L234 $vg5/$lv3 lvchange --addtag V5L234 $vg5/$lv4 lvchange --addtag V5L5 $vg5/$lv5 vgchange --addtag V1 $vg1 vgchange --addtag V23 $vg2 vgchange --addtag V23 $vg3 vgchange --addtag V35 $vg3 vgchange --addtag V4 $vg4 vgchange --addtag V35 $vg5 vgchange --addtag V5 $vg5 lvremove $vg1/$lv1 @V23 @V5L5 check lv_not_exists $vg1 $lv1 check lv_not_exists $vg2 $lv1 $lv2 check lv_not_exists $vg3 $lv1 $lv2 $lv3 check lv_not_exists $vg5 $lv5 # verify unremoved exist check lv_exists $vg5 $lv1 $lv2 $lv3 $lv4 lvremove $vg5/$lv2 @V5L234 @V5 check lv_not_exists $vg5 $lv1 $lv2 $lv3 $lv4 vgremove $vg1 $vg2 $vg3 $vg4 $vg5 # # test lvs: empty, vg(s), lv(s), vgtag(s), lvtag(s), garbage, combinations # prepare_vgs_ lvchange --addtag V1L1 $vg1/$lv1 lvchange --addtag V2L1 $vg2/$lv1 lvchange --addtag V2L2 $vg2/$lv2 lvchange --addtag V23 $vg2/$lv1 lvchange --addtag V23 $vg2/$lv2 lvchange --addtag V23 $vg3/$lv1 lvchange --addtag V23 $vg3/$lv2 lvchange --addtag V23 $vg3/$lv3 lvchange --addtag V3L2 $vg3/$lv2 lvchange --addtag V3L3A $vg3/$lv3 lvchange --addtag V3L3B $vg3/$lv3 lvchange --addtag V5L1 $vg5/$lv1 lvchange --addtag V5L234 $vg5/$lv2 lvchange --addtag V5L234 $vg5/$lv3 lvchange --addtag V5L234 $vg5/$lv4 lvchange --addtag V5L5 $vg5/$lv5 vgchange --addtag V1 $vg1 vgchange --addtag V23 $vg2 vgchange --addtag V23 $vg3 vgchange --addtag V35 $vg3 vgchange --addtag V4 $vg4 vgchange --addtag V35 $vg5 vgchange --addtag V5 $vg5 # empty lvs -o vg_name,lv_name --separator '-' >err grep $vg1-$lv1 err grep $vg2-$lv1 err grep $vg2-$lv2 err grep $vg3-$lv1 err grep $vg3-$lv2 err grep $vg3-$lv3 err grep $vg5-$lv1 err grep $vg5-$lv2 err grep $vg5-$lv3 err grep $vg5-$lv4 err grep $vg5-$lv5 err # vg lvs -o vg_name,lv_name --separator '-' $vg1 >err grep $vg1-$lv1 err not grep $vg2-$lv1 err not grep $vg2-$lv2 err not grep $vg3-$lv1 err not grep $vg3-$lv2 err not grep $vg3-$lv3 err not grep $vg5-$lv1 err not grep $vg5-$lv2 err not grep $vg5-$lv3 err not grep $vg5-$lv4 err not grep $vg5-$lv5 err # vgs lvs -o vg_name,lv_name --separator '-' $vg1 $vg2 >err grep $vg1-$lv1 err grep $vg2-$lv1 err grep $vg2-$lv2 err not grep $vg3-$lv1 err not grep $vg3-$lv2 err not grep $vg3-$lv3 err not grep $vg5-$lv1 err not grep $vg5-$lv2 err not grep $vg5-$lv3 err not grep $vg5-$lv4 err not grep $vg5-$lv5 err # lv lvs -o vg_name,lv_name --separator '-' $vg1/$lv1 >err grep $vg1-$lv1 err not grep $vg2-$lv1 err not grep $vg2-$lv2 err not grep $vg3-$lv1 err not grep $vg3-$lv2 err not grep $vg3-$lv3 err not grep $vg5-$lv1 err not grep $vg5-$lv2 err not grep $vg5-$lv3 err not grep $vg5-$lv4 err not grep $vg5-$lv5 err 
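# (a single vg/lv name above selects just that one LV; the cases below extend
#  the selection to multiple names, vg tags, lv tags and garbage arguments)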
# lvs lvs -o vg_name,lv_name --separator '-' $vg1/$lv1 $vg2/$lv1 $vg2/$lv2 >err grep $vg1-$lv1 err grep $vg2-$lv1 err grep $vg2-$lv2 err not grep $vg3-$lv1 err not grep $vg3-$lv2 err not grep $vg3-$lv3 err not grep $vg5-$lv1 err not grep $vg5-$lv2 err not grep $vg5-$lv3 err not grep $vg5-$lv4 err not grep $vg5-$lv5 err # vgtag lvs -o vg_name,lv_name --separator '-' @V1 >err grep $vg1-$lv1 err not grep $vg2-$lv1 err not grep $vg2-$lv2 err not grep $vg3-$lv1 err not grep $vg3-$lv2 err not grep $vg3-$lv3 err not grep $vg5-$lv1 err not grep $vg5-$lv2 err not grep $vg5-$lv3 err not grep $vg5-$lv4 err not grep $vg5-$lv5 err # vgtags lvs -o vg_name,lv_name --separator '-' @V1 @V35 >err grep $vg1-$lv1 err not grep $vg2-$lv1 err not grep $vg2-$lv2 err grep $vg3-$lv1 err grep $vg3-$lv2 err grep $vg3-$lv3 err grep $vg5-$lv1 err grep $vg5-$lv2 err grep $vg5-$lv3 err grep $vg5-$lv4 err grep $vg5-$lv5 err # lvtag lvs -o vg_name,lv_name --separator '-' @V1L1 >err grep $vg1-$lv1 err not grep $vg2-$lv1 err not grep $vg2-$lv2 err not grep $vg3-$lv1 err not grep $vg3-$lv2 err not grep $vg3-$lv3 err not grep $vg5-$lv1 err not grep $vg5-$lv2 err not grep $vg5-$lv3 err not grep $vg5-$lv4 err not grep $vg5-$lv5 err # lvtags lvs -o vg_name,lv_name --separator '-' @V1L1 @V5L234 >err grep $vg1-$lv1 err not grep $vg2-$lv1 err not grep $vg2-$lv2 err not grep $vg3-$lv1 err not grep $vg3-$lv2 err not grep $vg3-$lv3 err not grep $vg5-$lv1 err grep $vg5-$lv2 err grep $vg5-$lv3 err grep $vg5-$lv4 err not grep $vg5-$lv5 err # vg and lv and vgtag and lvtag lvs -o vg_name,lv_name --separator '-' $vg2 $vg5/$lv5 @V1 @V5L234 >err grep $vg1-$lv1 err grep $vg2-$lv1 err grep $vg2-$lv2 err not grep $vg3-$lv1 err not grep $vg3-$lv2 err not grep $vg3-$lv3 err not grep $vg5-$lv1 err grep $vg5-$lv2 err grep $vg5-$lv3 err grep $vg5-$lv4 err grep $vg5-$lv5 err # garbage name gives an error if used without a tag not lvs -o vg_name,lv_name --separator '-' garbage >err not grep $vg1-$lv1 err not grep $vg2-$lv1 err not grep $vg2-$lv2 err not grep $vg3-$lv1 err not grep $vg3-$lv2 err not grep $vg3-$lv3 err not grep $vg5-$lv1 err not grep $vg5-$lv2 err not grep $vg5-$lv3 err not grep $vg5-$lv4 err not grep $vg5-$lv5 err not lvs -o vg_name,lv_name --separator '-' $vg1/$lv1 garbage >err grep $vg1-$lv1 err not grep $vg2-$lv1 err not grep $vg2-$lv2 err not grep $vg3-$lv1 err not grep $vg3-$lv2 err not grep $vg3-$lv3 err not grep $vg5-$lv1 err not grep $vg5-$lv2 err not grep $vg5-$lv3 err not grep $vg5-$lv4 err not grep $vg5-$lv5 err # garbage name does not give an error if used with a tag lvs -o vg_name,lv_name --separator '-' @V1 garbage >err grep $vg1-$lv1 err not grep $vg2-$lv1 err not grep $vg2-$lv2 err not grep $vg3-$lv1 err not grep $vg3-$lv2 err not grep $vg3-$lv3 err not grep $vg5-$lv1 err not grep $vg5-$lv2 err not grep $vg5-$lv3 err not grep $vg5-$lv4 err not grep $vg5-$lv5 err lvs -o vg_name,lv_name --separator '-' @garbage garbage >err not grep $vg1-$lv1 err not grep $vg2-$lv1 err not grep $vg2-$lv2 err not grep $vg3-$lv1 err not grep $vg3-$lv2 err not grep $vg3-$lv3 err not grep $vg5-$lv1 err not grep $vg5-$lv2 err not grep $vg5-$lv3 err not grep $vg5-$lv4 err not grep $vg5-$lv5 err # garbage tag never gives an error lvs -o vg_name,lv_name --separator '-' @V1 @garbage >err grep $vg1-$lv1 err not grep $vg2-$lv1 err not grep $vg2-$lv2 err not grep $vg3-$lv1 err not grep $vg3-$lv2 err not grep $vg3-$lv3 err not grep $vg5-$lv1 err not grep $vg5-$lv2 err not grep $vg5-$lv3 err not grep $vg5-$lv4 err not grep $vg5-$lv5 err lvs -o 
vg_name,lv_name --separator '-' $vg1/$lv1 @garbage >err grep $vg1-$lv1 err not grep $vg2-$lv1 err not grep $vg2-$lv2 err not grep $vg3-$lv1 err not grep $vg3-$lv2 err not grep $vg3-$lv3 err not grep $vg5-$lv1 err not grep $vg5-$lv2 err not grep $vg5-$lv3 err not grep $vg5-$lv4 err not grep $vg5-$lv5 err lvs -o vg_name,lv_name --separator '-' @garbage >err not grep $vg1-$lv1 err not grep $vg2-$lv1 err not grep $vg2-$lv2 err not grep $vg3-$lv1 err not grep $vg3-$lv2 err not grep $vg3-$lv3 err not grep $vg5-$lv1 err not grep $vg5-$lv2 err not grep $vg5-$lv3 err not grep $vg5-$lv4 err not grep $vg5-$lv5 err vgremove -f $vg1 $vg2 $vg3 $vg4 $vg5 LVM2.2.02.176/test/shell/lvconvert-mirror-basic.sh0000644000000000000120000001014313176752421020356 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010-2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 # disable lvmetad logging as it bogs down test systems export LVM_TEST_LVMETAD_DEBUG_OPTS=${LVM_TEST_LVMETAD_DEBUG_OPTS-} . lib/inittest log_name_to_count() { case "$1" in mirrored) echo 2 ;; disk) echo 1 ;; *) echo 0 ;; esac } # FIXME: For test_[up|down]convert, I'd still like to be able # to specifiy devices - especially if I can do partial PV # specification for down-converts. It may even be wise to # do one round through these tests without specifying the PVs # to use and one round where we do. # test_lvconvert # start_mirror_count: The '-m' argument to create with # start_log_type: core|disk|mirrored # final_mirror_count: The '-m' argument to convert to # final_log_type: core|disk|mirrored # active: Whether the LV should be active when the convert happens # # Exmaple: Convert 3-way disk-log mirror to # 2-way disk-log mirror while not active # -> test_lvconvert 2 disk 3 disk 0 test_lvconvert() { local start_count=$1 local start_count_p1=$(( start_count + 1 )) local start_log_type=$2 local finish_count=$3 local finish_count_p1=$(( finish_count + 1 )) local finish_log_type=$4 local start_log_count local finish_log_count local max_log_count local alloc="" local active="-aey" local i test "$5" = "active" && active="-an" #test $finish_count -gt $start_count && up=true # Do we have enough devices for the mirror images? test $start_count_p1 -gt ${#DEVICES[@]} && \ die "Action requires too many devices" # Do we have enough devices for the mirror images? 
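# (same device-count check as above, this time for the requested final image count)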
test $finish_count_p1 -gt ${#DEVICES[@]} && \ die "Action requires too many devices" start_log_count=$(log_name_to_count $start_log_type) finish_log_count=$(log_name_to_count $finish_log_type) if [ $finish_log_count -gt $start_log_count ]; then max_log_count=$finish_log_count else max_log_count=$start_log_count fi if [ $start_count -gt 0 ]; then # Are there extra devices for the log or do we overlap if [ $(( start_count_p1 + start_log_count )) -gt ${#DEVICES[@]} ]; then alloc="--alloc anywhere" fi lvcreate "$active" -Zn -l2 --type mirror -m $start_count --mirrorlog $start_log_type \ -n $lv1 $vg $alloc check mirror_legs $vg $lv1 $start_count_p1 # FIXME: check mirror log else lvcreate "$active" -Zn -l2 -n $lv1 $vg fi lvs -a -o name,copy_percent,devices $vg # Are there extra devices for the log or do we overlap if [ $(( finish_count_p1 + finish_log_count )) -gt ${#DEVICES[@]} ]; then alloc="--alloc anywhere" fi # --mirrorlog is invalid with -m0 if [ "$finish_count" -eq 0 ]; then mirrorlog="" finish_log_type="" else mirrorlog="--mirrorlog" fi lvconvert --type mirror -m $finish_count $mirrorlog $finish_log_type \ $vg/$lv1 $alloc test "$active" = "-an" || lvchange "$active" $vg/$lv1 check mirror_no_temporaries $vg $lv1 if [ "$finish_count_p1" -eq 1 ]; then check linear $vg $lv1 else if test -n "$alloc"; then check mirror_nonredundant $vg $lv1 else check mirror $vg $lv1 fi check mirror_legs $vg $lv1 $finish_count_p1 fi } aux prepare_vg 5 5 get_devs MIRRORED="mirrored" # FIXME: Cluster is not supporting exlusive activation of mirrored log test -e LOCAL_CLVMD && MIRRORED= test_many() { i=$1 for j in $(seq 0 3); do for k in core disk $MIRRORED; do for l in core disk $MIRRORED; do if test "$i" -eq "$j" && test "$k" = "$l"; then continue; fi : ---------------------------------------------------- : "Testing mirror conversion -m$i/$k -> -m$j/$l" : ---------------------------------------------------- test_lvconvert $i $k $j $l 0 lvremove -ff $vg test_lvconvert $i $k $j $l 1 lvremove -ff $vg done done done } LVM2.2.02.176/test/shell/snapshot-merge-stack.sh0000644000000000000120000000454213176752421020012 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Exercise snapshot merge also when stacked SKIP_WITH_LVMLOCKD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . 
lib/inittest which mkfs.ext3 || skip aux target_at_least dm-snapshot-merge 1 0 0 || skip aux prepare_vg 2 100 snap_and_merge() { lvcreate -s -n $lv2 -L20 $vg/$lv1 "$dev2" #dd if=/dev/zero of="$DM_DEV_DIR/$vg/$lv1" bs=1M count=10 conv=fdatasync aux udev_wait mkfs.ext3 "$DM_DEV_DIR/$vg/$lv2" sync lvs -a $vg SLEEP_PID=$(aux hold_device_open $vg $lv1 20) # initiate background merge lvconvert -b --merge $vg/$lv2 lvs -a -o+lv_merging,lv_merge_failed $vg get lv_field $vg/$lv1 lv_attr | grep "Owi-ao" get lv_field $vg/$lv2 lv_attr | grep "Swi-a-s---" kill $SLEEP_PID aux delay_dev "$dev1" 0 200 "$(get first_extent_sector "$dev1"):" lvchange --poll n --refresh $vg/$lv1 dmsetup table lvs -av -o+lv_merging,lv_merge_failed $vg # Origin is closed and snapshot merge could run get lv_field $vg/$lv1 lv_attr | grep "Owi-a-" sleep 1 check lv_attr_bit state $vg/$lv2 "a" aux error_dev "$dev2" "$(get first_extent_sector "$dev2"):" aux enable_dev "$dev1" # delay to let snapshot merge 'discover' failing COW device sleep 1 sync dmsetup status lvs -a -o+lv_merging,lv_merge_failed $vg check lv_attr_bit state $vg/$lv1 "m" check lv_attr_bit state $vg/$lv2 "m" # device OK and running in full speed aux enable_dev "$dev2" # reactivate so merge can finish lvchange -an $vg lvchange -ay $vg sleep 1 lvs -a -o+lv_merging,lv_merge_failed $vg check lv_not_exists $vg $lv2 fsck -n "$DM_DEV_DIR/$vg/$lv1" lvremove -f $vg } # First check merge on plain linear LV lvcreate -aey -L50 -n $lv1 $vg "$dev1" snap_and_merge # When available check merge of old snapshot with Thin LV being origin if aux have_thin 1 0 0 ; then lvcreate -T -L10 -V50 -n $lv1 $vg/pool "$dev1" snap_and_merge fi # TODO snapshot merge with Mirror, Raid, Cache... vgremove -f $vg LVM2.2.02.176/test/shell/pvmove-cache-segtypes.sh0000644000000000000120000001513313176752421020167 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description="ensure pvmove works with the cache segment types" SKIP_WITH_LVMLOCKD=1 SKIP_WITH_CLVMD=1 . lib/inittest # pvmove fails when a RAID LV is the origin of a cache LV # pvmoving cache types is currently disabled in tools/pvmove.c # So, for now we set everything up and make sure pvmove /isn't/ allowed. # This allows us to ensure that it is disallowed even when there are # stacking complications to consider. which md5sum || skip aux have_cache 1 3 0 || skip # for stacking aux have_thin 1 8 0 || skip aux have_raid 1 4 2 || skip aux prepare_vg 5 80 for mode in "--atomic" "" do # Each of the following tests does: # 1) Create two LVs - one linear and one other segment type # The two LVs will share a PV. 
# 2) Move both LVs together # 3) Move only the second LV by name # Testing pvmove of cache-pool LV (can't check contents though) ############################################################### lvcreate -l 2 -n ${lv1}_foo $vg "$dev1" lvcreate --type cache-pool -n ${lv1}_pool -l 4 $vg "$dev1" check lv_tree_on $vg ${lv1}_foo "$dev1" check lv_tree_on $vg ${lv1}_pool "$dev1" pvmove $mode "$dev1" "$dev5" 2>&1 | tee out grep "Skipping cache-pool LV, ${lv1}_pool" out grep "Skipping cache-related LV, ${lv1}_pool_cmeta" out grep "Skipping cache-related LV, ${lv1}_pool_cdata" out check lv_tree_on $vg ${lv1}_foo "$dev5" not check lv_tree_on $vg ${lv1}_pool "$dev5" #pvmove $mode -n ${lv1}_pool "$dev5" "$dev4" #check lv_tree_on $vg ${lv1}_pool "$dev4" #check lv_tree_on $vg ${lv1}_foo "$dev5" lvremove -ff $vg dmsetup info -c | not grep $vg # Testing pvmove of origin LV ############################# lvcreate -l 2 -n ${lv1}_foo $vg "$dev1" lvcreate --type cache-pool -n ${lv1}_pool -l 4 $vg "$dev5" lvcreate --type cache -n $lv1 -l 8 $vg/${lv1}_pool "$dev1" check lv_tree_on $vg ${lv1}_foo "$dev1" check lv_tree_on $vg ${lv1}_pool "$dev5" check lv_tree_on $vg ${lv1} "$dev1" aux mkdev_md5sum $vg $lv1 pvmove $mode "$dev1" "$dev3" 2>&1 | tee out grep "Skipping cache LV, ${lv1}" out check lv_tree_on $vg ${lv1}_foo "$dev3" #check lv_tree_on $vg ${lv1}_pool "$dev5" lvs -a -o name,attr,devices $vg not check lv_tree_on $vg ${lv1} "$dev3" #check dev_md5sum $vg $lv1 #pvmove $mode -n $lv1 "$dev3" "$dev1" #check lv_tree_on $vg ${lv1}_foo "$dev3" #check lv_tree_on $vg ${lv1}_pool "$dev5" #check lv_tree_on $vg ${lv1} "$dev1" #check dev_md5sum $vg $lv1 lvremove -ff $vg dmsetup info -c | not grep $vg # Testing pvmove of a RAID origin LV #################################### lvcreate -l 2 -n ${lv1}_foo $vg "$dev1" lvcreate --type raid1 -m 1 -l 8 -n $lv1 $vg "$dev1" "$dev2" lvcreate --type cache -l 4 -n ${lv1}_pool $vg/$lv1 "$dev5" check lv_tree_on $vg ${lv1}_foo "$dev1" check lv_tree_on $vg ${lv1} "$dev1" "$dev2" check lv_tree_on $vg ${lv1}_pool "$dev5" aux mkdev_md5sum $vg $lv1 pvmove $mode "$dev1" "$dev3" 2>&1 | tee out grep "Skipping cache LV, ${lv1}" out check lv_tree_on $vg ${lv1}_foo "$dev3" not check lv_tree_on $vg ${lv1} "$dev2" "$dev3" #check lv_tree_on $vg ${lv1}_pool "$dev5" #check dev_md5sum $vg $lv1 -- THIS IS WHERE THINGS FAIL IF PVMOVE NOT DISALLOWED #pvmove $mode -n $lv1 "$dev3" "$dev1" #check lv_tree_on $vg ${lv1}_foo "$dev3" #check lv_tree_on $vg ${lv1} "$dev1" "$dev2" #check lv_tree_on $vg ${lv1}_pool "$dev5" #check dev_md5sum $vg $lv1 lvremove -ff $vg dmsetup info -c | not grep $vg # Testing pvmove of a RAID cachepool (metadata and data) ######################################################## lvcreate -l 2 -n ${lv1}_foo $vg "$dev1" lvcreate --type raid1 -L 6M -n meta $vg "$dev1" "$dev2" lvcreate --type raid1 -L 4M -n ${lv1}_pool $vg "$dev1" "$dev2" lvconvert --yes --type cache-pool $vg/${lv1}_pool --poolmetadata $vg/meta lvcreate --type cache -n $lv1 -L 8M $vg/${lv1}_pool "$dev5" check lv_tree_on $vg ${lv1}_foo "$dev1" check lv_tree_on $vg ${lv1}_pool "$dev1" "$dev2" check lv_tree_on $vg ${lv1} "$dev5" aux mkdev_md5sum $vg $lv1 # This will move ${lv1}_foo and the cache-pool data & meta # LVs, both of which contain a RAID1 _rimage & _rmeta LV - 5 total LVs pvmove $mode "$dev1" "$dev3" 2>&1 | tee out grep "Skipping cache-pool LV, ${lv1}_pool" out grep "Skipping cache-related LV, ${lv1}_pool_cmeta" out grep "Skipping cache-related LV, ${lv1}_pool_cdata" out check lv_tree_on $vg ${lv1}_foo "$dev3" not check 
lv_tree_on $vg ${lv1}_pool "$dev2" "$dev3" #check lv_tree_on $vg ${lv1} "$dev5" #check dev_md5sum $vg $lv1 #pvmove $mode -n ${lv1}_pool "$dev3" "$dev1" #check lv_tree_on $vg ${lv1}_foo "$dev3" #check lv_tree_on $vg ${lv1}_pool "$dev1" "$dev2" #check lv_tree_on $vg ${lv1} "$dev5" #check dev_md5sum $vg $lv1 lvremove -ff $vg dmsetup info -c | not grep $vg # Testing pvmove of Thin-pool on cache LV on RAID ################################################# lvcreate -l 2 -n ${lv1}_foo $vg "$dev1" # RAID for cachepool lvcreate --type raid1 -m 1 -L 6M -n meta $vg "$dev1" "$dev2" lvcreate --type raid1 -m 1 -L 4M -n cachepool $vg "$dev1" "$dev2" lvconvert --yes --type cache-pool $vg/cachepool --poolmetadata $vg/meta # RAID for thin pool data LV lvcreate --type raid1 -m 1 -L 8 -n thinpool $vg "$dev3" "$dev4" # Convert thin pool data to a cached LV lvconvert --type cache -Zy $vg/thinpool --cachepool $vg/cachepool # Create simple thin pool meta lvcreate -L 2M -n meta $vg "$dev1" # Use thin pool data LV to build a thin pool lvconvert --yes --thinpool $vg/thinpool --poolmetadata $vg/meta # Create a thin lv for fun lvcreate -T $vg/thinpool -V 20 -n thin_lv check lv_tree_on $vg ${lv1}_foo "$dev1" check lv_tree_on $vg cachepool "$dev1" "$dev2" check lv_tree_on $vg thinpool "$dev1" "$dev3" "$dev4" aux mkdev_md5sum $vg thin_lv lvs -a -o name,attr,devices $vg # Should move ${lv1}_foo and thinpool_tmeta from dev1 to dev5 pvmove $mode "$dev1" "$dev5" 2>&1 | tee out lvs -a -o name,attr,devices $vg check lv_tree_on $vg ${lv1}_foo "$dev5" not check lv_tree_on $vg cachepool "$dev5" "$dev2" check lv_tree_on $vg thinpool "$dev3" "$dev4" "$dev5" # Move non-cache tmeta #check dev_md5sum $vg/thin_lv #pvmove $mode -n $vg/cachepool "$dev5" "$dev1" #check lv_tree_on $vg ${lv1}_foo "$dev5" #check lv_tree_on $vg $vg/cachepool "$dev1" "$dev2" #check lv_tree_on $vg $vg/thinpool "$dev3" "$dev4" #check dev_md5sum $vg/thin_lv lvremove -ff $vg dmsetup info -c | not grep $vg done LVM2.2.02.176/test/shell/lvmetad-pvs.sh0000644000000000000120000000130213176752421016204 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 1 20000 get_devs pvs "${DEVICES[@]}" | grep "$dev1" # check for PV size overflows pvs "${DEVICES[@]}" | grep 19.53g pvs "${DEVICES[@]}" | not grep 16.00e LVM2.2.02.176/test/shell/vgsplit-thin.sh0000644000000000000120000000242713176752421016403 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test vgsplit command options for validity SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . 
lib/inittest aux have_thin 1 0 0 || skip aux prepare_devs 5 get_devs vgcreate "$vg1" "${DEVICES[@]}" lvcreate -T -L8M $vg1/pool1 -V10M -n $lv1 "$dev1" "$dev2" lvcreate -T -L8M $vg1/pool2 -V10M -n $lv2 "$dev3" "$dev4" # Test with external origin if available lvcreate -l1 -an -pr --zero n -n eorigin $vg1 "$dev5" aux have_thin 1 5 0 && lvcreate -an -s $vg1/eorigin -n $lv3 --thinpool $vg1/pool1 # Cannot move active thin not vgsplit $vg1 $vg2 "$dev1" "$dev2" "$dev5" vgchange -an $vg1 not vgsplit $vg1 $vg2 "$dev1" not vgsplit $vg1 $vg2 "$dev2" "$dev3" vgsplit $vg1 $vg2 "$dev1" "$dev2" "$dev5" lvs -a -o+devices $vg1 $vg2 vgmerge $vg1 $vg2 vgremove -ff $vg1 LVM2.2.02.176/test/shell/lvmetad-dump.sh0000644000000000000120000000124313176752421016345 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITHOUT_LVMETAD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 2 lvcreate -n bar -l 1 $vg aux lvmetad_dump | tee lvmetad.txt grep $vg lvmetad.txt vgremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-cache-smq.sh0000644000000000000120000000156113176752421017632 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Exercise conversion of cache and cache pool SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux have_cache 1 8 0 || skip aux prepare_vg 5 80 lvcreate --type cache-pool -an -v -L 2 -n cpool $vg lvcreate -H --cachepolicy smq -L 4 -n corigin --cachepool $vg/cpool check lv_field $vg/corigin cache_policy "smq" lvconvert --splitcache $vg/corigin lvs -o+cache_policy -a $vg vgremove -f $vg LVM2.2.02.176/test/shell/cache-metadata2.sh0000644000000000000120000000471013176752421016653 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Exercise usage of metadata2 cache metadata format SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 # Until new version of cache_check tools - no integrity validation LVM_TEST_CACHE_CHECK_CMD="" . 
lib/inittest META2= aux have_cache 1 10 0 || { META2=not aux have_cache 1 3 0 || skip } aux prepare_vg 5 80 lvcreate -L2 -n $lv1 $vg lvcreate --type cache-pool -L1 $vg/cpool1 # no parameter - no format is stored check lv_field $vg/cpool1 cachemetadataformat "" lvcreate --type cache-pool -L1 --config 'allocation/cache_metadata_format=1' $vg/cpool # format is in configuration - would be applied during actual caching # so not stored in this moment check lv_field $vg/cpool cachemetadataformat "" lvcreate --type cache-pool -L1 --cachemetadataformat 1 $vg/cpool2 # format was specified on cmdline - preserve it metadata check lv_field $vg/cpool2 cachemetadataformat "1" lvconvert --yes -H --cachepool $vg/cpool --config 'allocation/cache_metadata_format=1' $vg/$lv1 check lv_field $vg/cpool2 cachemetadataformat "1" lvs -a -o+cachemetadataformat $vg lvremove -f $vg if [ -z "$META2" ]; then # for these test we need kernel with metadata2 support lvcreate --type cache-pool -L1 $vg/cpool lvcreate -H -L10 -n $lv1 --cachepool $vg/cpool check lv_field $vg/$lv1 cachemetadataformat "2" lvremove -f $vg lvcreate -L10 -n $lv1 $vg lvcreate --type cache-pool -L1 $vg/cpool lvconvert -y -H --cachepool $vg/cpool $vg/$lv1 check lv_field $vg/$lv1 cachemetadataformat "2" lvremove -f $vg lvcreate -L10 -n $lv1 $vg lvcreate --type cache-pool -L1 $vg/cpool lvconvert --cachemetadataformat 1 -y -H --cachepool $vg/cpool $vg/$lv1 check lv_field $vg/$lv1 cachemetadataformat "1" lvremove -f $vg lvcreate -L10 -n $lv1 $vg lvcreate --type cache-pool -L1 $vg/cpool lvconvert --config 'allocation/cache_metadata_format=1' -y -H --cachepool $vg/cpool $vg/$lv1 check lv_field $vg/$lv1 cachemetadataformat "1" lvremove -f $vg fi #lvs -a -o name,cachemetadataformat,kernelmetadataformat,chunksize,cachepolicy,cachemode $vg vgremove -f $vg LVM2.2.02.176/test/shell/pvremove-warnings.sh0000644000000000000120000000145513176752421017444 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 2 pvcreate "$dev1" "$dev2" pvremove "$dev1" "$dev2" 2>&1 | tee pvremove.txt not grep "No physical" pvremove.txt pvcreate "$dev1" "$dev2" vgcreate bla "$dev1" "$dev2" pvremove -ff -y "$dev1" "$dev2" 2>&1 | tee pvremove.txt not grep "device missing" pvremove.txt LVM2.2.02.176/test/shell/pvmove-abort.sh0000644000000000000120000000327613176752421016377 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Check pvmove --abort behaviour when specific device is requested SKIP_WITH_LVMLOCKD=1 . 
lib/inittest aux prepare_pvs 3 60 vgcreate -s 128k $vg "$dev1" "$dev2" pvcreate --metadatacopies 0 "$dev3" vgextend $vg "$dev3" # Slowdown read/writes aux delay_dev "$dev3" 0 800 "$(get first_extent_sector "$dev3"):" for mode in "--atomic" "" ; do for backgroundarg in "-b" "" ; do # Create multisegment LV lvcreate -an -Zn -l30 -n $lv1 $vg "$dev1" lvcreate -an -Zn -l30 -n $lv2 $vg "$dev2" cmd1=(pvmove -i1 $backgroundarg $mode "$dev1" "$dev3") cmd2=(pvmove -i1 $backgroundarg $mode "$dev2" "$dev3") if test -e HAVE_DM_DELAY; then if test -z "$backgroundarg" ; then "${cmd1[@]}" & aux wait_pvmove_lv_ready "$vg-pvmove0" "${cmd2[@]}" & aux wait_pvmove_lv_ready "$vg-pvmove1" else LVM_TEST_TAG="kill_me_$PREFIX" "${cmd1[@]}" LVM_TEST_TAG="kill_me_$PREFIX" "${cmd2[@]}" fi # remove specific device pvmove --abort "$dev1" # check if proper pvmove was canceled get lv_field $vg name -a | tee out not grep -E "^\[?pvmove0" out grep -E "^\[?pvmove1" out fi # remove any remaining pvmoves in progress pvmove --abort lvremove -ff $vg wait aux kill_tagged_processes done done # Restore delayed device back aux enable_dev "$dev3" vgremove -ff $vg LVM2.2.02.176/test/shell/snapshot-autoumount-dmeventd.sh0000644000000000000120000000314713176752421021634 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010-2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # no automatic extensions please SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest which mkfs.ext2 || skip aux lvmconf "activation/snapshot_autoextend_percent = 0" \ "activation/snapshot_autoextend_threshold = 100" aux prepare_dmeventd aux prepare_vg 2 mntdir="${PREFIX}mnt" lvcreate -aey -L8 -n base $vg mkfs.ext2 "$DM_DEV_DIR/$vg/base" lvcreate -s -L4 -n snap $vg/base lvchange --monitor y $vg/snap mkdir "$mntdir" # Use remount-ro to avoid logging kernel WARNING mount -o errors=remount-ro "$DM_DEV_DIR/mapper/$vg-snap" "$mntdir" test "$(dmsetup info -c --noheadings -o open $vg-snap)" -eq 1 grep "$mntdir" /proc/mounts # overfill 4M snapshot (with metadata) not dd if=/dev/zero of="$mntdir/file" bs=1M count=4 conv=fdatasync # Should be nearly instant check of dmeventd for invalid snapshot. # Wait here for umount and open_count drops to 0 as it may # take a while to finalize umount operation (it might be already # removed from /proc/mounts, but still opened). for i in {1..100}; do sleep .1 test "$(dmsetup info -c --noheadings -o open $vg-snap)" -eq 0 && break done not grep "$mntdir" /proc/mounts vgremove -f $vg LVM2.2.02.176/test/shell/vgchange-sysinit.sh0000644000000000000120000000262613176752421017236 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2011 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_CLVMD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest which mkfs.ext3 || skip aux prepare_pvs 2 8 var_lock="$DM_DEV_DIR/$vg1/$lv1" # keep in sync with aux configured lockingdir mount_dir="var/lock/lvm" cleanup_mounted_and_teardown() { umount "$mount_dir" || true aux teardown } vgcreate $vg1 "$dev1" vgcreate $vg2 "$dev2" lvcreate -l 1 -n $lv2 $vg2 vgchange -an $vg2 lvcreate -n $lv1 -l 100%FREE $vg1 mkfs.ext3 -b4096 -j "$var_lock" trap 'cleanup_mounted_and_teardown' EXIT mount -n -r "$var_lock" "$mount_dir" # locking must fail on read-only filesystem not vgchange -ay $vg2 # no-locking with --sysinit vgchange --sysinit -ay $vg2 test -b "$DM_DEV_DIR/$vg2/$lv2" vgchange --sysinit -an $vg2 test ! -b "$DM_DEV_DIR/$vg2/$lv2" vgchange --ignorelockingfailure -ay $vg2 # TODO maybe also support --ignorelockingfailure ?? vgremove --config 'global{locking_type=0}' -ff $vg2 umount "$mount_dir" || true vgremove -ff $vg1 LVM2.2.02.176/test/shell/lvmetad-autoshutdown.sh0000644000000000000120000000257213176752421020152 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITHOUT_LVMETAD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest kill -0 "$(< LOCAL_LVMETAD)" || die "lvmetad is already dead" lvmetad_timeout=3 aux prepare_pvs 1 vgcreate $vg1 "$dev1" kill "$(< LOCAL_LVMETAD)" aux prepare_lvmetad -t $lvmetad_timeout sleep $lvmetad_timeout # lvmetad should die after timeout, but give it some time to do so i=0 while kill -0 "$(< LOCAL_LVMETAD)" 2>/dev/null; do test $i -ge $((lvmetad_timeout*10)) && die "lvmetad didn't shutdown with optional timeout: $lvmetad_timeout seconds" sleep .1 i=$((i+1)) done aux prepare_lvmetad -t 0 sleep 1 # lvmetad must not die with -t 0 option kill -0 "$(< LOCAL_LVMETAD)" || die "lvmetad died" kill "$(< LOCAL_LVMETAD)" aux prepare_lvmetad -t $lvmetad_timeout sleep 1 vgs sleep 1 vgs sleep 1 vgs # check that connection to lvmetad resets the timeout kill -0 "$(< LOCAL_LVMETAD)" || die "lvmetad died too soon" vgremove -ff $vg1 LVM2.2.02.176/test/shell/metadata-dirs.sh0000644000000000000120000000244313176752421016470 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2011 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux prepare_devs 3 get_devs pvcreate --metadatacopies 0 "${DEVICES[@]}" not vgcreate "$vg" "${DEVICES[@]}" aux lvmconf "metadata/dirs = [ \"$TESTDIR/mda\" ]" vgcreate $vg "$dev1" check vg_field $vg vg_mda_count 1 vgremove -ff $vg vgcreate "$vg" "${DEVICES[@]}" check vg_field $vg vg_mda_count 1 vgremove -ff $vg pvcreate --metadatacopies 1 --metadataignore y "$dev1" vgcreate "$vg" "${DEVICES[@]}" check vg_field $vg vg_mda_count 2 vgremove -ff $vg pvcreate --metadatacopies 1 --metadataignore n "$dev1" vgcreate "$vg" "${DEVICES[@]}" check vg_field $vg vg_mda_count 2 vgremove -ff $vg pvcreate --metadatacopies 0 "$dev1" aux lvmconf "metadata/dirs = [ \"$TESTDIR/mda\", \"$TESTDIR/mda2\" ]" vgcreate "$vg" "${DEVICES[@]}" check vg_field $vg vg_mda_count 2 vgremove -ff $vg LVM2.2.02.176/test/shell/pvmove-thin-segtypes.sh0000644000000000000120000000436213176752421020070 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description="ensure pvmove works with thin segment types" SKIP_WITH_LVMLOCKD=1 SKIP_WITH_CLVMD=1 . lib/inittest which md5sum || skip aux have_thin 1 8 0 || skip # for stacking aux have_raid 1 3 5 || skip aux prepare_pvs 5 20 get_devs vgcreate -c n -s 128k "$vg" "${DEVICES[@]}" for mode in "--atomic" "" do # Each of the following tests does: # 1) Create two LVs - one linear and one other segment type # The two LVs will share a PV. # 2) Move both LVs together # 3) Move only the second LV by name # Testing pvmove of thin LV lvcreate -l 2 -n ${lv1}_foo $vg "$dev1" lvcreate -T $vg/${lv1}_pool -l 4 -V 8 -n $lv1 "$dev1" check lv_tree_on $vg ${lv1}_foo "$dev1" check lv_tree_on $vg $lv1 "$dev1" aux mkdev_md5sum $vg $lv1 pvmove "$dev1" "$dev5" $mode check lv_tree_on $vg ${lv1}_foo "$dev5" check lv_tree_on $vg $lv1 "$dev5" check dev_md5sum $vg $lv1 pvmove -n $lv1 "$dev5" "$dev4" $mode check lv_tree_on $vg $lv1 "$dev4" check lv_tree_on $vg ${lv1}_foo "$dev5" check dev_md5sum $vg $lv1 lvremove -ff $vg # Testing pvmove of thin LV on RAID lvcreate -l 2 -n ${lv1}_foo $vg "$dev1" lvcreate --type raid1 -m 1 -l 4 -n ${lv1}_raid1_pool $vg "$dev1" "$dev2" lvcreate --type raid1 -m 1 -L 2 -n ${lv1}_raid1_meta $vg "$dev1" "$dev2" lvconvert --yes --thinpool $vg/${lv1}_raid1_pool \ --poolmetadata ${lv1}_raid1_meta lvcreate -T $vg/${lv1}_raid1_pool -V 8 -n $lv1 check lv_tree_on $vg ${lv1}_foo "$dev1" check lv_tree_on $vg $lv1 "$dev1" "$dev2" aux mkdev_md5sum $vg $lv1 pvmove "$dev1" "$dev5" $mode check lv_tree_on $vg ${lv1}_foo "$dev5" check lv_tree_on $vg $lv1 "$dev2" "$dev5" check dev_md5sum $vg $lv1 pvmove -n $lv1 "$dev5" "$dev4" $mode check lv_tree_on $vg $lv1 "$dev2" "$dev4" check lv_tree_on $vg ${lv1}_foo "$dev5" check dev_md5sum $vg $lv1 lvremove -ff $vg done vgremove -ff $vg LVM2.2.02.176/test/shell/covercmd.sh0000644000000000000120000000372513176752421015557 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # tests functionality we don't have in other special test files yet # to improve code coverage # SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 5 get_devs pvcreate --metadatacopies 0 "$dev2" pvcreate --metadatacopies 0 "$dev3" # FIXME takes very long time #pvck "$dev1" vgcreate "$vg" "${DEVICES[@]}" lvcreate -l 5 -i5 -I256 -n $lv $vg lvcreate -aey -l 5 -n $lv1 $vg lvcreate -s -l 5 -n $lv2 $vg/$lv1 pvck "$dev1" # "-persistent y --major 254 --minor 20" # "-persistent n" for i in pr "p rw" "-monitor y" "-monitor n" -refresh; do lvchange -$i $vg/$lv done lvrename $vg $lv $lv-rename invalid lvrename $vg invalid lvrename $vg $vg/$lv-rename $vg1/$lv invalid lvrename $vg/$lv-rename $vg1/$lv $vg invalid lvrename $vg/$lv-rename $vg/012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 invalid lvrename $vg/$lv-rename $vg/"" invalid lvrename $vg/$lv-rename "$vg/!@#$%" invalid lvrename $vg/$lv-rename $vg/$lv-rename fail lvrename $vg1/$lv-rename $vg1/$lv vgremove -f $vg # test pvresize functionality # missing params not pvresize # negative size not pvresize --setphysicalvolumesize -10M -y "$dev1" # not existing device not pvresize --setphysicalvolumesize 10M -y "$dev7" pvresize --setphysicalvolumesize 10M -y "$dev1" pvresize "$dev1" # test various lvm utils lvm dumpconfig lvm devtypes lvm formats lvm segtypes lvm tags # test obsoleted tools not lvm lvmchange not lvm lvmsadc not lvm lvmsar not lvm pvdata LVM2.2.02.176/test/shell/thin-autoumount-dmeventd.sh0000644000000000000120000000616713176752421020744 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # no automatic extensions, just umount SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest mntdir="${PREFIX}mnt with space" mntusedir="${PREFIX}mntuse" cleanup_mounted_and_teardown() { umount "$mntdir" 2>/dev/null || true umount "$mntusedir" 2>/dev/null || true vgremove -ff $vg aux teardown } is_lv_opened_() { test "$(get lv_field "$1" lv_device_open --binary)" = 1 } # # Main # which mkfs.ext4 || skip export MKE2FS_CONFIG="$TESTDIR/lib/mke2fs.conf" aux have_thin 1 0 0 || skip # Simple implementation of umount when lvextend fails cat <<- EOF >testcmd.sh #!/bin/sh echo "Data: \$DMEVENTD_THIN_POOL_DATA" echo "Metadata: \$DMEVENTD_THIN_POOL_METADATA" "$TESTDIR/lib/lvextend" --use-policies \$1 || { umount "$mntdir" || true umount "$mntusedir" || true return 0 } test "\$($TESTDIR/lib/lvs -o selected -S "data_percent>95||metadata_percent>95" --noheadings \$1)" -eq 0 || { umount "$mntdir" || true umount "$mntusedir" || true return 0 } EOF chmod +x testcmd.sh # Show prepared script cat testcmd.sh # Use autoextend percent 0 - so extension fails and triggers umount... 
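#
# The aux lvmconf call below corresponds roughly to this lvm.conf
# fragment (a sketch; the real testcmd.sh path is substituted at run time):
#
#   activation {
#       thin_pool_autoextend_threshold = 70
#       thin_pool_autoextend_percent = 0    # 0 => never extend, so the policy fails
#   }
#   dmeventd {
#       thin_command = "/path/to/testcmd.sh"
#   }
#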
aux lvmconf "activation/thin_pool_autoextend_percent = 0" \ "activation/thin_pool_autoextend_threshold = 70" \ "dmeventd/thin_command = \"/$PWD/testcmd.sh\"" aux prepare_dmeventd aux prepare_vg 2 lvcreate -L8M -V8M -n $lv1 -T $vg/pool lvcreate -V8M -n $lv2 -T $vg/pool mkfs.ext4 "$DM_DEV_DIR/$vg/$lv1" mkfs.ext4 "$DM_DEV_DIR/$vg/$lv2" lvchange --monitor y $vg/pool mkdir "$mntdir" "$mntusedir" trap 'cleanup_mounted_and_teardown' EXIT mount "$DM_DEV_DIR/$vg/$lv1" "$mntdir" mount "$DM_DEV_DIR/$vg/$lv2" "$mntusedir" # Check both LVs are opened (~mounted) is_lv_opened_ "$vg/$lv1" is_lv_opened_ "$vg/$lv2" touch "$mntusedir/file$$" sync # Running 'keeper' process sleep holds the block device still in use sleep 60 < "$mntusedir/file$$" >/dev/null 2>&1 & PID_SLEEP=$! lvs -a $vg # Fill pool above 95% (to cause 'forced lazy umount) dd if=/dev/zero of="$mntdir/file$$" bs=256K count=20 conv=fdatasync lvs -a $vg # Could loop here for a few secs so dmeventd can do some work # In the worst case check only happens every 10 seconds :( # With low water mark it quickly discovers overflow and umounts $vg/$lv1 for i in $(seq 1 12) ; do is_lv_opened_ "$vg/$lv1" || break test $i -lt 12 || die "$mntdir should have been unmounted by dmeventd!" sleep 1 done lvs -a $vg is_lv_opened_ "$vg/$lv2" || \ die "$mntusedir is not mounted here (sleep already expired??)" # Kill device holding process kill $PID_SLEEP wait not is_lv_opened_ "$vg/$lv2" || { mount die "$mntusedir should have been unmounted by dmeventd!" } LVM2.2.02.176/test/shell/lvconvert-repair-cache.sh0000644000000000000120000000731513176752421020317 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test repairing of broken cached LV SKIP_WITH_LVMPOLLD=1 . 
lib/inittest MKFS=mkfs.ext4 FSCK=fsck which "$MKFS" || skip which "$FSCK" || skip # # Main # # older versions of cache target reported unreliably write failures aux have_cache 1 7 0 || skip aux prepare_vg 4 #if [ 1 -eq 0 ] ; then ############################# ###### WRITETHROUGH ######### ############################# # Create cached LV lvcreate --type cache-pool -L10 $vg/cpool "$dev1" lvcreate -H -L20 --cachemode writethrough -n $lv1 $vg/cpool "$dev2" "$MKFS" "$DM_DEV_DIR/$vg/$lv1" sync aux disable_dev "$dev1" #lvchange -an $vg # Check it is prompting fro confirmation not lvconvert --uncache $vg/$lv1 # --yes to drop when Check its prompting lvconvert --yes --uncache $vg/$lv1 "$FSCK" -n "$DM_DEV_DIR/$vg/$lv1" aux enable_dev "$dev1" ################## lvcreate --type cache-pool -L10 $vg/cpool "$dev1" lvconvert -H --cachemode writethrough --cachepool $vg/cpool -Zy $lv1 # Check basic --repair of cachepool metadata lvchange -an $vg/$lv1 lvconvert --repair $vg/$lv1 lvs -a $vg check lv_exists $vg ${lv1}_meta0 eval $(lvs -S 'name=~_pmspare' -a --config 'report/mark_hidden_devices=0' -o name --noheading --nameprefixes $vg) lvremove -f --yes "$vg/$LVM2_LV_NAME" # check --repair without creation of _pmspare device lvconvert --repair --poolmetadataspare n $vg/$lv1 check lv_exists $vg ${lv1}_meta1 # check no _pmspare has been created in previous --repair test "0" = $(lvs -S 'name=~_pmspare' -a -o name --noheading --nameprefixes $vg | wc -l) aux disable_dev "$dev2" # Deactivate before remove # FIXME: handle this while LV is alive lvchange -an $vg/$lv1 # Check it is prompting for confirmation not lvconvert --uncache $vg/$lv1 # --yes to drop when Check its prompting lvconvert --yes --uncache $vg/$lv1 aux enable_dev "$dev2" # FIXME: temporary workaround lvcreate -L1 -n $lv5 $vg lvremove -ff $vg ########################## ###### WRITEBACK ######### ########################## #fi # Create cached LV so metadata is on dev1 and data on dev2 lvcreate -L5 -n meta $vg "$dev1" lvcreate -L10 -n cpool $vg "$dev2" lvconvert --yes --poolmetadata $vg/meta --cachepool $vg/cpool lvcreate -H -L20 --cachemode writeback -n $lv1 $vg/cpool "$dev3" lvs -a -o+seg_pe_ranges,cachemode $vg "$MKFS" "$DM_DEV_DIR/$vg/$lv1" sync # Seriously damage cache metadata aux error_dev "$dev1" 2054:2 # flushing status dmsetup status $vg-$lv1 # On fixed kernel we get instant Fail here get lv_field $vg/$lv1 lv_attr | tee out grep "Cwi-a-C-F-" out || { # while on older unfixed we just notice needs_check grep "Cwi-c-C---" out sleep .1 # And now cache is finaly Failed check lv_attr_bit health $vg/$lv1 "F" } check lv_field $vg/$lv1 lv_health_status "failed" aux disable_dev "$dev1" # Check it is prompting for confirmation not lvconvert --uncache $vg/$lv1 # Check --yes is not enought to drop writethrough caching not lvconvert --yes --uncache $vg/$lv1 # --force needs --yes to drop when Check its prompting not lvconvert --force --uncache $vg/$lv1 lvconvert --force --yes --uncache $vg/$lv1 not "$FSCK" -n "$DM_DEV_DIR/$vg/$lv1" aux enable_dev "$dev1" vgremove -ff $vg # FIXME - device should not be here should not dmsetup remove ${vg}-cpool_cmeta-missing_0_0 should not dmsetup remove ${vg}-cpool_cdata-missing_0_0 LVM2.2.02.176/test/shell/vgreduce-usage.sh0000644000000000000120000000616413176752421016663 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. 
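#
# A condensed sketch of the uncache paths exercised in the cache-repair
# test above (names are illustrative): a writethrough or otherwise clean
# cache only needs confirmation, while dropping a damaged writeback cache
# also needs --force because dirty blocks may be lost:
#
#   lvconvert --yes --uncache $vg/$lv1            # writethrough / clean cache
#   lvconvert --force --yes --uncache $vg/$lv1    # failed writeback cache
#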
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 4 get_devs if test -n "$LVM_TEST_LVM1" ; then mdatypes='1 2' else mdatypes='2' fi for mdatype in $mdatypes do # setup PVs pvcreate -M$mdatype "$dev1" "$dev2" # (lvm$mdatype) vgreduce removes only the specified pv from vg (bz427382)" ' vgcreate -M$mdatype $vg1 "$dev1" "$dev2" vgreduce $vg1 "$dev1" check pv_field "$dev2" vg_name $vg1 vgremove -f $vg1 # (lvm$mdatype) vgreduce rejects removing the last pv (--all) vgcreate -M$mdatype $vg1 "$dev1" "$dev2" not vgreduce --all $vg1 vgremove -f $vg1 # (lvm$mdatype) vgreduce rejects removing the last pv vgcreate -M$mdatype $vg1 "$dev1" "$dev2" not vgreduce $vg1 "$dev1" "$dev2" vgremove -f $vg1 pvremove -ff "$dev1" "$dev2" done mdatype=2 # we only expect the following to work for lvm2 metadata # (lvm$mdatype) setup PVs (--metadatacopies 0) pvcreate -M$mdatype "$dev1" "$dev2" pvcreate --metadatacopies 0 -M$mdatype "$dev3" "$dev4" # (lvm$mdatype) vgreduce rejects removing pv with the last mda copy (bz247448) vgcreate -M$mdatype $vg1 "$dev1" "$dev3" not vgreduce $vg1 "$dev1" vgremove -f $vg1 #COMM "(lvm$mdatype) vgreduce --removemissing --force repairs to linear (bz221921)" # (lvm$mdatype) setup: create mirror & damage one pv vgcreate -M$mdatype $vg1 "$dev1" "$dev2" "$dev3" lvcreate -aey -n $lv1 --type mirror -m1 -l 4 $vg1 lvcreate -n $lv2 -l 4 $vg1 "$dev2" lvcreate -n $lv3 -l 4 $vg1 "$dev3" vgchange -an $vg1 aux disable_dev "$dev1" # (lvm$mdatype) vgreduce --removemissing --force repairs to linear vgreduce --removemissing --force $vg1 check lv_field $vg1/$lv1 segtype linear check pvlv_counts $vg1 2 3 0 # cleanup aux enable_dev "$dev1" pvscan vgremove -f $vg1 not vgs $vg1 # just double-check it's really gone #COMM "vgreduce rejects --removemissing --mirrorsonly --force when nonmirror lv lost too" # (lvm$mdatype) setup: create mirror + linear lvs vgcreate -M$mdatype "$vg1" "${DEVICES[@]}" lvcreate -n $lv2 -l 4 $vg1 lvcreate -aey --type mirror -m1 -n $lv1 -l 4 $vg1 "$dev1" "$dev2" "$dev3" lvcreate -n $lv3 -l 4 $vg1 "$dev3" pvs --segments -o +lv_name "${DEVICES[@]}" # for record only # (lvm$mdatype) setup: damage one pv vgchange -an $vg1 aux disable_dev "$dev1" #pvcreate -ff -y "$dev1" # vgreduce rejects --removemissing --mirrorsonly --force when nonmirror lv lost too #not vgreduce -c n --removemissing --mirrorsonly --force $vg1 # CHECKME - command above was rejected because of '-c n' vgreduce --removemissing --mirrorsonly --force $vg1 aux enable_dev "$dev1" pvs -P "${DEVICES[@]}" # for record lvs -P $vg1 # for record vgs -P $vg1 # for record vgremove -ff $vg1 LVM2.2.02.176/test/shell/lvmetad-pvscan-cache.sh0000644000000000000120000000322613176752421017736 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2.
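#
# A minimal sketch of the vgreduce recovery calls used above (the VG name
# is illustrative): without --force only missing PVs that carry no LV
# data are dropped, while --force also removes or reduces the LV parts
# that lived on the lost PV, e.g. degrading a mirror to linear:
#
#   vgreduce --removemissing $vg1
#   vgreduce --removemissing --force $vg1
#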
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITHOUT_LVMETAD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 2 vgcreate $vg1 "$dev1" "$dev2" vgs | grep $vg1 pvscan --cache vgs | grep $vg1 # Check that an LV cannot be activated by lvchange while VG is exported lvcreate -n $lv1 -l 4 -a n $vg1 check lv_exists $vg1 vgexport $vg1 fail lvs $vg1 fail lvchange -ay $vg1/$lv1 vgimport $vg1 check lv_exists $vg1 check lv_field $vg1/$lv1 lv_active "" # Check that an LV cannot be activated by pvscan while VG is exported vgexport $vg1 pvscan --cache -aay "$dev1" pvscan --cache -aay "$dev2" vgimport $vg1 check lv_exists $vg1 check lv_field $vg1/$lv1 lv_active "" pvscan --cache -aay "$dev1" pvscan --cache -aay "$dev2" check lv_field $vg1/$lv1 lv_active "active" lvchange -an $vg1/$lv1 # When MDA is ignored on PV, do not read any VG # metadata from such PV as it may contain old # metadata which hasn't been updated for some # time and also since the MDA is marked as ignored, # it should really be *ignored*! pvchange --metadataignore y "$dev1" aux disable_dev "$dev2" pvscan --cache check pv_field "$dev1" vg_name "[unknown]" aux enable_dev "$dev2" pvscan --cache check pv_field "$dev1" vg_name "$vg1" vgremove -ff $vg1 LVM2.2.02.176/test/shell/lvresize-thin-metadata.sh0000644000000000000120000000240613176752421020331 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013-2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest aux have_thin 1 10 0 || skip aux prepare_vg 3 1256 for deactivate in true false; do # Create some thin volumes lvcreate -L20 -V30 -n $lv1 -T $vg/pool lvcreate -s $vg/$lv1 # Confirm we have basic 2M metadata check lv_field $vg/pool_tmeta size "2.00m" test $deactivate && lvchange -an $vg lvresize --poolmetadatasize +2M $vg/pool # Test it's been resized to 4M check lv_field $vg/pool_tmeta size "4.00m" lvresize --poolmetadatasize +256M $vg/pool check lv_field $vg/pool_tmeta size "260.00m" lvresize --poolmetadatasize +3G $vg/pool check lv_field $vg/pool_tmeta size "3.25g" vgchange -an $vg vgchange -ay $vg # TODO: Add more tests lvremove -ff $vg done LVM2.2.02.176/test/shell/lvcreate-raid-nosync.sh0000644000000000000120000000451113176752421020000 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux have_raid 1 7 0 || skip segtypes="raid5" aux have_raid4 && segtypes="raid4 $segtypes" aux prepare_vg 6 _sync() { aux enable_dev "$dev1" aux wait_for_sync $vg $lv1 test -z "$1" || check raid_leg_status $vg $lv1 $1 lvremove --yes $vg/$lv1 # restore to delay_dev tables for all devices aux restore_from_devtable "$dev1" } # Delay 1st leg so that rebuilding status characters # can be read before resync finished too quick. aux delay_dev "$dev1" 0 100 "$(get first_extent_sector "$dev1")" # raid0/raid0_meta don't support resynchronization for r in raid0 raid0_meta do lvcreate --type $r -Zn -i 3 -l 1 -n $lv1 $vg check raid_leg_status $vg $lv1 "AAA" lvremove --yes $vg/$lv1 done # raid1 supports resynchronization lvcreate --type raid1 -m 2 -Zn -l 4 -n $lv1 $vg check raid_leg_status $vg $lv1 "aaa" _sync "AAA" # raid1 supports --nosync lvcreate --type raid1 --nosync -Zn -m 2 -l 1 -n $lv1 $vg check raid_leg_status $vg $lv1 "AAA" lvremove --yes $vg/$lv1 for r in $segtypes do # raid4/5 support resynchronization lvcreate --type $r -Zn -i 3 -L10 -n $lv1 $vg check raid_leg_status $vg $lv1 "aaaa" _sync "AAAA" # raid4/5 support --nosync lvcreate --type $r -Zn --nosync -i 3 -l 1 -n $lv2 $vg check raid_leg_status $vg $lv2 "AAAA" lvremove --yes $vg done # raid6 supports resynchronization lvcreate --type raid6 -Zn -i 3 -l 4 -n $lv1 $vg check raid_leg_status $vg $lv1 "aaaaa" _sync "AAAAA" # raid6 rejects --nosync; it has to initialize P- and Q-Syndromes not lvcreate --type raid6 --nosync -Zn -i 3 -l 1 -n $lv1 $vg # raid10 supports resynchronization lvcreate --type raid10 -m 1 -Zn -i 3 -L10 -n $lv1 $vg check raid_leg_status $vg $lv1 "aaaaaa" _sync "AAAAAA" # raid10 supports --nosync lvcreate --type raid10 --nosync -m 1 -Zn -i 3 -l 1 -n $lv1 $vg check raid_leg_status $vg $lv1 "AAAAAA" vgremove -ff $vg LVM2.2.02.176/test/shell/activation-skip.sh0000644000000000000120000000171313176752421017055 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest # Test skip activation flag -k|--setactivationskip aux prepare_vg lvcreate -an --zero n -l 1 -n $lv1 $vg lvcreate -ky -K -l1 -n $lv2 $vg get lv_field $vg/$lv2 lv_attr | grep -- "-wi-a----k" lvchange -ay -K $vg check active $vg $lv1 lvchange -an $vg lvchange -ay --setactivationskip y $vg/$lv1 check inactive $vg $lv1 get lv_field $vg/$lv1 lv_attr | grep -- "-wi------k" lvchange -ay -K $vg check active $vg $lv1 vgremove -ff $vg LVM2.2.02.176/test/shell/lvextend-thin-raid.sh0000644000000000000120000000276613176752421017467 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
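#
# In the raid_leg_status checks above, 'a' marks a leg that is still
# synchronizing and 'A' a leg that is already in sync.  A rough way to
# watch the same state by hand (see 'lvs -o help' for the field names):
#
#   lvs -a -o name,sync_percent,raid_sync_action $vg
#
# --nosync skips the initial resynchronization altogether, which is why
# it is refused for raid6 above: the P and Q syndromes must be computed.
#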
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest aux have_thin 1 0 0 || skip aux have_raid 1 3 0 || skip aux prepare_vg 6 lvcreate --type raid1 -l2 --nosync -n pool $vg lvconvert --yes --thinpool $vg/pool "$dev3" check lv_field $vg/pool seg_size_pe "2" check lv_field $vg/pool_tdata seg_size_pe "2" -a lvextend -l+3 $vg/pool check lv_field $vg/pool seg_size_pe "5" check lv_field $vg/pool_tdata seg_size_pe "5" -a lvremove -f $vg # check 'raid10' resize works for pool metadata resize # https://bugzilla.redhat.com/1075644 lvcreate --type raid10 -m1 -L5 -i3 --nosync -n pool $vg lvcreate --type raid10 -m1 -L3 -i3 --nosync -n meta $vg lvconvert --yes --thinpool $vg/pool --poolmetadata $vg/meta check lv_field $vg/pool_tdata lv_size "6.00m" -a check lv_field $vg/pool_tmeta lv_size "3.00m" -a lvextend --poolmetadatasize +1 --size +1 $vg/pool check lv_field $vg/pool_tdata lv_size "7.50m" -a check lv_field $vg/pool_tmeta lv_size "4.50m" -a vgremove -ff $vg LVM2.2.02.176/test/shell/lvrename-cache-thin.sh0000644000000000000120000000233613176752421017564 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Check rename of stacked thin over cached LV SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux have_cache 1 3 0 || skip aux have_thin 1 0 0 || skip aux prepare_vg 1 80 lvcreate -L10 -n cpool $vg lvcreate -L10 -n tpool $vg lvcreate -L10 -n $lv1 $vg lvconvert --yes --cache --cachepool cpool $vg/tpool # currently the only allowed stacking is cache thin data volume lvconvert --yes --type thin-pool $vg/tpool lvcreate -V10 $vg/tpool # check cache pool remains same after thin-pool rename lvrename $vg/tpool $vg/newpool check lv_exists $vg newpool cpool check lv_not_exists $vg tpool # allowing rename of internal cache pool lvrename $vg/cpool $vg/cachepool check lv_exists $vg cachepool check lv_not_exists $vg cpool lvs -a $vg vgremove -f $vg LVM2.2.02.176/test/shell/lvconvert-snapshot.sh0000644000000000000120000000310113176752421017620 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test various supported conversion of snapshot SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
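#
# The pool resize pattern used above grows the data and metadata devices
# in a single call; a generic sketch (the sizes are only placeholders):
#
#   lvextend --size +1g --poolmetadatasize +16m $vg/pool
#
# Both hidden sub-LVs (pool_tdata and pool_tmeta) are extended, which is
# what the lv_size checks on the hidden (-a) volumes verify above.
#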
lib/inittest aux prepare_pvs 1 get_devs vgcreate -s 1k "$vg" "${DEVICES[@]}" lvcreate --type snapshot -V50 -L1 -n $lv1 -s $vg lvcreate -aey -L1 -n $lv2 $vg lvcreate -L1 -s -n $lv3 $vg/$lv2 lvcreate -l1 -n $lv4 $vg lvcreate -L1 -n $lv5 $vg lvcreate -L1 -n $lv6 $vg not lvconvert -s $vg/$lv1 $vg/not_exist # Can't convert to snapshot of origin not lvconvert -s $vg/$lv1 $vg/$lv2 not lvconvert -s $vg/$lv2 $vg/$lv1 not lvconvert -s $vg/$lv5 $vg/$lv1 not lvconvert -s $vg/$lv5 $vg/$lv2 not lvconvert -s $vg/$lv5 $vg/$lv3 # Can't be itself not lvconvert -s $vg/$lv1 $vg/$lv1 not lvconvert -s $vg/$lv2 $vg/$lv2 # Can't convert snapshot to snapshot not lvconvert -s $vg/$lv1 $vg/$lv3 not lvconvert -s $vg/$lv2 $vg/$lv3 # Can't make a real LV snapshot of virtual 'zero' snapshot not lvconvert -s $vg/$lv1 $vg/$lv4 # Check minimum size not lvconvert -s $vg/$lv2 $vg/$lv4 2>&1 | tee err grep "smaller" err # This should pass lvconvert --yes -s $vg/$lv2 $vg/$lv5 lvconvert --yes --type snapshot $vg/$lv2 $vg/$lv6 vgremove -f $vg LVM2.2.02.176/test/shell/nomda-restoremissing.sh0000644000000000000120000000223313176752421020117 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 3 pvchange --metadataignore y "$dev1" lvcreate -aey --type mirror -m 1 -l 1 -n mirror $vg lvchange -a n $vg/mirror lvcreate -l 1 -n lv1 $vg "$dev1" # try to just change metadata; we expect the new version (with MISSING_PV set # on the reappeared volume) to be written out to the previously missing PV aux disable_dev "$dev1" lvremove $vg/mirror not vgck $vg 2>&1 | tee log grep "missing 1 physical volume" log not lvcreate -aey --type mirror -m 1 -l 1 -n mirror $vg # write operations fail aux enable_dev "$dev1" lvcreate -aey --type mirror -m 1 -l 1 -n mirror $vg # no MDA => automatically restored vgck $vg vgremove -ff $vg LVM2.2.02.176/test/shell/lvcreate-cache-fail.sh0000644000000000000120000000205713176752421017531 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Exercise creation of cache and cache pool volumes and failure path # https://bugzilla.redhat.com/1355923 SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
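#
# The conversions accepted at the end of the snapshot test above turn an
# existing LV into the COW area of an origin; both spellings are
# equivalent ("origin" and "cowlv" are illustrative names, origin first):
#
#   lvconvert -s              $vg/origin $vg/cowlv
#   lvconvert --type snapshot $vg/origin $vg/cowlv
#
# The LV to be used as COW must meet a minimum size, which is why the
# undersized $lv4 is rejected with a "smaller" message above.
#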
lib/inittest aux have_cache 1 3 0 || skip #aux prepare_pvs 1 4707950 #vgcreate $vg "$dev1" #lvcreate -L4T -n $lv1 $vg #lvcreate -H -L500G -n cache $vg/$lv1 #fail lvcreate -H -l 127999 -n cache $vg/$lv1 aux prepare_vg 1 20 lvcreate -L10 -n $lv1 $vg fail lvcreate -H -L2 -n cache $vg/$lv1 lvs -a $vg vgs $vg lvdisplay $vg #dmsetup table #dmsetup status #time dmsetup suspend ${vg}-${lv1} #time dmsetup resume ${vg}-${lv1} vgremove -ff $vg LVM2.2.02.176/test/shell/select-report.sh0000644000000000000120000001660613176752421016547 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014-2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 6 16 # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # MAKE SURE ALL PV, VG AND LV NAMES CREATED IN # THIS TEST ARE UNIQUE - THIS SIMPLIFIES TESTING # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # create $VGS with assorted tags vgcreate $vg1 --vgmetadatacopies 2 --addtag "vg_tag3" --addtag "vg_tag2" -s 4m "$dev1" "$dev2" "$dev3" vgcreate $vg2 --addtag "vg_tag2" -s 4m "$dev4" "$dev5" vgcreate $vg3 --addtag "vg_tag1" -s 4m "$dev6" # add PV assorted tags pvchange --addtag "pv_tag3" --addtag "pv_tag1" --addtag "pv_tag2" "$dev1" pvchange --addtag "pv_tag1" --addtag "pv_tag4" "$dev6" # create $LVS with assorted tags and various sizes lvcreate --addtag 'lv_tag2.-+/=!:&#' --addtag "lv_tag1" -L8m -n "vol1" $vg1 lvcreate --addtag "lv_tag1" -L4m -n "vol2" $vg1 lvcreate --readahead 512 --addtag "lv_tag1" -L16m -n "abc" $vg2 lvcreate --readahead 512 -My --minor 254 -L4m -n "xyz" $vg3 lvcreate -L4m -aey -n "orig" $vg3 lvcreate -L4m -s "$vg3/orig" -n "snap" OUT_LOG_FILE="out" ERR_LOG_FILE="err" sel() { local items_found ${1}s --noheadings -o ${1}_name --select "$2" 2>"$ERR_LOG_FILE" | tee "$OUT_LOG_FILE" shift 2 test -f "$OUT_LOG_FILE" || { echo " >>> Missing log file to check!" return 1 } # there shouldn't be any selection syntax error grep "Selection syntax error at" "$ERR_LOG_FILE" >/dev/null && { echo " >>> Selection syntax error hit!" 
return 1 } items_found=$(wc -l "$OUT_LOG_FILE" | cut -f 1 -d ' ') # the number of lines on output must match test "$items_found" -eq $# || { echo " >>> NUMBER OF ITEMS EXPECTED: $#" "$@" echo " >>> NUMBER OF ITEMS FOUND: $items_found ($(< $OUT_LOG_FILE))" return 1 } # the names selected must be correct # each pv, vg and lv name is unique so just check # the presence of the names given as arg for name in "$@" ; do grep "$name" "$OUT_LOG_FILE" >/dev/null || { echo " >>> $name not found in the output log" return 1 } done rm -f "$OUT_LOG_FILE" "$ERR_LOG_FILE" } ########################## # STRING FIELD SELECTION # ########################## #$LVS 'lv_name="vol1"' && result vol1 sel lv 'lv_name="vol1"' vol1 #$LVS 'lv_name!="vol1"' && result vol2 abc xyz sel lv 'lv_name!="vol1"' vol2 abc xyz orig snap # check string values are accepted without quotes too sel lv 'lv_name=vol1' vol1 # check single quotes are also accepted instead of double quotes sel lv "lv_name='vol1'" vol1 ############################### # STRING LIST FIELD SELECTION # ############################### sel pv 'tags=["pv_tag1"]' # for one item, no need to use [] sel pv 'tags="pv_tag1"' "$dev1" "$dev6" # no match sel pv 'tags=["pv_tag1" && "pv_tag2"]' sel pv 'tags=["pv_tag1" && "pv_tag2" && "pv_tag3"]' "$dev1" # check the order has no effect on selection result sel pv 'tags=["pv_tag3" && "pv_tag2" && "pv_tag1"]' "$dev1" sel pv 'tags=["pv_tag4" || "pv_tag3"]' "$dev1" "$dev6" sel pv 'tags!=["pv_tag1"]' "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" # check mixture of && and || - this is not allowed not sel pv 'tags=["pv_tag1" && "pv_tag2" || "pv_tag3"]' # check selection with blank value sel lv 'tags=""' xyz orig snap sel lv 'tags={}' xyz orig snap sel lv 'tags=[]' xyz orig snap # check subset selection sel pv 'tags={"pv_tag1"}' "$dev1" "$dev6" sel pv 'tags={"pv_tag1" && "pv_tag2"}' "$dev1" ########################## # NUMBER FIELD SELECTION # ########################## sel vg 'pv_count=3' $vg1 sel vg 'pv_count!=3' $vg3 $vg2 sel vg 'pv_count<2' $vg3 sel vg 'pv_count<=2' $vg3 $vg2 sel vg 'pv_count>2' $vg1 sel vg 'pv_count>=2' $vg1 $vg2 ######################## # SIZE FIELD SELECTION # ######################## # check size units are accepted as well as floating point numbers for sizes sel lv 'size=8388608b' vol1 sel lv 'size=8192k' vol1 sel lv 'size=8m' vol1 sel lv 'size=8.00m' vol1 sel lv 'size=0.0078125g' vol1 sel lv 'size=0.00000762939453125t' vol1 sel lv 'size=0.000000007450580596923828125p' vol1 sel lv 'size=0.0000000000072759576141834259033203125e' vol1 sel lv 'size>8m' abc sel lv 'size>=8m' abc vol1 sel lv 'size<8m' vol2 xyz orig snap sel lv 'size<=8m' vol2 xyz vol1 orig snap ########################### # PERCENT FIELD SELECTION # ########################### if aux target_at_least dm-snapshot 1 10 0; then # Test zero percent only if snapshot can be zero. # Before 1.10.0, the snap percent included metadata size. 
sel lv 'snap_percent=0' snap fi dd if=/dev/zero of="$DM_DEV_DIR/$vg3/snap" bs=1M count=1 conv=fdatasync sel lv 'snap_percent<50' snap sel lv 'snap_percent>50' # overflow snapshot -> invalidated, but still showing 100% not dd if=/dev/zero of="$DM_DEV_DIR/$vg3/snap" bs=1M count=4 conv=fdatasync sel lv 'snap_percent=100' snap # % char is accepted as suffix for percent values sel lv 'snap_percent=100%' snap # percent values over 100% are not accepted not sel lv 'snap_percent=101%' ######################### # REGEX FIELD SELECTION # ######################### sel lv 'lv_name=~"^vol[12]"' vol1 vol2 sel lv 'lv_name!~"^vol[12]"' abc xyz orig snap # check regex is accepted without quotes too sel lv 'lv_name=~^vol[12]' vol1 vol2 ########### # GENERIC # ########### # check prefix works for selection too sel lv 'lv_name="vol1"' vol1 sel lv 'name="vol1"' vol1 # check reserved values are accepted for certain fields as well as usual values sel vg 'vg_mda_copies=unmanaged' $vg2 $vg3 sel vg 'vg_mda_copies=2' $vg1 # also, we must match only vg1, not including vg2 and vg3 # when comparing ranges - unamanged is mapped onto 2^64 - 1 internally, # so we need to skip this internal value if it matches with selection criteria! sel vg 'vg_mda_copies>=2' $vg1 not sel vg 'vg_mda_copies=18446744073709551615' sel lv 'lv_read_ahead=auto' vol1 vol2 orig snap sel lv 'lv_read_ahead=256k' abc xyz sel lv 'lv_minor=-1' vol1 vol2 abc orig snap sel lv 'lv_minor=undefined' vol1 vol2 abc orig snap sel lv 'lv_minor=undef' vol1 vol2 abc orig snap sel lv 'lv_minor=unknown' vol1 vol2 abc orig snap sel lv 'lv_minor=254' xyz # also test synonym for string field type sel lv 'seg_monitor=undefined' vol1 vol2 abc abc orig snap xyz # if size unit not spefied, the 'm' (MiB) unit is used by default sel lv 'lv_size=8' vol1 # no need to use quotes for the whole selection string if it does not clash with shell sel lv name=vol1 vol1 ########################################## # FORMING MORE COMPLEX SELECTION CLAUSES # ########################################## # AND clause sel lv 'lv_tags=lv_tag1 && lv_size=4m' vol2 # OR clause sel lv 'lv_name=vol1 || lv_name=vol2' vol1 vol2 # grouping by using ( ) sel lv '(lv_name=vol1 || lv_name=vol2) || vg_tags=vg_tag1' vol1 vol2 orig snap xyz sel lv '(lv_name=vol1 && lv_size=100m) || vg_tags=vg_tag1' xyz orig snap sel lv '(lv_name=vol1 || lv_name=vol2) && vg_tags=vg_tag1' sel lv '(lv_name=vol1 || lv_name=vol2) && lv_size < 8m' vol2 sel lv '(lv_name=vol1 && lv_size=8m) && vg_tags=vg_tag2' vol1 # negation of clause grouped by ( ) sel lv '!(lv_name=vol1 || lv_name=vol2)' abc xyz orig snap vgremove -ff $vg1 $vg2 $vg3 LVM2.2.02.176/test/shell/process-each-duplicate-pvs.sh0000644000000000000120000002673513176752421021115 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. test_description='Test duplicate PVs' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 SKIP_WITH_CLVMD=1 . lib/inittest aux prepare_devs 6 16 # The LV-using-PV tests (DEV_USED_FOR_LV, where a PV is # preferred if an active LV is using it) depend on sysfs # info that is not available in RHEL5 kernels. 
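#
# The duplicates in this test are produced simply by cloning one PV onto
# another block device and rescanning; a sketch of the core pattern
# (device names are illustrative):
#
#   dd if="$dev1" of="$dev2" bs=1M iflag=direct oflag=direct,sync
#   pvscan --cache
#   pvs -a -o +uuid,duplicate     # the non-preferred copy is flagged "duplicate"
#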
aux driver_at_least 4 15 || skip aux lvmconf 'devices/allow_changes_with_duplicate_pvs = 0' pvcreate "$dev1" pvcreate "$dev2" vgcreate $vg1 "$dev1" vgcreate $vg2 "$dev2" pvresize --setphysicalvolumesize 8m -y "$dev2" lvcreate -an -l1 -n $lv1 $vg1 # Both devs are shown and used by the VG pvs 2>&1 | tee out grep "$dev1" out grep "$dev2" out grep "$dev1" out | grep $vg1 grep "$dev2" out | grep $vg2 check pv_field "$dev1" pv_allocatable "allocatable" check pv_field "$dev2" pv_allocatable "allocatable" not grep WARNING out UUID1=$(get pv_field "$dev1" uuid) UUID2=$(get pv_field "$dev2" uuid) SIZE1=$(get pv_field "$dev1" dev_size) SIZE2=$(get pv_field "$dev2" dev_size) MINOR1=$(get pv_field "$dev1" minor) MINOR2=$(get pv_field "$dev2" minor) check pv_field "$dev1" dev_size "$SIZE1" check pv_field "$dev2" dev_size "$SIZE2" # Copy dev1 over dev2. dd if="$dev1" of="$dev2" bs=1M iflag=direct oflag=direct,sync pvscan --cache # The single preferred dev is shown from 'pvs'. pvs -o+uuid,duplicate 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true # Don't know yet if dev1 or dev2 is preferred, so count just one is. test "$(grep -c "$vg1" main)" -eq 1 test "$(grep -c "$UUID1" main)" -eq 1 not grep duplicate main not grep $vg2 main not grep $UUID2 main grep "was already found on" warn grep "prefers device" warn # Find which is the preferred dev and which is the duplicate. PV=$(pvs --noheadings -o name -S uuid="$UUID1" | xargs) if [ "$PV" = "$dev1" ]; then DUP=$dev2 else DUP=$dev1 fi echo "PV is $PV" echo "DUP is $DUP" grep "$PV" main not grep "$DUP" main # Repeat above checking preferred/dup in output pvs 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true grep "$PV" main not grep "$DUP" main # The duplicate dev is included in 'pvs -a' pvs -a -o+uuid,duplicate 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true grep "$dev1" main grep "$dev2" main grep $PV main grep $DUP main test "$(grep -c duplicate main)" -eq 1 grep $DUP main | grep duplicate not grep $vg2 main not grep $UUID2 main grep "$dev1" main | grep $vg1 grep "$dev2" main | grep $vg1 grep "$dev1" main | grep $UUID1 grep "$dev2" main | grep $UUID1 grep "was already found on" warn grep "prefers device" warn # # Passing a dev name arg always includes that dev. # pvs -o+uuid "$dev1" 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true grep "$dev1" main not grep "$dev2" main grep "$UUID1" main grep "$vg1" main grep "was already found on" warn grep "prefers device" warn pvs -o+uuid "$dev2" 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true grep "$dev2" main not grep "$dev1" main grep "$UUID1" main grep "$vg1" main grep "was already found on" warn grep "prefers device" warn pvs -o+uuid,duplicate "$dev1" "$dev2" 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true grep "$dev1" main grep "$dev2" main grep "$dev1" main | grep $vg1 grep "$dev2" main | grep $vg1 grep "$dev1" main | grep $UUID1 grep "$dev2" main | grep $UUID1 test "$(grep -c duplicate main)" -eq 1 grep $DUP main | grep duplicate # # Test specific report fields for each dev. 
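#
# Which copy the tools prefer is looked up above with a --select query on
# the shared UUID; the same query is useful interactively (the UUID value
# is a placeholder):
#
#   pvs --noheadings -o pv_name -S uuid=$UUID1
#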
# pvs --noheadings -o vg_name,vg_uuid "$dev1" 2>&1 | tee out1 pvs --noheadings -o vg_name,vg_uuid "$dev2" 2>&1 | tee out2 grep -v WARNING out1 > main1 || true grep -v WARNING out2 > main2 || true diff main1 main2 rm out1 out2 main1 main2 || true check pv_field "$dev1" pv_in_use "used" check pv_field "$dev2" pv_in_use "used" check pv_field "$PV" pv_allocatable "allocatable" check pv_field "$DUP" pv_allocatable "" check pv_field "$PV" pv_duplicate "" check pv_field "$DUP" pv_duplicate "duplicate" pvs --noheadings -o name,pv_allocatable "$dev1" "$dev2" 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true grep "$PV" main grep "$DUP" main grep "$dev1" main grep "$dev2" main test "$(grep -c allocatable main)" -eq 1 pvs --noheadings -o name,pv_duplicate "$dev1" "$dev2" 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true grep "$PV" main grep "$DUP" main grep "$dev1" main grep "$dev2" main test "$(grep -c duplicate main)" -eq 1 # # A filter can be used to show only one. # pvs --config "devices { filter=[ \"a|$dev2|\", \"r|.*|\" ] }" 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true not grep "$dev1" main grep "$dev2" main not grep "was already found on" warn not grep "prefers device" warn pvs --config "devices { filter=[ \"a|$dev1|\", \"r|.*|\"] }" 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true grep "$dev1" main not grep "$dev2" main not grep "was already found on" warn not grep "prefers device" warn # PV size and minor is still reported correctly for each. check pv_field "$dev1" dev_size "$SIZE1" check pv_field "$dev2" dev_size "$SIZE2" check pv_field "$dev1" minor "$MINOR1" check pv_field "$dev2" minor "$MINOR2" # With allow_changes_with_duplicate_pvs=0, a VG with duplicate devs # cannot be modified or activated. not lvcreate -an -l1 -n $lv2 $vg1 not lvremove $vg1/$lv1 not lvchange -ay $vg1/$lv1 not vgremove $vg1 # With allow_changes_with_duplicate_pvs=1, changes above are permitted. 
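#
# The permissive behaviour tested next corresponds to this lvm.conf
# fragment (the aux lvmconf helper below applies the same setting):
#
#   devices {
#       allow_changes_with_duplicate_pvs = 1
#   }
#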
aux lvmconf 'devices/allow_changes_with_duplicate_pvs = 1' lvcreate -an -l1 -n $lv2 $vg1 lvremove $vg1/$lv1 lvchange -ay $vg1/$lv2 lvchange -an $vg1/$lv2 lvremove $vg1/$lv2 vgremove -f $vg1 pvremove -ff -y "$dev1" pvremove -ff -y "$dev2" # dev3 and dev4 are copies, orphans pvcreate "$dev3" pvcreate "$dev4" pvresize --setphysicalvolumesize 8m -y "$dev4" UUID3=$(get pv_field "$dev3" uuid) UUID4=$(get pv_field "$dev4" uuid) SIZE3=$(get pv_field "$dev3" dev_size) SIZE4=$(get pv_field "$dev4" dev_size) check pv_field "$dev3" dev_size "$SIZE3" check pv_field "$dev4" dev_size "$SIZE4" pvs 2>&1 | tee out grep "$dev3" out grep "$dev4" out dd if="$dev3" of="$dev4" bs=1M iflag=direct oflag=direct,sync pvscan --cache # One appears with 'pvs' pvs -o+uuid 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true test "$(grep -c "$UUID3" main)" -eq 1 not grep "$UUID4" main grep "was already found on" warn grep "prefers device" warn # Both appear with 'pvs -a' pvs -a -o+uuid 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true test "$(grep -c "$UUID3" main)" -eq 2 grep "$dev3" main grep "$dev4" main grep $UUID3 main not grep $UUID4 main grep "was already found on" warn grep "prefers device" warn # Show each dev individually and both together pvs -o+uuid "$dev3" 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true grep "$dev3" main not grep "$dev4" main grep "was already found on" warn grep "prefers device" warn pvs -o+uuid "$dev4" 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true not grep "$dev3" main grep "$dev4" main grep "was already found on" warn grep "prefers device" warn pvs -o+uuid "$dev3" "$dev4" 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true grep "$dev3" main grep "$dev4" main grep "was already found on" warn grep "prefers device" warn # Same sizes shown. check pv_field "$dev3" dev_size "$SIZE3" check pv_field "$dev4" dev_size "$SIZE4" # Verify that devs being used by an active LV are # preferred over duplicates that are not used by an LV. 
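#
# A quick way to see which physical devices an active LV really sits on
# while the duplicates exist (VG/LV names are illustrative, and the
# device-mapper name is the usual vg-lv concatenation):
#
#   lvs -a -o lv_name,devices $vg2
#   dmsetup deps -o devname ${vg2}-${lv1}
#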
dd if=/dev/zero of="$dev3" bs=1M oflag=direct,sync || true dd if=/dev/zero of="$dev4" bs=1M oflag=direct,sync || true pvscan --cache vgcreate "$vg2" "$dev3" "$dev4" lvcreate -l1 -n $lv1 $vg2 "$dev3" lvcreate -l1 -n $lv2 $vg2 "$dev4" dd if="$dev3" of="$dev5" bs=1M iflag=direct oflag=direct,sync dd if="$dev4" of="$dev6" bs=1M iflag=direct oflag=direct,sync pvscan --cache pvs -o+uuid,duplicate 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true grep "$dev3" main grep "$dev4" main not grep duplicate main check pv_field "$dev3" pv_duplicate "" check pv_field "$dev4" pv_duplicate "" check pv_field "$dev5" pv_duplicate "duplicate" check pv_field "$dev6" pv_duplicate "duplicate" grep "prefers device $dev3" warn grep "prefers device $dev4" warn not grep "prefers device $dev5" warn not grep "prefers device $dev6" warn pvs -a -o+uuid,duplicate 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true test "$(grep -c duplicate main)" -eq 2 grep "$dev3" main grep "$dev4" main grep "$dev5" main grep "$dev6" main grep "prefers device $dev3" warn grep "prefers device $dev4" warn not grep "prefers device $dev5" warn not grep "prefers device $dev6" warn pvs -o+uuid,duplicate "$dev3" "$dev4" "$dev5" "$dev6" 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true test "$(grep -c duplicate main)" -eq 2 grep "$dev3" main grep "$dev4" main grep "$dev5" main grep "$dev6" main grep "prefers device $dev3" warn grep "prefers device $dev4" warn not grep "prefers device $dev5" warn not grep "prefers device $dev6" warn dd if=/dev/zero of="$dev5" bs=1M oflag=direct,sync || true dd if=/dev/zero of="$dev6" bs=1M oflag=direct,sync || true pvscan --cache lvremove -y $vg2/$lv1 lvremove -y $vg2/$lv2 vgremove $vg2 pvremove -ff -y "$dev3" pvremove -ff -y "$dev4" dd if=/dev/zero of="$dev3" bs=1M oflag=direct,sync || true dd if=/dev/zero of="$dev4" bs=1M oflag=direct,sync || true pvscan --cache # Reverse devs in the previous in case dev3/dev4 would be # preferred even without an active LV using them. vgcreate $vg2 "$dev5" "$dev6" lvcreate -l1 -n $lv1 $vg2 "$dev5" lvcreate -l1 -n $lv2 $vg2 "$dev6" dd if="$dev5" of="$dev3" bs=1M iflag=direct oflag=direct,sync dd if="$dev6" of="$dev4" bs=1M iflag=direct oflag=direct,sync pvscan --cache pvs -o+uuid,duplicate 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true grep "$dev5" main grep "$dev6" main not grep duplicate main check pv_field "$dev5" pv_duplicate "" check pv_field "$dev6" pv_duplicate "" check pv_field "$dev3" pv_duplicate "duplicate" check pv_field "$dev4" pv_duplicate "duplicate" pvs -a -o+uuid,duplicate 2>&1 | tee out rm warn main || true grep WARNING out > warn || true grep -v WARNING out > main || true test "$(grep -c duplicate main)" -eq 2 grep "$dev3" main grep "$dev4" main grep "$dev5" main grep "$dev6" main grep "prefers device $dev5" warn grep "prefers device $dev6" warn not grep "prefers device $dev3" warn not grep "prefers device $dev4" warn dd if=/dev/zero of="$dev3" bs=1M oflag=direct,sync || true dd if=/dev/zero of="$dev4" bs=1M oflag=direct,sync || true pvscan --cache lvremove -y $vg2/$lv1 lvremove -y $vg2/$lv2 vgremove $vg2 LVM2.2.02.176/test/shell/dmeventd-restart.sh0000644000000000000120000000277513176752421017251 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_dmeventd aux prepare_vg 5 lvcreate -aey --type mirror -m 3 --nosync --ignoremonitoring -l1 -n 4way $vg lvchange --monitor y $vg/4way lvcreate -aey --type mirror -m 2 --nosync --ignoremonitoring -l1 -n 3way $vg lvchange --monitor y $vg/3way dmeventd -R -f & echo $! >LOCAL_DMEVENTD sleep 2 # wait a bit, so we talk to the new dmeventd later lvchange --monitor y --verbose $vg/3way 2>&1 | tee lvchange.out grep 'already monitored' lvchange.out lvchange --monitor y --verbose $vg/4way 2>&1 | tee lvchange.out grep 'already monitored' lvchange.out # now try what happens if no dmeventd is running kill -9 "$(< LOCAL_DMEVENTD)" rm LOCAL_DMEVENTD dmeventd -R -f & echo $! >LOCAL_DMEVENTD # wait longer as tries to communicate with killed daemon sleep 7 # now dmeventd should not be running not pgrep dmeventd rm LOCAL_DMEVENTD lvchange --monitor y --verbose $vg/3way 2>&1 | tee lvchange.out pgrep -o dmeventd >LOCAL_DMEVENTD not grep 'already monitored' lvchange.out vgremove -ff $vg LVM2.2.02.176/test/shell/error-usage.sh0000644000000000000120000000166213176752421016206 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Basic usage of zero target SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 1 lvcreate --type error -L1 -n $lv1 $vg lvextend -L+1 $vg/$lv1 # has to match check lv_field $vg/$lv1 lv_modules "error" check lv_field $vg/$lv1 segtype "error" check lv_field $vg/$lv1 seg_count "1" check lv_field $vg/$lv1 seg_size_pe "4" # 4 * 512 => 2M # FIXME should we print info we are ignoring stripping? lvextend -L+1 -I64 -i2 $vg/$lv1 vgremove -ff $vg LVM2.2.02.176/test/shell/format-lvm1.sh0000644000000000000120000000206413176752421016115 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Test lvm1 format' SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux prepare_devs 1 if test -n "$LVM_TEST_LVM1" ; then pvcreate -M1 "$dev1" vgcreate -M1 $vg "$dev1" check vg_field $vg fmt "lvm1" fi # TODO: if we decide to make using lvm1 with lvmetad an error, # then if lvmetad is being used, then verify: # not pvcreate -M1 "$dev1" # not vgcreate -M1 $vg "$dev1" # # TODO: if we decide to allow using lvm1 with lvmetad, but disable lvmetad # when it happens, then verify: # pvcreate -M1 "$dev1" | tee err # grep "disabled" err # vgcreate -M1 $vg "$dev1" | tee err # grep "disabled" err LVM2.2.02.176/test/shell/lvcreate-thin-snap.sh0000644000000000000120000000404213176752421017452 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest check_lv_field_modules_() { mod=$1 shift for d in "$@"; do check lv_field "$vg/$d" modules "$mod" done } # # Main # aux have_thin 1 0 0 || skip which mkfs.ext4 || skip aux prepare_pvs 2 64 get_devs vgcreate -s 64K "$vg" "${DEVICES[@]}" lvcreate -L10M -V10M -T $vg/pool --name $lv1 mkfs.ext4 "$DM_DEV_DIR/$vg/$lv1" # create read-only thin snapshot of thin LV lvcreate -K -s $vg/$lv1 -pr --name snap # check snapshot filesystem was properly frozen before snapping fsck -n "$DM_DEV_DIR/$vg/snap" lvcreate -K -s $vg/$lv1 --name $lv2 lvcreate -K -s $vg/$lv1 --name $vg/$lv3 # old-snapshot without known size is invalid invalid lvcreate --type snapshot $vg/$lv1 invalid lvcreate --type snapshot $vg/$lv1 --name $lv4 invalid lvcreate --type snapshot $vg/$lv1 --name $vg/$lv5 # some other ways how to take a thin snapshot lvcreate -T $vg/$lv1 lvcreate --thin $vg/$lv1 --name $lv4 lvcreate --type thin $vg/$lv1 --name $vg/$lv5 # virtual size needs thin pool fail lvcreate --type thin $vg/$lv1 -V20 # create old-style snapshot lvcreate -s -L10M --name oldsnap1 $vg/$lv2 lvcreate -s -L10M --name oldsnap2 $vg/$lv2 # thin snap of snap of snap... lvcreate -K -s --name sn1 $vg/$lv2 lvcreate -K -s --name sn2 $vg/sn1 lvcreate -K -s --name sn3 $vg/sn2 lvcreate -K -s --name sn4 $vg/sn3 lvremove -ff $vg lvcreate -L10M --zero n -T $vg/pool -V10M --name $lv1 mkfs.ext4 "$DM_DEV_DIR/$vg/$lv1" lvcreate -K -s $vg/$lv1 --name snap fsck -n "$DM_DEV_DIR/$vg/snap" vgremove -ff $vg LVM2.2.02.176/test/shell/pv-min-size.sh0000644000000000000120000000173313176752421016130 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2011 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest # use small default size - 512KB aux lvmconf 'devices/pv_min_size = 512' aux prepare_pvs 1 8 check pv_field "$dev1" pv_name "$dev1" # increase min size beyond created PV size 10MB aux lvmconf 'devices/pv_min_size = 10240' # and test device is not visible not check pv_field "$dev1" pv_name "$dev1" # set an erroneously low value aux lvmconf 'devices/pv_min_size = -100' # check the incorrect value is printed pvs "$dev1" 2>&1 | grep -- -100 LVM2.2.02.176/test/shell/zz-lvmlockd-sanlock-remove.sh0000644000000000000120000000234313176752421021147 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Remove the sanlock test setup' . lib/inittest [ -z "$LVM_TEST_LOCK_TYPE_SANLOCK" ] && skip; # FIXME: get this to run after a test fails # Removes the VG with the global lock that was created by # the corresponding create script. vgremove --config 'devices { global_filter=["a|GL_DEV|", "r|.*|"] filter=["a|GL_DEV|", "r|.*|"]}' glvg # FIXME: collect debug logs (only if a test failed?) # lvmlockctl -d > lvmlockd-debug.txt # sanlock log_dump > sanlock-debug.txt lvmlockctl --stop-lockspaces sleep 1 killall lvmlockd sleep 1 killall lvmlockd || true sleep 1 killall sanlock sleep 1 killall -9 lvmlockd || true killall -9 sanlock || true # FIXME: dmsetup remove LVMTEST*-lvmlock dmsetup remove glvg-lvmlock || true dmsetup remove GL_DEV || true LVM2.2.02.176/test/shell/snapshot-lvm1.sh0000644000000000000120000000174213176752421016466 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # regression test for lvmetad reporting error: # Internal error: LV snap_with_lvm1_meta (00000000000000000000000000000001) missing from preload metadata SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 2 get_devs vgcreate --metadatatype 1 "$vg" "${DEVICES[@]}" # Make origin volume lvcreate -ae -l5 $vg -n origin # Create a snap of origin lvcreate -s $vg/origin -n snap_with_lvm1_meta -l4 # Remove volume snapper/snap_with_lvm1_meta lvremove -f $vg/snap_with_lvm1_meta vgremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-repair-raid.sh0000644000000000000120000001142313176752421020166 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013-2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 .
lib/inittest aux have_raid 1 3 0 || skip aux raid456_replace_works || skip aux lvmconf 'allocation/maximise_cling = 0' \ 'allocation/mirror_logs_require_separate_pvs = 1' \ 'activation/raid_fault_policy = "allocate"' aux prepare_vg 8 80 get_devs function delay { for d in "${DEVICES[@]}"; do aux delay_dev "$d" 0 $1 "$(get first_extent_sector "$d")" done } # It's possible small raid arrays do have problems with reporting in-sync. # So try bigger size RAID_SIZE=32 # Fast sync and repair afterwards delay 0 # RAID1 transient failure check lvcreate --type raid1 -m 1 -L $RAID_SIZE -n $lv1 $vg "$dev1" "$dev2" aux wait_for_sync $vg $lv1 # enforce replacing live rimage leg with error target dmsetup remove -f $vg-${lv1}_rimage_1 || true # let it notice there is problem echo a > "$DM_DEV_DIR/$vg/$lv1" check grep_dmsetup status $vg-$lv1 AD lvconvert -y --repair $vg/$lv1 "$dev3" lvs -a -o+devices $vg aux wait_for_sync $vg $lv1 # Raid should have fixed device check grep_dmsetup status $vg-$lv1 AA check lv_on $vg ${lv1}_rimage_1 "$dev3" lvremove -ff $vg/$lv1 # RAID1 dual-leg single replace after initial sync lvcreate --type raid1 -m 1 -L $RAID_SIZE -n $lv1 $vg "$dev1" "$dev2" aux wait_for_sync $vg $lv1 aux disable_dev "$dev2" lvconvert -y --repair $vg/$lv1 vgreduce --removemissing $vg aux enable_dev "$dev2" vgextend $vg "$dev2" lvremove -ff $vg/$lv1 # Delayed sync to allow for repair during rebuild delay 50 # RAID1 triple-leg single replace during initial sync lvcreate --type raid1 -m 2 -L $RAID_SIZE -n $lv1 $vg "$dev1" "$dev2" "$dev3" aux disable_dev "$dev2" "$dev3" # FIXME 2016/11/04 AGK: Disabled next line as it fails to guarantee it is not already in sync. #not lvconvert -y --repair $vg/$lv1 aux wait_for_sync $vg $lv1 lvconvert -y --repair $vg/$lv1 vgreduce --removemissing $vg aux enable_dev "$dev2" "$dev3" vgextend $vg "$dev2" "$dev3" lvremove -ff $vg/$lv1 # Larger RAID size possible for striped RAID RAID_SIZE=64 # Fast sync and repair afterwards delay 0 # RAID5 single replace after initial sync lvcreate --type raid5 -i 2 -L $RAID_SIZE -n $lv1 $vg "$dev1" "$dev2" "$dev3" aux wait_for_sync $vg $lv1 aux disable_dev "$dev3" vgreduce --removemissing -f $vg lvconvert -y --repair $vg/$lv1 aux enable_dev "$dev3" pvcreate -yff "$dev3" vgextend $vg "$dev3" lvremove -ff $vg/$lv1 # Delayed sync to allow for repair during rebuild delay 60 # RAID5 single replace during initial sync lvcreate --type raid5 -i 2 -L $RAID_SIZE -n $lv1 $vg "$dev1" "$dev2" "$dev3" aux disable_dev "$dev3" # FIXME: there is quite big sleep on several 'status' read retries # so over 3sec - it may actually finish full sync # Use 'should' for this test result. 
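#
# ('should' runs its command like 'not' but only reports a warning on a
# mismatch, because the outcome here is timing dependent, as described
# above.)  When the repair is expected to succeed, a replacement PV can
# be named explicitly, as in the raid1 case earlier in this test:
#
#   lvconvert -y --repair $vg/$lv1 "$dev3"
#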
should not lvconvert -y --repair $vg/$lv1 aux wait_for_sync $vg $lv1 lvconvert -y --repair $vg/$lv1 vgreduce --removemissing $vg aux enable_dev "$dev3" vgextend $vg "$dev3" lvremove -ff $vg/$lv1 # Fast sync and repair afterwards delay 0 # RAID6 double replace after initial sync lvcreate --type raid6 -i 3 -L $RAID_SIZE -n $lv1 $vg \ "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" aux wait_for_sync $vg $lv1 aux disable_dev "$dev4" "$dev5" lvconvert -y --repair $vg/$lv1 vgreduce --removemissing $vg aux enable_dev "$dev4" "$dev5" vgextend $vg "$dev4" "$dev5" lvremove -ff $vg/$lv1 # Delayed sync to allow for repair during rebuild delay 50 # RAID6 single replace after initial sync lvcreate --type raid6 -i 3 -L $RAID_SIZE -n $lv1 $vg \ "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" aux disable_dev "$dev4" not lvconvert -y --repair $vg/$lv1 delay 0 # Fast sync and repair afterwards aux disable_dev "$dev4" # Need to disable again after changing delay aux wait_for_sync $vg $lv1 lvconvert -y --repair $vg/$lv1 vgreduce --removemissing $vg aux enable_dev "$dev4" vgextend $vg "$dev4" lvremove -ff $vg/$lv1 # Delayed sync to allow for repair during rebuild delay 50 # RAID10 single replace after initial sync lvcreate --type raid10 -m 1 -i 2 -L $RAID_SIZE -n $lv1 $vg \ "$dev1" "$dev2" "$dev3" "$dev4" aux disable_dev "$dev4" not lvconvert -y --repair $vg/$lv1 delay 0 # Fast sync and repair afterwards aux disable_dev "$dev4" # Need to disable again after changing delay aux disable_dev "$dev1" aux wait_for_sync $vg $lv1 lvconvert -y --repair $vg/$lv1 vgreduce --removemissing $vg aux enable_dev "$dev4" vgextend $vg "$dev4" lvremove -ff $vg/$lv1 vgremove -ff $vg LVM2.2.02.176/test/shell/lvchange-mirror.sh0000644000000000000120000000215713176752421017052 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # FIXME RESYNC doesn't work in cluster with exclusive activation # seriously broken! SKIP_WITH_LVMLOCKD=1 SKIP_WITH_CLVMD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_dmeventd aux prepare_vg 3 # force resync 2-way active mirror lvcreate -aey -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev3":0-1 check mirror $vg $lv1 "$dev3" lvchange -y --resync $vg/$lv1 check mirror $vg $lv1 "$dev3" lvremove -ff $vg # force resync 2-way inactive mirror lvcreate -aey -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" "$dev3":0-1 lvchange -an $vg/$lv1 check mirror $vg $lv1 "$dev3" lvchange --resync $vg/$lv1 check mirror $vg $lv1 "$dev3" vgremove -ff $vg LVM2.2.02.176/test/shell/lvchange-cache-mode.sh0000644000000000000120000000541613176752421017526 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Exercise changing of caching mode on both cache pool and cached LV. 
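# Editorial sketch (hypothetical, standalone example using made-up vg/LV
# names; not part of the original test): switching and verifying a dm-cache
# mode on a cached LV generally looks like:
#   lvchange --cachemode writethrough vg/cachedlv
#   lvs -o name,cache_mode vg/cachedlv     # expect "writethrough"
#   dmsetup status vg-cachedlv             # the kernel status line shows the mode too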
SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux have_cache 1 5 0 || skip aux prepare_vg 2 lvcreate --type cache-pool -L18 -n cpool $vg "$dev1" lvcreate -H -L14 -n $lv1 --cachemode writeback --cachesettings migration_threshold=204800 --cachepool $vg/cpool $vg "$dev2" #cat "$DM_DEV_DIR/$vg/$lv1" >/dev/null #aux delay_dev "$dev2" 300 1000 $(get first_extent_sector "$dev2"): #dmsetup status $vg-$lv1 #dmsetup table $vg-$lv1 for i in $(seq 1 10) ; do echo 3 >/proc/sys/vm/drop_caches dd if=/dev/zero of="$DM_DEV_DIR/$vg/$lv1" bs=64K count=20 conv=fdatasync || true echo 3 >/proc/sys/vm/drop_caches dd if="$DM_DEV_DIR/$vg/$lv1" of=/dev/null bs=64K count=20 || true done lvs -o+cache_dirty_blocks,cache_read_hits,cache_read_misses,cache_write_hits,cache_write_misses $vg/$lv1 # # Drop later, code loading dm tables directly without lvm # RHBZ 1337588 # #dmsetup table #echo "STATUS before cleaner" #dmsetup status #dmsetup load --table "0 28672 cache 253:4 253:3 253:5 128 1 writethrough cleaner 0" $vg-$lv1 #dmsetup resume $vg-$lv1 #sleep 1 #dmsetup table #echo "STATUS after cleaner 1sec" #dmsetup status --noflush #dmsetup suspend --noflush $vg-$lv1 #dmsetup resume $vg-$lv1 #dmsetup load --table "0 28672 cache 253:4 253:3 253:5 128 1 passthrough smq 2 migration_threshold 204800" $vg-$lv1 #dmsetup status $vg-$lv1 #dmsetup load --table "0 28672 cache 253:4 253:3 253:5 128 1 writethrough smq 2 migration_threshold 204800" $vg-$lv1 #dmsetup resume $vg-$lv1 #dmsetup status $vg-$lv1 #dmsetup table $vg-$lv1 #dmsetup ls --tree #exit check lv_field $vg/$lv1 cache_mode "writeback" lvchange --cachemode passthrough $vg/$lv1 check lv_field $vg/$lv1 cache_mode "passthrough" lvchange --cachemode writethrough $vg/$lv1 check lv_field $vg/$lv1 cache_mode "writethrough" lvchange --cachemode writeback $vg/$lv1 check lv_field $vg/$lv1 cache_mode "writeback" lvconvert --splitcache $vg/$lv1 lvs -a $vg check lv_field $vg/cpool cache_mode "writeback" lvchange --cachemode passthrough $vg/cpool check lv_field $vg/cpool cache_mode "passthrough" lvchange --cachemode writethrough $vg/cpool check lv_field $vg/cpool cache_mode "writethrough" lvchange --cachemode writeback $vg/cpool check lv_field $vg/cpool cache_mode "writeback" lvs -a $vg vgremove -f $vg LVM2.2.02.176/test/shell/lvm-init.sh0000644000000000000120000000126213176752421015506 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # tests lvm initialization, and especially negative tests of error paths # SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 5 # invalid units not pvs --config 'global { units = "<" }' LVM2.2.02.176/test/shell/lvextend-thin-data-dmeventd.sh0000644000000000000120000000335013176752421021253 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

# Test autoextension of thin data volume

SKIP_WITH_LVMLOCKD=1
SKIP_WITH_LVMPOLLD=1

export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false}

. lib/inittest

# As we check for 'instant' reaction,
# retry only a few times
test_equal_() {
	for i in $(seq 1 4) ; do
		test "$(get lv_field $vg/pool data_percent)" = "$1" || return
		sleep 1
	done
}

aux have_thin 1 10 0 || skip

aux prepare_dmeventd

aux lvmconf "activation/thin_pool_autoextend_percent = 10" \
	    "activation/thin_pool_autoextend_threshold = 75"

aux prepare_pvs 3 256
get_devs

vgcreate -s 256K "$vg" "${DEVICES[@]}"

lvcreate -L1M -c 64k -T $vg/pool
lvcreate -V1M $vg/pool -n $lv1

# Fill exactly 75%
dd if=/dev/zero of="$DM_DEV_DIR/mapper/$vg-$lv1" bs=786432c count=1 conv=fdatasync

# when everything calculates correctly, the thin-pool should be exactly 75% full now
# and the size should not have changed
pre="75.00"
test_equal_ $pre || die "Data percentage has changed!"

# Now trigger allocation of 1 extra pool chunk
dd if=/dev/zero of="$DM_DEV_DIR/mapper/$vg-$lv1" bs=1c count=1 seek=786433 conv=fdatasync

lvs -a -o+chunksize $vg
dmsetup table
dmsetup status

# If the watermark works well, dmeventd should have already resized the data LV
test_equal_ $pre && die "Data percentage has NOT changed!"

vgremove -f $vg

LVM2.2.02.176/test/shell/name-mangling.sh0000644000000000000120000001747313176752421016474 0ustar rootwheel
#!/usr/bin/env bash
# Copyright (C) 2012 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

# This test does not use any lvm command,
# so skip the duplicate CLVMD and lvmetad tests
SKIP_WITH_LVMLOCKD=1
SKIP_WITH_CLVMD=1
SKIP_WITH_LVMETAD=1
SKIP_WITH_LVMPOLLD=1

.
lib/inittest CHARACTER_WHITELIST="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789#+-.:=@_" FAIL_MIXED_STR="contains mixed mangled and unmangled characters" FAIL_MULTI_STR="seems to be mangled more than once" FAIL_BLACK_STR="should be mangled but it contains blacklisted characters" CORRECT_FORM_STR="already in correct form" RENAMING_STR="renaming to" function create_dm_dev() { local mode=$1 local name=$2; if [ $mode = "none" ]; then # there's no mangling done - we must use --verifyudev here in # case we're testing with udev so we have the nodes in place, # udev would not create them - it can't handle unmangled names verify_udev="--verifyudev" else verify_udev="" fi aux dmsetup create "${PREFIX}$name" $verify_udev --manglename $mode --table "0 1 zero" } function remove_dm_dev() { local mode=$1 local name=$2 if [ $mode = "none" ]; then verify_udev="--verifyudev" else verify_udev="" fi aux dmsetup remove $verify_udev --manglename $mode "${PREFIX}$name" } function check_create_and_remove() { local mode=$1 local input_name=$2 local dm_name=$3 local r=0 if [ $mode = "none" ]; then verify_udev="--verifyudev" else verify_udev="" fi aux dmsetup create "${PREFIX}$input_name" $verify_udev --manglename $mode --table "0 1 zero" 2>err && \ test -b "$DM_DEV_DIR/mapper/${PREFIX}$dm_name" && \ aux dmsetup remove "${PREFIX}$input_name" $verify_udev --manglename $mode || r=1 if [ "$dm_name" = "FAIL_MIXED" ]; then r=0 grep "$FAIL_MIXED_STR" err || r=1 elif [ "$dm_name" = "FAIL_MULTI" ]; then r=0 grep "$FAIL_MULTI_STR" err || r=1 elif [ "$dm_name" = "FAIL_BLACK" ]; then r=0 grep "$FAIL_BLACK_STR" err || r=1 fi return $r } function check_dm_field() { local mode=$1 local dm_name=$2 local field=$3 local expected=$4 value=$(dmsetup info --rows --noheadings --manglename $mode -c -o $field "${DM_DEV_DIR}/mapper/${PREFIX}$dm_name" 2> err || true) if [ "$expected" = "FAIL_MIXED" ]; then grep "$FAIL_MIXED_STR" err elif [ "$expected" = "FAIL_MULTI" ]; then grep "$FAIL_MULTI_STR" err elif [ "$expected" = "FAIL_BLACK" ]; then grep "$FAIL_BLACK_STR" err else test "$value" = "${PREFIX}$expected" fi } function check_expected_names() { local mode=$1 local dm_name=$2 local r=0 create_dm_dev none "$dm_name" test -b "$DM_DEV_DIR/mapper/${PREFIX}$dm_name" && \ check_dm_field none "$dm_name" name "$dm_name" && \ check_dm_field $mode "$dm_name" name "$3" && \ check_dm_field $mode "$dm_name" mangled_name "$4" && \ check_dm_field $mode "$dm_name" unmangled_name "$5" || r=1 remove_dm_dev none "$dm_name" return $r } function check_mangle_cmd() { local mode=$1 local dm_name=$2 local expected=$3 local rename_expected=0 local r=0 create_dm_dev none "$dm_name" dmsetup mangle --manglename $mode --verifyudev "${PREFIX}$dm_name" 1>out 2>err || true; if [ "$expected" = "OK" ]; then grep "$CORRECT_FORM_STR" out || r=1 elif [ "$expected" = "FAIL_MIXED" ]; then grep "$FAIL_MIXED_STR" err || r=1 elif [ "$expected" = "FAIL_MULTI" ]; then grep "$FAIL_MULTI_STR" err || r=1 else rename_expected=1 if grep -F "$RENAMING_STR ${PREFIX}$expected" out; then # Check the old node is really renamed. test -b "$DM_DEV_DIR/mapper/${PREFIX}$dm_name" && r=1 # FIXME: when renaming to mode=none with udev, udev will # remove the old_node, but fails to properly rename # to new_node. The libdevmapper code tries to call # rename(old_node,new_node), but that won't do anything # since the old node is already removed by udev. 
# For example renaming 'a\x20b' to 'a b': # - udev removes 'a\x20b' # - udev creates 'a' and 'b' (since it considers the ' ' as a delimiter) # - libdevmapper checks udev has done the rename properly # - libdevmapper calls stat(new_node) and it does not see it # - libdevmapper calls rename(old_node,new_node) # - the rename is a NOP since the old_node does not exist anymore # # Remove this condition once the problem is fixed in libdevmapper. # if [ "$mode" != "none" ]; then test -b "$DM_DEV_DIR/mapper/${PREFIX}$expected" || r=1 fi else r=1 fi fi if [ "$r" = 0 ] && [ "$rename_expected" = 1 ]; then # successfuly renamed to expected name remove_dm_dev none "$expected" elif [ $r = 1 ]; then # failed to rename to expected or renamed when it should not - find the new name new_name=$(sed -e "s/.*: $RENAMING_STR //g" out) # try to remove any of the form - falling back to less probable error scenario remove_dm_dev none "$new_name" || \ remove_dm_dev none "$dm_name" || remove_dm_dev none "$expected" else # successfuly done nothing remove_dm_dev none "$dm_name" fi return $r } # check dmsetup can process path where the last component is not equal dm name (rhbz #797322) r=0 create_dm_dev auto "abc" ln -s "$DM_DEV_DIR/mapper/${PREFIX}abc" "$DM_DEV_DIR/${PREFIX}xyz" aux dmsetup status "$DM_DEV_DIR/${PREFIX}xyz" || r=1 rm -f "$DM_DEV_DIR/${PREFIX}xyz" remove_dm_dev auto "abc" if [ "$r" = 1 ]; then return "$r" fi ### ALL WHITELISTED CHARACTERS ### # none of these should be mangled in any mode name="$CHARACTER_WHITELIST" for mode in auto hex none; do check_expected_names $mode "$name" "$name" "$name" "$name" check_mangle_cmd $mode "$name" "OK" done #### NONE MANGLING MODE ### check_create_and_remove none 'a b' 'a b' check_create_and_remove none 'a\x20b' 'a\x20b' check_create_and_remove none 'a b\x20c' 'a b\x20c' check_create_and_remove none 'a\x5cx20b' 'a\x5cx20b' check_expected_names none 'a b' 'a b' 'a\x20b' 'a b' check_expected_names none 'a\x20b' 'a\x20b' 'a\x20b' 'a b' check_expected_names none 'a b\x20c' 'a b\x20c' 'FAIL_MIXED' 'a b c' check_expected_names none 'a\x5cx20b' 'a\x5cx20b' 'a\x5cx20b' 'a\x20b' check_mangle_cmd none 'a b' 'OK' check_mangle_cmd none 'a\x20b' 'a b' check_mangle_cmd none 'a b\x20c' 'a b c' check_mangle_cmd none 'a\x5cx20b' 'a\x20b' ### AUTO MANGLING MODE ### check_create_and_remove auto 'a b' 'a\x20b' check_create_and_remove auto 'a\x20b' 'a\x20b' check_create_and_remove auto 'a b\x20c' 'FAIL_MIXED' check_create_and_remove auto 'a\x5cx20b' 'FAIL_MULTI' check_expected_names auto 'a b' 'FAIL_BLACK' 'FAIL_BLACK' 'FAIL_BLACK' check_expected_names auto 'a\x20b' 'a b' 'a\x20b' 'a b' check_expected_names auto 'a b\x20c' 'FAIL_BLACK' 'FAIL_BLACK' 'FAIL_BLACK' check_expected_names auto 'a\x5cx20b' 'FAIL_MULTI' 'FAIL_MULTI' 'FAIL_MULTI' check_mangle_cmd auto 'a b' 'a\x20b' check_mangle_cmd auto 'a\x20b' 'OK' check_mangle_cmd auto 'a b\x20c' 'FAIL_MIXED' check_mangle_cmd auto 'a\x5cx20b' 'FAIL_MULTI' ### HEX MANGLING MODE ### check_create_and_remove hex 'a b' 'a\x20b' check_create_and_remove hex 'a\x20b' 'a\x5cx20b' check_create_and_remove hex 'a b\x20c' 'a\x20b\x5cx20c' check_create_and_remove hex 'a\x5cx20b' 'a\x5cx5cx20b' check_expected_names hex 'a b' 'FAIL_BLACK' 'FAIL_BLACK' 'FAIL_BLACK' check_expected_names hex 'a\x20b' 'a b' 'a\x20b' 'a b' check_expected_names hex 'a b\x20c' 'FAIL_BLACK' 'FAIL_BLACK' 'FAIL_BLACK' check_expected_names hex 'a\x5cx20b' 'a\x20b' 'a\x5cx20b' 'a\x20b' check_mangle_cmd hex 'a b' 'a\x20b' check_mangle_cmd hex 'a\x20b' 'OK' check_mangle_cmd hex 'a b\x20c' 
'FAIL_MIXED' check_mangle_cmd hex 'a\x5cx20b' 'OK' LVM2.2.02.176/test/shell/thin-overprovisioning.sh0000644000000000000120000000407013176752421020331 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test warns when thin pool is overprovisiong SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest aux have_thin 1 3 0 || skip # 2PVs by 32M aux prepare_vg 2 33 lvcreate -L32 -T $vg/pool # check there is link node for UNUSED thin-pool test -e "$DM_DEV_DIR/$vg/pool" # leave 12M free space lvcreate -an -n $lv1 -L16 $vg 2>&1 | tee out vgs $vg lvcreate -n thin1 -V30 $vg/pool 2>&1 | tee out not grep "WARNING: Sum" out # check again link node is now gone for a USED thin-pool test ! -e "$DM_DEV_DIR/$vg/pool" # Pool gets overprovisioned lvcreate -an -n thin2 -V4 $vg/pool 2>&1 | tee out grep "WARNING: Sum" out grep "amount of free space in volume group (12.00 MiB)" out # Eat all space in VG lvcreate -an -n $lv2 -L12 $vg 2>&1 | tee out grep "WARNING: Sum" out grep "no free space in volume group" out lvcreate -an -n thin3 -V1G $vg/pool 2>&1 | tee out grep "WARNING: Sum" out grep "the size of whole volume group" out lvremove -ff $vg/thin2 $vg/thin3 $vg/$lv2 # Create 2nd thin pool in a VG lvcreate -L4 -T $vg/pool2 lvcreate -V4 -n thin2 $vg/pool2 2>&1 | tee out not grep "WARNING: Sum" out lvcreate -an -V4 -n thin3 $vg/pool2 2>&1 | tee out grep "WARNING: Sum of all thin volume sizes (38.00 MiB)" out grep "free space in volume group (6.00 MiB)" out lvcreate -an -L6 -n $lv3 $vg 2>&1 | tee out grep "no free space in volume group" out lvremove -ff $vg/thin2 $vg/thin3 lvcreate -an -V4 -n thin2 $vg/pool2 2>&1 | tee out not grep "WARNING: Sum" out # Check if resize notices problem lvextend -L+8 $vg/thin2 vgs $vg vgremove -ff $vg LVM2.2.02.176/test/shell/vgremove-corrupt-vg.sh0000644000000000000120000000126213176752421017707 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 3 lvcreate -n blabla -L 1 $vg -an --zero n dd if=/dev/urandom bs=512 seek=2 count=32 of="$dev2" aux notify_lvmetad "$dev2" vgremove -f $vg LVM2.2.02.176/test/shell/profiles-cache.sh0000644000000000000120000001024513176752421016634 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Exercise obtaining cache parameter from various sources # Either commmand line or metadata profile or implicit default... SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux have_cache 1 8 0 || skip PDIR="$LVM_SYSTEM_DIR/profile" PFILE="cache-test" aux prepare_profiles cat < "$PDIR/${PFILE}.profile" allocation { cache_pool_chunk_size = 128 cache_mode = "writeback" cache_policy = "mq" cache_metadata_format = 1 cache_settings { smq { sequential_threshold = 300 random_threshold = 500 } mq { } mq { sequential_threshold = 100 random_threshold = 200 } } } EOF cat < "$PDIR/${PFILE}1.profile" allocation { cache_pool_chunk_size = 512 cache_mode = "passthrough" cache_policy = "smq" cache_metadata_format = 1 } EOF aux prepare_vg 2 1000000 # Check chunk_size is grabbed from configuration lvcreate -L1G --config 'allocation/cache_pool_chunk_size=512' --type cache-pool $vg/cpool check lv_field $vg/cpool chunksize "512.00k" # Check chunk_size can be overruled when caching LV. lvcreate -H --chunksize 128K -L10 --cachepool $vg/cpool -n $lv1 check lv_field $vg/$lv1 chunksize "128.00k" lvremove -f $vg # Check chunk_size is grabbed from metadata profile lvcreate -L1G --metadataprofile $PFILE --type cache-pool $vg/cpool #lvcreate -L1G --commandprofile $PFILE --type cache-pool $vg/cpool # profile name is stored with cache-pool check lv_field $vg/cpool profile "$PFILE" # cache chunk size is selected and stored on creation time check lv_field $vg/cpool chunksize "128.00k" # cache metadata format is not stored with cache-pool check lv_field $vg/cpool cachemetadataformat "" # cache mode is not stored with cache-pool check lv_field $vg/cpool cachemode "" # cache policy is not stored with cache-pool check lv_field $vg/cpool cachepolicy "" # cache settings are not stored with cache-pool check lv_field $vg/cpool cachesettings "" lvcreate -L10 -n $lv1 $vg lvconvert --metadataprofile "${PFILE}1" -y -H --cachepool $vg/cpool $vg/$lv1 # chunk size 128k is replace with 512k from PFILE1 check lv_field $vg/$lv1 chunksize "512.00k" # cachemode is from PFILE1 check lv_field $vg/$lv1 cachemode "passthrough" lvremove -f $vg lvcreate -L1G --metadataprofile "$PFILE" --type cache-pool $vg/cpool lvcreate -H -L10 -n $lv1 --cachepool $vg/cpool # profile name is stored with cache check lv_field $vg/$lv1 profile "$PFILE" # cache chunk size is selected and stored on creation time check lv_field $vg/$lv1 chunksize "128.00k" # cache metadata format is stored with cache check lv_field $vg/$lv1 cachemetadataformat "1" # cache mode is stored with cache check lv_field $vg/$lv1 cachemode "writeback" # cache policy is stored with cache check lv_field $vg/$lv1 cachepolicy "mq" # cache settings are stored with cache check lv_field $vg/$lv1 cachesettings "sequential_threshold=100,random_threshold=200" lvremove -f $vg ##### lvcreate -L1G --metadataprofile "$PFILE" --type cache-pool $vg/cpool lvcreate --cachesettings 'sequential_threshold=300' -H -L10 -n $lv1 --cachepool $vg/cpool check lv_field $vg/$lv1 profile "$PFILE" check lv_field $vg/$lv1 cachesettings "sequential_threshold=300" lvremove -f $vg ##### lvcreate -L1G --metadataprofile "$PFILE" --type cache-pool $vg/cpool lvcreate --chunksize 256 -H -L10 -n $lv1 --cachepool $vg/cpool check lv_field $vg/$lv1 cachemode "writeback" check lv_field $vg/$lv1 chunksize "256.00k" lvremove -f 
$vg ##### lvcreate -L1G --metadataprofile "$PFILE" --type cache-pool $vg/cpool lvcreate --metadataprofile "${PFILE}1" -H -L10 -n $lv1 --cachepool $vg/cpool check lv_field $vg/$lv1 chunksize "512.00k" check lv_field $vg/$lv1 cachemode "passthrough" lvremove -f $vg #lvs -a -o+chunksize,cachemode,cachemetadataformat,cachepolicy,cachesettings $vg vgremove -ff $vg LVM2.2.02.176/test/shell/lvmetad-lvscan-cache.sh0000644000000000000120000000160313176752421017727 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITHOUT_LVMETAD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 2 vgcreate $vg1 "$dev1" "$dev2" lvcreate -n testlv --type mirror -m 1 -l 1 $vg1 vgs | grep $vg1 lvscan --cache $vg1/testlv vgs | grep $vg1 aux disable_dev "$dev2" # pvscan --cache already ran for the disabled device above, this should be a # no-op (but should not segfault!) lvscan --cache $vg1/testlv vgremove -ff $vg1 LVM2.2.02.176/test/shell/vgsplit-stacked.sh0000644000000000000120000000153113176752421017052 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux extend_filter_LVMTEST aux prepare_pvs 3 vgcreate $vg1 "$dev1" "$dev2" lvcreate -n $lv1 -l 100%FREE $vg1 #top VG pvcreate "$DM_DEV_DIR/$vg1/$lv1" vgcreate $vg "$DM_DEV_DIR/$vg1/$lv1" "$dev3" vgchange -a n $vg $vg1 # this should fail but not segfault, RHBZ 481793. not vgsplit $vg $vg1 "$dev3" vgremove -ff $vg $vg1 LVM2.2.02.176/test/shell/lvconvert-repair-policy.sh0000644000000000000120000000606613176752421020555 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 . lib/inittest aux prepare_vg 4 aux lvmconf 'allocation/maximise_cling = 0' \ 'allocation/mirror_logs_require_separate_pvs = 1' # Clean-up and create a 2-way mirror, where the the # leg devices are always on $dev[12] and the log # is always on $dev3. 
($dev4 behaves as a spare) cleanup_() { vgreduce --removemissing $vg for d in "$@"; do aux enable_dev "$d"; done for d in "$@"; do vgextend $vg "$d"; done lvremove -ff $vg/mirror lvcreate -aey --type mirror -m 1 --ignoremonitoring -l 2 -n mirror $vg "$dev1" "$dev2" "$dev3:0" } repair_() { lvconvert --repair --use-policies --config "$1" $vg/mirror } lvcreate -aey --type mirror -m 1 --ignoremonitoring -l 2 -n mirror $vg "$dev1" "$dev2" "$dev3:0" lvchange -a n $vg/mirror # Fail a leg of a mirror. aux disable_dev "$dev1" lvchange --partial -aey $vg/mirror repair_ 'activation { mirror_image_fault_policy = "remove" }' check linear $vg mirror cleanup_ "$dev1" # Fail a leg of a mirror. # Expected result: Mirror (leg replaced, should retain log) aux disable_dev "$dev1" repair_ 'activation { mirror_image_fault_policy = "replace" mirror_log_fault_policy = "remove" }' check mirror $vg mirror check active $vg mirror_mlog cleanup_ "$dev1" # Fail a leg of a mirror. # Expected result: Mirror (leg replaced) aux disable_dev "$dev1" repair_ 'activation { mirror_image_fault_policy = "replace" }' check mirror $vg mirror check active $vg mirror_mlog cleanup_ "$dev1" # Fail a leg of a mirror (use old name for policy specification) # Expected result: Mirror (leg replaced) aux disable_dev "$dev1" repair_ 'activation { mirror_image_fault_policy = "replace" }' check mirror $vg mirror check active $vg mirror_mlog not pvdisplay $vg cleanup_ "$dev1" # Fail a leg of a mirror w/ no available spare # Expected result: linear # (or 2-way with leg/log overlap if alloc anywhere) aux disable_dev "$dev2" "$dev4" repair_ 'activation { mirror_image_fault_policy = "replace" }' check mirror $vg mirror check lv_not_exists $vg mirror_mlog cleanup_ "$dev2" "$dev4" # Fail the log device of a mirror w/ no available spare # Expected result: mirror w/ corelog aux disable_dev "$dev3" "$dev4" repair_ 'activation { mirror_image_fault_policy = "replace" }' $vg/mirror check mirror $vg mirror check lv_not_exists $vg mirror_mlog cleanup_ "$dev3" "$dev4" # Fail the log device with a remove policy # Expected result: mirror w/ corelog lvchange -aey $vg/mirror aux disable_dev "$dev3" "$dev4" repair_ 'activation { mirror_log_fault_policy = "remove" }' check mirror $vg mirror core check lv_not_exists $vg mirror_mlog cleanup_ "$dev3" "$dev4" vgremove -ff $vg LVM2.2.02.176/test/shell/lvmetad-sysinit.sh0000644000000000000120000000475713176752421017117 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITHOUT_LVMETAD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 2 # # lvchange/vgchange -aay --sysinit should not activate LVs # if lvmetad is configured and running. 
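# (Editorial note, hedged: the assumed rationale is that while lvmetad is
# running, boot-time activation is expected to come from event-driven
# 'pvscan --cache -aay' calls, so '--sysinit' only prints the warning and
# skips direct activation, e.g.:
#   lvchange -aay --sysinit $vg1   # -> warning, LVs left inactive
# which is what the checks below verify.)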
# vgcreate $vg1 "$dev1" "$dev2" lvcreate -an -l1 --zero n -n $lv1 $vg1 # # lvmetad is configured and running # lvchange -ay $vg1 2>&1 | tee out not grep "WARNING: Failed to connect" out not grep "WARNING: lvmetad is active, skipping direct activation during sysinit" out check active $vg1 $lv1 lvchange -an $vg1 check inactive $vg1 $lv1 lvchange -aay --sysinit $vg1 2>&1 | tee out not grep "WARNING: Failed to connect" out grep "WARNING: lvmetad is active, skipping direct activation during sysinit" out check inactive $vg1 $lv1 lvchange -ay --sysinit $vg1 2>&1 | tee out not grep "WARNING: Failed to connect" out not grep "WARNING: lvmetad is active, skipping direct activation during sysinit" out check active $vg1 $lv1 lvchange -an $vg1 check inactive $vg1 $lv1 # # lvmetad is configured and not running # kill "$(< LOCAL_LVMETAD)" lvchange -ay $vg1 2>&1 | tee out grep "WARNING: Failed to connect" out not grep "WARNING: lvmetad is active, skipping direct activation during sysinit" out check active $vg1 $lv1 lvchange -an $vg1 check inactive $vg1 $lv1 lvchange -aay --sysinit $vg1 2>&1 | tee out grep "WARNING: Failed to connect" out not grep "WARNING: lvmetad is active, skipping direct activation during sysinit" out check active $vg1 $lv1 lvchange -an $vg1 check inactive $vg1 $lv1 # # lvmetad is not configured and not running # aux lvmconf 'global/use_lvmetad = 0' lvchange -ay $vg1 2>&1 | tee out not grep "WARNING: Failed to connect" out not grep "WARNING: lvmetad is active, skipping direct activation during sysinit" out check active $vg1 $lv1 lvchange -an $vg1 check inactive $vg1 $lv1 lvchange -aay $vg1 --sysinit 2>&1 | tee out not grep "WARNING: Failed to connect" not grep "WARNING: lvmetad is active, skipping direct activation during sysinit" out check active $vg1 $lv1 lvchange -an $vg1 check inactive $vg1 $lv1 vgremove -ff $vg1 LVM2.2.02.176/test/shell/lvcreate-large.sh0000644000000000000120000000245313176752421016647 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2011 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # 'Exercise some lvcreate diagnostics' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest # FIXME update test to make something useful on <16T aux can_use_16T || skip aux prepare_vg 4 lvcreate --type snapshot -s -l 100%FREE -n $lv $vg --virtualsize 1024T #FIXME this should be 1024T #check lv_field $vg/$lv size "128.00m" aux extend_filter_LVMTEST pvcreate "$DM_DEV_DIR/$vg/$lv" vgcreate $vg1 "$DM_DEV_DIR/$vg/$lv" lvcreate -l 100%FREE -n $lv1 $vg1 check lv_field $vg1/$lv1 size "1024.00t" --units t lvresize -f -l 72%VG $vg1/$lv1 check lv_field $vg1/$lv1 size "737.28t" --units t lvremove -ff $vg1/$lv1 lvcreate -l 100%VG -n $lv1 $vg1 check lv_field $vg1/$lv1 size "1024.00t" --units t lvresize -f -l 72%VG $vg1/$lv1 check lv_field $vg1/$lv1 size "737.28t" --units t lvremove -ff $vg1/$lv1 lvremove -ff $vg/$lv vgremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-raid-reshape-linear_to_striped.sh0000644000000000000120000000432513176752421024042 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2017 Red Hat, Inc. All rights reserved. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA2110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest which mkfs.ext4 || skip aux have_raid 1 12 0 || skip # Temporarily skip reshape tests on single-core CPUs until there's a fix for # https://bugzilla.redhat.com/1443999 - AGK 2017/04/20 aux have_multi_core || skip aux prepare_vg 5 # # Test single step linear -> striped conversion # # Create linear LV lvcreate -aey -L 16M -n $lv1 $vg check lv_field $vg/$lv1 segtype "linear" check lv_field $vg/$lv1 stripes 1 check lv_field $vg/$lv1 data_stripes 1 echo y|mkfs -t ext4 $DM_DEV_DIR/$vg/$lv1 fsck -fn $DM_DEV_DIR/$vg/$lv1 # Convert linear -> raid1 lvconvert -y -m 1 $vg/$lv1 fsck -fn $DM_DEV_DIR/$vg/$lv1 check lv_field $vg/$lv1 segtype "raid1" check lv_field $vg/$lv1 stripes 2 check lv_field $vg/$lv1 data_stripes 2 check lv_field $vg/$lv1 regionsize "512.00k" aux wait_for_sync $vg $lv1 fsck -fn $DM_DEV_DIR/$vg/$lv1 # Convert raid1 -> raid5_n lvconvert -y --ty raid5_n --stripesize 64K --regionsize 512K $vg/$lv1 fsck -fn $DM_DEV_DIR/$vg/$lv1 check lv_field $vg/$lv1 segtype "raid5_n" check lv_field $vg/$lv1 stripes 2 check lv_field $vg/$lv1 data_stripes 1 check lv_field $vg/$lv1 stripesize "64.00k" check lv_field $vg/$lv1 regionsize "512.00k" # Convert raid5_n adding stripes lvconvert -y --stripes 4 $vg/$lv1 fsck -fn $DM_DEV_DIR/$vg/$lv1 check lv_first_seg_field $vg/$lv1 segtype "raid5_n" check lv_first_seg_field $vg/$lv1 data_stripes 4 check lv_first_seg_field $vg/$lv1 stripes 5 check lv_first_seg_field $vg/$lv1 stripesize "64.00k" check lv_first_seg_field $vg/$lv1 regionsize "512.00k" check lv_first_seg_field $vg/$lv1 reshape_len_le 10 aux wait_for_sync $vg $lv1 fsck -fn $DM_DEV_DIR/$vg/$lv1 # Convert raid5_n -> striped lvconvert -y --type striped $vg/$lv1 fsck -fn $DM_DEV_DIR/$vg/$lv1 vgremove -ff $vg LVM2.2.02.176/test/shell/lvs-cache.sh0000644000000000000120000000602213176752421015613 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Exercise creation of cache and cache pool volumes # Full CLI uses --type # Shorthand CLI uses -H | --cache SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux have_cache 1 3 0 || skip aux prepare_vg 5 8000 # Use 10M origin size lvcreate -aey -L10 -n $lv1 $vg lvcreate -H -L5 $vg/$lv1 # replace 10M size with 5M size of cache device NEWCLINE=$(dmsetup table $vg-$lv1 | sed 's/20480/10240/') dmsetup reload $vg-$lv1 --table "$NEWCLINE" dmsetup resume $vg-$lv1 # Check that mismatching cache target is shown by lvs lvs -a $vg 2>&1 | grep "WARNING" check lv_attr_bit state $vg/$lv1 "X" lvs -o+lv_active $vg lvremove -f $vg lvcreate --type cache-pool -L10 $vg/cpool lvcreate --type cache -l 1 --cachepool $vg/cpool -n corigin $vg lvs -o lv_name,cache_policy lvs -o lv_name,cache_settings lvremove -f $vg lvcreate --type cache-pool -L10 $vg/cpool lvcreate --type cache -l 1 --cachepool $vg/cpool -n corigin $vg --cachepolicy mq \ --cachesettings migration_threshold=233 lvs -o lv_name,cache_policy | grep mq lvs -o lv_name,cache_settings | grep migration_threshold=233 lvremove -f $vg lvcreate --type cache-pool -L10 --cachepolicy mq --cachesettings migration_threshold=233 $vg/cpool lvcreate --type cache -l 1 --cachepool $vg/cpool -n corigin $vg lvs -o lv_name,cache_policy | grep mq lvs -o lv_name,cache_settings | grep migration_threshold=233 lvremove -f $vg lvcreate --type cache-pool -L10 --cachepolicy mq --cachesettings migration_threshold=233 --cachesettings sequential_threshold=13 $vg/cpool lvcreate --type cache -l 1 --cachepool $vg/cpool -n corigin $vg lvs -o lv_name,cache_policy | grep mq lvs -a -o lv_name,cache_policy -S 'cache_policy=mq' | grep corigin lvs -o lv_name,cache_settings | grep migration_threshold=233 lvs -o lv_name,cache_settings | grep sequential_threshold=13 lvcreate -n foo -l 1 $vg lvs -S 'cache_policy=mq' | grep corigin lvs -S 'cache_policy=mq' | not grep foo lvs -S 'cache_policy=undefined' | not grep corigin lvs -S 'cache_policy=undefined' | grep foo lvs -o +cache_policy -S 'cache_policy=mq' | grep corigin lvs -o +cache_policy -S 'cache_policy=mq' | not grep foo lvs -o +cache_policy -S 'cache_policy=undefined' | not grep corigin lvs -o +cache_policy -S 'cache_policy=undefined' | grep foo lvs -o +cache_policy -O cache_policy lvs -o +cache_settings -S 'cache_settings={migration_threshold=233}' | grep corigin lvs -o +cache_settings -S 'cache_settings!={migration_threshold=233}' | grep foo lvs -o +cache_policy -O cache_settings lvremove -f $vg lvcreate -n foo -l 1 $vg lvs -a -S 'cache_policy=undefined' | grep foo vgremove -ff $vg LVM2.2.02.176/test/shell/lvmetad-pvscan-autoactivation-polling.sh0000644000000000000120000000346713176752421023376 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITHOUT_LVMETAD=1 . 
lib/inittest # test if snapshot-merge target is available aux target_at_least dm-snapshot-merge 1 0 0 || skip which mkfs.ext3 || skip lvdev_() { echo "$DM_DEV_DIR/$1/$2" } snap_lv_name_() { echo ${1}_snap } setup_merge_() { local VG_NAME=$1 local LV_NAME=$2 local NUM_EXTRA_SNAPS=${3:-0} local BASE_SNAP_LV_NAME BASE_SNAP_LV_NAME=$(snap_lv_name_ $LV_NAME) lvcreate -aey -n $LV_NAME -l 50%FREE $VG_NAME lvs lvcreate -s -n $BASE_SNAP_LV_NAME -l 20%FREE ${VG_NAME}/${LV_NAME} mkfs.ext3 "$(lvdev_ $VG_NAME $LV_NAME)" if [ $NUM_EXTRA_SNAPS -gt 0 ]; then for i in $(seq 1 $NUM_EXTRA_SNAPS); do lvcreate -s -n ${BASE_SNAP_LV_NAME}_${i} -l 20%ORIGIN ${VG_NAME}/${LV_NAME} done fi } aux prepare_pvs 1 50 vgcreate $vg1 "$dev1" mkdir test_mnt setup_merge_ $vg1 $lv1 mount "$(lvdev_ $vg1 $lv1)" test_mnt lvconvert --merge "$vg1/$(snap_lv_name_ "$lv1")" umount test_mnt vgchange -an $vg1 # check snapshot get removed on autoactivation pvscan --cache -aay "$dev1" check active $vg1 $lv1 i=100 while ! check lv_not_exists "$vg1/$(snap_lv_name_ "$lv1")"; do test $i -lt 0 && fail "Background polling failed to remove merged snapshot LV" sleep .1 i=$((i-1)) done # TODO: add similar simple tests for other interrupted/unfinished polling operation vgremove -ff $vg1 LVM2.2.02.176/test/shell/pvmove-restart.sh0000644000000000000120000000526713176752421016756 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013-2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Check pvmove behavior when it's progress and machine is rebooted SKIP_WITH_LVMLOCKD=1 . lib/inittest aux prepare_pvs 3 60 vgcreate -s 128k $vg "$dev1" "$dev2" pvcreate --metadatacopies 0 "$dev3" vgextend $vg "$dev3" # Slowdown writes # (FIXME: generates interesting race when not used) aux delay_dev "$dev3" 0 800 "$(get first_extent_sector "$dev3"):" test -e HAVE_DM_DELAY || skip for mode in "--atomic" "" do # Create multisegment LV lvcreate -an -Zn -l5 -n $lv1 $vg "$dev1" lvextend -l+10 $vg/$lv1 "$dev2" lvextend -l+5 $vg/$lv1 "$dev1" lvextend -l+10 $vg/$lv1 "$dev2" pvmove -i10 -n $vg/$lv1 "$dev1" "$dev3" $mode & PVMOVE=$! # Let's wait a bit till pvmove starts and kill it aux wait_pvmove_lv_ready "$vg-pvmove0" kill -9 $PVMOVE if test -e LOCAL_LVMPOLLD; then aux prepare_lvmpolld fi wait # Simulate reboot - forcibly remove related devices # First take down $lv1 then it's pvmove0 j=0 for i in $lv1 pvmove0 pvmove0_mimage_0 pvmove0_mimage_1 ; do while dmsetup status "$vg-$i"; do dmsetup remove "$vg-$i" || { j=$(( j + 1 )) test $j -le 100 || die "Cannot take down devices." sleep .1; } done done dmsetup table | grep $PREFIX # Check we really have pvmove volume check lv_attr_bit type $vg/pvmove0 "p" if test -e LOCAL_CLVMD ; then # giveup all clvmd locks (faster then restarting clvmd) # no deactivation happen, nodes are already removed #vgchange -an $vg # FIXME: However above solution has one big problem # as clvmd starts to abort on internal errors on various # errors, based on the fact pvmove is killed -9 # Restart clvmd kill "$(< LOCAL_CLVMD)" for i in $(seq 1 100) ; do test $i -eq 100 && die "Shutdown of clvmd is too slow." 
pgrep clvmd || break sleep .1 done # wait for the pid removal aux prepare_clvmd fi aux notify_lvmetad "$dev1" "$dev2" "$dev3" # Only PVs should be left in table... dmsetup table # Restart pvmove # use exclusive activation to have usable pvmove without cmirrord LVM_TEST_TAG="kill_me_$PREFIX" vgchange --config 'activation{polling_interval=10}' -aey $vg aux wait_pvmove_lv_ready "$vg-pvmove0" dmsetup table pvmove --abort lvs -a -o+devices $vg lvremove -ff $vg aux kill_tagged_processes done # Restore delayed device back aux delay_dev "$dev3" vgremove -ff $vg LVM2.2.02.176/test/shell/process-each-vg.sh0000644000000000000120000001214113176752421016733 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Exercise toollib process_each_vg' SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 6 # # process_each_vg is used by a number of vg commands; # use 'vgremove' and 'vgs' to test it. # # The logic in process_each_vg is mainly related to # selecting which vg's to process. # # # set up four vgs that we will remove # vgcreate $SHARED $vg1 "$dev1" vgcreate $SHARED $vg2 "$dev2" vgcreate $SHARED $vg3 "$dev3" vgcreate $SHARED $vg4 "$dev4" # these two vgs will not be removed vgcreate $SHARED $vg5 "$dev5" vgchange --addtag tagvg5 $vg5 lvcreate -l 4 -n $lv1 $vg5 vgcreate $SHARED $vg6 "$dev6" lvcreate -l 4 -n $lv2 $vg6 # should fail without any arg not vgremove # should succeed vgremove $vg1 vgremove $vg2 $vg3 $vg4 # these should fail because they are already removed not vgremove $vg1 not vgremove $vg2 not vgremove $vg3 not vgremove $vg4 # these should fail because they have lvs in them not vgremove $vg5 not vgremove $vg6 # check that the vgs we removed are gone not vgs $vg1 not vgs $vg2 not vgs $vg3 not vgs $vg4 # # set up four vgs that we will remove # vgcreate $SHARED --addtag tagfoo $vg1 "$dev1" vgcreate $SHARED --addtag tagfoo $vg2 "$dev2" vgcreate $SHARED --addtag tagfoo2 $vg3 "$dev3" vgcreate $SHARED --addtag tagbar $vg4 "$dev4" vgchange --addtag foo $vg4 # should do nothing and fail not vgremove garbage # should find nothing to remove vgremove @garbage # should find nothing to remove vgremove @$vg1 # should succeed vgremove $vg1 not vgs $vg1 vgremove $vg2 $vg3 $vg4 not vgs $vg2 not vgs $vg3 not vgs $vg4 # # set up four vgs that we will remove # vgcreate $SHARED --addtag tagfoo $vg1 "$dev1" vgcreate $SHARED --addtag tagfoo $vg2 "$dev2" vgcreate $SHARED --addtag tagfoo2 $vg3 "$dev3" vgcreate $SHARED --addtag tagbar $vg4 "$dev4" vgchange --addtag foo $vg4 vgremove @tagfoo not vgs $vg1 not vgs $vg2 vgremove @tagfoo2 @tagbar not vgs $vg3 not vgs $vg4 # # set up four vgs that we will remove # vgcreate $SHARED --addtag tagfoo $vg1 "$dev1" vgcreate $SHARED --addtag tagfoo $vg2 "$dev2" vgcreate $SHARED --addtag tagfoo2 $vg3 "$dev3" vgcreate $SHARED --addtag tagbar $vg4 "$dev4" vgchange --addtag foo $vg4 vgremove $vg1 @tagfoo2 not vgs $vg1 not vgs $vg3 vgremove @tagbar $vg2 not vgs $vg2 not vgs $vg4 # # set up four vgs that we will remove # vgcreate $SHARED --addtag tagfoo $vg1 "$dev1" vgcreate $SHARED --addtag tagfoo $vg2 "$dev2" vgcreate $SHARED --addtag 
tagfoo2 $vg3 "$dev3" vgcreate $SHARED --addtag tagbar $vg4 "$dev4" vgchange --addtag foo $vg4 vgremove @foo @tagfoo2 $vg1 $vg2 not vgs $vg1 not vgs $vg2 not vgs $vg3 not vgs $vg4 # # set up four vgs that we will remove # vgcreate $SHARED --addtag tagfoo $vg1 "$dev1" vgcreate $SHARED --addtag tagfoo $vg2 "$dev2" vgcreate $SHARED --addtag tagfoo2 $vg3 "$dev3" vgcreate $SHARED --addtag tagbar $vg4 "$dev4" vgchange --addtag foo $vg4 vgremove @tagfoo $vg1 @tagfoo @tagfoo2 $vg3 @tagbar not vgs $vg1 not vgs $vg2 not vgs $vg3 not vgs $vg4 # # set up four vgs that we will remove # vgcreate $SHARED --addtag tagfoo $vg1 "$dev1" vgcreate $SHARED --addtag tagfoo $vg2 "$dev2" vgcreate $SHARED --addtag tagfoo2 $vg3 "$dev3" vgcreate $SHARED --addtag tagbar $vg4 "$dev4" vgchange --addtag foo $vg4 not vgremove garbage $vg1 not vgs $vg1 not vgremove $vg2 garbage not vgs $vg2 vgremove $vg3 @garbage not vgs $vg3 vgremove @garbage $vg4 not vgs $vg4 # # end vgremove tests # check that the two vgs we did not intend to remove # are still there, and then remove them # vgs $vg5 vgs $vg6 vgremove -f $vg5 vgremove -f $vg6 not vgs $vg5 not vgs $vg6 # # set up four vgs that we will report # vgcreate $SHARED --addtag tagfoo $vg1 "$dev1" vgcreate $SHARED --addtag tagfoo $vg2 "$dev2" vgcreate $SHARED --addtag tagfoo2 $vg3 "$dev3" vgcreate $SHARED --addtag tagbar $vg4 "$dev4" vgchange --addtag foo $vg4 vgs >err grep $vg1 err grep $vg2 err grep $vg3 err grep $vg4 err vgs $vg1 $vg2 >err grep $vg1 err grep $vg2 err not grep $vg3 err not grep $vg4 err vgs @tagfoo >err grep $vg1 err grep $vg2 err not grep $vg3 err not grep $vg4 err vgs @tagfoo2 >err grep $vg3 err not grep $vg1 err not grep $vg2 err not grep $vg4 err vgs @tagfoo2 @tagbar >err grep $vg3 err grep $vg4 err not grep $vg1 err not grep $vg2 err vgs $vg1 @tagbar >err grep $vg1 err grep $vg4 err not grep $vg2 err not grep $vg3 err vgs $vg1 @tagfoo >err grep $vg1 err grep $vg2 err not grep $vg3 err not grep $vg4 err not vgs garbage >err not grep $vg1 err not grep $vg2 err not grep $vg3 err not grep $vg4 err not vgs garbage $vg1 >err grep $vg1 err not grep $vg2 err not grep $vg3 err not grep $vg4 err vgs @garbage @foo >err grep $vg4 err not grep $vg1 err not grep $vg2 err not grep $vg3 err vgremove -f $vg1 $vg2 $vg3 $vg4 LVM2.2.02.176/test/shell/nomda-missing.sh0000644000000000000120000000422513176752421016516 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux prepare_devs 4 pvcreate "$dev1" "$dev2" pvcreate --metadatacopies 0 "$dev3" "$dev4" vgcreate $vg "$dev1" "$dev2" "$dev3" "$dev4" lvcreate -l1 -n linear1 $vg "$dev1" lvcreate -l1 -n linear2 $vg "$dev2" lvcreate -l2 -n linear12 $vg "$dev1":4 "$dev2":4 lvcreate -aey -l1 -n origin1 $vg "$dev1" lvcreate -s $vg/origin1 -l1 -n s_napshot2 "$dev2" lvcreate -aey -l1 --type mirror -m1 -n mirror12 --mirrorlog core $vg "$dev1" "$dev2" lvcreate -aey -l1 --type mirror -m1 -n mirror123 $vg "$dev1" "$dev2" "$dev3" vgchange -a n $vg aux disable_dev "$dev1" not vgchange -aey $vg not vgck $vg check inactive $vg linear1 check active $vg linear2 check inactive $vg origin1 check inactive $vg s_napshot2 check inactive $vg linear12 check inactive $vg mirror12 check inactive $vg mirror123 vgchange -a n $vg aux enable_dev "$dev1" aux disable_dev "$dev2" not vgchange -aey $vg not vgck $vg check active $vg linear1 check inactive $vg linear2 check inactive $vg linear12 check inactive $vg origin1 check inactive $vg s_napshot2 check inactive $vg mirror12 check inactive $vg mirror123 vgchange -a n $vg aux enable_dev "$dev2" aux disable_dev "$dev3" not vgchange -aey $vg not vgck $vg check active $vg origin1 check active $vg s_napshot2 check active $vg linear1 check active $vg linear2 check active $vg linear12 check inactive $vg mirror123 check active $vg mirror12 vgchange -a n $vg aux enable_dev "$dev3" aux disable_dev "$dev4" vgchange -aey $vg not vgck $vg check active $vg origin1 check active $vg s_napshot2 check active $vg linear1 check active $vg linear2 check active $vg linear12 check active $vg mirror12 check active $vg mirror123 vgremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-cache-chunks.sh0000644000000000000120000000334713176752421020331 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Exercise number of cache chunks in cache pool # Skips creation of real cached device for older cache targets... SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest

aux have_cache 1 3 0 || skip

aux prepare_vg 2 1000000

# Really large cache pool data LV
lvcreate -L1T -n cpool $vg

# Works and picks a higher chunk size than the default
lvconvert -y --type cache-pool $vg/cpool

# Check the chunk size in sectors is more than 512K
test "$(get lv_field "$vg/cpool" chunk_size --units s --nosuffix)" -gt 1000

lvcreate -L1M -n $lv1 $vg

# Do not let too-small chunks pass when caching the origin
fail lvconvert -y -H --chunksize 128K --cachepool $vg/cpool $vg/$lv1 >out 2>&1
cat out
grep "too small chunk size" out

# Though 2M is valid
if aux have_cache 1 8 0 ; then
	# Without SMQ we run out of kernel memory easily
	lvconvert -y -H --chunksize 2M --cachepool $vg/cpool $vg/$lv1
fi

lvremove -f $vg

###

# Really large cache pool data LV
lvcreate -L1T -n cpool $vg

# Not allowed to create more than 10e6 chunks
fail lvconvert -y --type cache-pool --chunksize 128K $vg/cpool

if aux have_cache 1 8 0 ; then
	# Let the operation pass when the max_chunks limit is raised
	lvconvert -y --type cache-pool --chunksize 128K $vg/cpool \
		--config 'allocation/cache_pool_max_chunks=10000000'
fi

vgremove -f $vg

LVM2.2.02.176/test/shell/lvconvert-thin-raid.sh0000644000000000000120000000410713176752421017647 0ustar rootwheel
#!/usr/bin/env bash
# Copyright (C) 2014-2017 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

SKIP_WITH_LVMLOCKD=1
SKIP_WITH_LVMPOLLD=1

export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false}

. lib/inittest

aux have_thin 1 0 0 || skip
aux have_raid 1 4 0 || skip

aux prepare_vg 4

# create RAID LVs for data and metadata volumes
lvcreate -aey -L10M --type raid1 -m3 -n $lv1 $vg
lvcreate -aey -L8M --type raid1 -m3 -n $lv2 $vg
aux wait_for_sync $vg $lv1
aux wait_for_sync $vg $lv2
lvchange -an $vg/$lv1

# FIXME: temporarily we return error code 5
INVALID=not
# conversion fails for internal volumes
$INVALID lvconvert --thinpool $vg/${lv1}_rimage_0
$INVALID lvconvert --yes --thinpool $vg/$lv1 --poolmetadata $vg/${lv2}_rimage_0

lvconvert --yes --thinpool $vg/$lv1 --poolmetadata $vg/$lv2

lvchange -ay $vg

lvconvert --splitmirrors 1 --name data2 $vg/${lv1}_tdata "$dev2"
lvconvert --splitmirrors 1 --name data3 $vg/${lv1}_tdata "$dev3"
# Check split and track gets rejected on 2-legged raid1
not lvconvert --splitmirrors 1 --trackchanges $vg/${lv1}_tdata "$dev4"
lvconvert -y --splitmirrors 1 --trackchanges $vg/${lv1}_tdata "$dev4"

lvconvert --splitmirrors 1 --name meta1 $vg/${lv1}_tmeta "$dev1"
lvconvert --splitmirrors 1 --name meta2 $vg/${lv1}_tmeta "$dev2"
# Check split and track gets rejected on 2-legged raid1
not lvconvert --splitmirrors 1 --trackchanges $vg/${lv1}_tmeta "$dev4"
lvconvert -y --splitmirrors 1 --trackchanges $vg/${lv1}_tmeta "$dev4"

lvremove -ff $vg/data2 $vg/data3 $vg/meta1 $vg/meta2

lvconvert --merge $vg/${lv1}_tdata_rimage_1
lvconvert --merge $vg/${lv1}_tmeta_rimage_1

lvconvert -y -m +1 $vg/${lv1}_tdata "$dev2"
lvconvert -y -m +1 $vg/${lv1}_tmeta "$dev1"

vgremove -ff $vg

LVM2.2.02.176/test/shell/lvconvert-raid456.sh0000644000000000000120000000313013176752421017141 0ustar rootwheel
#!/usr/bin/env bash
# Copyright (C) 2013-2014 Red Hat, Inc. All rights reserved.
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest get_image_pvs() { local d local images images=$(dmsetup ls | grep "${1}-${2}_.image_.*" | cut -f1 | sed -e s:-:/:) lvs --noheadings -a -o devices $images | sed s/\(.\)// } ######################################################## # MAIN ######################################################## aux raid456_replace_works || skip aux have_raid 1 3 0 || skip aux prepare_vg 7 # 7 devices for 2 dev replacement of 5-dev RAID6 levels="5 6" aux have_raid4 && levels="4 5 6" # RAID 4/5/6 (can replace up to 'parity' devices) for i in $levels; do lvcreate --type raid$i -i 3 -l 3 -n $lv1 $vg if [ $i -eq 6 ]; then dev_cnt=5 limit=2 else dev_cnt=4 limit=1 fi for j in {1..3}; do for o in $(seq 0 $i); do replace="" devices=( $(get_image_pvs $vg $lv1) ) for k in $(seq $j); do index=$(( ( k + o ) % dev_cnt )) replace="$replace --replace ${devices[$index]}" done aux wait_for_sync $vg $lv1 if [ $j -gt $limit ]; then not lvconvert $replace $vg/$lv1 else lvconvert $replace $vg/$lv1 fi done done lvremove -ff $vg done vgremove -ff $vg LVM2.2.02.176/test/shell/pv-duplicate.sh0000644000000000000120000000173513176752421016351 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2011-2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # 'Exercise duplicate metadata diagnostics' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 3 pvcreate "$dev1" dd if="$dev1" of=backup_dev1 bs=256K count=1 vgcreate --metadatasize 128k $vg1 "$dev1" # copy mda dd if="$dev1" of="$dev2" bs=256K count=1 dd if="$dev1" of="$dev3" bs=256K count=1 pvs "$dev3" -o pv_uuid vgs $vg1 dd if=backup_dev1 of="$dev3" bs=256K count=1 pvs #-vvvv # TODO: Surely needs more inspecition about correct # behavior for such case # vgs $vg1 LVM2.2.02.176/test/shell/snapshot-remove-dmsetup.sh0000644000000000000120000000403613176752421020562 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # check if 'dmsetup --noflush' will work properly for mounted snapshot SKIP_WITH_LVMLOCKD=1 SKIP_WITH_CLVMD=1 SKIP_WITH_LVMETAD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest which mkfs.ext2 || skip aux prepare_vg 5 # Create stacked device lvcreate --type snapshot -s -L10 -n $lv1 $vg --virtualsize 100M aux extend_filter_LVMTEST vgcreate $vg1 "$DM_DEV_DIR"/$vg/$lv1 lvcreate -L20 -n $lv1 $vg1 lvcreate -L10 -n snap -s $vg1/$lv1 mkfs.ext2 "$DM_DEV_DIR/$vg1/snap" mkdir mnt mount -o errors=remount-ro "$DM_DEV_DIR/$vg1/snap" mnt sync # intentionally suspend layer below dmsetup suspend $vg-$lv1 # now this should pass without blocking dmsetup suspend --noflush --nolockfs $vg1-snap & DMPID=$! #dmsetup suspend $vg1-snap & sleep .5 dmsetup info --noheadings -c -o suspended $vg1-snap | tee out should grep -i suspend out # unlock device below dmsetup resume $vg-$lv1 # so this will pass without blocking on udev # otherwise --noudevsync would be needed dmsetup resume $vg1-snap # Expecting success from 'dmsetup' wait $DMPID # Try how force removal works dmsetup suspend $vg-$lv1 # needs to fail as device is still open not dmsetup remove --force $vg1-snap & DMPID=$! # on older snapshot target 'remove' will wait till $lv1 is resumed if aux target_at_least dm-snapshot 1 6 0 ; then sleep .5 dmsetup table $vg1-snap | tee out should grep -i error out fi dmsetup resume $vg-$lv1 # Expecting success from 'not dmsetup' wait $DMPID # check it really is now 'error' target dmsetup table $vg1-snap | tee out grep error out umount mnt || true lvremove -f $vg1 vgremove -ff $vg1 vgremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-repair-transient.sh0000644000000000000120000000204113176752421021252 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux mirror_recovery_works || skip aux prepare_vg 5 # ordinary mirrors lvcreate -aey --type mirror -m 3 --ignoremonitoring -L 1 -n 4way $vg aux wait_for_sync $vg 4way aux disable_dev --error --silent "$dev2" "$dev4" mkfs.ext3 "$DM_DEV_DIR/$vg/4way" & sleep 1 dmsetup status echo n | lvconvert --repair $vg/4way 2>&1 | tee 4way.out aux enable_dev --silent "$dev2" "$dev4" lvs -a -o +devices $vg | tee out not grep unknown out vgreduce --removemissing $vg check mirror $vg 4way lvchange -a n $vg/4way wait vgremove -f $vg LVM2.2.02.176/test/shell/vg-check-devs-used.sh0000644000000000000120000000234113176752421017332 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMPOLLD=1 . lib/inittest # We need "dm" directory for dm devices in sysfs. 
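# The 'aux driver_at_least 4 15' check below compares the device-mapper driver
# version reported by the kernel and skips this test on older drivers, where
# the per-device "dm" sysfs directory relied on here is presumably unavailable.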
aux driver_at_least 4 15 || skip aux prepare_devs 3 8 vgcreate "$vg" "$dev1" "$dev2" lvcreate -l100%FREE -n $lv $vg dd if="$dev1" of="$dev3" bs=1M pvs --config "devices/global_filter = [ \"a|$dev2|\", \"a|$dev3|\", \"r|.*|\" ]" 2>err grep "WARNING: Device mismatch detected for $vg/$lv which is accessing $dev1 instead of $dev3" err dd if=/dev/zero of="$dev3" bs=1M count=8 lvremove -ff $vg # Also test if sub LVs with suffixes are correctly processed. # Check with a thick snapshot, which has sub LVs with -real and -cow suffixes in their UUIDs. lvcreate -l1 -aey -n $lv $vg lvcreate -l1 -aey -s $vg/$lv pvs 2>err not grep "WARNING: Device mismatch detected for $vg/$lv" err vgremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-repair-thin-raid.sh0000644000000000000120000000352513176752421021132 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Test repairing of broken thin pool on raid SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux have_thin 1 0 0 || skip aux have_raid 1 4 0 || skip # # To continue this test we need the real thin tools available. # When they are not present, mark the test as skipped, but still # let the initial part proceed, which should work even without the tools. # aux have_tool_at_least "$LVM_TEST_THIN_CHECK_CMD" 0 3 1 || skip aux have_tool_at_least "$LVM_TEST_THIN_DUMP_CMD" 0 3 1 || skip aux have_tool_at_least "$LVM_TEST_THIN_REPAIR_CMD" 0 3 1 || skip # # Main # aux prepare_vg 4 lvcreate --type raid1 -L1 -n pool $vg lvcreate --type raid1 -L2 -n meta $vg # raid _tdata & _tmeta lvconvert -y --thinpool $vg/pool --poolmetadata $vg/meta lvcreate -V1G $vg/pool # Pool has to be inactive (ATM) for repair fail lvconvert -y --repair $vg/pool "$dev3" lvchange -an $vg check lv_field $vg/pool_tmeta lv_role "private,thin,pool,metadata" lvconvert -y --repair $vg/pool "$dev3" lvs -a -o+devices,seg_pe_ranges,role,layout $vg check lv_field $vg/pool_meta0 lv_role "public" check lv_field $vg/pool_meta0 lv_layout "raid,raid1" check lv_field $vg/pool_tmeta lv_layout "linear" check lv_on $vg pool_tmeta "$dev1" # The spare metadata LV name (_pmspare) is generated internally, so look it up by selection SPARE=$(lvs --noheadings -a --select "name=~_pmspare" -o name $vg) SPARE=${SPARE##*[} SPARE=${SPARE%%]*} check lv_on $vg $SPARE "$dev3" lvchange -ay $vg vgremove -ff $vg LVM2.2.02.176/test/shell/stray-device-node.sh0000644000000000000120000000153113176752421017270 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 3 get_devs cp -r "$dev1" "$DM_DEV_DIR/stray" vgcreate "$vg" "${DEVICES[@]}" lvcreate -an -Zn --type mirror -m 1 -l 1 -n mirror $vg aux disable_dev "$dev1" # FIXME: # for the .cache use case we need to run pvscan # to keep clvmd in sync.
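# The plain 'pvscan' below refreshes LVM's view of the devices (the .cache /
# clvmd state mentioned in the FIXME above) so that the following
# 'vgreduce --removemissing --force' works from current device information.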
pvscan vgreduce --removemissing --force $vg aux enable_dev "$dev1" LVM2.2.02.176/test/shell/lvextend-thin-full.sh0000644000000000000120000000326713176752421017507 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # play with thin-pool resize in corner cases # SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest aux have_thin 1 0 0 || skip test -n "$LVM_TEST_THIN_RESTORE_CMD" || LVM_TEST_THIN_RESTORE_CMD=$(which thin_restore) || skip "$LVM_TEST_THIN_RESTORE_CMD" -V || skip aux have_thin 1 10 0 || skip aux prepare_vg 3 256 aux lvmconf 'activation/thin_pool_autoextend_percent = 30' \ 'activation/thin_pool_autoextend_threshold = 70' aux prepare_thin_metadata 400 0 | tee data lvcreate -L200 -T $vg/pool lvchange -an $vg lvcreate -L2M -n $lv1 $vg "$LVM_TEST_THIN_RESTORE_CMD" -i data -o "$DM_DEV_DIR/mapper/$vg-$lv1" lvconvert -y --thinpool $vg/pool --poolmetadata $vg/$lv1 # Cannot resize if set to 0% not lvextend --use-policies --config 'activation{thin_pool_autoextend_percent = 0}' $vg/pool 2>&1 | tee err grep "0%" err # Locally active LV is needed not lvextend --use-policies $vg/pool 2>&1 | tee err grep "locally" err lvchange -ay $vg # Creation of new LV is not allowed when thinpool is over threshold not lvcreate -V10 $vg/pool lvextend --use-policies $vg/pool "$dev2" "$dev3" #should lvextend -l+100%FREE $vg/pool2 lvs -a $vg vgremove -ff $vg LVM2.2.02.176/test/shell/lv-ancestry.sh0000644000000000000120000001631313176752421016221 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMPOLLD=1 . lib/inittest aux have_thin 1 0 0 || skip aux prepare_pvs 1 16 get_devs aux lvmconf "metadata/record_lvs_history=1" vgcreate -s 64K "$vg" "${DEVICES[@]}" lvcreate -l100%FREE -T ${vg}/pool # Thin snap chain with 2 branches starting at lv3. 
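# Each 'lvcreate -s' below snapshots the previously created thin LV, so
# lv1..lv5 form one chain while lv6/lv7 branch off lv3; the lvh_field checks
# that follow verify the full_ancestors/full_descendants reporting for every
# LV in both branches, also after individual LVs have been removed.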
# # lv1 --> lv2 --> lv3 --> lv4 --> lv5 # \ # --> lv6 --> lv7 lvcreate -V1 -T ${vg}/pool -n lv1 lvcreate -s ${vg}/lv1 -n lv2 lvcreate -s ${vg}/lv2 -n lv3 lvcreate -s ${vg}/lv3 -n lv4 lvcreate -s ${vg}/lv4 -n lv5 lvcreate -s ${vg}/lv3 -n lv6 lvcreate -s ${vg}/lv6 -n lv7 check lvh_field ${vg}/lv1 full_ancestors "" check lvh_field ${vg}/lv1 full_descendants "lv2,lv3,lv4,lv5,lv6,lv7" check lvh_field ${vg}/lv2 full_ancestors "lv1" check lvh_field ${vg}/lv2 full_descendants "lv3,lv4,lv5,lv6,lv7" check lvh_field ${vg}/lv3 full_ancestors "lv2,lv1" check lvh_field ${vg}/lv3 full_descendants "lv4,lv5,lv6,lv7" check lvh_field ${vg}/lv4 full_ancestors "lv3,lv2,lv1" check lvh_field ${vg}/lv4 full_descendants "lv5" check lvh_field ${vg}/lv5 full_ancestors "lv4,lv3,lv2,lv1" check lvh_field ${vg}/lv5 full_descendants "" check lvh_field ${vg}/lv6 full_ancestors "lv3,lv2,lv1" check lvh_field ${vg}/lv6 full_descendants "lv7" check lvh_field ${vg}/lv7 full_ancestors "lv6,lv3,lv2,lv1" check lvh_field ${vg}/lv7 full_descendants "" # lv1 --> lv2 --> lv3 --> -lv4 --> lv5 # \ # --> lv6 --> lv7 lvremove -ff ${vg}/lv4 check lvh_field ${vg}/lv1 full_ancestors "" check lvh_field ${vg}/lv1 full_descendants "lv2,lv3,lv6,lv7,-lv4,lv5" check lvh_field ${vg}/lv2 full_ancestors "lv1" check lvh_field ${vg}/lv2 full_descendants "lv3,lv6,lv7,-lv4,lv5" check lvh_field ${vg}/lv3 full_ancestors "lv2,lv1" check lvh_field ${vg}/lv3 full_descendants "lv6,lv7,-lv4,lv5" check lvh_field ${vg}/-lv4 full_ancestors "lv3,lv2,lv1" check lvh_field ${vg}/-lv4 full_descendants "lv5" check lvh_field ${vg}/lv5 full_ancestors "-lv4,lv3,lv2,lv1" check lvh_field ${vg}/lv5 full_descendants "" check lvh_field ${vg}/lv6 full_ancestors "lv3,lv2,lv1" check lvh_field ${vg}/lv6 full_descendants "lv7" check lvh_field ${vg}/lv7 full_ancestors "lv6,lv3,lv2,lv1" check lvh_field ${vg}/lv7 full_descendants "" # lv1 --> lv2 --> -lv3 --> -lv4 --> lv5 # \ # --> lv6 --> lv7 lvremove -ff ${vg}/lv3 check lvh_field ${vg}/lv1 full_ancestors "" check lvh_field ${vg}/lv1 full_descendants "lv2,-lv3,-lv4,lv5,lv6,lv7" check lvh_field ${vg}/lv2 full_ancestors "lv1" check lvh_field ${vg}/lv2 full_descendants "-lv3,-lv4,lv5,lv6,lv7" check lvh_field ${vg}/-lv3 full_ancestors "lv2,lv1" check lvh_field ${vg}/-lv3 full_descendants "-lv4,lv5,lv6,lv7" check lvh_field ${vg}/-lv4 full_ancestors "-lv3,lv2,lv1" check lvh_field ${vg}/-lv4 full_descendants "lv5" check lvh_field ${vg}/lv5 full_ancestors "-lv4,-lv3,lv2,lv1" check lvh_field ${vg}/lv5 full_descendants "" check lvh_field ${vg}/lv6 full_ancestors "-lv3,lv2,lv1" check lvh_field ${vg}/lv6 full_descendants "lv7" check lvh_field ${vg}/lv7 full_ancestors "lv6,-lv3,lv2,lv1" check lvh_field ${vg}/lv7 full_descendants "" # lv1 --> -lv2 --> -lv3 --> -lv4 --> lv5 # \ # --> lv6 --> lv7 lvremove -ff $vg/lv2 check lvh_field ${vg}/lv1 full_ancestors "" check lvh_field ${vg}/lv1 full_descendants "-lv2,-lv3,-lv4,lv5,lv6,lv7" check lvh_field ${vg}/-lv2 full_ancestors "lv1" check lvh_field ${vg}/-lv2 full_descendants "-lv3,-lv4,lv5,lv6,lv7" check lvh_field ${vg}/-lv3 full_ancestors "-lv2,lv1" check lvh_field ${vg}/-lv3 full_descendants "-lv4,lv5,lv6,lv7" check lvh_field ${vg}/-lv4 full_ancestors "-lv3,-lv2,lv1" check lvh_field ${vg}/-lv4 full_descendants "lv5" check lvh_field ${vg}/lv5 full_ancestors "-lv4,-lv3,-lv2,lv1" check lvh_field ${vg}/lv5 full_descendants "" check lvh_field ${vg}/lv6 full_ancestors "-lv3,-lv2,lv1" check lvh_field ${vg}/lv6 full_descendants "lv7" check lvh_field ${vg}/lv7 full_ancestors "lv6,-lv3,-lv2,lv1" check lvh_field 
${vg}/lv7 full_descendants "" # lv1 --> -lv2 --> -lv3 --> -lv4 --> lv5 # \ # --> -lv6 --> lv7 lvremove -ff ${vg}/lv6 check lvh_field ${vg}/lv1 full_ancestors "" check lvh_field ${vg}/lv1 full_descendants "-lv2,-lv3,-lv4,lv5,-lv6,lv7" check lvh_field ${vg}/-lv2 full_ancestors "lv1" check lvh_field ${vg}/-lv2 full_descendants "-lv3,-lv4,lv5,-lv6,lv7" check lvh_field ${vg}/-lv3 full_ancestors "-lv2,lv1" check lvh_field ${vg}/-lv3 full_descendants "-lv4,lv5,-lv6,lv7" check lvh_field ${vg}/-lv4 full_ancestors "-lv3,-lv2,lv1" check lvh_field ${vg}/-lv4 full_descendants "lv5" check lvh_field ${vg}/lv5 full_ancestors "-lv4,-lv3,-lv2,lv1" check lvh_field ${vg}/lv5 full_descendants "" check lvh_field ${vg}/-lv6 full_ancestors "-lv3,-lv2,lv1" check lvh_field ${vg}/-lv6 full_descendants "lv7" check lvh_field ${vg}/lv7 full_ancestors "-lv6,-lv3,-lv2,lv1" check lvh_field ${vg}/lv7 full_descendants "" # lv1 --> -lv2 -----------> -lv4 --> lv5 # \ # --> -lv6 --> lv7 lvremove -ff ${vg}/-lv3 check lvh_field ${vg}/lv1 full_ancestors "" check lvh_field ${vg}/lv1 full_descendants "-lv2,-lv4,lv5,-lv6,lv7" check lvh_field ${vg}/-lv2 full_ancestors "lv1" check lvh_field ${vg}/-lv2 full_descendants "-lv4,lv5,-lv6,lv7" check lvh_field ${vg}/-lv4 full_ancestors "-lv2,lv1" check lvh_field ${vg}/-lv4 full_descendants "lv5" check lvh_field ${vg}/lv5 full_ancestors "-lv4,-lv2,lv1" check lvh_field ${vg}/lv5 full_descendants "" check lvh_field ${vg}/-lv6 full_ancestors "-lv2,lv1" check lvh_field ${vg}/-lv6 full_descendants "lv7" check lvh_field ${vg}/lv7 full_ancestors "-lv6,-lv2,lv1" check lvh_field ${vg}/lv7 full_descendants "" # -lv2 -----------> -lv4 --> lv5 # \ # --> -lv6 --> lv7 lvremove --nohistory -ff ${vg}/lv1 check lvh_field ${vg}/-lv2 full_ancestors "" check lvh_field ${vg}/-lv2 full_descendants "-lv4,lv5,-lv6,lv7" check lvh_field ${vg}/-lv4 full_ancestors "-lv2" check lvh_field ${vg}/-lv4 full_descendants "lv5" check lvh_field ${vg}/lv5 full_ancestors "-lv4,-lv2" check lvh_field ${vg}/lv5 full_descendants "" check lvh_field ${vg}/-lv6 full_ancestors "-lv2" check lvh_field ${vg}/-lv6 full_descendants "lv7" check lvh_field ${vg}/lv7 full_ancestors "-lv6,-lv2" check lvh_field ${vg}/lv7 full_descendants "" # -lv2 -----------> -lv4 --> lv5 # # lv7 lvremove --nohistory -ff ${vg}/-lv6 check lvh_field ${vg}/-lv2 full_ancestors "" check lvh_field ${vg}/-lv2 full_descendants "-lv4,lv5" check lvh_field ${vg}/-lv4 full_ancestors "-lv2" check lvh_field ${vg}/-lv4 full_descendants "lv5" check lvh_field ${vg}/lv5 full_ancestors "-lv4,-lv2" check lvh_field ${vg}/lv5 full_descendants "" check lvh_field ${vg}/lv7 full_ancestors "" check lvh_field ${vg}/lv7 full_descendants "" vgremove -ff $vg LVM2.2.02.176/test/shell/lvmetad-pvscan-nomda-bg.sh0000644000000000000120000000272713176752421020364 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITHOUT_LVMETAD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest kill "$(< LOCAL_LVMETAD)" rm LOCAL_LVMETAD aux prepare_devs 2 pvcreate --metadatacopies 0 "$dev1" pvcreate --metadatacopies 1 "$dev2" vgcreate $vg1 "$dev1" "$dev2" lvcreate -n foo -l 1 -an --zero n $vg1 # start lvmetad but make sure it doesn't know about $dev1 or $dev2 aux disable_dev "$dev1" "$dev2" aux prepare_lvmetad lvs mv LOCAL_LVMETAD XXX aux enable_dev "$dev2" "$dev1" mv XXX LOCAL_LVMETAD aux lvmconf 'global/use_lvmetad = 0' check inactive $vg1 foo aux lvmconf 'global/use_lvmetad = 1' pvscan --cache --background "$dev2" -aay aux lvmconf 'global/use_lvmetad = 0' # FIXME: inconclusive. may be a timing issue check inactive $vg1 foo aux lvmconf 'global/use_lvmetad = 1' pvscan --cache --background "$dev1" -aay aux lvmconf 'global/use_lvmetad = 0' i=100 while ! check active $vg1 foo; do test $i -lt 0 && fail "Failed to autoactivate" sleep .1 i=$((i-1)) done aux lvmconf 'global/use_lvmetad = 1' vgremove -ff $vg1 LVM2.2.02.176/test/shell/pv-check-dev-size.sh0000644000000000000120000000223713176752421017176 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 1 8 aux lvmconf 'metadata/check_pv_device_sizes = 1' CHECK_MSG="smaller than corresponding PV size" vgcreate "$vg" "$dev1" 2>err not grep "$CHECK_MSG" err pvs 2>err not grep "$CHECK_MSG" err vgremove -ff $vg # set PV size to 2x dev size pvcreate --yes --setphysicalvolumesize 16m "$dev1" vgcreate "$vg" "$dev1" 2>err grep "$CHECK_MSG" err pvs 2>err grep "$CHECK_MSG" err vgremove -ff $vg # should be quiet if requested aux lvmconf 'metadata/check_pv_device_sizes = 0' pvcreate --yes --setphysicalvolumesize 16m "$dev1" vgcreate "$vg" "$dev1" 2>err not grep "$CHECK_MSG" err pvs 2>err not grep "$CHECK_MSG" err vgremove -ff $vg LVM2.2.02.176/test/shell/lvconvert-repair.sh0000644000000000000120000000623413176752421017255 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 . 
lib/inittest recreate_vg_() { vgremove -ff $vg vgcreate "$vg" "$@" "${DEVICES[@]}" } _check_mlog() { lvs -a -o +devices $vg | tee out not grep unknown out not grep mlog out dmsetup ls | grep $PREFIX | tee out not grep mlog out } aux lvmconf "allocation/maximise_cling = 0" \ "allocation/mirror_logs_require_separate_pvs = 1" # fail multiple devices # 4-way, disk log => 2-way, disk log aux prepare_vg 8 get_devs lvcreate -aey --type mirror -m 3 --ignoremonitoring -L 1 -n 4way $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5":0 aux disable_dev "$dev2" "$dev4" echo n | lvconvert --repair $vg/4way 2>&1 | tee 4way.out lvs -a -o +devices $vg | not grep unknown vgreduce --removemissing $vg aux enable_dev "$dev2" "$dev4" check mirror $vg 4way "$dev5" # 3-way, disk log => linear recreate_vg_ lvcreate -aey --type mirror -m 2 --ignoremonitoring -L 1 -n 3way $vg aux disable_dev "$dev1" "$dev2" echo n | lvconvert --repair $vg/3way check linear $vg 3way _check_mlog vgreduce --removemissing $vg aux enable_dev "$dev1" "$dev2" check linear $vg 3way # fail just log and get it removed # 3-way, disk log => 3-way, core log recreate_vg_ lvcreate -aey --type mirror -m 2 --ignoremonitoring -L 1 -n 3way $vg "$dev1" "$dev2" "$dev3" "$dev4":0 aux disable_dev "$dev4" echo n | lvconvert --repair $vg/3way check mirror $vg 3way core _check_mlog vgreduce --removemissing $vg aux enable_dev "$dev4" # 3-way, mirrored log => 3-way, core log recreate_vg_ -c n lvcreate -aey --type mirror -m 2 --mirrorlog mirrored --ignoremonitoring -L 1 -n 3way $vg \ "$dev1" "$dev2" "$dev3" "$dev4":0 "$dev5":0 aux disable_dev "$dev4" "$dev5" echo n | lvconvert --repair $vg/3way check mirror $vg 3way core _check_mlog vgreduce --removemissing $vg aux enable_dev "$dev4" "$dev5" # 2-way, disk log => 2-way, core log recreate_vg_ lvcreate -aey --type mirror -m 1 --ignoremonitoring -L 1 -n 2way $vg "$dev1" "$dev2" "$dev3":0 aux disable_dev "$dev3" echo n | lvconvert --repair $vg/2way check mirror $vg 2way core _check_mlog vgreduce --removemissing $vg aux enable_dev "$dev3" # fail single devices recreate_vg_ vgreduce $vg "$dev4" lvcreate -aey --type mirror -m 1 --ignoremonitoring -L 1 -n mirror $vg lvchange -a n $vg/mirror vgextend $vg "$dev4" aux disable_dev "$dev1" lvchange --partial -aey $vg/mirror not vgreduce -v --removemissing $vg lvconvert -y --repair $vg/mirror vgreduce --removemissing $vg aux enable_dev "$dev1" vgextend $vg "$dev1" aux disable_dev "$dev2" lvconvert -y --repair $vg/mirror vgreduce --removemissing $vg aux enable_dev "$dev2" vgextend $vg "$dev2" aux disable_dev "$dev3" lvconvert -y --repair $vg/mirror vgreduce --removemissing $vg aux enable_dev "$dev3" vgextend $vg "$dev3" vgremove -ff $vg LVM2.2.02.176/test/shell/lvcreate-operation.sh0000644000000000000120000000267513176752421017563 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # 'Exercise some lvcreate diagnostics' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest cleanup_lvs() { lvremove -ff $vg (dm_table | not grep $vg) || \ die "ERROR: lvremove did leave some some mappings in DM behind!" 
} aux prepare_pvs 2 get_devs aux pvcreate --metadatacopies 0 "$dev1" aux vgcreate "$vg" "${DEVICES[@]}" # --- # Create snapshots of LVs on --metadatacopies 0 PV (bz450651) lvcreate -aey -n$lv1 -l4 $vg "$dev1" lvcreate -n$lv2 -l4 -s $vg/$lv1 lvcreate -n$lv3 -l4 --permission r -s $vg/$lv1 cleanup_lvs # Skip the rest for cluster test -e LOCAL_CLVMD && exit 0 # --- # Create mirror on two devices with mirrored log using --alloc anywhere lvcreate --type mirror -m 1 -l4 -n $lv1 --mirrorlog mirrored $vg --alloc anywhere "$dev1" "$dev2" cleanup_lvs # -- # Create mirror on one dev with mirrored log using --alloc anywhere, should fail not lvcreate --type mirror -m 1 -l4 -n $lv1 --mirrorlog mirrored $vg --alloc anywhere "$dev1" cleanup_lvs vgremove -ff $vg LVM2.2.02.176/test/shell/vgextend-usage.sh0000644000000000000120000000750113176752421016677 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Exercise various vgextend commands # SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 5 if test -n "$LVM_TEST_LVM1" ; then mdatypes='1 2' else mdatypes='2' fi for mdatype in $mdatypes do # Explicit pvcreate pvcreate -M$mdatype "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" vgcreate -M$mdatype $vg1 "$dev1" "$dev2" vgextend $vg1 "$dev3" "$dev4" "$dev5" vgremove -ff $vg1 # Implicit pvcreate pvremove "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" vgcreate -M$mdatype $vg1 "$dev1" "$dev2" vgextend -M$mdatype $vg1 "$dev3" "$dev4" "$dev5" vgremove -ff $vg1 pvremove "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" done # Implicit pvcreate tests, test pvcreate options on vgcreate # --force, --yes, --metadata{size|copies|type}, --zero # --dataalignment[offset] vgcreate $vg "$dev2" vgextend --force --yes --zero y $vg "$dev1" vgreduce $vg "$dev1" pvremove -f "$dev1" for i in 0 1 2 3 do # vgcreate (lvm2) succeeds writing LVM label at sector $i vgextend --labelsector $i $vg "$dev1" dd if="$dev1" bs=512 skip=$i count=1 2>/dev/null | strings | grep LABELONE >/dev/null vgreduce $vg "$dev1" pvremove -f "$dev1" done # pvmetadatacopies for i in 0 1 2 do vgextend --pvmetadatacopies $i $vg "$dev1" check pv_field "$dev1" pv_mda_count $i vgreduce $vg "$dev1" pvremove -f "$dev1" done # metadatasize, dataalignment, dataalignmentoffset #COMM 'pvcreate sets data offset next to mda area' vgextend --metadatasize 100k --dataalignment 100k $vg "$dev1" check pv_field "$dev1" pe_start 200.00k vgreduce $vg "$dev1" pvremove -f "$dev1" # data area is aligned to 1M by default, # data area start is shifted by the specified alignment_offset pv_align=1052160B # 1048576 + (7*512) vgextend --metadatasize 128k --dataalignmentoffset 7s $vg "$dev1" check pv_field "$dev1" pe_start $pv_align --units b vgremove -f $vg pvremove -f "$dev1" # vgextend fails if pv belongs to existing vg vgcreate $vg1 "$dev1" "$dev3" vgcreate $vg2 "$dev2" not vgextend $vg2 "$dev3" vgremove -f $vg1 vgremove -f $vg2 pvremove -f "$dev1" "$dev2" "$dev3" #vgextend fails if vg is not resizeable vgcreate $vg1 "$dev1" "$dev2" vgchange --resizeable n $vg1 not vgextend $vg1 "$dev3" vgremove -f $vg1 pvremove -f "$dev1" "$dev2" # all PVs exist in the VG 
after extended pvcreate "$dev1" vgcreate $vg1 "$dev2" vgextend $vg1 "$dev1" "$dev3" check pv_field "$dev1" vg_name $vg1 check pv_field "$dev2" vg_name $vg1 check pv_field "$dev3" vg_name $vg1 vgremove -f $vg1 pvremove -f "$dev1" "$dev2" "$dev3" echo test vgextend --metadataignore for mdacp in 1 2; do for ignore in y n; do echo vgextend --metadataignore has proper mda_count and mda_used_count vgcreate $vg "$dev3" vgextend --metadataignore $ignore --pvmetadatacopies $mdacp $vg "$dev1" "$dev2" check pv_field "$dev1" pv_mda_count $mdacp check pv_field "$dev2" pv_mda_count $mdacp if [ $ignore = y ]; then check pv_field "$dev1" pv_mda_used_count 0 check pv_field "$dev2" pv_mda_used_count 0 else check pv_field "$dev1" pv_mda_used_count $mdacp check pv_field "$dev2" pv_mda_used_count $mdacp fi echo vg has proper vg_mda_count and vg_mda_used_count check vg_field $vg vg_mda_count $(( mdacp * 2 + 1 )) if [ $ignore = y ]; then check vg_field $vg vg_mda_used_count 1 else check vg_field $vg vg_mda_used_count $(( mdacp * 2 + 1 )) fi check vg_field $vg vg_mda_copies unmanaged vgremove $vg pvremove -ff "$dev1" "$dev2" "$dev3" done done LVM2.2.02.176/test/shell/lvcreate-striped-mirror.sh0000644000000000000120000000414113176752421020533 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 9 lvcreate -aey --nosync -i2 -l2 --type mirror -m1 --mirrorlog core -n $lv1 $vg 2>&1 | tee log not grep "Rounding" log check mirror_images_redundant $vg $lv1 lvcreate -aey --nosync -i2 -l4 --type mirror -m1 --mirrorlog core -n $lv2 $vg 2>&1 | tee log not grep "Rounding" log check mirror_images_redundant $vg $lv2 lvcreate -aey --nosync -i3 -l3 --type mirror -m1 --mirrorlog core -n $lv3 $vg 2>&1 | tee log not grep "Rounding" log check mirror_images_redundant $vg $lv3 lvcreate -aey --nosync -i4 -l4 --type mirror -m1 --mirrorlog core -n $lv4 $vg 2>&1 | tee log not grep "Rounding" log check mirror_images_redundant $vg $lv4 lvcreate -aey --nosync -i2 -l2 --type mirror -m2 --mirrorlog core -n $lv5 $vg 2>&1 | tee log not grep "Rounding" log check mirror_images_redundant $vg $lv5 lvcreate -aey --nosync -i3 -l3 --type mirror -m2 --mirrorlog core -n $lv6 $vg 2>&1 | tee log not grep "Rounding" log check mirror_images_redundant $vg $lv6 lvcreate -aey --nosync -i2 -l2 --type mirror -m3 --mirrorlog core -n $lv7 $vg 2>&1 | tee log not grep "Rounding" log check mirror_images_redundant $vg $lv7 lvremove -ff $vg lvcreate -aey --nosync -i3 -l4 --type mirror -m1 --mirrorlog core -n $lv1 $vg 2>&1 | tee log grep "Rounding size .*(4 extents) up to .*(6 extents)" log lvcreate -aey --nosync -i3 -l4 --type mirror -m2 --mirrorlog core -n $lv2 $vg 2>&1 | tee log grep "Rounding size .*(4 extents) up to .*(6 extents)" log lvcreate -aey --nosync -i3 -l2 --type mirror -m2 --mirrorlog core -n $lv3 $vg 2>&1 | tee log grep "Rounding size .*(2 extents) up to .*(3 extents)" log lvremove -ff $vg LVM2.2.02.176/test/shell/mirror-names.sh0000644000000000000120000001016713176752421016366 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2007-2017 Red Hat, Inc. 
All rights reserved. # Copyright (C) 2007-2008 NEC Corporation # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description="check namings of mirrored LV" SKIP_WITH_LVMLOCKD=1 . lib/inittest # --------------------------------------------------------------------- # Utilities lv_devices_() { local d local i local lv=$1 shift local devices=( "$@" ) local devs devs=$(get lv_devices "$lv") for d in $devs; do (echo "${devices[@]}" | grep "$d") || return 1 for i in "${!devices[@]}"; do if [ "${devices[i]}" = "$d" ] ; then unset "devices[i]" fi done done test "${#devices[@]}" -eq 0 || die "Left devices " "${devices[@]}" } lv_mirror_log_() { get lv_field "$1" mirror_log | tr -d [] } lv_convert_lv_() { get lv_field "$1" convert_lv | tr -d [] } enable_devs() { for i in "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" ; do aux enable_dev "$i" done } delay_devs() { for i in "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" ; do aux delay_dev "$i" 0 1000 "$(get first_extent_sector "$i"):" done } # --------------------------------------------------------------------- # Common environment setup/cleanup for each sub testcases check_and_cleanup_lvs_() { lvs -a -o+devices $vg lvremove -ff $vg (dm_table | not grep $vg) || \ die "ERROR: lvremove did leave some some mappings in DM behind!" } # --------------------------------------------------------------------- # Initialize PVs and VGs aux prepare_vg 5 80 check_and_cleanup_lvs_ # --------------------------------------------------------------------- # basic #COMM "init: lvcreate" #COMM "mirror images are ${lv1}_mimage_x" lvcreate -an -Zn -l2 --type mirror -m1 -n $lv1 $vg lv_devices_ $vg/$lv1 ${lv1}_mimage_0 ${lv1}_mimage_1 #COMM "mirror log is ${lv1}_mlog" test "$(lv_mirror_log_ $vg/$lv1)" = "${lv1}_mlog" # "cleanup" check_and_cleanup_lvs_ #COMM "mirror with name longer than 22 characters (bz221322)" name="LVwithanamelogerthan22characters_butidontwonttocounthem" lvcreate -an -Zn --type mirror -m1 -l2 -n $name $vg lvs $vg/$name check_and_cleanup_lvs_ # --------------------------------------------------------------------- # lvrename #COMM "init: lvrename" #COMM "renamed mirror names: $lv1 to $lv2" lvcreate -an -Zn -l2 --type mirror -m1 -n $lv1 $vg lvrename $vg/$lv1 $vg/$lv2 lv_devices_ $vg/$lv2 ${lv2}_mimage_0 ${lv2}_mimage_1 lv_mirror_log_ $vg/$lv2 ${lv2}_mlog #COMM "cleanup" check_and_cleanup_lvs_ # --------------------------------------------------------------------- # lvconvert #COMM "init: lvconvert" #COMM "converting mirror names is ${lv1}_mimagetmp_2" lvcreate -aey -l2 --type mirror -m1 -n $lv1 $vg delay_devs LVM_TEST_TAG="kill_me_$PREFIX" lvconvert -m+1 -i+40 -b $vg/$lv1 convlv=$(lv_convert_lv_ $vg/$lv1) test "$convlv" = "${lv1}_mimagetmp_2" lv_devices_ $vg/$lv1 $convlv ${lv1}_mimage_2 lv_devices_ $vg/$convlv ${lv1}_mimage_0 ${lv1}_mimage_1 lv_mirror_log_ $vg/$convlv ${lv1}_mlog enable_devs #COMM "mirror log name after re-adding is ${lv1}_mlog" lvconvert -f --mirrorlog core $vg/$lv1 lvconvert --mirrorlog disk $vg/$lv1 convlv=$(lv_convert_lv_ $vg/$lv1) lv_devices_ $vg/$lv1 $convlv ${lv1}_mimage_2 lv_devices_ $vg/$convlv ${lv1}_mimage_0 ${lv1}_mimage_1 lv_mirror_log_ $vg/$convlv ${lv1}_mlog #COMM "renamed converting mirror names: 
$lv1 to $lv2" lvrename $vg/$lv1 $vg/$lv2 convlv=$(lv_convert_lv_ $vg/$lv2) lv_devices_ $vg/$lv2 $convlv ${lv2}_mimage_2 lv_devices_ $vg/$convlv ${lv2}_mimage_0 ${lv2}_mimage_1 lv_mirror_log_ $vg/$convlv ${lv2}_mlog #COMM "cleanup" check_and_cleanup_lvs_ # Temporary mirror log should have "_mlogtmp_" suffix # but currently lvconvert doesn't have an option to add the log. # If such feature is added in future, a test for that should # be added. # --------------------------------------------------------------------- LVM2.2.02.176/test/shell/lvconvert-repair-raid-dmeventd.sh0000644000000000000120000000207213176752421021772 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest which mkfs.ext3 || skip aux have_raid 1 3 0 || skip aux lvmconf \ 'activation/raid_fault_policy = "allocate"' aux prepare_dmeventd aux prepare_vg 5 lvcreate -aey --type raid1 -m 3 --ignoremonitoring -L 1 -n 4way $vg lvchange --monitor y $vg/4way lvs -a -o all,lv_modules $vg lvdisplay --maps $vg aux wait_for_sync $vg 4way aux disable_dev "$dev2" "$dev4" mkfs.ext3 "$DM_DEV_DIR/$vg/4way" sleep 5 # FIXME: need a "poll" utility, akin to "check" aux enable_dev "$dev2" "$dev4" dmsetup table dmsetup status dmsetup info -c vgremove -vvvv -ff $vg LVM2.2.02.176/test/shell/read-ahead.sh0000644000000000000120000000314513176752421015724 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # tests basic functionality of read-ahead and ra regressions # test_description='Test read-ahead functionality' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux prepare_vg 5 #COMM "test various read ahead settings (bz450922)" lvcreate -l 100%FREE -i5 -I512 -n $lv $vg ra=$(get lv_field $vg/$lv lv_kernel_read_ahead --units s --nosuffix) test $(( ( ra / 5 ) * 5 )) -le $ra not lvchange -r auto $vg/$lv 2>&1 | grep auto check lv_field $vg/$lv lv_read_ahead auto lvchange -r 640 $vg/$lv check lv_field $vg/$lv lv_read_ahead 640 --units s --nosuffix lvremove -ff $vg #COMM "read ahead is properly inherited from underlying PV" blockdev --setra 768 "$dev1" vgscan lvcreate -n $lv -L4m $vg "$dev1" test "$(blockdev --getra "$DM_DEV_DIR/$vg/$lv")" -eq 768 lvremove -ff $vg # Check default, active/inactive values for read_ahead / kernel_read_ahead lvcreate -n $lv -l 50%FREE $vg lvchange -an $vg/$lv check lv_field $vg/$lv lv_read_ahead auto check lv_field $vg/$lv lv_kernel_read_ahead -1 lvchange -r 512 $vg/$lv lvchange -ay $vg/$lv check lv_field $vg/$lv lv_read_ahead 256.00k check lv_field $vg/$lv lv_kernel_read_ahead 256.00k vgremove -ff $vg LVM2.2.02.176/test/shell/process-each-vgreduce.sh0000644000000000000120000002057313176752421020133 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Exercise toollib process_each_pv with vgreduce' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 14 # # set up # # FIXME: some of the setup may not be used by the tests # since this was split out from process-each-pv, where # some of the setup was used by other tests that only # remain in process-each-pv. 
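# Overview of the setup below: $vg1 gets a single PV, while $vg2 and $vg3 get
# four PVs each; selected PVs are tagged (V2D3, V2D45, V3, ...) so vgreduce
# can later be exercised by PV name, by @tag (e.g. 'vgreduce $vg2 @V2D45'
# should drop both PVs carrying that tag in one call), and with -a.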
# # use use dev10 instead of dev1 because simple grep for # dev1 matchines dev10,dev11,etc # vgcreate $vg1 "$dev10" vgcreate $vg2 "$dev2" "$dev3" "$dev4" "$dev5" vgcreate $vg3 "$dev6" "$dev7" "$dev8" "$dev9" pvchange --addtag V2D3 "$dev3" pvchange --addtag V2D4 "$dev4" pvchange --addtag V2D45 "$dev4" pvchange --addtag V2D5 "$dev5" pvchange --addtag V2D45 "$dev5" pvchange --addtag V3 "$dev6" "$dev7" "$dev8" "$dev9" pvchange --addtag V3D9 "$dev9" # orphan pvcreate "$dev11" # dev (a non-pv device) pvcreate "$dev12" pvremove "$dev12" # dev13 is intentionally untouched so we can # test that it is handled appropriately as a non-pv # orphan pvcreate "$dev14" # fail without dev not vgreduce $vg2 # fail with dev and -a not vgreduce $vg2 "$dev2" -a check pv_field "$dev2" vg_name $vg2 check pv_field "$dev3" vg_name $vg2 check pv_field "$dev4" vg_name $vg2 check pv_field "$dev5" vg_name $vg2 check pv_field "$dev6" vg_name $vg3 check pv_field "$dev7" vg_name $vg3 check pv_field "$dev8" vg_name $vg3 check pv_field "$dev9" vg_name $vg3 # remove one pv vgreduce $vg2 "$dev2" not check pv_field "$dev2" vg_name $vg2 check pv_field "$dev3" vg_name $vg2 check pv_field "$dev4" vg_name $vg2 check pv_field "$dev5" vg_name $vg2 check pv_field "$dev6" vg_name $vg3 check pv_field "$dev7" vg_name $vg3 check pv_field "$dev8" vg_name $vg3 check pv_field "$dev9" vg_name $vg3 # reset vgextend $vg2 "$dev2" # remove two pvs vgreduce $vg2 "$dev2" "$dev3" not check pv_field "$dev2" vg_name $vg2 not check pv_field "$dev3" vg_name $vg2 check pv_field "$dev4" vg_name $vg2 check pv_field "$dev5" vg_name $vg2 check pv_field "$dev6" vg_name $vg3 check pv_field "$dev7" vg_name $vg3 check pv_field "$dev8" vg_name $vg3 check pv_field "$dev9" vg_name $vg3 # reset vgextend $vg2 "$dev2" "$dev3" pvchange --addtag V2D3 "$dev3" # remove one pv with tag vgreduce $vg2 @V2D3 check pv_field "$dev2" vg_name $vg2 not check pv_field "$dev3" vg_name $vg2 check pv_field "$dev4" vg_name $vg2 check pv_field "$dev5" vg_name $vg2 check pv_field "$dev6" vg_name $vg3 check pv_field "$dev7" vg_name $vg3 check pv_field "$dev8" vg_name $vg3 check pv_field "$dev9" vg_name $vg3 # reset vgextend $vg2 "$dev3" pvchange --addtag V2D3 "$dev3" # remove two pvs, each with different tag vgreduce $vg2 @V2D3 @V2D4 check pv_field "$dev2" vg_name $vg2 not check pv_field "$dev3" vg_name $vg2 not check pv_field "$dev4" vg_name $vg2 check pv_field "$dev5" vg_name $vg2 check pv_field "$dev6" vg_name $vg3 check pv_field "$dev7" vg_name $vg3 check pv_field "$dev8" vg_name $vg3 check pv_field "$dev9" vg_name $vg3 # reset vgextend $vg2 "$dev3" "$dev4" pvchange --addtag V2D3 "$dev3" pvchange --addtag V2D4 "$dev4" pvchange --addtag V2D45 "$dev4" # remove two pvs, both with same tag vgreduce $vg2 @V2D45 check pv_field "$dev2" vg_name $vg2 check pv_field "$dev3" vg_name $vg2 not check pv_field "$dev4" vg_name $vg2 not check pv_field "$dev5" vg_name $vg2 check pv_field "$dev6" vg_name $vg3 check pv_field "$dev7" vg_name $vg3 check pv_field "$dev8" vg_name $vg3 check pv_field "$dev9" vg_name $vg3 # reset vgextend $vg2 "$dev4" "$dev5" pvchange --addtag V2D4 "$dev4" pvchange --addtag V2D45 "$dev4" pvchange --addtag V2D5 "$dev5" pvchange --addtag V2D45 "$dev5" # remove two pvs, one by name, one by tag vgreduce $vg2 "$dev2" @V2D3 not check pv_field "$dev2" vg_name $vg2 not check pv_field "$dev3" vg_name $vg2 check pv_field "$dev4" vg_name $vg2 check pv_field "$dev5" vg_name $vg2 check pv_field "$dev6" vg_name $vg3 check pv_field "$dev7" vg_name $vg3 check pv_field "$dev8" vg_name $vg3 
check pv_field "$dev9" vg_name $vg3 # reset vgextend $vg2 "$dev2" "$dev3" pvchange --addtag V2D3 "$dev3" # remove one pv by tag, where another vg has a pv with same tag pvchange --addtag V2D5V3D9 "$dev5" pvchange --addtag V2D5V3D9 "$dev9" vgreduce $vg2 @V2D5V3D9 check pv_field "$dev2" vg_name $vg2 check pv_field "$dev3" vg_name $vg2 check pv_field "$dev4" vg_name $vg2 not check pv_field "$dev5" vg_name $vg2 check pv_field "$dev6" vg_name $vg3 check pv_field "$dev7" vg_name $vg3 check pv_field "$dev8" vg_name $vg3 check pv_field "$dev9" vg_name $vg3 # reset vgextend $vg2 "$dev5" pvchange --addtag V2D5 "$dev5" pvchange --addtag V2D45 "$dev5" # fail to remove last pv (don't know which will be last) not vgreduce -a $vg2 # reset vgremove $vg2 vgcreate $vg2 "$dev2" "$dev3" "$dev4" "$dev5" pvchange --addtag V2D3 "$dev3" pvchange --addtag V2D4 "$dev4" pvchange --addtag V2D45 "$dev4" pvchange --addtag V2D5 "$dev5" pvchange --addtag V2D45 "$dev5" # lvcreate on one pv to make it used # remove all unused pvs lvcreate -n $lv1 -l 2 $vg2 "$dev2" not vgreduce -a $vg2 check pv_field "$dev2" vg_name $vg2 not check pv_field "$dev3" vg_name $vg2 not check pv_field "$dev4" vg_name $vg2 not check pv_field "$dev5" vg_name $vg2 check pv_field "$dev6" vg_name $vg3 check pv_field "$dev7" vg_name $vg3 check pv_field "$dev8" vg_name $vg3 check pv_field "$dev9" vg_name $vg3 # reset vgextend $vg2 "$dev3" "$dev4" "$dev5" pvchange --addtag V2D3 "$dev3" pvchange --addtag V2D4 "$dev4" pvchange --addtag V2D45 "$dev4" pvchange --addtag V2D5 "$dev5" pvchange --addtag V2D45 "$dev5" lvchange -an $vg2/$lv1 lvremove $vg2/$lv1 # # tests including pvs without mdas # # remove old config vgremove $vg1 vgremove $vg2 vgremove $vg3 pvremove "$dev11" pvremove "$dev14" # new config with some pvs that have zero mdas # for vg1 pvcreate "$dev10" # for vg2 pvcreate "$dev2" --metadatacopies 0 pvcreate "$dev3" pvcreate "$dev4" pvcreate "$dev5" # for vg3 pvcreate "$dev6" --metadatacopies 0 pvcreate "$dev7" --metadatacopies 0 pvcreate "$dev8" --metadatacopies 0 pvcreate "$dev9" # orphan with mda pvcreate "$dev11" # orphan without mda pvcreate "$dev14" --metadatacopies 0 # non-pv devs # dev12 # dev13 vgcreate $vg1 "$dev10" vgcreate $vg2 "$dev2" "$dev3" "$dev4" "$dev5" vgcreate $vg3 "$dev6" "$dev7" "$dev8" "$dev9" pvchange --addtag V2D3 "$dev3" pvchange --addtag V2D4 "$dev4" pvchange --addtag V2D45 "$dev4" pvchange --addtag V2D5 "$dev5" pvchange --addtag V2D45 "$dev5" pvchange --addtag V3 "$dev6" "$dev7" "$dev8" "$dev9" pvchange --addtag V3D8 "$dev8" pvchange --addtag V3D9 "$dev9" # # vgreduce including pvs without mdas # # remove pv without mda vgreduce $vg2 "$dev2" not check pv_field "$dev2" vg_name $vg2 check pv_field "$dev3" vg_name $vg2 check pv_field "$dev4" vg_name $vg2 check pv_field "$dev5" vg_name $vg2 check pv_field "$dev6" vg_name $vg3 check pv_field "$dev7" vg_name $vg3 check pv_field "$dev8" vg_name $vg3 check pv_field "$dev9" vg_name $vg3 # reset vgextend $vg2 "$dev2" # remove pv with mda and pv without mda vgreduce $vg2 "$dev2" "$dev3" not check pv_field "$dev2" vg_name $vg2 not check pv_field "$dev3" vg_name $vg2 check pv_field "$dev4" vg_name $vg2 check pv_field "$dev5" vg_name $vg2 check pv_field "$dev6" vg_name $vg3 check pv_field "$dev7" vg_name $vg3 check pv_field "$dev8" vg_name $vg3 check pv_field "$dev9" vg_name $vg3 # reset vgextend $vg2 "$dev2" vgextend $vg2 "$dev3" # fail to remove only pv with mda not vgreduce $vg3 "$dev9" check pv_field "$dev6" vg_name $vg3 check pv_field "$dev7" vg_name $vg3 check pv_field "$dev8" 
vg_name $vg3 check pv_field "$dev9" vg_name $vg3 check pv_field "$dev2" vg_name $vg2 check pv_field "$dev3" vg_name $vg2 check pv_field "$dev4" vg_name $vg2 check pv_field "$dev5" vg_name $vg2 # remove by tag a pv without mda vgreduce $vg3 @V3D8 check pv_field "$dev6" vg_name $vg3 check pv_field "$dev7" vg_name $vg3 not check pv_field "$dev8" vg_name $vg3 check pv_field "$dev9" vg_name $vg3 check pv_field "$dev2" vg_name $vg2 check pv_field "$dev3" vg_name $vg2 check pv_field "$dev4" vg_name $vg2 check pv_field "$dev5" vg_name $vg2 # reset vgextend $vg3 "$dev8" vgremove $vg1 $vg2 $vg3 LVM2.2.02.176/test/shell/lvcreate-signature-wiping.sh0000644000000000000120000000475513176752421021060 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2013 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # 'Exercise signature wiping during lvcreate' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest init_lv_() { mkswap "$DM_DEV_DIR/$vg/$lv1" } test_blkid_() { local type type=$(blkid -s TYPE -o value -c /dev/null "$DM_DEV_DIR/$vg/$lv1") test "$type" = "swap" } test_msg_() { grep "Wiping swap signature" out } aux prepare_vg # lvcreate wipes signatures when found on newly created LV - test this on "swap". # Test all combinatios with -Z{y|n} and -W{y|n} and related lvm.conf settings. lvcreate -l1 -n $lv1 $vg init_lv_ # This system has unusable blkid (does not recognize small swap, needs fix...) test_blkid_ || skip lvremove -f $vg/$lv1 aux lvmconf "allocation/wipe_signatures_when_zeroing_new_lvs = 0" lvcreate -y -Zn -l1 -n $lv1 $vg 2>&1 | tee out not test_msg_ test_blkid_ lvremove -f $vg/$lv1 lvcreate -y -Zn -Wn -l1 -n $lv1 $vg 2>&1 | tee out not test_msg_ test_blkid_ lvremove -f $vg/$lv1 lvcreate -y -Zn -Wy -l1 -n $lv1 $vg 2>&1 | tee out test_msg_ not test_blkid_ init_lv_ lvremove -f $vg/$lv1 lvcreate -y -Zy -l1 -n $lv1 $vg 2>&1 | tee out not test_msg_ not test_blkid_ init_lv_ lvremove -f $vg/$lv1 lvcreate -y -Zy -Wn -l1 -n $lv1 $vg 2>&1 | tee out not test_msg_ not test_blkid_ init_lv_ lvremove -f $vg/$lv1 lvcreate -y -Zy -Wy -l1 -n $lv1 $vg 2>&1 | tee out test_msg_ not test_blkid_ init_lv_ lvremove -f $vg/$lv1 aux lvmconf "allocation/wipe_signatures_when_zeroing_new_lvs = 1" lvcreate -y -Zn -l1 -n $lv1 $vg 2>&1 | tee out not test_msg_ test_blkid_ lvremove -f $vg/$lv1 lvcreate -y -Zn -Wn -l1 -n $lv1 $vg 2>&1 | tee out not test_msg_ test_blkid_ lvremove -f $vg/$lv1 lvcreate -y -Zn -Wy -l1 -n $lv1 $vg 2>&1 | tee out test_msg_ not test_blkid_ init_lv_ lvremove -f $vg/$lv1 lvcreate -y -Zy -l1 -n $lv1 $vg 2>&1 | tee out test_msg_ not test_blkid_ init_lv_ lvremove -f $vg/$lv1 lvcreate -y -Zy -Wn -l1 -n $lv1 $vg 2>&1 | tee out not test_msg_ not test_blkid_ init_lv_ lvremove -f $vg/$lv1 lvcreate -y -Zy -Wy -l1 -n $lv1 $vg 2>&1 | tee out test_msg_ not test_blkid_ init_lv_ lvremove -f $vg/$lv1 vgremove -f $vg LVM2.2.02.176/test/shell/select-tools-thin.sh0000644000000000000120000000267513176752421017335 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest aux have_thin 1 0 0 || skip aux prepare_pvs 1 16 ######################### # special cases to test # ######################### # if calling lvremove and an LV is removed that is related to other LV # and we're doing selection based on this relation, check if we're # selecting on initial state (here, thin origin LV thin_orig is removed # first, but thin snap should be still selectable based on origin=thin_orig # condition even though thin_orig has just been removed) vgcreate -s 4m $vg1 "$dev1" lvcreate -l100%FREE -T $vg1/pool lvcreate -V4m -T $vg1/pool -n thin_orig lvcreate -s $vg1/thin_orig -n thin_snap lvremove -ff -S 'lv_name=thin_orig || origin=thin_orig' > out grep "Logical volume \"thin_orig\" successfully removed" out grep "Logical volume \"thin_snap\" successfully removed" out not lvs $vg1/thin_orig not lvs $vg1/thin_snap vgremove -ff $vg1 LVM2.2.02.176/test/shell/fsadm-crypt.sh0000644000000000000120000003262713176752421016211 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Exercise fsadm filesystem resize on crypt devices' SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 1 300 # set to "skip" to avoid testing given fs and test warning result # i.e. check_reiserfs=skip check_ext2= check_ext3= check_xfs= check_reiserfs= check_cryptsetup= DROP_SYMLINK= CRYPT_NAME="$PREFIX-tcrypt" CRYPT_DEV="$DM_DEV_DIR/mapper/$CRYPT_NAME" CRYPT_NAME2="$PREFIX-tcrypt2" CRYPT_DEV2="$DM_DEV_DIR/mapper/$CRYPT_NAME2" CRYPT_NAME_PLAIN="$PREFIX-tcryptp" CRYPT_DEV_PLAIN="$DM_DEV_DIR/mapper/$CRYPT_NAME_PLAIN" FORMAT_PARAMS="-i1" PWD1="93R4P4pIqAH8" PWD2="mymJeD8ivEhE" PWD3="ocMakf3fAcQO" SKIP_DETACHED= which cryptsetup || check_cryptsetup=${check_cryptsetup:-cryptsetup} which mkfs.ext2 || check_ext2=${check_ext2:-mkfs.ext2} which mkfs.ext3 || check_ext3=${check_ext3:-mkfs.ext3} which fsck.ext3 || check_ext3=${check_ext3:-fsck.ext3} which mkfs.xfs || check_xfs=${check_xfs:-mkfs.xfs} which xfs_check || { which xfs_repair || check_xfs=${check_xfs:-xfs_repair} } grep xfs /proc/filesystems || check_xfs=${check_xfs:-no_xfs} which mkfs.reiserfs || check_reiserfs=${check_reiserfs:-mkfs.reiserfs} which reiserfsck || check_reiserfs=${check_reiserfs:-reiserfsck} modprobe reiserfs || true grep reiserfs /proc/filesystems || check_reiserfs=${check_reiserfs:-no_reiserfs} vg_lv=$vg/$lv1 vg_lv2=$vg/${lv1}bar vg_lv3=$vg/${lv1}plain dev_vg_lv="$DM_DEV_DIR/$vg_lv" dev_vg_lv2="$DM_DEV_DIR/$vg_lv2" dev_vg_lv3="$DM_DEV_DIR/$vg_lv3" mount_dir="mnt" # for recursive call LVM_BINARY=$(which lvm) export LVM_BINARY test ! 
-d "$mount_dir" && mkdir "$mount_dir" crypt_close() { cryptsetup remove "$1" if [ "$?" -eq 0 -a -n "$DROP_SYMLINK" ]; then rm -f "$DM_DEV_DIR/mapper/$1" fi } cleanup_mounted_and_teardown() { umount "$mount_dir" || true crypt_close $CRYPT_NAME > /dev/null 2>&1 || true crypt_close $CRYPT_NAME2 > /dev/null 2>&1 || true crypt_close $CRYPT_NAME_PLAIN > /dev/null 2>&1 || true aux teardown } fscheck_ext3() { fsck.ext3 -p -F -f "$1" } fscheck_xfs() { if which xfs_repair ; then xfs_repair -n "$1" else xfs_check "$1" fi } fscheck_reiserfs() { reiserfsck --check -p -f "$1" /dev/null || skip check_missing cryptsetup || skip vgchange -s 128k lvcreate -n $lv1 -L25M $vg lvcreate -n ${lv1}bar -L35M $vg lvcreate -n ${lv1}plain -L35M $vg create_crypt_device trap 'cleanup_mounted_and_teardown' EXIT # $1 LVM backend (vg/lv name) # $2 LVM backend device (/dev/vg/lv) # $3 active dm-crypt device (/dev/mapper/some_name ) test_ext2_resize() { mkfs.ext2 -b4096 -j "$3" fsadm --lvresize resize $1 30M # Fails - not enough space for 4M fs not fsadm -y --lvresize resize "$2" 4M lvresize -L+10M -r $1 lvreduce -L10M -r $1 fscheck_ext3 "$3" mount "$3" "$mount_dir" not fsadm -y --lvresize resize $1 4M echo n | not lvresize -L4M -r -n $1 lvresize -L+20M -r -n $1 umount "$mount_dir" fscheck_ext3 "$3" } test_ext2_small_shrink() { mkfs.ext2 "$3" lvresize -L-1 -r $1 lvresize -L-1 -r $1 fscheck_ext3 "$3" } test_ext3_resize() { mkfs.ext3 -b4096 -j "$3" fsadm --lvresize resize $1 30M # Fails - not enough space for 4M fs not fsadm -y --lvresize resize "$2" 4M lvresize -L+10M -r $1 lvreduce -L10M -r $1 fscheck_ext3 "$3" mount "$3" "$mount_dir" lvresize -L+10M -r $1 not fsadm -y --lvresize resize $1 4M echo n | not lvresize -L4M -r -n $1 lvresize -L+20M -r -n $1 lvresize -L-10M -r -y $1 umount "$mount_dir" } test_ext3_small_shrink() { mkfs.ext3 "$3" lvresize -L-1 -r $1 lvresize -L-1 -r $1 fscheck_ext3 "$3" } test_xfs_resize() { mkfs.xfs -l internal,size=1000b -f "$3" fsadm --lvresize resize $1 30M # Fails - not enough space for 4M fs lvresize -L+10M -r $1 not lvreduce -L10M -r $1 fscheck_xfs "$3" mount "$3" "$mount_dir" lvresize -L+10M -r -n $1 umount "$mount_dir" fscheck_xfs "$3" } test_xfs_small_shrink() { mkfs.xfs -l internal,size=1000b -f "$3" not lvresize -L-1 -r $1 fscheck_xfs "$3" } test_reiserfs_resize() { mkfs.reiserfs -s 513 -f "$3" fsadm --lvresize resize $1 30M lvresize -L+10M -r $1 fsadm --lvresize -y resize $1 10M fscheck_reiserfs "$3" mount "$3" "$mount_dir" fsadm -y --lvresize resize $1 30M umount "$mount_dir" fscheck_reiserfs "$3" } test_reiserfs_small_shrink() { mkfs.reiserfs -s 513 -f "$3" lvresize -y -L-1 -r $1 lvresize -y -L-1 -r $1 fscheck_reiserfs "$3" } # $1 LVM backend (vg/lv name) # $2 LVM backend device (/dev/vg/lv) # $3 active dm-crypt device (/dev/mapper/some_name ) # $4 active dm-crypt name ( some_name ) test_ext2_inactive() { crypt_open "$2" $PWD2 "$4" mkfs.ext2 -b4096 -j "$3" crypt_close "$4" not fsadm --lvresize resize $1 30M not lvresize -L+10M -r $1 not lvreduce -L10M -r $1 crypt_open "$2" $PWD2 "$4" fscheck_ext3 "$3" crypt_close "$4" } test_ext3_inactive() { crypt_open "$2" $PWD2 "$4" mkfs.ext3 -b4096 -j "$3" crypt_close "$4" not fsadm --lvresize resize $1 30M not lvresize -L+10M -r $1 not lvreduce -L10M -r $1 crypt_open "$2" $PWD2 "$4" fscheck_ext3 "$3" crypt_close "$4" } test_xfs_inactive() { crypt_open "$2" $PWD2 "$4" mkfs.xfs -l internal,size=1000b -f "$3" crypt_close "$4" not fsadm --lvresize resize $1 30M not lvresize -L+10M -r $1 not lvreduce -L10M -r $1 crypt_open "$2" $PWD2 "$4" fscheck_xfs 
"$3" crypt_close "$4" } test_reiserfs_inactive() { crypt_open "$2" $PWD2 "$4" mkfs.reiserfs -s 513 -f "$3" crypt_close "$4" not fsadm --lvresize resize $1 30M not lvresize -L+10M -r $1 not lvreduce -L10M -r $1 crypt_open "$2" $PWD2 "$4" fscheck_reiserfs "$3" crypt_close "$4" } # $1 LVM backend (vg/lv name) # $2 LVM backend device (/dev/vg/lv) # $3 active dm-crypt device (/dev/mapper/some_name ) # $4 active dm-crypt name ( some_name ) test_ext2_plain() { mkfs.ext2 -b4096 -j "$3" not fsadm --lvresize resize $1 30M not lvresize -L+10M -r $1 not lvreduce -L10M -r $1 fscheck_ext3 "$3" fsadm --cryptresize resize $3 30M fsadm --cryptresize resize $3 35M fscheck_ext3 "$3" mount "$3" "$mount_dir" not fsadm -y --cryptresize resize $3 4M umount "$mount_dir" fscheck_ext3 "$3" crypt_close "$4" not fsadm --lvresize resize $1 30M not lvresize -L+10M -r $1 not lvreduce -L10M -r $1 crypt_open_plain "$2" $PWD3 "$4" fscheck_ext3 "$3" } test_ext3_plain() { mkfs.ext3 -b4096 -j "$3" not fsadm --lvresize resize $1 30M not lvresize -L+10M -r $1 not lvreduce -L10M -r $1 fscheck_ext3 "$3" fsadm --cryptresize resize $3 30M fsadm --cryptresize resize $3 35M fscheck_ext3 "$3" mount "$3" "$mount_dir" not fsadm -y --cryptresize resize $3 4M umount "$mount_dir" fscheck_ext3 "$3" crypt_close "$4" not fsadm --lvresize resize $1 30M not lvresize -L+10M -r $1 not lvreduce -L10M -r $1 crypt_open_plain "$2" $PWD3 "$4" fscheck_ext3 "$3" } test_xfs_plain() { mkfs.xfs -l internal,size=1000b -f "$3" not fsadm --lvresize resize $1 30M not lvresize -L+10M -r $1 not lvreduce -L10M -r $1 fscheck_xfs "$3" lvresize -f -L+10M $1 fsadm --cryptresize resize $3 40M # no shrink support in xfs not fsadm --cryptresize resize $3 35M fscheck_xfs "$3" crypt_close "$4" not fsadm --lvresize resize $1 30M not lvresize -L+10M -r $1 not lvreduce -L10M -r $1 crypt_open_plain "$2" $PWD3 "$4" fscheck_xfs "$3" lvresize -f -L35M $1 } test_reiserfs_plain() { mkfs.reiserfs -s 513 -f "$3" not fsadm --lvresize resize $1 30M not lvresize -L+10M -r $1 not lvreduce -L-10M -r $1 fscheck_reiserfs "$3" fsadm -y --cryptresize resize $3 30M fsadm -y --cryptresize resize $3 35M fscheck_reiserfs "$3" crypt_close "$4" not fsadm --lvresize resize $1 30M not lvresize -L+10M -r $1 not lvreduce -L10M -r $1 crypt_open_plain "$2" $PWD3 "$4" fscheck_reiserfs "$3" } # $1 LVM header backend (vg/lv name) # $2 LVM hedaer backend device (/dev/vg/lv) # $3 active dm-crypt device (/dev/mapper/some_name ) # $4 active dm-crypt name ( some_name )a test_ext2_detached() { mkfs.ext2 -b4096 -j "$3" not fsadm --lvresize resize $1 30M not lvresize -L+10M -r $1 not lvreduce -L10M -r $1 fscheck_ext3 "$3" } test_ext3_detached() { mkfs.ext3 -b4096 -j "$3" not fsadm --lvresize resize $1 30M not lvresize -L+10M -r $1 not lvreduce -L10M -r $1 fscheck_ext3 "$3" } test_xfs_detached() { mkfs.xfs -l internal,size=1000b -f "$3" not fsadm --lvresize resize $1 30M not lvresize -L+10M -r $1 not lvreduce -L10M -r $1 fscheck_xfs "$3" } test_reiserfs_detached() { mkfs.reiserfs -s 513 -f "$3" not fsadm --lvresize resize $1 30M not lvresize -L+10M -r $1 not lvreduce -L10M -r $1 fscheck_reiserfs "$3" } if check_missing ext2; then test_ext2_resize "$vg_lv" "$dev_vg_lv" "$CRYPT_DEV" lvresize -f -L25M $vg_lv cryptsetup resize $CRYPT_NAME test_ext2_inactive "$vg_lv2" "$dev_vg_lv2" "$CRYPT_DEV2" "$CRYPT_NAME2" crypt_open_plain "$dev_vg_lv3" $PWD3 "$CRYPT_NAME_PLAIN" test_ext2_plain "$vg_lv3" "$dev_vg_lv3" "$CRYPT_DEV_PLAIN" "$CRYPT_NAME_PLAIN" crypt_close "$CRYPT_NAME_PLAIN" lvresize -f -L100M $vg_lv cryptsetup resize 
$CRYPT_NAME test_ext2_small_shrink "$vg_lv" "$dev_vg_lv" "$CRYPT_DEV" lvresize -f -L25M $vg_lv cryptsetup resize $CRYPT_NAME if [ -z "$SKIP_DETACHED" ]; then crypt_open_detached "$dev_vg_lv3" $PWD2 "$CRYPT_NAME2" "$dev_vg_lv2" test_ext2_detached "$vg_lv2" "$dev_vg_lv2" "$CRYPT_DEV2" "$CRYPT_NAME2" crypt_close "$CRYPT_NAME2" fi fi if check_missing ext3; then test_ext3_resize "$vg_lv" "$dev_vg_lv" "$CRYPT_DEV" lvresize -f -L25M $vg_lv cryptsetup resize $CRYPT_NAME test_ext3_inactive "$vg_lv2" "$dev_vg_lv2" "$CRYPT_DEV2" "$CRYPT_NAME2" crypt_open_plain "$dev_vg_lv3" $PWD3 "$CRYPT_NAME_PLAIN" test_ext3_plain "$vg_lv3" "$dev_vg_lv3" "$CRYPT_DEV_PLAIN" "$CRYPT_NAME_PLAIN" crypt_close "$CRYPT_NAME_PLAIN" lvresize -f -L100M $vg_lv cryptsetup resize $CRYPT_NAME test_ext3_small_shrink "$vg_lv" "$dev_vg_lv" "$CRYPT_DEV" lvresize -f -L25M $vg_lv cryptsetup resize $CRYPT_NAME if [ -z "$SKIP_DETACHED" ]; then crypt_open_detached "$dev_vg_lv3" $PWD2 "$CRYPT_NAME2" "$dev_vg_lv2" test_ext3_detached "$vg_lv2" "$dev_vg_lv2" "$CRYPT_DEV2" "$CRYPT_NAME2" crypt_close "$CRYPT_NAME2" fi fi if check_missing xfs; then test_xfs_resize "$vg_lv" "$dev_vg_lv" "$CRYPT_DEV" lvresize -f -L25M $vg_lv cryptsetup resize $CRYPT_NAME test_xfs_inactive "$vg_lv2" "$dev_vg_lv2" "$CRYPT_DEV2" "$CRYPT_NAME2" crypt_open_plain "$dev_vg_lv3" $PWD3 "$CRYPT_NAME_PLAIN" test_xfs_plain "$vg_lv3" "$dev_vg_lv3" "$CRYPT_DEV_PLAIN" "$CRYPT_NAME_PLAIN" crypt_close "$CRYPT_NAME_PLAIN" lvresize -f -L100M $vg_lv cryptsetup resize $CRYPT_NAME test_xfs_small_shrink "$vg_lv" "$dev_vg_lv" "$CRYPT_DEV" lvresize -f -L25M $vg_lv cryptsetup resize $CRYPT_NAME if [ -z "$SKIP_DETACHED" ]; then crypt_open_detached "$dev_vg_lv3" $PWD2 "$CRYPT_NAME2" "$dev_vg_lv2" test_xfs_detached "$vg_lv2" "$dev_vg_lv2" "$CRYPT_DEV2" "$CRYPT_NAME2" crypt_close "$CRYPT_NAME2" fi fi if check_missing reiserfs; then test_reiserfs_resize "$vg_lv" "$dev_vg_lv" "$CRYPT_DEV" lvresize -f -L25M $vg_lv cryptsetup resize $CRYPT_NAME test_reiserfs_inactive "$vg_lv2" "$dev_vg_lv2" "$CRYPT_DEV2" "$CRYPT_NAME2" crypt_open_plain "$dev_vg_lv3" $PWD3 "$CRYPT_NAME_PLAIN" test_reiserfs_plain "$vg_lv3" "$dev_vg_lv3" "$CRYPT_DEV_PLAIN" "$CRYPT_NAME_PLAIN" crypt_close "$CRYPT_NAME_PLAIN" lvresize -f -L100M $vg_lv cryptsetup resize $CRYPT_NAME test_reiserfs_small_shrink "$vg_lv" "$dev_vg_lv" "$CRYPT_DEV" lvresize -f -L25M $vg_lv cryptsetup resize $CRYPT_NAME if [ -z "$SKIP_DETACHED" ]; then crypt_open_detached "$dev_vg_lv3" $PWD2 "$CRYPT_NAME2" "$dev_vg_lv2" test_reiserfs_detached "$vg_lv2" "$dev_vg_lv2" "$CRYPT_DEV2" "$CRYPT_NAME2" crypt_close "$CRYPT_NAME2" fi fi crypt_close "$CRYPT_NAME" vgremove -ff $vg LVM2.2.02.176/test/shell/lvresize-full.sh0000644000000000000120000000333413176752421016554 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Excersize resize of filesystem when size of LV already matches # https://bugzilla.redhat.com/1354396 SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
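# Editor's note -- illustrative only, not part of the original script: the
# test body that follows relies on 'lvresize -r' having to run the
# filesystem resize even when the LV size itself does not change, so a
# filesystem deliberately shrunk with resize2fs (to 20000 blocks) is grown
# back to fill the LV by the next 'lvresize -r'.  A quick manual way to
# compare the two sizes on an ext filesystem (device name is a placeholder):
#
#   blockdev --getsize64 /dev/vg/lv                  # LV size in bytes
#   dumpe2fs -h /dev/vg/lv | grep -i 'block count'   # fs size in blocks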
lib/inittest FSCK=${FSCK-fsck} MKFS=${MKFS-mkfs.ext3} RESIZEFS=${RESIZEFS-resize2fs} LVM_BINARY=$(which lvm) export LVM_BINARY which $FSCK || skip which $MKFS || skip which $RESIZEFS || skip aux prepare_vg 2 20 lvcreate -l100%FREE -n $lv1 $vg lvdev="$DM_DEV_DIR/$vg/$lv1" lvs -a $vg "$MKFS" "$lvdev" # this should resolve to resize to same actual size lvreduce -r -f -l-100%FREE $vg/$lv1 "$FSCK" -n "$lvdev" # size should remain the same lvextend -r -f -l+100%FREE $vg/$lv1 "$FSCK" -n "$lvdev" #lvchange -an $vg/$lv1 lvresize -r -f -l+100%FREE $vg/$lv1 "$FSCK" -n "$lvdev" # Check there is really file system resize happening # even when LV itself has still the same size "$RESIZEFS" -f "$lvdev" 20000 "$FSCK" -n "$lvdev" | tee out grep "20000 blocks" out SIZE=$(get lv_field $vg/$lv1 size) lvresize -r -f -l-100%FREE $vg/$lv1 test "$SIZE" = "$(get lv_field $vg/$lv1 size)" "$FSCK" -n "$lvdev" | tee out grep -v "20000 blocks" out # Also check it fails when the user 'resize' volume without # resizing fs and then retries with '-r'. lvreduce -f -l50%VG $vg/$lv1 fail lvresize -r -f -l50%VG $vg/$lv1 lvremove -ff $vg LVM2.2.02.176/test/shell/vgrename-usage.sh0000644000000000000120000000271713176752421016663 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 4 pvcreate "$dev1" "$dev2" pvcreate --metadatacopies 0 "$dev3" "$dev4" # vgrename normal operation - rename vg1 to vg2 # vgrename normal operation - rename vg2 to vg1 # ensure name ordering does not matter vgcreate $vg1 "$dev1" "$dev2" vgrename $vg1 $vg2 check vg_field $vg2 vg_name $vg2 vgrename $vg2 $vg1 check vg_field $vg1 vg_name $vg1 vgremove $vg1 # vgrename by uuid (bz231187) vgcreate $vg1 "$dev1" "$dev3" UUID=$(vgs --noheading -o vg_uuid $vg1) check vg_field $vg1 vg_uuid $UUID vgrename $UUID $vg2 check vg_field $vg2 vg_name $vg2 vgremove $vg2 # vgrename fails - new vg already exists vgcreate $vg1 "$dev1" vgcreate $vg2 "$dev2" not vgrename $vg1 $vg2 vgremove $vg1 $vg2 # vgrename duplicate name vgcreate $vg1 "$dev1" aux disable_dev "$dev1" vgcreate $vg1 "$dev2" UUID=$(vgs --noheading -o vg_uuid $vg1) aux enable_dev "$dev1" not vgrename $vg1 $vg2 vgrename $UUID $vg2 not vgrename $UUID $vg1 vgs vgremove $vg1 $vg2 LVM2.2.02.176/test/shell/losetup-partscan.sh0000644000000000000120000000242513176752421017255 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Check how lvm2 handles partitions over losetup -P devices SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . 
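# Editor's note -- illustrative only, not part of the original script: the
# test below depends on lvm2 refusing to put a PV on a device that carries
# a partition table.  Some (assumed, commonly available) ways to inspect a
# partitioned loop device by hand:
#
#   blkid -p "$LOOP"      # reports PTTYPE="dos" once sfdisk has written a table
#   wipefs "$LOOP"        # lists the partition-table signature
#   wipefs -a "$LOOP"     # wiping the signature would let pvcreate succeed again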
lib/inittest which sfdisk || skip aux prepare_loop 1000 -P || skip test -f LOOP LOOP=$(< LOOP) echo "1 2" | sfdisk "$LOOP" # wait for links aux udev_wait # losetup -P should provide a partition ls -la "${LOOP}"* test -e "${LOOP}p1" aux extend_filter "a|$LOOP|" # creation should fail for 'partitioned' loop device not pvcreate -y "$LOOP" not vgcreate vg "$LOOP" aux teardown_devs aux prepare_loop 1000 || skip test -f LOOP LOOP=$(< LOOP) echo "1 2" | sfdisk "$LOOP" # wait for links aux udev_wait # no partition should actually be there ls -la "${LOOP}"* test ! -e "${LOOP}p1" aux extend_filter "a|$LOOP|" # creation should pass for 'non-partitioned' loop device pvcreate -y "$LOOP" vgcreate vg "$LOOP" LVM2.2.02.176/test/shell/lvcreate-mirror.sh0000644000000000000120000000302413176752421017062 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 5 80 aux lvmconf 'allocation/maximise_cling = 0' \ 'allocation/mirror_logs_require_separate_pvs = 1' # 2-way mirror with corelog, 2 PVs lvcreate -aey -l2 --type mirror -m1 --mirrorlog core -n $lv1 $vg "$dev1" "$dev2" check mirror_images_redundant $vg $lv1 # 2-way mirror with disklog, 3 PVs # lvcreate --nosync is in 100% sync after creation (bz429342) lvcreate -aey -l2 --type mirror -m1 --nosync -n $lv2 $vg "$dev1" "$dev2" "$dev3":0-1 2>&1 | tee out grep "New mirror won't be synchronised." out check lv_field $vg/$lv2 copy_percent "100.00" check mirror_images_redundant $vg $lv2 check mirror_log_on $vg $lv2 "$dev3" # 3-way mirror with disklog, 4 PVs lvcreate -aey -l2 --type mirror -m2 --nosync --mirrorlog disk -n $lv3 $vg "$dev1" "$dev2" "$dev4" "$dev3":0-1 check mirror_images_redundant $vg $lv3 check mirror_log_on $vg $lv3 "$dev3" lvremove -ff $vg # creating 2-way mirror with disklog from 2 PVs fails not lvcreate -aey -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2" vgremove -ff $vg LVM2.2.02.176/test/shell/lvmlockd-hello-world.sh0000644000000000000120000000127113176752421020010 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2008-2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA test_description='Hello world for vgcreate with lvmlockd and sanlock' . lib/inittest [ -z "$LVM_TEST_LVMLOCKD" ] && skip aux prepare_pvs 1 vgcreate $SHARED $vg "$dev1" vgs -o+locktype,lockargs $vg vgremove $vg LVM2.2.02.176/test/shell/snapshot-usage-exa.sh0000644000000000000120000000230513176752421017462 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2015 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2.
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # Check very large device size (upto 15Exa bytes) # this needs 64bit arch SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux can_use_16T || skip aux prepare_pvs 1 get_devs # Prepare large enough backend device vgcreate -s 4M "$vg" "${DEVICES[@]}" lvcreate --type snapshot -s -l 100%FREE -n $lv $vg --virtualsize 15P aux extend_filter_LVMTEST # Check usability with largest extent size pvcreate "$DM_DEV_DIR/$vg/$lv" vgcreate -s 4G $vg1 "$DM_DEV_DIR/$vg/$lv" lvcreate -an -Zn -l50%FREE -n $lv1 $vg1 lvcreate -s -l100%FREE -n $lv2 $vg1/$lv1 check lv_field $vg1/$lv2 size "7.50p" lvremove -ff $vg1 lvcreate --type snapshot -V15E -l1 -n $lv1 -s $vg1 check lv_field $vg1/$lv1 origin_size "15.00e" vgremove -ff $vg1 vgremove -ff $vg LVM2.2.02.176/test/lib/0000755000000000000120000000000013176752421013051 5ustar rootwheelLVM2.2.02.176/test/lib/flavour-udev-cluster-lvmpolld.sh0000644000000000000120000000012113176752421021324 0ustar rootwheelexport LVM_TEST_LOCKING=3 export LVM_TEST_LVMPOLLD=1 export LVM_TEST_DEVDIR=/dev LVM2.2.02.176/test/lib/harness.c0000644000000000000120000003745513176752421014676 0ustar rootwheel/* * Copyright (C) 2010-2013 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #define _GNU_SOURCE #include #include #include #include #include #include #include #include /* rusage */ #include #include #include #include #include #include #include #include #include static pid_t pid; static int fds[2]; #define MAX 1024 #define MAX_LOG_SIZE (32*1024*1024) /* Default max size of test log */ #define WRITE_TIMEOUT (180 * 2) /* 3 minutes */ struct stats { int nfailed; int nskipped; int npassed; int nknownfail; int nwarned; int ninterrupted; int status[MAX]; }; static struct stats s; static char *readbuf = NULL; static size_t readbuf_sz = 0, readbuf_used = 0; static int die = 0; static int verbose = 0; /* >1 with timestamps */ static int interactive = 0; /* disable all redirections */ static int quiet = 0; static const char *results; static unsigned fullbuffer = 0; static int unlimited = 0; static int write_timeout = WRITE_TIMEOUT; static time_t harness_start; static FILE *outfile = NULL; char testdirdebug[PATH_MAX]; struct subst { const char *key; char *value; }; static struct subst subst[2]; enum { UNKNOWN, FAILED, INTERRUPTED, KNOWNFAIL, PASSED, SKIPPED, TIMEOUT, WARNED, }; static void handler( int sig ) { signal( sig, SIG_DFL ); kill( -pid, sig ); die = sig; } static int outline(FILE *out, char *buf, int start, int force) { char *from = buf + start; char *next = strchr(buf + start, '\n'); if (!next && !force) /* not a complete line yet... 
*/ return start; if (!next) next = from + strlen(from); else ++next; if (!strncmp(from, "@TESTDIR=", 9)) { subst[0].key = "@TESTDIR@"; free(subst[0].value); subst[0].value = strndup(from + 9, next - from - 9 - 1); snprintf(testdirdebug, sizeof(testdirdebug), "%s/debug.log", subst[0].value); } else if (!strncmp(from, "@PREFIX=", 8)) { subst[1].key = "@PREFIX@"; free(subst[1].value); subst[1].value = strndup(from + 8, next - from - 8 - 1); } else { char *line = strndup(from, next - from); char *a = line, *b; do { int idx = -1; int i; b = line + strlen(line); for ( i = 0; i < 2; ++i ) { if (subst[i].key) { // printf("trying: %s -> %s\n", subst[i].value, subst[i].key); char *stop = strstr(a, subst[i].value); if (stop && stop < b) { idx = i; b = stop; } } } fwrite(a, 1, b - a, out); a = b; if ( idx >= 0 ) { fprintf(out, "%s", subst[idx].key); a += strlen(subst[idx].value); } } while (b < line + strlen(line)); free(line); } return next - buf + (force ? 0 : 1); } static void dump(void) { int counter_last = -1, counter = 0; while ((counter < (int) readbuf_used) && (counter != counter_last)) { counter_last = counter; counter = outline( stdout, readbuf, counter, 1 ); } } static void trickle(FILE *out, int *last, int *counter) { if (*last > (int) readbuf_used) { *last = -1; *counter = 0; } while ((*counter < (int) readbuf_used) && (*counter != *last)) { *last = *counter; *counter = outline( out, readbuf, *counter, 1 ); } } static void clear(void) { readbuf_used = 0; fullbuffer = 0; } static int64_t _get_time_us(void) { struct timeval tv; (void) gettimeofday(&tv, 0); return (int64_t) tv.tv_sec * 1000000 + (int64_t) tv.tv_usec; } static void _append_buf(const char *buf, size_t len) { if ((readbuf_used + len) >= readbuf_sz) { if ((readbuf_sz >= MAX_LOG_SIZE) && !unlimited) { if (fullbuffer++ == 0) kill(-pid, SIGINT); return; } readbuf_sz = 2 * (readbuf_used + len + readbuf_sz); readbuf = realloc(readbuf, readbuf_sz); } if (!readbuf) exit(205); memcpy(readbuf + readbuf_used, buf, len); readbuf_used += len; } static const char *_append_with_stamp(const char *buf, int stamp) { static const char spaces[] = " "; static int64_t t_last; static int64_t t_start = 0; int64_t t_now; char stamp_buf[32]; /* Bigger to always fit both numbers */ const char *be; const char *bb = buf; size_t len; while ((be = strchr(bb, '\n'))) { if (stamp++ == 0) { t_now = _get_time_us(); if (!t_start) t_start = t_last = t_now; len = snprintf(stamp_buf, sizeof(stamp_buf), "%8.3f%8.4f ", (t_now - t_start) / 1000000.f, (t_now - t_last) / 1000000.f); _append_buf(stamp_buf, (len < (sizeof(spaces) - 1)) ? len : (sizeof(spaces) - 1)); t_last = t_now; } _append_buf(bb, be + 1 - bb); bb = be + 1; if (stamp > 0 && bb[0]) _append_buf(spaces, sizeof(spaces) - 1); } return bb; } static int drain(int fd) { char buf[2 * 1024 * 1024 + 1]; /* try to capture large sysrq trace */ const char *bp; int stamp = 0; int sz; static int stdout_last = -1, stdout_counter = 0; static int outfile_last = -1, outfile_counter = 0; if ((sz = read(fd, buf, sizeof(buf) - 1)) > 0) { buf[sz] = '\0'; bp = (verbose < 2) ? 
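/* Editor's note (illustrative, not part of the original source): when
 * VERBOSE >= 2 each captured line receives the two prefixes produced by
 * _append_with_stamp() above, seconds since the first line and seconds
 * since the previous line, so output looks roughly like:
 *
 *    12.345  0.0421 lvcreate -aey -l2 --type mirror ...
 *
 * The sample values are invented; the widths follow the format string
 * "%8.3f%8.4f " used in that function.
 */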
buf : _append_with_stamp(buf, stamp); if (sz > (bp - buf)) { _append_buf(bp, sz - (bp - buf)); stamp = -1; /* unfinished line */ } else stamp = 0; readbuf[readbuf_used] = 0; if (verbose) trickle(stdout, &stdout_last, &stdout_counter); if (outfile) trickle(outfile, &outfile_last, &outfile_counter); } return sz; } static int drain_fds(int fd1, int fd2, long timeout) { return -1; } #define SYSLOG_ACTION_READ_CLEAR 4 #define SYSLOG_ACTION_CLEAR 5 static void clear_dmesg(void) { klogctl(SYSLOG_ACTION_CLEAR, 0, 0); } static void drain_dmesg(void) { char buf[1024 * 1024 + 1]; int sz = klogctl(SYSLOG_ACTION_READ_CLEAR, buf, sizeof(buf) - 1); if (sz > 0) { buf[sz] = 0; _append_buf(buf, sz); } } static const char *duration(time_t start, const struct rusage *usage) { static char buf[100]; int t = (int)(time(NULL) - start); int p = sprintf(buf, "%2d:%02d", t / 60, t % 60); if (usage) sprintf(buf + p, " %2ld:%02ld.%03ld/%ld:%02ld.%03ld%5ld%8ld/%ld", usage->ru_utime.tv_sec / 60, usage->ru_utime.tv_sec % 60, usage->ru_utime.tv_usec / 1000, usage->ru_stime.tv_sec / 60, usage->ru_stime.tv_sec % 60, usage->ru_stime.tv_usec / 1000, usage->ru_maxrss / 1024, usage->ru_inblock, usage->ru_oublock); return buf; } static void passed(int i, char *f, time_t t, const struct rusage *usage) { if (readbuf && strstr(readbuf, "TEST EXPECT FAIL")) { ++ s.npassed; s.status[i] = PASSED; printf("passed (UNEXPECTED). %s\n", duration(t, usage)); } else if (readbuf && strstr(readbuf, "TEST WARNING")) { ++s.nwarned; s.status[i] = WARNED; printf("warnings %s\n", duration(t, usage)); } else { ++ s.npassed; s.status[i] = PASSED; printf("passed. %s\n", duration(t, usage)); } } static void interrupted(int i, char *f) { ++ s.ninterrupted; s.status[i] = INTERRUPTED; printf("\ninterrupted.\n"); if (!quiet && !verbose && fullbuffer) { printf("-- Interrupted %s ------------------------------------\n", f); dump(); printf("\n-- Interrupted %s (end) ------------------------------\n", f); } } static void timeout(int i, char *f) { ++ s.ninterrupted; s.status[i] = TIMEOUT; printf("timeout.\n"); if (!quiet && !verbose && readbuf) { printf("-- Timed out %s ------------------------------------\n", f); dump(); printf("\n-- Timed out %s (end) ------------------------------\n", f); } } static void skipped(int i, char *f) { ++ s.nskipped; s.status[i] = SKIPPED; printf("skipped.\n"); } static void failed(int i, char *f, int st) { if (readbuf && strstr(readbuf, "TEST EXPECT FAIL")) { printf("FAILED (expected).\n"); s.status[i] = KNOWNFAIL; ++ s.nknownfail; return; } ++ s.nfailed; s.status[i] = FAILED; printf("FAILED (status %d).\n", WEXITSTATUS(st)); if (!quiet && !verbose && readbuf) { printf("-- FAILED %s ------------------------------------\n", f); dump(); printf("-- FAILED %s (end) ------------------------------\n", f); } } static void run(int i, char *f) { struct rusage usage; char flavour[512], script[512]; pid = fork(); if (pid < 0) { perror("Fork failed."); exit(201); } else if (pid == 0) { if (!interactive) { close(STDIN_FILENO); dup2(fds[1], STDOUT_FILENO); dup2(fds[1], STDERR_FILENO); close(fds[1]); } close(fds[0]); if (strchr(f, ':')) { strcpy(flavour, f); *strchr(flavour, ':') = 0; setenv("LVM_TEST_FLAVOUR", flavour, 1); strcpy(script, strchr(f, ':') + 1); } else { strcpy(script, f); } setpgid(0, 0); execlp("bash", "bash", "-noprofile", "-norc", script, NULL); perror("execlp"); fflush(stderr); _exit(202); } else { int st = -1, w; time_t start = time(NULL); char buf[128]; char outpath[PATH_MAX]; char *c = outpath + strlen(results) + 1; struct 
stat statbuf; int runaway = 0; int no_write = 0; int clobber_dmesg = 0; int collect_debug = 0; int fd_debuglog = -1; int fd_kmsg; fd_set set; int ret; //close(fds[1]); testdirdebug[0] = '\0'; /* Capture RUNTESTDIR */ snprintf(buf, sizeof(buf), "%s ...", f); printf("Running %-60s%c", buf, verbose ? '\n' : ' '); fflush(stdout); snprintf(outpath, sizeof(outpath), "%s/%s.txt", results, f); while ((c = strchr(c, '/'))) *c = '_'; if (!(outfile = fopen(outpath, "w"))) perror("fopen"); /* Mix-in kernel log message */ if ((fd_kmsg = open("/dev/kmsg", O_RDONLY | O_NONBLOCK)) < 0) { if (errno != ENOENT) /* Older kernels (<3.5) do not support /dev/kmsg */ perror("open /dev/kmsg"); } else if (lseek(fd_kmsg, 0L, SEEK_END) == (off_t) -1) perror("lseek /dev/kmsg"); if ((fd_kmsg < 0) && (clobber_dmesg = strcmp(getenv("LVM_TEST_CAN_CLOBBER_DMESG") ? : "0", "0"))) clear_dmesg(); while ((w = wait4(pid, &st, WNOHANG, &usage)) == 0) { struct timeval selectwait = { .tv_usec = 500000 }; /* 0.5s */ if ((fullbuffer && fullbuffer++ == 8000) || (write_timeout > 0 && no_write > write_timeout)) { timeout: kill(pid, SIGINT); sleep(5); /* wait a bit for a reaction */ if ((w = waitpid(pid, &st, WNOHANG)) == 0) { if (write_timeout > 0 && no_write > write_timeout) /* * Kernel traces needed, when stuck for * too long in userspace without producing * any output, in other case it should be * user space problem */ system("echo t > /proc/sysrq-trigger"); collect_debug = 1; kill(-pid, SIGKILL); w = pid; // waitpid(pid, &st, NULL); } runaway = 1; break; } if (clobber_dmesg) drain_dmesg(); FD_ZERO(&set); FD_SET(fds[0], &set); if (fd_kmsg >= 0) FD_SET(fd_kmsg, &set); if ((ret = select(fd_kmsg > fds[0] ? fd_kmsg + 1 : fds[0] + 1, &set, NULL, NULL, &selectwait)) <= 0) { /* Still checking debug log size if it's not growing too much */ if (!unlimited && testdirdebug[0] && (stat(testdirdebug, &statbuf) == 0) && statbuf.st_size > 32 * 1024 * 1024) { /* 32MB command log size */ printf("Killing test since debug.log has gone wild (size %ld)\n", statbuf.st_size); goto timeout; } no_write++; continue; } if (FD_ISSET(fds[0], &set) && drain(fds[0]) > 0) no_write = 0; else if (fd_kmsg >= 0 && FD_ISSET(fd_kmsg, &set) && (drain(fd_kmsg) < 0)) { close(fd_kmsg); fd_kmsg = -1; /* Likely /dev/kmsg is not readable */ if ((clobber_dmesg = strcmp(getenv("LVM_TEST_CAN_CLOBBER_DMESG") ? : "0", "0"))) clear_dmesg(); } } if (w != pid) { perror("waitpid"); exit(206); } while (!fullbuffer && (drain_fds(fds[0], fd_kmsg, 0) > 0)) /* read out what was left */; if (die == 2) interrupted(i, f); else if (runaway) { if (collect_debug && (fd_debuglog = open(testdirdebug, O_RDONLY)) != -1) { runaway = unlimited ? 
INT32_MAX : 4 * 1024 * 1024; while (!fullbuffer && runaway > 0 && (ret = drain(fd_debuglog)) > 0) runaway -= ret; close(fd_debuglog); } timeout(i, f); } else if (WIFEXITED(st)) { if (WEXITSTATUS(st) == 0) passed(i, f, start, &usage); else if (WEXITSTATUS(st) == 200) skipped(i, f); else failed(i, f, st); } else failed(i, f, st); if (fd_kmsg >= 0) close(fd_kmsg); else if (clobber_dmesg) drain_dmesg(); if (outfile) fclose(outfile); if (fullbuffer) printf("\nTest was interrupted, output has got too large (>%u) (loop:%u)\n" "Set LVM_TEST_UNLIMITED=1 for unlimited log.\n", (unsigned) readbuf_sz, fullbuffer); clear(); } } int main(int argc, char **argv) { char results_list[PATH_MAX]; const char *result; const char *be_verbose = getenv("VERBOSE"), *be_interactive = getenv("INTERACTIVE"), *be_quiet = getenv("QUIET"), *be_write_timeout = getenv("WRITE_TIMEOUT"); time_t start = time(NULL); int i; FILE *list; if (argc >= MAX) { fprintf(stderr, "Sorry, my head exploded. Please increase MAX.\n"); exit(1); } if (be_verbose) verbose = atoi(be_verbose); if (be_interactive) interactive = atoi(be_interactive); if (be_quiet) quiet = atoi(be_quiet); if (be_write_timeout) write_timeout = atoi(be_write_timeout) * 2; results = getenv("LVM_TEST_RESULTS") ? : "results"; unlimited = getenv("LVM_TEST_UNLIMITED") ? 1 : 0; (void) snprintf(results_list, sizeof(results_list), "%s/list", results); //if (pipe(fds)) { if (socketpair(PF_UNIX, SOCK_STREAM, 0, fds)) { perror("socketpair"); return 201; } if (fcntl(fds[0], F_SETFL, O_NONBLOCK ) == -1) { perror("fcntl on socket"); return 202; } /* set up signal handlers */ for (i = 0; i <= 32; ++i) switch (i) { case SIGCHLD: case SIGWINCH: case SIGURG: case SIGKILL: case SIGSTOP: break; default: signal(i, handler); } harness_start = time(NULL); /* run the tests */ for (i = 1; !die && i < argc; ++i) { run(i, argv[i]); if ( time(NULL) - harness_start > 48 * 360 ) { /* 04:48 */ printf("Nearly 5 hours passed, giving up...\n"); die = 1; } } free(subst[0].value); free(subst[1].value); free(readbuf); printf("\n## %d tests %s : %d OK, %d warnings, %d failures (%d interrupted), %d known failures; " "%d skipped\n", s.nwarned + s.npassed + s.nfailed + s.nskipped + s.ninterrupted, duration(start, NULL), s.npassed, s.nwarned, s.nfailed + s.ninterrupted, s.ninterrupted, s.nknownfail, s.nskipped); /* dump a list to results */ if ((list = fopen(results_list, "w"))) { for (i = 1; i < argc; ++ i) { switch (s.status[i]) { case FAILED: result = "failed"; break; case INTERRUPTED: result = "interrupted"; break; case PASSED: result = "passed"; break; case SKIPPED: result = "skipped"; break; case TIMEOUT: result = "timeout"; break; case WARNED: result = "warnings"; break; default: result = "unknown"; break; } fprintf(list, "%s %s\n", argv[i], result); } fclose(list); } else perror("fopen result"); /* print out a summary */ if (s.nfailed || s.nskipped || s.nknownfail || s.ninterrupted || s.nwarned) { for (i = 1; i < argc; ++ i) { switch (s.status[i]) { case FAILED: printf("FAILED: %s\n", argv[i]); break; case INTERRUPTED: printf("INTERRUPTED: %s\n", argv[i]); break; case KNOWNFAIL: printf("FAILED (expected): %s\n", argv[i]); break; case SKIPPED: printf("skipped: %s\n", argv[i]); break; case TIMEOUT: printf("TIMEOUT: %s\n", argv[i]); break; case WARNED: printf("WARNED: %s\n", argv[i]); break; default: /* do nothing */ ; } } printf("\n"); return (s.nfailed > 0) || (s.ninterrupted > 0) || die; } return die; } LVM2.2.02.176/test/lib/flavour-ndev-lvmetad.sh0000644000000000000120000000006413176752421017447 
0ustar rootwheelexport LVM_TEST_LOCKING=1 export LVM_TEST_LVMETAD=1 LVM2.2.02.176/test/lib/lvm-wrapper.sh0000644000000000000120000000333013176752421015660 0ustar rootwheel#!/bin/sh # Copyright (C) 2011-2017 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA . lib/paths CMD=${0##*/} test "$CMD" != lvm || unset CMD # When needed to trace command from test suite use env var before program # and run program directly via shell in test dir i.e.: # sh shell/activate-mirror.sh # 'LVM_GDB=1 lvcreate -l1 $vg' # > run test -z "$LVM_GDB" || exec gdb --readnow --args "$abs_top_builddir/tools/lvm" $CMD "$@" # Multiple level of LVM_VALGRIND support # the higher level the more commands are traced if test -n "$LVM_VALGRIND"; then RUN_DBG="${VALGRIND:-valgrind}"; fi if test -n "$LVM_STRACE"; then RUN_DBG="strace $LVM_STRACE -o strace.log" fi case "$CMD" in lvs|pvs|vgs|vgck|vgscan) test "${LVM_DEBUG_LEVEL:-0}" -lt 2 && RUN_DBG="" ;; pvcreate|pvremove|lvremove|vgcreate|vgremove) test "${LVM_DEBUG_LEVEL:-0}" -lt 1 && RUN_DBG="" ;; esac # Capture parallel users of debug.log file #test -z "$(fuser debug.log 2>/dev/null)" || { # echo "TEST WARNING: \"debug.log\" is still in use while running $CMD $@" >&2 # fuser -v debug.log >&2 #} # the exec is important, because otherwise fatal signals inside "not" go unnoticed if test -n "$abs_top_builddir"; then exec $RUN_DBG "$abs_top_builddir/tools/lvm" $CMD "$@" else # we are testing the lvm on $PATH PATH=$(echo "$PATH" | sed -e 's,[^:]*lvm2-testsuite[^:]*:,,g') exec $RUN_DBG lvm $CMD "$@" fi LVM2.2.02.176/test/lib/flavour-ndev-lvmpolld.sh0000644000000000000120000000006513176752421017645 0ustar rootwheelexport LVM_TEST_LOCKING=1 export LVM_TEST_LVMPOLLD=1 LVM2.2.02.176/test/lib/brick-shelltest.h0000644000000000000120000011556113176752421016332 0ustar rootwheel// -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4 -*- /* * This brick allows you to build a test runner for shell-based functional * tests. It comes with fairly elaborate features (although most are only * available on posix systems), geared toward difficult-to-test software. * * It provides a full-featured "main" function (brick::shelltest::run) that you * can use as a drop-in shell test runner. * * Features include: * - interactive and batch-mode execution * - collects test results and test logs in a simple text-based format * - measures resource use of individual tests * - rugged: suited for running in monitored virtual machines * - supports test flavouring */ /* * (c) 2014 Petr Rockai * (c) 2014 Red Hat, Inc. */ /* Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. 
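 * Editor's note (annotation, not part of this header): the lvm-wrapper.sh
 * shown a little earlier is what makes single tests debuggable by hand.
 * Based only on its own comments, plausible invocations from the test
 * directory look like:
 *
 *     LVM_GDB=1 sh shell/activate-mirror.sh          (gdb on the lvm binary)
 *     LVM_VALGRIND=1 LVM_DEBUG_LEVEL=2 sh shell/lvcreate-mirror.sh
 *     LVM_STRACE="-f" sh shell/lvresize-full.sh      (trace lands in strace.log)
 *
 * These are sketches; the wrapper itself, not this header, defines the
 * exact behaviour.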
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __unix #include #include /* rusage */ #include #include #include #include #include #include #include #include #include #endif #include "configure.h" /* Timeout for the whole test suite in hours */ static const time_t TEST_SUITE_TIMEOUT = 4; #ifndef BRICK_SHELLTEST_H #define BRICK_SHELLTEST_H namespace brick { namespace shelltest { /* TODO: remove this section in favour of brick-filesystem.h */ inline std::runtime_error syserr( std::string msg, std::string ctx = "" ) { return std::runtime_error( std::string( strerror( errno ) ) + " " + msg + " " + ctx ); } struct dir { DIR *d; dir( std::string p ) { d = opendir( p.c_str() ); if ( !d ) throw syserr( "error opening directory", p ); } ~dir() { closedir( d ); } }; typedef std::vector< std::string > Listing; inline void fsync_name( std::string n ) { int fd = open( n.c_str(), O_WRONLY ); if ( fd >= 0 ) { fsync( fd ); close( fd ); } } inline Listing listdir( std::string p, bool recurse = false, std::string prefix = "" ) { Listing r; dir d( p ); #if !defined(__GLIBC__) || (__GLIBC__ < 2) || ((__GLIBC__ == 2) && (__GLIBC_MINOR__ < 23)) /* readdir_r is deprecated with newer GLIBC */ struct dirent entry, *iter = 0; while ( (errno = readdir_r( d.d, &entry, &iter )) == 0 && iter ) { std::string ename( entry.d_name ); #else struct dirent *entry; errno = 0; while ( (entry = readdir( d.d )) ) { std::string ename( entry->d_name ); #endif if ( ename == "." || ename == ".." 
) continue; if ( recurse ) { struct stat64 stat; std::string s = p + "/" + ename; if ( ::stat64( s.c_str(), &stat ) == -1 ) { errno = 0; continue; } if ( S_ISDIR(stat.st_mode) ) { Listing sl = listdir( s, true, prefix + ename + "/" ); for ( Listing::iterator i = sl.begin(); i != sl.end(); ++i ) r.push_back( prefix + *i ); } else r.push_back( prefix + ename ); } else r.push_back( ename ); }; if ( errno != 0 ) throw syserr( "error reading directory", p ); return r; } /* END remove this section */ struct Journal { enum R { STARTED, RETRIED, UNKNOWN, FAILED, INTERRUPTED, KNOWNFAIL, PASSED, SKIPPED, TIMEOUT, WARNED, }; friend std::ostream &operator<<( std::ostream &o, R r ) { switch ( r ) { case STARTED: return o << "started"; case RETRIED: return o << "retried"; case FAILED: return o << "failed"; case INTERRUPTED: return o << "interrupted"; case PASSED: return o << "passed"; case SKIPPED: return o << "skipped"; case TIMEOUT: return o << "timeout"; case WARNED: return o << "warnings"; default: return o << "unknown"; } } friend std::istream &operator>>( std::istream &i, R &r ) { std::string x; i >> x; r = UNKNOWN; if ( x == "started" ) r = STARTED; if ( x == "retried" ) r = RETRIED; if ( x == "failed" ) r = FAILED; if ( x == "interrupted" ) r = INTERRUPTED; if ( x == "passed" ) r = PASSED; if ( x == "skipped" ) r = SKIPPED; if ( x == "timeout" ) r = TIMEOUT; if ( x == "warnings" ) r = WARNED; return i; } template< typename S, typename T > friend std::istream &operator>>( std::istream &i, std::pair< S, T > &r ) { return i >> r.first >> r.second; } typedef std::map< std::string, R > Status; Status status, written; std::string location, list; int timeouts; void append( std::string path ) { std::ofstream of( path.c_str(), std::fstream::app ); Status::iterator writ; for ( Status::iterator i = status.begin(); i != status.end(); ++i ) { writ = written.find( i->first ); if ( writ == written.end() || writ->second != i->second ) of << i->first << " " << i->second << std::endl; } written = status; of.close(); } void write( std::string path ) { std::ofstream of( path.c_str() ); for ( Status::iterator i = status.begin(); i != status.end(); ++i ) of << i->first << " " << i->second << std::endl; of.close(); } void sync() { append( location ); fsync_name( location ); write ( list ); fsync_name( list ); } void started( std::string n ) { if ( status.count( n ) && status[ n ] == STARTED ) status[ n ] = RETRIED; else status[ n ] = STARTED; sync(); } void done( std::string n, R r ) { status[ n ] = r; if ( r == TIMEOUT ) ++ timeouts; else timeouts = 0; sync(); } bool done( std::string n ) { if ( !status.count( n ) ) return false; return status[ n ] != STARTED && status[ n ] != INTERRUPTED; } int count( R r ) { int c = 0; for ( Status::iterator i = status.begin(); i != status.end(); ++i ) if ( i->second == r ) ++ c; return c; } void banner() { std::cout << std::endl << "### " << status.size() << " tests: " << count( PASSED ) << " passed, " << count( SKIPPED ) << " skipped, " << count( TIMEOUT ) << " timed out, " << count( WARNED ) << " warned, " << count( FAILED ) << " failed" << std::endl; } void details() { for ( Status::iterator i = status.begin(); i != status.end(); ++i ) if ( i->second != PASSED ) std::cout << i->second << ": " << i->first << std::endl; } void read( std::string n ) { std::ifstream ifs( n.c_str() ); typedef std::istream_iterator< std::pair< std::string, R > > It; for ( It i( ifs ); i != It(); ++i ) status[ i->first ] = i->second; } void read() { read( location ); } Journal( std::string dir ) : 
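/* Editor's note (illustrative, not part of the original source): the
 * journal and list files written by append()/write() below are plain
 * text, one "test-id status" pair per line, where the id is
 * "flavour:script", e.g.
 *
 *     ndev-vanilla:shell/lvcreate-mirror.sh passed
 *     ndev-vanilla:shell/lvresize-full.sh timeout
 *
 * The example names are taken from elsewhere in this archive; the status
 * words come from the operator<< overload for Journal::R.
 */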
location( dir + "/journal" ), list( dir + "/list" ), timeouts( 0 ) {} }; struct TimedBuffer { typedef std::pair< time_t, std::string > Line; std::deque< Line > data; Line incomplete; bool stamp; Line shift( bool force = false ) { Line result = std::make_pair( 0, "" ); if ( force && data.empty() ) std::swap( result, incomplete ); else { result = data.front(); data.pop_front(); } return result; } void push( std::string buf ) { time_t now = stamp ? time( 0 ) : 0; std::string::iterator b = buf.begin(), e = buf.begin(); while ( e != buf.end() ) { e = std::find( b, buf.end(), '\n' ); incomplete.second += std::string( b, e ); if ( !incomplete.first ) incomplete.first = now; if ( e != buf.end() ) { incomplete.second += "\n"; data.push_back( incomplete ); if (incomplete.second[0] == '#') { /* Disable timing between '## 0 STACKTRACE' & '## teardown' keywords */ if (incomplete.second.find("# 0 STACKTRACE", 1) != std::string::npos || incomplete.second.find("# timing off", 1) != std::string::npos) { stamp = false; now = 0; } else if (incomplete.second.find("# teardown", 1) != std::string::npos || incomplete.second.find("# timing on", 1) != std::string::npos) { stamp = true; now = time( 0 ); } } incomplete = std::make_pair( now, "" ); } b = (e == buf.end() ? e : e + 1); } } bool empty( bool force = false ) { if ( force && !incomplete.second.empty() ) return false; return data.empty(); } TimedBuffer() : stamp(true) {} }; struct Sink { virtual void outline( bool ) {} virtual void push( std::string x ) = 0; virtual void sync( bool ) {} virtual ~Sink() {} }; struct Substitute { typedef std::map< std::string, std::string > Map; std::string testdir; // replace testdir first std::string prefix; std::string map( std::string line ) { if ( std::string( line, 0, 9 ) == "@TESTDIR=" ) testdir = std::string( line, 9, line.length() - 10 ); // skip \n else if ( std::string( line, 0, 8 ) == "@PREFIX=" ) prefix = std::string( line, 8, line.length() - 9 ); // skip \n else { size_t off; if (!testdir.empty()) while ( (off = line.find( testdir )) != std::string::npos ) line.replace( off, testdir.length(), "@TESTDIR@" ); if (!prefix.empty()) while ( (off = line.find( prefix )) != std::string::npos ) line.replace( off, prefix.length(), "@PREFIX@" ); } return line; } }; struct Format { time_t start; Substitute subst; std::string format( TimedBuffer::Line l ) { std::stringstream result; if ( l.first >= start ) { time_t rel = l.first - start; result << "[" << std::setw( 2 ) << std::setfill( ' ' ) << rel / 60 << ":" << std::setw( 2 ) << std::setfill( '0' ) << rel % 60 << "] "; } result << subst.map( l.second ); return result.str(); } Format() : start( time( 0 ) ) {} }; struct BufSink : Sink { TimedBuffer data; Format fmt; virtual void push( std::string x ) { data.push( x ); } void dump( std::ostream &o ) { o << std::endl; while ( !data.empty( true ) ) o << "| " << fmt.format( data.shift( true ) ); } }; struct FdSink : Sink { int fd; TimedBuffer stream; Format fmt; bool killed; virtual void outline( bool force ) { TimedBuffer::Line line = stream.shift( force ); std::string out = fmt.format( line ); write( fd, out.c_str(), out.length() ); } virtual void sync( bool force ) { if ( killed ) return; while ( !stream.empty( force ) ) outline( force ); } virtual void push( std::string x ) { if ( !killed ) stream.push( x ); } FdSink( int _fd ) : fd( _fd ), killed( false ) {} }; struct FileSink : FdSink { std::string file; FileSink( std::string n ) : FdSink( -1 ), file( n ) {} void sync( bool force ) { if ( fd < 0 && !killed ) { #ifdef 
O_CLOEXEC fd = open( file.c_str(), O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644 ); #else fd = open( file.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0644 ); if ( fcntl( fd, F_SETFD, FD_CLOEXEC ) < 0 ) perror("failed to set FD_CLOEXEC on file"); #endif if ( fd < 0 ) killed = true; } FdSink::sync( force ); } ~FileSink() { if ( fd >= 0 ) { fsync( fd ); close( fd ); } } }; #define BRICK_SYSLOG_ACTION_READ 2 #define BRICK_SYSLOG_ACTION_READ_ALL 3 #define BRICK_SYSLOG_ACTION_READ_CLEAR 4 #define BRICK_SYSLOG_ACTION_CLEAR 5 #define BRICK_SYSLOG_ACTION_SIZE_UNREAD 9 #define BRICK_SYSLOG_ACTION_SIZE_BUFFER 10 struct Source { int fd; virtual void sync( Sink *sink ) { ssize_t sz; char buf[ 128 * 1024 ]; if ( (sz = read(fd, buf, sizeof(buf) - 1)) > 0 ) sink->push( std::string( buf, sz ) ); /* * On RHEL5 box this code busy-loops here, while * parent process no longer writes anything. * * Unclear why 'select()' is anouncing available * data, while we read 0 bytes with errno == 0. * * Temporarily resolved with usleep() instead of loop. */ if (!sz && (!errno || errno == EINTR)) usleep(50000); if ( sz < 0 && errno != EAGAIN ) throw syserr( "reading pipe" ); } virtual void reset() {} virtual int fd_set_( fd_set *set ) { if ( fd >= 0 ) { FD_SET( fd, set ); return fd; } else return -1; } Source( int _fd = -1 ) : fd( _fd ) {} virtual ~Source() { if ( fd >= 0 ) ::close( fd ); } }; struct FileSource : Source { std::string file; FileSource( std::string n ) : Source( -1 ), file( n ) {} int fd_set_( ::fd_set * ) { return -1; } /* reading a file is always non-blocking */ void sync( Sink *s ) { if ( fd < 0 ) { #ifdef O_CLOEXEC fd = open( file.c_str(), O_RDONLY | O_CLOEXEC | O_NONBLOCK ); #else fd = open( file.c_str(), O_RDONLY | O_NONBLOCK ); if ( fcntl( fd, F_SETFD, FD_CLOEXEC ) < 0 ) perror("failed to set FD_CLOEXEC on file"); #endif if ( fd >= 0 ) lseek( fd, 0, SEEK_END ); } if ( fd >= 0 ) Source::sync( s ); } }; struct KMsg : Source { bool can_clear; ssize_t buffer_size; KMsg() : can_clear( strcmp(getenv("LVM_TEST_CAN_CLOBBER_DMESG") ? 
: "0", "0") ), buffer_size(128 * 1024) { #ifdef __unix struct utsname uts; unsigned kmaj, kmin, krel; const char *read_msg = "/dev/kmsg"; // Can't use kmsg on kernels pre 3.5, read /var/log/messages if ( ( ::uname(&uts) == 0 ) && ( ::sscanf( uts.release, "%u.%u.%u", &kmaj, &kmin, &krel ) == 3 ) && ( ( kmaj < 3 ) || ( ( kmaj == 3 ) && ( kmin < 5 ) ) ) ) can_clear = false, read_msg = "/var/log/messages"; if ( ( fd = open(read_msg, O_RDONLY | O_NONBLOCK)) < 0 ) { if ( errno != ENOENT ) /* Older kernels (<3.5) do not support /dev/kmsg */ fprintf( stderr, "open log %s %s\n", read_msg, strerror( errno ) ); if ( can_clear && ( klogctl( BRICK_SYSLOG_ACTION_CLEAR, 0, 0 ) < 0 ) ) can_clear = false; } else if ( lseek( fd, 0L, SEEK_END ) == (off_t) -1 ) { fprintf( stderr, "lseek log %s %s\n", read_msg, strerror( errno ) ); close(fd); fd = -1; } #endif } bool dev_kmsg() { return fd >= 0; } void sync( Sink *s ) { #ifdef __unix ssize_t sz; char buf[ buffer_size ]; if ( dev_kmsg() ) { while ( (sz = ::read(fd, buf, buffer_size)) > 0 ) s->push( std::string( buf, sz ) ); } else if ( can_clear ) { while ( ( sz = klogctl( BRICK_SYSLOG_ACTION_READ_CLEAR, buf, ( int) buffer_size ) ) > 0 ) s->push( std::string( buf, sz ) ); if ( sz < 0 && errno == EPERM ) can_clear = false; } #endif } }; struct Observer : Sink { TimedBuffer stream; bool warnings; Observer() : warnings( false ) {} void push( std::string s ) { stream.push( s ); } void sync( bool force ) { while ( !stream.empty( force ) ) { TimedBuffer::Line line = stream.shift( force ); if ( line.second.find( "TEST WARNING" ) != std::string::npos ) warnings = true; } } }; struct IO : Sink { typedef std::vector< Sink* > Sinks; typedef std::vector< Source* > Sources; mutable Sinks sinks; mutable Sources sources; Observer *_observer; virtual void push( std::string x ) { for ( Sinks::iterator i = sinks.begin(); i != sinks.end(); ++i ) (*i)->push( x ); } void sync( bool force ) { for ( Sources::iterator i = sources.begin(); i != sources.end(); ++i ) (*i)->sync( this ); for ( Sinks::iterator i = sinks.begin(); i != sinks.end(); ++i ) (*i)->sync( force ); } void close() { for ( Sources::iterator i = sources.begin(); i != sources.end(); ++i ) delete *i; sources.clear(); } int fd_set_( fd_set *set ) { int max = -1; for ( Sources::iterator i = sources.begin(); i != sources.end(); ++i ) max = std::max( (*i)->fd_set_( set ), max ); return max + 1; } Observer &observer() { return *_observer; } IO() { clear(); } /* a stealing copy constructor */ IO( const IO &io ) : sinks( io.sinks ), sources( io.sources ), _observer( io._observer ) { io.sinks.clear(); io.sources.clear(); } IO &operator= ( const IO &io ) { this->~IO(); return *new (this) IO( io ); } void clear( int to_push = 1 ) { for ( Sinks::iterator i = sinks.begin(); i != sinks.end(); ++i ) delete *i; sinks.clear(); if ( to_push ) sinks.push_back( _observer = new Observer ); } ~IO() { close(); clear(0); } }; namespace { pid_t kill_pid = 0; bool fatal_signal = false; bool interrupt = false; } struct Options { bool verbose, batch, interactive, cont, fatal_timeouts, kmsg; std::string testdir, outdir, workdir, heartbeat; std::vector< std::string > flavours, filter, skip, watch; std::string flavour_envvar; int timeout; Options() : verbose( false ), batch( false ), interactive( false ), cont( false ), fatal_timeouts( false ), kmsg( true ), timeout( 180 ) {} }; struct TestProcess { std::string filename; bool interactive; int fd; void exec() __attribute__ ((noreturn)) { assert( fd >= 0 ); if ( !interactive ) { int devnull = ::open( 
"/dev/null", O_RDONLY ); if ( devnull >= 0 ) { /* gcc really doesn't like to not have stdin */ dup2( devnull, STDIN_FILENO ); close( devnull ); } else close( STDIN_FILENO ); dup2( fd, STDOUT_FILENO ); dup2( fd, STDERR_FILENO ); close( fd ); } setpgid( 0, 0 ); execlp( "bash", "bash", "-noprofile", "-norc", filename.c_str(), NULL ); perror( "execlp" ); _exit( 202 ); } TestProcess( std::string file ) : filename( file ), interactive( false ), fd( -1 ) {} }; struct TestCase { TestProcess child; std::string name, flavour; IO io; BufSink *iobuf; struct rusage usage; int status; bool timeout; pid_t pid; time_t start, end, silent_start, last_update, last_heartbeat; Options options; Journal *journal; std::string pretty() { if ( options.batch ) return flavour + ": " + name; return "[" + flavour + "] " + name; } std::string id() { return flavour + ":" + name; } void pipe() { int fds[2]; if (socketpair( PF_UNIX, SOCK_STREAM, 0, fds )) { perror("socketpair"); exit(201); } #if 0 if (fcntl( fds[0], F_SETFL, O_NONBLOCK ) == -1) { perror("fcntl on socket"); exit(202); } #endif io.sources.push_back( new Source( fds[0] ) ); child.fd = fds[1]; child.interactive = options.interactive; } bool monitor() { end = time( 0 ); /* heartbeat */ if ( end - last_heartbeat >= 20 && !options.heartbeat.empty() ) { std::ofstream hb( options.heartbeat.c_str(), std::fstream::app ); hb << "."; hb.close(); fsync_name( options.heartbeat ); last_heartbeat = end; } if ( wait4(pid, &status, WNOHANG, &usage) != 0 ) { io.sync( true ); return false; } /* kill off tests after a timeout silence */ if ( !options.interactive ) if ( end - silent_start > options.timeout ) { kill( pid, SIGINT ); sleep( 5 ); /* wait a bit for a reaction */ if ( waitpid( pid, &status, WNOHANG ) == 0 ) { system( "echo t > /proc/sysrq-trigger 2> /dev/null" ); kill( -pid, SIGKILL ); waitpid( pid, &status, 0 ); } timeout = true; io.sync( true ); return false; } struct timeval wait; fd_set set; FD_ZERO( &set ); int nfds = io.fd_set_( &set ); wait.tv_sec = 0; wait.tv_usec = 500000; /* timeout 0.5s */ if ( !options.verbose && !options.interactive && !options.batch ) { if ( end - last_update >= 1 ) { progress( Update ) << tag( "running" ) << pretty() << " " << end - start << std::flush; last_update = end; } } if ( select( nfds, &set, NULL, NULL, &wait ) > 0 ) { silent_start = end; /* something happened */ io.sync( false ); } return true; } std::string timefmt( time_t t ) { std::stringstream ss; ss << t / 60 << ":" << std::setw( 2 ) << std::setfill( '0' ) << t % 60; return ss.str(); } std::string rusage() { std::stringstream ss; time_t wall = end - start, user = usage.ru_utime.tv_sec, system = usage.ru_stime.tv_sec; size_t rss = usage.ru_maxrss / 1024, inb = usage.ru_inblock / 100, outb = usage.ru_oublock / 100; size_t inb_10 = inb % 10, outb_10 = outb % 10; inb /= 10; outb /= 10; ss << timefmt( wall ) << " wall " << timefmt( user ) << " user " << timefmt( system ) << " sys " << std::setw( 3 ) << rss << "M RSS | " << "IOPS: " << std::setw( 5 ) << inb << "." << inb_10 << "K in " << std::setw( 5 ) << outb << "." << outb_10 << "K out"; return ss.str(); } std::string tag( std::string n ) { if ( options.batch ) return "## "; size_t pad = n.length(); pad = (pad < 12) ? 
12 - pad : 0; return "### " + std::string( pad, ' ' ) + n + ": "; } std::string tag( Journal::R r ) { std::stringstream s; s << r; return tag( s.str() ); } enum P { First, Update, Last }; std::ostream &progress( P p = Last ) { static struct : std::streambuf {} buf; static std::ostream null(&buf); if ( options.batch && p == First ) return std::cout; if ( isatty( STDOUT_FILENO ) && !options.batch ) { if ( p != First ) return std::cout << "\r"; return std::cout; } if ( p == Last ) return std::cout; return null; } void parent() { ::close( child.fd ); setupIO(); journal->started( id() ); silent_start = start = time( 0 ); progress( First ) << tag( "running" ) << pretty() << std::flush; if ( options.verbose || options.interactive ) progress() << std::endl; while ( monitor() ) /* empty */ ; Journal::R r = Journal::UNKNOWN; if ( timeout ) { r = Journal::TIMEOUT; } else if ( WIFEXITED( status ) ) { if ( WEXITSTATUS( status ) == 0 ) r = Journal::PASSED; else if ( WEXITSTATUS( status ) == 200 ) r = Journal::SKIPPED; else r = Journal::FAILED; } else if ( interrupt && WIFSIGNALED( status ) && WTERMSIG( status ) == SIGINT ) r = Journal::INTERRUPTED; else r = Journal::FAILED; if ( r == Journal::PASSED && io.observer().warnings ) r = Journal::WARNED; io.close(); if ( iobuf && ( r == Journal::FAILED || r == Journal::TIMEOUT ) ) iobuf->dump( std::cout ); journal->done( id(), r ); if ( options.batch ) { int spaces = std::max( 64 - int(pretty().length()), 0 ); progress( Last ) << " " << std::string( spaces, '.' ) << " " << std::left << std::setw( 9 ) << std::setfill( ' ' ) << r; if ( r != Journal::SKIPPED ) progress( First ) << " " << rusage(); progress( Last ) << std::endl; } else progress( Last ) << tag( r ) << pretty() << std::endl; io.clear(); } void run() { pipe(); pid = kill_pid = fork(); if (pid < 0) { perror("Fork failed."); exit(201); } else if (pid == 0) { io.close(); chdir( options.workdir.c_str() ); if ( !options.flavour_envvar.empty() ) setenv( options.flavour_envvar.c_str(), flavour.c_str(), 1 ); child.exec(); } else { parent(); } } void setupIO() { iobuf = 0; if ( options.verbose || options.interactive ) io.sinks.push_back( new FdSink( 1 ) ); else if ( !options.batch ) io.sinks.push_back( iobuf = new BufSink() ); std::string n = id(); std::replace( n.begin(), n.end(), '/', '_' ); std::string fn = options.outdir + "/" + n + ".txt"; io.sinks.push_back( new FileSink( fn ) ); for ( std::vector< std::string >::iterator i = options.watch.begin(); i != options.watch.end(); ++i ) io.sources.push_back( new FileSource( *i ) ); if ( options.kmsg ) io.sources.push_back( new KMsg ); } TestCase( Journal &j, Options opt, std::string path, std::string _name, std::string _flavour ) : child( path ), name( _name ), flavour( _flavour ), timeout( false ), last_update( 0 ), last_heartbeat( 0 ), options( opt ), journal( &j ) { } }; struct Main { bool die; time_t start; typedef std::vector< TestCase > Cases; typedef std::vector< std::string > Flavours; Journal journal; Options options; Cases cases; void setup() { bool filter; Listing l = listdir( options.testdir, true ); std::sort( l.begin(), l.end() ); for ( Flavours::iterator flav = options.flavours.begin(); flav != options.flavours.end(); ++flav ) { for ( Listing::iterator i = l.begin(); i != l.end(); ++i ) { if ( ( i->length() < 3 ) || ( i->substr( i->length() - 3, i->length() ) != ".sh" ) ) continue; if ( i->substr( 0, 4 ) == "lib/" ) continue; if (!options.filter.empty()) { filter = true; for ( std::vector< std::string >::iterator filt = options.filter.begin(); 
filt != options.filter.end(); ++filt ) { if ( i->find( *filt ) != std::string::npos ) { filter = false; break; } } if ( filter ) continue; } if (!options.skip.empty()) { filter = false; for ( std::vector< std::string >::iterator filt = options.skip.begin(); filt != options.skip.end(); ++filt ) { if ( i->find( *filt ) != std::string::npos ) { filter = true; break; } } if ( filter ) continue; } cases.push_back( TestCase( journal, options, options.testdir + *i, *i, *flav ) ); cases.back().options = options; } } if ( options.cont ) journal.read(); else ::unlink( journal.location.c_str() ); } int run() { setup(); start = time( 0 ); std::cerr << "running " << cases.size() << " tests" << std::endl; for ( Cases::iterator i = cases.begin(); i != cases.end(); ++i ) { if ( options.cont && journal.done( i->id() ) ) continue; i->run(); if ( options.fatal_timeouts && journal.timeouts >= 2 ) { journal.started( i->id() ); // retry the test on --continue std::cerr << "E: Hit 2 timeouts in a row with --fatal-timeouts" << std::endl; std::cerr << "Suspending (please restart the VM)." << std::endl; sleep( 3600 ); die = 1; } if ( time(0) - start > (TEST_SUITE_TIMEOUT * 3600) ) { std::cerr << TEST_SUITE_TIMEOUT << " hours passed, giving up..." << std::endl; die = 1; } if ( die || fatal_signal ) break; } journal.banner(); if ( die || fatal_signal ) return 1; return journal.count( Journal::FAILED ) || journal.count( Journal::TIMEOUT ) ? 1 : 0; } Main( Options o ) : die( false ), journal( o.outdir ), options( o ) {} }; namespace { void handler( int sig ) { signal( sig, SIG_DFL ); /* die right away next time */ if ( kill_pid > 0 ) kill( -kill_pid, sig ); fatal_signal = true; if ( sig == SIGINT ) interrupt = true; } void setup_handlers() { /* set up signal handlers */ for ( int i = 0; i <= 32; ++i ) switch (i) { case SIGCHLD: case SIGWINCH: case SIGURG: case SIGKILL: case SIGSTOP: break; default: signal(i, handler); } } } /* TODO remove in favour of brick-commandline.h */ struct Args { typedef std::vector< std::string > V; V args; Args( int argc, const char **argv ) { for ( int i = 1; i < argc; ++ i ) args.push_back( argv[ i ] ); } bool has( std::string fl ) { return std::find( args.begin(), args.end(), fl ) != args.end(); } // TODO: This does not handle `--option=VALUE`: std::string opt( std::string fl ) { V::iterator i = std::find( args.begin(), args.end(), fl ); if ( i == args.end() || i + 1 == args.end() ) return ""; return *(i + 1); } }; namespace { bool hasenv( const char *name ) { const char *v = getenv( name ); if ( !v ) return false; if ( strlen( v ) == 0 || !strcmp( v, "0" ) ) return false; return true; } template< typename C > void split( std::string s, C &c ) { std::stringstream ss( s ); std::string item; while ( std::getline( ss, item, ',' ) ) c.push_back( item ); } } const char *DEF_FLAVOURS="ndev-vanilla"; std::string resolve_path(std::string a_path, const char *default_path=".") { char temp[PATH_MAX]; const char *p; p = a_path.empty() ? 
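/* Editor's note (illustrative, not part of the original source): a
 * typical invocation of the runner built around run() below, using only
 * options documented in its help text, might look like:
 *
 *     lvm2-testsuite --flavours ndev-vanilla,udev-lvmpolld \
 *                    --only lvresize,lvcreate-mirror --outdir results
 *
 * The flavour names correspond to test/lib/flavour-*.sh files; the test
 * selection strings are only examples.
 */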
default_path : a_path.c_str(); if ( !realpath( p, temp ) ) throw syserr( "Failed to resolve path", p ); return temp; } static int run( int argc, const char **argv, std::string fl_envvar = "TEST_FLAVOUR" ) { Args args( argc, argv ); Options opt; if ( args.has( "--help" ) ) { std::cout << " lvm2-testsuite - Run a lvm2 testsuite.\n\n" "lvm2-testsuite" "\n\t" " [--flavours FLAVOURS]" " [--only TESTS]" "\n\t" " [--outdir OUTDIR]" " [--testdir TESTDIR]" " [--workdir WORKDIR]" "\n\t" " [--batch|--verbose|--interactive]" "\n\t" " [--fatal-timeouts]" " [--continue]" " [--heartbeat]" " [--watch WATCH]" " [--timeout TIMEOUT]" " [--nokmsg]\n\n" /* TODO: list of flavours: "lvm2-testsuite" "\n\t" " --list-flavours [--testdir TESTDIR]" */ "\n\n" "OPTIONS:\n\n" // TODO: looks like this could be worth a man page... "Filters:\n" " --flavours FLAVOURS\n\t\t- comma separated list of flavours to run.\n\t\t For the list of flavours see `$TESTDIR/lib/flavour-*`.\n\t\t Default: \"" << DEF_FLAVOURS << "\".\n" " --only TESTS\t- comma separated list of tests to run. Default: All tests.\n" "\n" "Directories:\n" " --testdir TESTDIR\n\t\t- directory where tests reside. Default: \"" TESTSUITE_DATA "\".\n" " --workdir WORKDIR\n\t\t- directory to change to when running tests.\n\t\t This is directory containing testing libs. Default: TESTDIR.\n" " --outdir OUTDIR\n\t\t- directory where all the output files should go. Default: \".\".\n" "\n" "Formatting:\n" " --batch\t- Brief format for automated runs.\n" " --verbose\t- More verbose format for automated runs displaying progress on stdout.\n" " --interactive\t- Verbose format for interactive runs.\n" "\n" "Other:\n" " --fatal-timeouts\n\t\t- exit after encountering 2 timeouts in a row.\n" " --continue\t- If set append to journal. Otherwise it will be overwritten.\n" " --heartbeat HEARTBEAT\n\t\t- Name of file to update periodicaly while running.\n" " --watch WATCH\t- Comma separated list of files to watch and print.\n" " --timeout TIMEOUT\n\t\t- Period of silence in seconds considered a timeout. Default: 180.\n" " --nokmsg\t- Do not try to read kernel messages.\n" "\n\n" "ENV.VARIABLES:\n\n" " T\t\t- see --only\n" " INTERACTIVE\t- see --interactive\n" " VERBOSE\t- see --verbose\n" " BATCH\t\t- see --batch\n" " LVM_TEST_CAN_CLOBBER_DMESG\n\t\t- when set and non-empty tests are allowed to flush\n\t\t kmsg in an attempt to read it." "\n\n" "FORMATS:\n\n" "When multiple formats are specified interactive overrides verbose\n" "which overrides batch. 
Command line options override environment\n" "variables.\n\n" ; return 0; } opt.flavour_envvar = fl_envvar; if ( args.has( "--continue" ) ) opt.cont = true; if ( args.has( "--only" ) ) split( args.opt( "--only" ), opt.filter ); else if ( hasenv( "T" ) ) split( getenv( "T" ), opt.filter ); if ( args.has( "--skip" ) ) split( args.opt( "--skip" ), opt.skip ); else if ( hasenv( "S" ) ) split( getenv( "S" ), opt.skip ); if ( args.has( "--fatal-timeouts" ) ) opt.fatal_timeouts = true; if ( args.has( "--heartbeat" ) ) opt.heartbeat = args.opt( "--heartbeat" ); if ( args.has( "--batch" ) || args.has( "--verbose" ) || args.has( "--interactive" ) ) { if ( args.has( "--batch" ) ) { opt.verbose = false; opt.batch = true; } if ( args.has( "--verbose" ) ) { opt.batch = false; opt.verbose = true; } if ( args.has( "--interactive" ) ) { opt.verbose = false; opt.batch = false; opt.interactive = true; } } else { if ( hasenv( "BATCH" ) ) { opt.verbose = false; opt.batch = true; } if ( hasenv( "VERBOSE" ) ) { opt.batch = false; opt.verbose = true; } if ( hasenv( "INTERACTIVE" ) ) { opt.verbose = false; opt.batch = false; opt.interactive = true; } } if ( args.has( "--flavours" ) ) split( args.opt( "--flavours" ), opt.flavours ); else split( DEF_FLAVOURS, opt.flavours ); if ( args.has( "--watch" ) ) split( args.opt( "--watch" ), opt.watch ); if ( args.has( "--timeout" ) ) opt.timeout = atoi( args.opt( "--timeout" ).c_str() ); if ( args.has( "--nokmsg" ) ) opt.kmsg = false; opt.testdir = resolve_path( args.opt( "--testdir" ), TESTSUITE_DATA ) + "/"; opt.workdir = resolve_path( args.opt( "--workdir" ), opt.testdir.c_str() ); opt.outdir = resolve_path( args.opt( "--outdir" ), "." ); setup_handlers(); Main main( opt ); return main.run(); } } } #endif #ifdef BRICK_DEMO int main( int argc, const char **argv ) { return brick::shelltest::run( argc, argv ); } #endif // vim: syntax=cpp tabstop=4 shiftwidth=4 expandtab LVM2.2.02.176/test/lib/flavour-udev-lvmpolld.sh0000644000000000000120000000012113176752421017645 0ustar rootwheelexport LVM_TEST_LOCKING=1 export LVM_TEST_LVMPOLLD=1 export LVM_TEST_DEVDIR=/dev LVM2.2.02.176/test/lib/flavour-ndev-cluster-lvmpolld.sh0000644000000000000120000000006513176752421021324 0ustar rootwheelexport LVM_TEST_LOCKING=3 export LVM_TEST_LVMPOLLD=1 LVM2.2.02.176/test/lib/flavour-udev-cluster.sh0000644000000000000120000000011513176752421017500 0ustar rootwheelexport LVM_TEST_LOCKING=3 export LVM_TEST_DEVDIR=/dev export LVM_TEST_LVM1=1 LVM2.2.02.176/test/lib/get.sh0000644000000000000120000000547713176752421014201 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2011-2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # get.sh: get various values from volumes # # USAGE: # get pv_field PV field [pvs params] # get vg_field VG field [vgs params] # get lv_field LV field [lvs params] # # get lv_devices LV [lvs params] test -z "$BASH" || set -e -o pipefail # trims only leading prefix and suffix trim_() { rm -f debug.log # drop log, command was ok local var=${1%${1##*[! ]}} # remove trailing space characters echo "${var#${var%%[! 
]*}}" # remove leading space characters } pv_field() { local r r=$(pvs --config 'log{prefix=""}' --noheadings -o "$2" "${@:3}" "$1") trim_ "$r" } vg_field() { local r r=$(vgs --config 'log{prefix=""}' --noheadings -o "$2" "${@:3}" "$1") trim_ "$r" } lv_field() { local r r=$(lvs --config 'log{prefix=""}' --noheadings -o "$2" "${@:3}" "$1") trim_ "$r" } lv_first_seg_field() { local r r=$(lvs --config 'log{prefix=""}' --noheadings -o "$2" "${@:3}" "$1" | head -1) trim_ "$r" } lvh_field() { local r r=$(lvs -H --config 'log{prefix=""}' --noheadings -o "$2" "${@:3}" "$1") trim_ "$r" } lva_field() { local r r=$(lvs -a --config 'log{prefix=""}' --noheadings -o "$2" "${@:3}" "$1") trim_ "$r" } lv_devices() { lv_field "$1" devices -a "${@:2}" | sed 's/([^)]*)//g; s/,/\n/g' } lv_field_lv_() { lv_field "$1" "$2" -a --unbuffered | tr -d [] } lv_tree_devices_() { local lv="$1/$2" local type type=$(lv_field "$lv" segtype -a --unbuffered | head -n 1) #local orig #orig=$(lv_field_lv_ "$lv" origin) # FIXME: should we count in also origins ? #test -z "$orig" || lv_tree_devices_ $1 $orig case "$type" in linear|striped) lv_devices "$lv" ;; mirror|raid*) local log log=$(lv_field_lv_ "$lv" mirror_log) test -z "$log" || lv_tree_devices_ "$1" "$log" for i in $(lv_devices "$lv") do lv_tree_devices_ "$1" "$i"; done ;; thin) lv_tree_devices_ "$1" "$(lv_field_lv_ "$lv" pool_lv)" ;; thin-pool) lv_tree_devices_ "$1" "$(lv_field_lv_ "$lv" data_lv)" lv_tree_devices_ "$1" "$(lv_field_lv_ "$lv" metadata_lv)" ;; cache) lv_tree_devices_ "$1" "$(lv_devices "$lv")" ;; cache-pool) lv_tree_devices_ "$1" "$(lv_field_lv_ "$lv" data_lv)" lv_tree_devices_ "$1" "$(lv_field_lv_ "$lv" metadata_lv)" ;; esac } lv_tree_devices() { lv_tree_devices_ "$@" | sort | uniq } first_extent_sector() { pv_field "$@" pe_start --units s --nosuffix } #set -x unset LVM_VALGRIND unset LVM_LOG_FILE_EPOCH "$@" LVM2.2.02.176/test/lib/aux.sh0000644000000000000120000012546113176752421014213 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2011-2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA . lib/utils test -n "$BASH" && set -euE -o pipefail run_valgrind() { # Execute script which may use $TESTNAME for creating individual # log files for each execute command exec "${VALGRIND:-valgrind}" "$@" } expect_failure() { echo "TEST EXPECT FAILURE" } check_daemon_in_builddir() { # skip if we don't have our own deamon... if test -z "${installed_testsuite+varset}"; then (which "$1" 2>/dev/null | grep -q "$abs_builddir") || skip "$1 is not in executed path." fi rm -f debug.log strace.log } create_corosync_conf() { COROSYNC_CONF="/etc/corosync/corosync.conf" COROSYNC_NODE=$(hostname) if test -a "$COROSYNC_CONF"; then if ! grep "created by lvm test suite" "$COROSYNC_CONF"; then rm "$COROSYNC_CONF" else mv "$COROSYNC_CONF" "$COROSYNC_CONF.prelvmtest" fi fi sed -e "s/@LOCAL_NODE@/$COROSYNC_NODE/" lib/test-corosync-conf > "$COROSYNC_CONF" echo "created new $COROSYNC_CONF" } DLM_CONF="/etc/dlm/dlm.conf" create_dlm_conf() { if test -a "$DLM_CONF"; then if ! 
grep "created by lvm test suite" "$DLM_CONF"; then rm "$DLM_CONF" else mv "$DLM_CONF" "$DLM_CONF.prelvmtest" fi fi cp lib/test-dlm-conf "$DLM_CONF" echo "created new $DLM_CONF" } prepare_dlm() { if pgrep dlm_controld ; then echo "Cannot run while existing dlm_controld process exists." exit 1 fi if pgrep corosync; then echo "Cannot run while existing corosync process exists." exit 1 fi create_corosync_conf create_dlm_conf systemctl start corosync sleep 1 if ! pgrep corosync; then echo "Failed to start corosync." exit 1 fi systemctl start dlm sleep 1 if ! pgrep dlm_controld; then echo "Failed to start dlm." exit 1 fi } SANLOCK_CONF="/etc/sanlock/sanlock.conf" create_sanlock_conf() { if test -a "$SANLOCK_CONF"; then if ! grep "created by lvm test suite" "$SANLOCK_CONF"; then rm "$SANLOCK_CONF" else mv "$SANLOCK_CONF" "$SANLOCK_CONF.prelvmtest" fi fi cp lib/test-sanlock-conf "$SANLOCK_CONF" echo "created new $SANLOCK_CONF" } prepare_sanlock() { if pgrep sanlock ; then echo "Cannot run while existing sanlock process exists" exit 1 fi create_sanlock_conf systemctl start sanlock if ! pgrep sanlock; then echo "Failed to start sanlock" exit 1 fi } prepare_lvmlockd() { if pgrep lvmlockd ; then echo "Cannot run while existing lvmlockd process exists" exit 1 fi if test -n "$LVM_TEST_LOCK_TYPE_SANLOCK"; then # make check_lvmlockd_sanlock echo "starting lvmlockd for sanlock" lvmlockd -o 2 elif test -n "$LVM_TEST_LOCK_TYPE_DLM"; then # make check_lvmlockd_dlm echo "starting lvmlockd for dlm" lvmlockd elif test -n "$LVM_TEST_LVMLOCKD_TEST_DLM"; then # make check_lvmlockd_test echo "starting lvmlockd --test (dlm)" lvmlockd --test -g dlm elif test -n "$LVM_TEST_LVMLOCKD_TEST_SANLOCK"; then # FIXME: add option for this combination of --test and sanlock echo "starting lvmlockd --test (sanlock)" lvmlockd --test -g sanlock -o 2 else echo "not starting lvmlockd" exit 0 fi sleep 1 if ! pgrep lvmlockd; then echo "Failed to start lvmlockd" exit 1 fi } prepare_clvmd() { test "${LVM_TEST_LOCKING:-0}" -ne 3 && return # not needed if pgrep clvmd ; then skip "Cannot use fake cluster locking with real clvmd ($(pgrep clvmd)) running." fi check_daemon_in_builddir clvmd test -e "$DM_DEV_DIR/control" || dmsetup table >/dev/null # create control node # skip if singlenode is not compiled in (clvmd --help 2>&1 | grep "Available cluster managers" | grep -q "singlenode") || \ skip "Compiled clvmd does not support singlenode for testing." # lvmconf "activation/monitoring = 1" local run_valgrind="" test "${LVM_VALGRIND_CLVMD:-0}" -eq 0 || run_valgrind="run_valgrind" rm -f "$CLVMD_PIDFILE" echo "<======== Starting CLVMD ========>" echo -n "## preparing clvmd..." # lvs is executed from clvmd - use our version LVM_LOG_FILE_EPOCH=CLVMD LVM_LOG_FILE_MAX_LINES=1000000 LVM_BINARY=$(which lvm) $run_valgrind clvmd -Isinglenode -d 1 -f & echo $! > LOCAL_CLVMD for i in {1..100} ; do test "$i" -eq 100 && die "Startup of clvmd is too slow." test -e "$CLVMD_PIDFILE" && test -e "${CLVMD_PIDFILE%/*}/lvm/clvmd.sock" && break echo -n . sleep .1 done echo ok } prepare_dmeventd() { if pgrep dmeventd ; then skip "Cannot test dmeventd with real dmeventd ($(pgrep dmeventd)) running." fi check_daemon_in_builddir dmeventd lvmconf "activation/monitoring = 1" local run_valgrind="" test "${LVM_VALGRIND_DMEVENTD:-0}" -eq 0 || run_valgrind="run_valgrind" echo -n "## preparing dmeventd..." # LVM_LOG_FILE_EPOCH=DMEVENTD $run_valgrind dmeventd -fddddl "$@" 2>&1 & LVM_LOG_FILE_EPOCH=DMEVENTD $run_valgrind dmeventd -fddddl "$@" >debug.log_DMEVENTD_out 2>&1 & echo $! 
> LOCAL_DMEVENTD # FIXME wait for pipe in /var/run instead for i in {1..100} ; do test "$i" -eq 100 && die "Startup of dmeventd is too slow." test -e "${DMEVENTD_PIDFILE}" && break echo -n . sleep .1 done echo ok } prepare_lvmetad() { check_daemon_in_builddir lvmetad local run_valgrind="" test "${LVM_VALGRIND_LVMETAD:-0}" -eq 0 || run_valgrind="run_valgrind" kill_sleep_kill_ LOCAL_LVMETAD "${LVM_VALGRIND_LVMETAD:-0}" lvmconf "global/use_lvmetad = 1" "devices/md_component_detection = 0" # Default debug is "-l all" and could be override # by setting LVM_TEST_LVMETAD_DEBUG_OPTS before calling inittest. echo -n "## preparing lvmetad..." # shellcheck disable=SC2086 $run_valgrind lvmetad -f "$@" -s "$TESTDIR/lvmetad.socket" \ ${LVM_TEST_LVMETAD_DEBUG_OPTS--l all} & echo $! > LOCAL_LVMETAD for i in {1..100} ; do test "$i" -eq 100 && die "Startup of lvmetad is too slow." test -e "$TESTDIR/lvmetad.socket" && break echo -n . sleep .1; done echo ok } lvmetad_talk() { local use=nc if type -p socat >& /dev/null; then use=socat elif echo | not nc -U "$TESTDIR/lvmetad.socket" ; then echo "WARNING: Neither socat nor nc -U seems to be available." 1>&2 echo "## failed to contact lvmetad." return 1 fi if test "$use" = nc ; then nc -U "$TESTDIR/lvmetad.socket" else socat "unix-connect:$TESTDIR/lvmetad.socket" - fi | tee -a lvmetad-talk.txt } lvmetad_dump() { (echo 'request="dump"'; echo '##') | lvmetad_talk "$@" } notify_lvmetad() { if test -e LOCAL_LVMETAD; then # Ignore results here... LVM_LOG_FILE_EPOCH="" pvscan --cache "$@" || true rm -f debug.log fi } prepare_lvmpolld() { check_daemon_in_builddir lvmetad lvmconf "global/use_lvmpolld = 1" local run_valgrind="" test "${LVM_VALGRIND_LVMPOLLD:-0}" -eq 0 || run_valgrind="run_valgrind" kill_sleep_kill_ LOCAL_LVMPOLLD "${LVM_VALGRIND_LVMPOLLD:-0}" echo -n "## preparing lvmpolld..." $run_valgrind lvmpolld -f "$@" -s "$TESTDIR/lvmpolld.socket" -B "$TESTDIR/lib/lvm" -l all & echo $! > LOCAL_LVMPOLLD for i in {1..100} ; do test "$i" -eq 100 && die "Startup of lvmpolld is too slow." test -e "$TESTDIR/lvmpolld.socket" && break echo -n .; sleep .1; done # wait for the socket echo ok } lvmpolld_talk() { local use=nc if type -p socat >& /dev/null; then use=socat elif echo | not nc -U "$TESTDIR/lvmpolld.socket" ; then echo "WARNING: Neither socat nor nc -U seems to be available." 1>&2 echo "## failed to contact lvmpolld." return 1 fi if test "$use" = nc ; then nc -U "$TESTDIR/lvmpolld.socket" else socat "unix-connect:$TESTDIR/lvmpolld.socket" - fi | tee -a lvmpolld-talk.txt } lvmpolld_dump() { (echo 'request="dump"'; echo '##') | lvmpolld_talk "$@" } prepare_lvmdbusd() { local daemon rm -f debug.log_LVMDBUSD_out kill_sleep_kill_ LOCAL_LVMDBUSD 0 # FIXME: This is not correct! Daemon is auto started. echo -n "## checking lvmdbusd is NOT running..." if pgrep -f -l lvmdbusd | grep python3 ; then skip "Cannot run lvmdbusd while existing lvmdbusd process exists" fi echo ok # skip if we don't have our own lvmdbusd... if test -z "${installed_testsuite+varset}"; then # NOTE: this is always present - additional checks are needed: daemon="$abs_top_builddir/daemons/lvmdbusd/lvmdbusd" # Setup the python path so we can run export PYTHONPATH="$abs_top_builddir/daemons" else daemon=$(which lvmdbusd || :) fi test -x "$daemon" || skip "The lvmdbusd daemon is missing" which python3 >/dev/null || skip "Missing python3" python3 -c "import pyudev, dbus, gi.repository" || skip "Missing python modules" # Copy the needed file to run on the system bus if it doesn't # already exist if [ ! 
-f /etc/dbus-1/system.d/com.redhat.lvmdbus1.conf ]; then install -m 644 "$abs_top_builddir/scripts/com.redhat.lvmdbus1.conf" /etc/dbus-1/system.d/ fi echo "## preparing lvmdbusd..." lvmconf "global/notify_dbus = 1" "$daemon" --debug > debug.log_LVMDBUSD_out 2>&1 & local pid=$! sleep 1 echo -n "## checking lvmdbusd IS running..." if ! pgrep -f -l lvmdbusd | grep python3; then echo "Failed to start lvmdbusd daemon" return 1 fi # TODO: Is there a better check than wait 1 second and check pid? if ! ps -p $pid -o comm= >/dev/null || [[ $(ps -p $pid -o comm=) != python3 ]]; then echo "Failed to start lvmdbusd daemon" return 1 fi echo "$pid" > LOCAL_LVMDBUSD echo ok } # # Temporary solution to create some occupied thin metadata # This heavily depends on thin metadata output format to stay as is. # Currently it expects 2MB thin metadata and 200MB data volume size # Argument specifies how many devices should be created. # prepare_thin_metadata() { local devices=$1 local transaction_id=${2:-0} local data_block_size=${3:-128} local nr_data_blocks=${4:-3200} local i echo '' for i in $(seq 1 "$devices") do echo ' ' echo ' ' echo ' ' done echo "" } teardown_devs_prefixed() { local prefix=$1 local stray=${2:-0} local IFS=$IFS_NL local dm rm -rf "$TESTDIR/dev/$prefix*" # Resume suspended devices first for dm in $(dm_info suspended,name | grep "^Suspended:.*$prefix"); do echo "dmsetup resume \"${dm#Suspended:}\"" dmsetup clear "${dm#Suspended:}" dmsetup resume "${dm#Suspended:}" & done wait local mounts=( $(grep "$prefix" /proc/mounts | cut -d' ' -f1) ) if test ${#mounts[@]} -gt 0; then test "$stray" -eq 0 || echo "## removing stray mounted devices containing $prefix:" "${mounts[@]}" if umount -fl "${mounts[@]}"; then udev_wait fi fi # Remove devices, start with closed (sorted by open count) # Run 'dmsetup remove' in parallel rm -f REMOVE_FAILED #local listdevs=( $(dm_info name,open --sort open,name | grep "$prefix.*:0") ) #dmsetup remove --deferred ${listdevs[@]%%:0} || touch REMOVE_FAILED # 2nd. loop is trying --force removal which can possibly 'unstuck' some bloked operations for i in 0 1; do local num_remaining_devs=999999 local num_devs=0 test "$i" = 1 && test "$stray" = 0 && break # no stray device removal while :; do local cnt local sortby="name" local need_udev_wait=0 # HACK: sort also by minors - so we try to close 'possibly later' created device first test "$i" = 0 || sortby="-minor" # when nothing left for removal, escape both loops... dm_info name,open --separator ' ' --sort open,"$sortby" | grep "$prefix" > out || break 2 num_devs=$(wc -l < out) test "$num_devs" -lt "$num_remaining_devs" || break # not managed to reduce table size anymore test "$i" = 0 || echo "## removing $num_devs stray mapped devices with names beginning with $prefix: " while IFS=' ' read -r dm cnt; do if test "$i" = 0; then test "$cnt" -eq 0 || break # stop loop with 1st. opened device dmsetup remove "$dm" &>/dev/null || touch REMOVE_FAILED & else dmsetup remove -f "$dm" || true fi need_udev_wait=1 done < out test "$need_udev_wait" -eq 1 || break udev_wait wait num_remaining_devs=$num_devs done # looping till there are some removed devicess done } teardown_devs() { # Delete any remaining dm/udev semaphores teardown_udev_cookies test ! -f MD_DEV || cleanup_md_dev test ! 
-f DEVICES || teardown_devs_prefixed "$PREFIX" # NOTE: SCSI_DEBUG_DEV test must come before the LOOP test because # prepare_scsi_debug_dev() also sets LOOP to short-circuit prepare_loop() if test -f SCSI_DEBUG_DEV; then udev_wait test "${LVM_TEST_PARALLEL:-0}" -eq 1 || modprobe -r scsi_debug else test ! -f LOOP || losetup -d "$(< LOOP)" || true test ! -f LOOPFILE || rm -f "$(< LOOPFILE)" fi not diff LOOP BACKING_DEV >/dev/null 2>&1 || rm -f BACKING_DEV rm -f DEVICES LOOP # Attempt to remove any loop devices that failed to get torn down if earlier tests aborted test "${LVM_TEST_PARALLEL:-0}" -eq 1 || test -z "$COMMON_PREFIX" || { local stray_loops=( $(losetup -a | grep "$COMMON_PREFIX" | cut -d: -f1) ) test ${#stray_loops[@]} -eq 0 || { teardown_devs_prefixed "$COMMON_PREFIX" 1 echo "## removing stray loop devices containing $COMMON_PREFIX:" "${stray_loops[@]}" for i in "${stray_loops[@]}" ; do test ! -b "$i" || losetup -d "$i" || true ; done # Leave test when udev processed all removed devices udev_wait } } } kill_sleep_kill_() { local pidfile=$1 local slow=$2 if test -s "$pidfile" ; then pid=$(< "$pidfile") rm -f "$pidfile" kill -TERM "$pid" 2>/dev/null || return 0 if test "$slow" -eq 0 ; then sleep .1 ; else sleep 1 ; fi kill -KILL "$pid" 2>/dev/null || true local wait=0 while ps "$pid" > /dev/null && test "$wait" -le 10; do sleep .5 wait=$(( wait + 1 )) done fi } print_procs_by_tag_() { (ps -o pid,args ehax | grep -we"LVM_TEST_TAG=${1:-kill_me_$PREFIX}") || true } count_processes_with_tag() { print_procs_by_tag_ | wc -l } kill_tagged_processes() { local pid local wait # read uses all vars within pipe subshell local pids=() while read -r pid wait; do if test -n "$pid" ; then echo "## killing tagged process: $pid ${wait:0:120}..." kill -TERM "$pid" 2>/dev/null || true fi pids+=( "$pid" ) done < <(print_procs_by_tag_ "$@") test ${#pids[@]} -eq 0 && return # wait if process exited and eventually -KILL wait=0 for pid in "${pids[@]}" ; do while ps "$pid" > /dev/null && test "$wait" -le 10; do sleep .2 wait=$(( wait + 1 )) done test "$wait" -le 10 || kill -KILL "$pid" 2>/dev/null || true done } teardown() { local TEST_LEAKED_DEVICES="" echo -n "## teardown..." unset LVM_LOG_FILE_EPOCH if test -f TESTNAME ; then if test ! -f SKIP_THIS_TEST ; then # Evaluate left devices only for non-skipped tests TEST_LEAKED_DEVICES=$(dmsetup table | grep "$PREFIX" | grep -v "${PREFIX}pv") || true fi kill_tagged_processes if test -n "$LVM_TEST_LVMLOCKD_TEST" ; then echo "" echo "## stopping lvmlockd in teardown" killall lvmlockd sleep 1 killall lvmlockd || true sleep 1 killall -9 lvmlockd || true fi kill_sleep_kill_ LOCAL_LVMETAD "${LVM_VALGRIND_LVMETAD:-0}" dm_table | not grep -E -q "$vg|$vg1|$vg2|$vg3|$vg4" || { # Avoid activation of dmeventd if there is no pid cfg=$(test -s LOCAL_DMEVENTD || echo "--config activation{monitoring=0}") if dm_info suspended,name | grep -q "^Suspended:.*$PREFIX" ; then echo "## skipping vgremove, suspended devices detected." else vgremove -ff "$cfg" \ "$vg" "$vg1" "$vg2" "$vg3" "$vg4" &>/dev/null || rm -f debug.log strace.log fi } kill_sleep_kill_ LOCAL_LVMDBUSD 0 echo -n . kill_sleep_kill_ LOCAL_LVMPOLLD "${LVM_VALGRIND_LVMPOLLD:-0}" echo -n . kill_sleep_kill_ LOCAL_CLVMD "${LVM_VALGRIND_CLVMD:-0}" echo -n . kill_sleep_kill_ LOCAL_DMEVENTD "${LVM_VALGRIND_DMEVENTD:-0}" echo -n . test -d "$DM_DEV_DIR/mapper" && teardown_devs echo -n . 
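	# Note: TEST_LEAKED_DEVICES was captured above, before the daemons were
	# stopped and the devices torn down; any "$PREFIX" mapping still present
	# at that point (other than the test PVs themselves) is reported by the
	# check below and makes teardown() return 1, failing the test.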
fi test -z "$TEST_LEAKED_DEVICES" || { echo "## unexpected devices left dm table:" echo "$TEST_LEAKED_DEVICES" return 1 } if test "${LVM_TEST_PARALLEL:-0}" = 0 && test -z "$RUNNING_DMEVENTD"; then not pgrep dmeventd &>/dev/null # printed in STACKTRACE fi echo -n . test -n "$TESTDIR" && { cd "$TESTOLDPWD" || die "Failed to enter $TESTOLDPWD" # after this delete no further write is possible rm -rf "$TESTDIR" || echo BLA } echo "ok" } prepare_loop() { local size=${1=32} shift # all other params are directly passed to all 'losetup' calls local i local slash test -f LOOP && LOOP=$(< LOOP) echo -n "## preparing loop device..." # skip if prepare_scsi_debug_dev() was used if test -f SCSI_DEBUG_DEV -a -f LOOP ; then echo "(skipped)" return 0 fi test ! -e LOOP test -n "$DM_DEV_DIR" for i in 0 1 2 3 4 5 6 7; do test -e "$DM_DEV_DIR/loop$i" || mknod "$DM_DEV_DIR/loop$i" b 7 $i done echo -n . local LOOPFILE="$PWD/test.img" rm -f "$LOOPFILE" dd if=/dev/zero of="$LOOPFILE" bs=$((1024*1024)) count=0 seek=$(( size + 1 )) 2> /dev/null if LOOP=$(losetup "$@" -s -f "$LOOPFILE" 2>/dev/null); then : elif LOOP=$(losetup -f) && losetup "$@" "$LOOP" "$LOOPFILE"; then # no -s support : else # no -f support # Iterate through $DM_DEV_DIR/loop{,/}{0,1,2,3,4,5,6,7} for slash in '' /; do for i in 0 1 2 3 4 5 6 7; do local dev="$DM_DEV_DIR/loop$slash$i" ! losetup "$dev" >/dev/null 2>&1 || continue # got a free losetup "$@" "$dev" "$LOOPFILE" LOOP=$dev break done test -z "$LOOP" || break done fi test -n "$LOOP" # confirm or fail BACKING_DEV=$LOOP echo "$LOOP" > LOOP echo "$LOOP" > BACKING_DEV echo "ok ($LOOP)" } # A drop-in replacement for prepare_loop() that uses scsi_debug to create # a ramdisk-based SCSI device upon which all LVM devices will be created # - scripts must take care not to use a DEV_SIZE that will enduce OOM-killer prepare_scsi_debug_dev() { local DEV_SIZE=$1 shift # rest of params directly passed to modprobe local DEBUG_DEV rm -f debug.log strace.log test ! -f "SCSI_DEBUG_DEV" || return 0 test ! -f LOOP test -n "$DM_DEV_DIR" # Skip test if scsi_debug module is unavailable or is already in use modprobe --dry-run scsi_debug || skip lsmod | not grep -q scsi_debug || skip # Create the scsi_debug device and determine the new scsi device's name # NOTE: it will _never_ make sense to pass num_tgts param; # last param wins.. so num_tgts=1 is imposed touch SCSI_DEBUG_DEV modprobe scsi_debug dev_size_mb="$DEV_SIZE" "$@" num_tgts=1 || skip for i in {1..20} ; do DEBUG_DEV="/dev/$(grep -H scsi_debug /sys/block/*/device/model | cut -f4 -d /)" test -b "$DEBUG_DEV" && break sleep .1 # allow for async Linux SCSI device registration done test -b "$DEBUG_DEV" || return 1 # should not happen # Create symlink to scsi_debug device in $DM_DEV_DIR SCSI_DEBUG_DEV="$DM_DEV_DIR/$(basename "$DEBUG_DEV")" echo "$SCSI_DEBUG_DEV" > SCSI_DEBUG_DEV echo "$SCSI_DEBUG_DEV" > BACKING_DEV # Setting $LOOP provides means for prepare_devs() override test "$DEBUG_DEV" = "$SCSI_DEBUG_DEV" || ln -snf "$DEBUG_DEV" "$SCSI_DEBUG_DEV" } cleanup_scsi_debug_dev() { teardown_devs rm -f SCSI_DEBUG_DEV LOOP } prepare_md_dev() { local level=$1 local rchunk=$2 local rdevs=$3 local with_bitmap="--bitmap=internal" local coption="--chunk" local maj local mddev maj=$(mdadm --version 2>&1) || skip "mdadm tool is missing!" 
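	# $maj currently holds the whole "mdadm --version" banner; the parameter
	# expansions below reduce it to the major version only, e.g. a banner such
	# as "mdadm - v4.1 - ..." (exact format varies by build) becomes "4", which
	# selects the /dev/md/md_lvm_test0 name required by mdadm >= 3.0.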
cleanup_md_dev rm -f debug.log strace.log MD_DEV MD_DEV_PV MD_DEVICES case "$level" in "1") coption="--bitmap-chunk" ;; "0") with_bitmap="" ;; esac # Have MD use a non-standard name to avoid colliding with an existing MD device # - mdadm >= 3.0 requires that non-standard device names be in /dev/md/ # - newer mdadm _completely_ defers to udev to create the associated device node maj=${maj##*- v} maj=${maj%%.*} [ "$maj" -ge 3 ] && \ mddev=/dev/md/md_lvm_test0 || \ mddev=/dev/md_lvm_test0 mdadm --create --metadata=1.0 "$mddev" --auto=md --level "$level" $with_bitmap "$coption"="$rchunk" --raid-devices="$rdevs" "${@:4}" || { # Some older 'mdadm' version managed to open and close devices internaly # and reporting non-exclusive access on such device # let's just skip the test if this happens. # Note: It's pretty complex to get rid of consequences # the following sequence avoid leaks on f19 # TODO: maybe try here to recreate few times.... mdadm --stop "$mddev" || true udev_wait mdadm --zero-superblock "${@:4}" || true udev_wait skip "Test skipped, unreliable mdadm detected!" } test -b "$mddev" || skip "mdadm has not created device!" # LVM/DM will see this device case "$DM_DEV_DIR" in "/dev") readlink -f "$mddev" ;; *) cp -LR "$mddev" "$DM_DEV_DIR" echo "$DM_DEV_DIR/md_lvm_test0" ;; esac > MD_DEV_PV echo "$mddev" > MD_DEV notify_lvmetad "$(< MD_DEV_PV)" printf "%s\n" "${@:4}" > MD_DEVICES for mddev in "${@:4}"; do notify_lvmetad "$mddev" done } cleanup_md_dev() { test -f MD_DEV || return 0 local IFS=$IFS_NL local dev local mddev mddev=$(< MD_DEV) udev_wait mdadm --stop "$mddev" || true test "$DM_DEV_DIR" != "/dev" && rm -f "$DM_DEV_DIR/$(basename "$mddev")" notify_lvmetad "$(< MD_DEV_PV)" udev_wait # wait till events are process, not zeroing to early for dev in $(< MD_DEVICES); do mdadm --zero-superblock "$dev" || true notify_lvmetad "$dev" done udev_wait if [ -b "$mddev" ]; then # mdadm doesn't always cleanup the device node # sleeps offer hack to defeat: 'md: md127 still in use' # see: https://bugzilla.redhat.com/show_bug.cgi?id=509908#c25 sleep 2 rm -f "$mddev" fi rm -f MD_DEV MD_DEVICES MD_DEV_PV } prepare_backing_dev() { if test -f BACKING_DEV; then BACKING_DEV=$(< BACKING_DEV) elif test -b "$LVM_TEST_BACKING_DEVICE"; then BACKING_DEV=$LVM_TEST_BACKING_DEVICE echo "$BACKING_DEV" > BACKING_DEV else prepare_loop "$@" fi } prepare_devs() { local n=${1:-3} local devsize=${2:-34} local pvname=${3:-pv} local shift=0 # sanlock requires more space for the internal sanlock lv # This could probably be lower, but what are the units? if test -n "$LVM_TEST_LOCK_TYPE_SANLOCK" ; then devsize=1024 fi touch DEVICES prepare_backing_dev $(( n * devsize )) # shift start of PV devices on /dev/loopXX by 1M not diff LOOP BACKING_DEV >/dev/null 2>&1 || shift=2048 echo -n "## preparing $n devices..." local size=$(( devsize * 2048 )) # sectors local count=0 rm -f CREATE_FAILED init_udev_transaction for i in $(seq 1 "$n"); do local name="${PREFIX}$pvname$i" local dev="$DM_DEV_DIR/mapper/$name" DEVICES[$count]=$dev count=$(( count + 1 )) echo 0 $size linear "$BACKING_DEV" $(( ( i - 1 ) * size + shift )) > "$name.table" dmsetup create -u "TEST-$name" "$name" "$name.table" || touch CREATE_FAILED & test -f CREATE_FAILED && break; done wait finish_udev_transaction if test -f CREATE_FAILED -a -n "$LVM_TEST_BACKING_DEVICE"; then LVM_TEST_BACKING_DEVICE= rm -f BACKING_DEV CREATE_FAILED prepare_devs "$@" return $? 
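	# (The recursive call above is a one-shot fallback: if creating the mapped
	# test devices on LVM_TEST_BACKING_DEVICE failed, the variable is cleared
	# and prepare_devs retries on an ordinary loop device.)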
fi # non-ephemeral devices need to be cleared between tests test -f LOOP || for d in "${DEVICES[@]}"; do blkdiscard "$d" 2>/dev/null || true # ensure disk header is always zeroed dd if=/dev/zero of="$d" bs=32k count=1 wipefs -a "$d" 2>/dev/null || true done #for i in `seq 1 $n`; do # local name="${PREFIX}$pvname$i" # dmsetup info -c $name #done #for i in `seq 1 $n`; do # local name="${PREFIX}$pvname$i" # dmsetup table $name #done printf "%s\n" "${DEVICES[@]}" > DEVICES # ( IFS=$'\n'; echo "${DEVICES[*]}" ) >DEVICES echo "ok" if test -e LOCAL_LVMETAD; then for dev in "${DEVICES[@]}"; do notify_lvmetad "$dev" done fi } common_dev_() { local tgtype=$1 local dev=$2 local name=${dev##*/} shift 2 local read_ms=${1-0} local write_ms=${2-0} case "$tgtype" in delay) test "$read_ms" -eq 0 && test "$write_ms" -eq 0 && { # zero delay is just equivalent to 'enable_dev' enable_dev "$dev" return } shift 2 ;; # error|zero target does not take read_ms & write_ms only offset list esac local pos local size local type local pvdev local offset read -r pos size type pvdev offset < "$name.table" for fromlen in "${@-0:}"; do from=${fromlen%%:*} len=${fromlen##*:} test -n "$len" || len=$(( size - from )) diff=$(( from - pos )) if test $diff -gt 0 ; then echo "$pos $diff $type $pvdev $(( pos + offset ))" pos=$(( pos + diff )) elif test $diff -lt 0 ; then die "Position error" fi case "$tgtype" in delay) echo "$from $len delay $pvdev $(( pos + offset )) $read_ms $pvdev $(( pos + offset )) $write_ms" ;; error|zero) echo "$from $len $tgtype" ;; esac pos=$(( pos + len )) done > "$name.devtable" diff=$(( size - pos )) test "$diff" -gt 0 && echo "$pos $diff $type $pvdev $(( pos + offset ))" >>"$name.devtable" restore_from_devtable "$dev" } # Replace linear PV device with its 'delayed' version # Could be used to more deterministicaly hit some problems. # Parameters: {device path} [read delay ms] [write delay ms] [offset:size]... # Original device is restored when both delay params are 0 (or missing). # If the size is missing, the remaing portion of device is taken # i.e. delay_dev "$dev1" 0 200 256: delay_dev() { if test ! 
-f HAVE_DM_DELAY ; then target_at_least dm-delay 1 1 0 || return 0 touch HAVE_DM_DELAY fi common_dev_ delay "$@" } disable_dev() { local dev local silent="" local error="" local notify="" while test -n "$1"; do if test "$1" = "--silent"; then silent=1 shift elif test "$1" = "--error"; then error=1 shift else break fi done udev_wait for dev in "$@"; do maj=$(($(stat -L --printf=0x%t "$dev"))) min=$(($(stat -L --printf=0x%T "$dev"))) echo "Disabling device $dev ($maj:$min)" notify="$notify $maj:$min" if test -n "$error"; then echo 0 10000000 error | dmsetup load "$dev" dmsetup resume "$dev" else dmsetup remove -f "$dev" 2>/dev/null || true fi done test -n "$silent" || for num in $notify; do notify_lvmetad --major "${num%%:*}" --minor "${num##*:}" done } enable_dev() { local dev local silent="" if test "$1" = "--silent"; then silent=1 shift fi rm -f debug.log strace.log init_udev_transaction for dev in "$@"; do local name=${dev##*/} dmsetup create -u "TEST-$name" "$name" "$name.table" 2>/dev/null || \ dmsetup load "$name" "$name.table" # using device name (since device path does not exists yes with udev) dmsetup resume "$name" done finish_udev_transaction test -n "$silent" || for dev in "$@"; do notify_lvmetad "$dev" done } # Once there is $name.devtable # this is a quick way to restore to this table entry restore_from_devtable() { local dev local silent="" if test "$1" = "--silent"; then silent=1 shift fi rm -f debug.log strace.log init_udev_transaction for dev in "$@"; do local name=${dev##*/} dmsetup load "$name" "$name.devtable" dmsetup resume "$name" done finish_udev_transaction test -n "$silent" || for dev in "$@"; do notify_lvmetad "$dev" done } # # Convert device to device with errors # Takes the list of pairs of error segment from:len # Combination with zero or delay is unsupported # Original device table is replaced with multiple lines # i.e. error_dev "$dev1" 8:32 96:8 error_dev() { common_dev_ error "$@" } # # Convert existing device to a device with zero segments # Takes the list of pairs of zero segment from:len # Combination with error or delay is unsupported # Original device table is replaced with multiple lines # i.e. zero_dev "$dev1" 8:32 96:8 zero_dev() { common_dev_ zero "$@" } backup_dev() { local dev for dev in "$@"; do dd if="$dev" of="${dev}.backup" bs=1024 done } restore_dev() { local dev for dev in "$@"; do test -e "${dev}.backup" || \ die "Internal error: $dev not backed up, can't restore!" 
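		# Restoring copies the whole saved image back with dd; this is
		# practical only because the test PVs created by prepare_devs()
		# are small (34M each by default).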
dd of="$dev" if="${dev}.backup" bs=1024 done } prepare_pvs() { prepare_devs "$@" pvcreate -ff "${DEVICES[@]}" } prepare_vg() { teardown_devs prepare_devs "$@" vgcreate -s 512K "$vg" "${DEVICES[@]}" } extend_filter() { local filter=$(grep ^devices/global_filter CONFIG_VALUES | tail -n 1) for rx in "$@"; do filter=$(echo "$filter" | sed -e "s:\[:[ \"$rx\", :") done lvmconf "$filter" } extend_filter_LVMTEST() { extend_filter "a|$DM_DEV_DIR/$PREFIX|" } hide_dev() { local filter=$(grep ^devices/global_filter CONFIG_VALUES | tail -n 1) for dev in "$@"; do filter=$(echo "$filter" | sed -e "s:\[:[ \"r|$dev|\", :") done lvmconf "$filter" } unhide_dev() { local filter=$(grep ^devices/global_filter CONFIG_VALUES | tail -n 1) for dev in "$@"; do filter=$(echo "$filter" | sed -e "s:\"r|$dev|\", ::") done lvmconf "$filter" } mkdev_md5sum() { rm -f debug.log strace.log mkfs.ext2 "$DM_DEV_DIR/$1/$2" || return 1 md5sum "$DM_DEV_DIR/$1/$2" > "md5.$1-$2" } generate_config() { if test -n "$profile_name"; then config_values="PROFILE_VALUES_$profile_name" config="PROFILE_$profile_name" touch "$config_values" else config_values=CONFIG_VALUES config=CONFIG fi LVM_TEST_LOCKING=${LVM_TEST_LOCKING:-1} LVM_TEST_LVMETAD=${LVM_TEST_LVMETAD:-0} LVM_TEST_LVMPOLLD=${LVM_TEST_LVMPOLLD:-0} LVM_TEST_LVMLOCKD=${LVM_TEST_LVMLOCKD:-0} # FIXME:dct: This is harmful! Variables are unused here and are tested not being empty elsewhere: #LVM_TEST_LOCK_TYPE_SANLOCK=${LVM_TEST_LOCK_TYPE_SANLOCK:-0} #LVM_TEST_LOCK_TYPE_DLM=${LVM_TEST_LOCK_TYPE_DLM:-0} if test "$DM_DEV_DIR" = "/dev"; then LVM_VERIFY_UDEV=${LVM_VERIFY_UDEV:-0} else LVM_VERIFY_UDEV=${LVM_VERIFY_UDEV:-1} fi test -f "$config_values" || { cat > "$config_values" <<-EOF activation/checks = 1 activation/monitoring = 0 activation/polling_interval = 1 activation/retry_deactivation = 1 activation/snapshot_autoextend_percent = 50 activation/snapshot_autoextend_threshold = 50 activation/udev_rules = 1 activation/udev_sync = 1 activation/verify_udev_operations = $LVM_VERIFY_UDEV activation/raid_region_size = 512 allocation/wipe_signatures_when_zeroing_new_lvs = 0 backup/archive = 0 backup/backup = 0 devices/cache_dir = "$TESTDIR/etc" devices/default_data_alignment = 1 devices/dir = "$DM_DEV_DIR" devices/filter = "a|.*|" devices/global_filter = [ "a|$DM_DEV_DIR/mapper/${PREFIX}.*pv[0-9_]*$|", "r|.*|" ] devices/md_component_detection = 0 devices/scan = "$DM_DEV_DIR" devices/sysfs_scan = 1 devices/write_cache_state = 0 global/abort_on_internal_errors = 1 global/cache_check_executable = "$LVM_TEST_CACHE_CHECK_CMD" global/cache_dump_executable = "$LVM_TEST_CACHE_DUMP_CMD" global/cache_repair_executable = "$LVM_TEST_CACHE_REPAIR_CMD" global/detect_internal_vg_cache_corruption = 1 global/fallback_to_local_locking = 0 global/library_dir = "$TESTDIR/lib" global/locking_dir = "$TESTDIR/var/lock/lvm" global/locking_type=$LVM_TEST_LOCKING global/notify_dbus = 0 global/si_unit_consistency = 1 global/thin_check_executable = "$LVM_TEST_THIN_CHECK_CMD" global/thin_dump_executable = "$LVM_TEST_THIN_DUMP_CMD" global/thin_repair_executable = "$LVM_TEST_THIN_REPAIR_CMD" global/use_lvmetad = $LVM_TEST_LVMETAD global/use_lvmpolld = $LVM_TEST_LVMPOLLD global/use_lvmlockd = $LVM_TEST_LVMLOCKD log/activation = 1 log/file = "$TESTDIR/debug.log" log/indent = 1 log/level = 9 log/overwrite = 1 log/syslog = 0 log/verbose = 0 EOF # For 'rpm' builds use system installed binaries. # For test suite run use binaries from builddir. 
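	# (If abs_top_builddir is unset, i.e. an installed-testsuite run, the block
	# below is skipped and the system-installed dmeventd/fsadm are used.)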
test -z "${abs_top_builddir+varset}" || { cat >> "$config_values" <<-EOF dmeventd/executable = "$abs_top_builddir/test/lib/dmeventd" global/fsadm_executable = "$abs_top_builddir/test/lib/fsadm" EOF } } # append all parameters (avoid adding empty \n) local v test $# -gt 0 && printf "%s\n" "$@" >> "$config_values" declare -A CONF 2>/dev/null || { # Associative arrays is not available local s for s in $(cut -f1 -d/ "$config_values" | sort | uniq); do echo "$s {" local k for k in $(grep ^"$s"/ "$config_values" | cut -f1 -d= | sed -e 's, *$,,' | sort | uniq); do grep "^$k" "$config_values" | tail -n 1 | sed -e "s,^$s/, ," || true done echo "}" echo done | tee "$config" | sed -e "s,^,## LVMCONF: ," return 0 } local sec local last_sec="" # read sequential list and put into associative array while IFS= read -r v; do CONF["${v%%[={ ]*}"]=${v#*/} done < "$config_values" # sort by section and iterate through them printf "%s\n" "${!CONF[@]}" | sort | while read -r v ; do sec=${v%%/*} # split on section'/'param_name test "$sec" = "$last_sec" || { test -z "$last_sec" || echo "}" echo "$sec {" last_sec=$sec } echo " ${CONF[$v]}" done > "$config" echo "}" >> "$config" sed -e "s,^,## LVMCONF: ," "$config" } lvmconf() { local profile_name="" test $# -eq 0 || { # Compare if passed args aren't already all in generated lvm.conf local needed=0 for i in "$@"; do val=$(grep "${i%%[={ ]*}" CONFIG_VALUES 2>/dev/null | tail -1) || { needed=1; break; } test "$val" = "$i" || { needed=1; break; } done test "$needed" -eq 0 && { echo "## Skipping reconfiguring for: (" "$@" ")" return 0 # not needed } } generate_config "$@" mv -f CONFIG "$LVM_SYSTEM_DIR/lvm.conf" } profileconf() { local pdir="$LVM_SYSTEM_DIR/profile" local profile_name=$1 shift generate_config "$@" mkdir -p "$pdir" mv -f "PROFILE_$profile_name" "$pdir/$profile_name.profile" } prepare_profiles() { local pdir="$LVM_SYSTEM_DIR/profile" local profile_name mkdir -p "$pdir" for profile_name in "$@"; do test -L "lib/$profile_name.profile" || skip cp "lib/$profile_name.profile" "$pdir/$profile_name.profile" done } apitest() { test -x "$TESTOLDPWD/api/$1.t" || skip "$TESTOLDPWD/api/$1.t" "${@:2}" && rm -f debug.log strace.log } mirror_recovery_works() { case "$(uname -r)" in 3.3.4-5.fc17.i686|3.3.4-5.fc17.x86_64) return 1 ;; esac } raid456_replace_works() { # The way kmem_cache aliasing is done in the kernel is broken. # It causes RAID 4/5/6 tests to fail. # # The problem with kmem_cache* is this: # *) Assume CONFIG_SLUB is set # 1) kmem_cache_create(name="foo-a") # - creates new kmem_cache structure # 2) kmem_cache_create(name="foo-b") # - If identical cache characteristics, it will be merged with the previously # created cache associated with "foo-a". The cache's refcount will be # incremented and an alias will be created via sysfs_slab_alias(). # 3) kmem_cache_destroy() # - Attempting to destroy cache associated with "foo-a", but instead the # refcount is simply decremented. I don't even think the sysfs aliases are # ever removed... # 4) kmem_cache_create(name="foo-a") # - This FAILS because kmem_cache_sanity_check colides with the existing # name ("foo-a") associated with the non-removed cache. # # This is a problem for RAID (specifically dm-raid) because the name used # for the kmem_cache_create is ("raid%d-%p", level, mddev). If the cache # persists for long enough, the memory address of an old mddev will be # reused for a new mddev - causing an identical formulation of the cache # name. 
Even though kmem_cache_destory had long ago been used to delete # the old cache, the merging of caches has cause the name and cache of that # old instance to be preserved and causes a colision (and thus failure) in # kmem_cache_create(). I see this regularly in testing the following # kernels: # # This seems to be finaly resolved with this patch: # http://www.redhat.com/archives/dm-devel/2014-March/msg00008.html # so we need to put here exlusion for kernes which do trace SLUB # case "$(uname -r)" in 3.6.*.fc18.i686*|3.6.*.fc18.x86_64) return 1 ;; 3.9.*.fc19.i686*|3.9.*.fc19.x86_64) return 1 ;; 3.1[0123].*.fc18.i686*|3.1[0123].*.fc18.x86_64) return 1 ;; 3.1[01234].*.fc19.i686*|3.1[01234].*.fc19.x86_64) return 1 ;; 3.1[123].*.fc20.i686*|3.1[123].*.fc20.x86_64) return 1 ;; 3.14.*.fc21.i686*|3.14.*.fc21.x86_64) return 1 ;; 3.15.*rc6*.fc21.i686*|3.15.*rc6*.fc21.x86_64) return 1 ;; 3.16.*rc4*.fc21.i686*|3.16.*rc4*.fc21.x86_64) return 1 ;; esac } # # Some 32bit kernel cannot pass some erroring magic which forces # thin-pool to be falling into Error state. # # Skip test on such kernels (see: https://bugzilla.redhat.com/1310661) # thin_pool_error_works_32() { case "$(uname -r)" in 2.6.32-618.*.i686) return 1 ;; 2.6.32-623.*.i686) return 1 ;; 2.6.32-573.1[28].1.el6.i686) return 1 ;; esac } udev_wait() { pgrep udev >/dev/null || return 0 which udevadm &>/dev/null || return 0 if test -n "${1-}" ; then udevadm settle --exit-if-exists="$1" || true else udevadm settle --timeout=15 || true fi } # wait_for_sync wait_for_sync() { local i for i in {1..100} ; do check in_sync "$@" && return sleep .2 done echo "Sync is taking too long - assume stuck" return 1 } # Check if tests are running on 64bit architecture can_use_16T() { test "$(getconf LONG_BIT)" -eq 64 } # Check if major.minor.revision' string is 'at_least' version_at_least() { local major local minor local revision IFS=".-" read -r major minor revision <<< "$1" shift test -z "$1" && return 0 test -n "$major" || return 1 test "$major" -gt "$1" && return 0 test "$major" -eq "$1" || return 1 test -z "$2" && return 0 test -n "$minor" || return 1 test "$minor" -gt "$2" && return 0 test "$minor" -eq "$2" || return 1 test -z "$3" && return 0 test "$revision" -ge "$3" 2>/dev/null || return 1 } # # Check wheter kernel [dm module] target exist # at least in expected version # # [dm-]target-name major minor revision # # i.e. dm_target_at_least dm-thin-pool 1 0 target_at_least() { rm -f debug.log strace.log case "$1" in dm-*) modprobe "$1" || true ;; esac if test "$1" = dm-raid; then case "$(uname -r)" in 3.12.0*) return 1 ;; esac fi local version version=$(dmsetup targets 2>/dev/null | grep "${1##dm-} " 2>/dev/null) version=${version##* v} version_at_least "$version" "${@:2}" || { echo "Found $1 version $version, but requested ${*:2}." >&2 return 1 } } # Check whether the kernel driver version is greater or equal # to the specified version. This can be used to skip tests on # kernels where they are known to not be supported. # # e.g. driver_at_least 4 33 # driver_at_least() { local version version=$(dmsetup version | tail -1 2>/dev/null) version=${version##*:} version_at_least "$version" "$@" || { echo "Found driver version $version, but requested" "$@" "." >&2 return 1 } } have_thin() { test "$THIN" = shared -o "$THIN" = internal || { echo "Thin is not built-in." >&2 return 1; } target_at_least dm-thin-pool "$@" declare -a CONF=() # disable thin_check if not present in system if test -n "$LVM_TEST_THIN_CHECK_CMD" && test ! 
-x "$LVM_TEST_THIN_CHECK_CMD"; then CONF[0]="global/thin_check_executable = \"\"" fi if test -n "$LVM_TEST_THIN_DUMP_CMD" && test ! -x "$LVM_TEST_THIN_DUMP_CMD"; then CONF[1]="global/thin_dump_executable = \"\"" fi if test -n "$LVM_TEST_THIN_REPAIR_CMD" && test ! -x "$LVM_TEST_THIN_REPAIR_CMD"; then CONF[2]="global/thin_repair_executable = \"\"" fi if test ${#CONF[@]} -ne 0 ; then echo "TEST WARNING: Reconfiguring" "${CONF[@]}" lvmconf "${CONF[@]}" fi } have_raid() { test "$RAID" = shared -o "$RAID" = internal || { echo "Raid is not built-in." >&2 return 1; } target_at_least dm-raid "$@" # some kernels have broken mdraid bitmaps, don't use them! # may oops kernel, we know for sure all FC24 are currently broken # in general any 4.1, 4.2 is likely useless unless patched case "$(uname -r)" in 4.[12].*fc24*) return 1 ;; esac } have_raid4 () { local r=0 have_raid 1 8 0 && r=1 have_raid 1 9 1 && r=0 return $r } have_cache() { test "$CACHE" = shared -o "$CACHE" = internal || { echo "Cache is not built-in." >&2 return 1; } target_at_least dm-cache "$@" declare -a CONF=() # disable cache_check if not present in system if test -n "$LVM_TEST_CACHE_CHECK_CMD" -a ! -x "$LVM_TEST_CACHE_CHECK_CMD" ; then CONF[0]="global/cache_check_executable = \"\"" fi if test -n "$LVM_TEST_CACHE_DUMP_CMD" -a ! -x "$LVM_TEST_CACHE_DUMP_CMD" ; then CONF[1]="global/cache_dump_executable = \"\"" fi if test -n "$LVM_TEST_CACHE_REPAIR_CMD" -a ! -x "$LVM_TEST_CACHE_REPAIR_CMD" ; then CONF[2]="global/cache_repair_executable = \"\"" fi if test ${#CONF[@]} -ne 0 ; then echo "TEST WARNING: Reconfiguring" "${CONF[@]}" lvmconf "${CONF[@]}" fi } have_tool_at_least() { local version version=$("$1" -V 2>/dev/null) version=${version%%-*} shift version_at_least "$version" "$@" } # check if lvm shell is build-in (needs readline) have_readline() { echo version | lvm &>/dev/null } have_multi_core() { which nproc &>/dev/null || return 0 [ "$(nproc)" -ne 1 ] } dmsetup_wrapped() { udev_wait dmsetup "$@" } awk_parse_init_count_in_lvmpolld_dump() { printf '%s' \ \ $'BEGINFILE { x=0; answ=0; FS="="; key="[[:space:]]*"vkey }' \ $'{' \ $'if (/.*{$/) { x++ }' \ $'else if (/.*}$/) { x-- }' \ $'else if ( x == 2 && $1 ~ key) { value=substr($2, 2); value=substr(value, 1, length(value) - 1); }' \ $'if ( x == 2 && value == vvalue && $1 ~ /[[:space:]]*init_requests_count/) { answ=$2 }' \ $'if (answ > 0) { exit 0 }' \ $'}' \ $'END { printf "%d", answ }' } check_lvmpolld_init_rq_count() { local ret ret=$(awk -v vvalue="$2" -v vkey="${3:-lvname}" "$(awk_parse_init_count_in_lvmpolld_dump)" lvmpolld_dump.txt) test "$ret" -eq "$1" || { die "check_lvmpolld_init_rq_count failed. Expected $1, got $ret" } } wait_pvmove_lv_ready() { # given sleep .1 this is about 60 secs of waiting local retries=${2-300} if [ -e LOCAL_LVMPOLLD ]; then local lvid="" while : ; do test "$retries" -le 0 && die "Waiting for lvmpolld timed out" test -n "$lvid" || { # wait till wanted LV really appears lvid=$(get lv_field "${1//-//}" vg_uuid,lv_uuid -a 2>/dev/null) && { lvid=${lvid//\ /} lvid=${lvid//-/} } } test -z "$lvid" || { lvmpolld_dump > lvmpolld_dump.txt ! 
check_lvmpolld_init_rq_count 1 "$lvid" lvid || break; } sleep .1 retries=$((retries-1)) done else while : ; do test "$retries" -le 0 && die "Waiting for pvmove LV to get activated has timed out" dmsetup info -c -o tables_loaded "$1" >out 2>/dev/null|| true; not grep Live out >/dev/null || break sleep .1 retries=$((retries-1)) done fi } # Holds device open with sleep which automatically expires after given timeout # Prints PID of running holding sleep process in background hold_device_open() { local vgname=$1 local lvname=$2 local sec=${3-20} # default 20sec sleep "$sec" < "$DM_DEV_DIR/$vgname/$lvname" >/dev/null 2>&1 & SLEEP_PID=$! # wait till device is openned for i in $(seq 1 50) ; do if test "$(dmsetup info --noheadings -c -o open "$vgname"-"$lvname")" -ne 0 ; then echo "$SLEEP_PID" return fi sleep .1 done die "$vgname-$lvname expected to be openned, but it's not!" } # return total memory size in kB units total_mem() { local a local b while IFS=":" read -r a b ; do case "$a" in MemTotal*) echo "${b%% kB}" ; break ;; esac done < /proc/meminfo } kernel_at_least() { version_at_least "$(uname -r)" "$@" } test -z "${LVM_TEST_AUX_TRACE-}" || set -x test -f DEVICES && devs=$(< DEVICES) if test "$1" = "dmsetup" ; then shift dmsetup_wrapped "$@" else "$@" fi LVM2.2.02.176/test/lib/not.c0000644000000000000120000000532313176752421014020 0ustar rootwheel/* * Copyright (C) 2010 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include static int _finished(const char *cmd, int status, int pid) { int ret; if (!strcmp(cmd, "not")) return !status; if (!strcmp(cmd, "should")) { if (status) { fprintf(stderr, "TEST WARNING: Ignoring command failure.\n"); /* TODO: avoid using shell here */ /* Show log for failing command which should be passing */ ret = system("ls debug.log*${LVM_LOG_FILE_EPOCH}* 2>/dev/null"); if (WIFEXITED(ret) && WEXITSTATUS(ret) == 0) { printf("## timing off\n<======== Debug log ========>\n"); /* timing off */ fflush(stdout); if (system("sed -e 's,^,## DEBUG: ,' debug.log*${LVM_LOG_FILE_EPOCH}* 2>/dev/null")) { /* Ignore result code */; } printf("## timing on\n"); /* timing on */ if (system("rm -f debug.log*${LVM_LOG_FILE_EPOCH}*")) { /* Ignore result code */; } fflush(stdout); } } return 0; } else if (!strcmp(cmd, "invalid")) { if (status == 3) return 0; fprintf(stderr, "Test expected exit code 3 (invalid), but got %d.\n", status); } else if (!strcmp(cmd, "fail")) { if (status == 5) return 0; fprintf(stderr, "Test expected exit code 5 (fail), but got %d.\n", status); } return 6; } int main(int args, char **argv) { const char *val = NULL; pid_t pid; int status; int FAILURE = 6; if (args < 2) { fprintf(stderr, "Need args\n"); return FAILURE; } pid = fork(); if (pid == -1) { fprintf(stderr, "Could not fork\n"); return FAILURE; } else if (pid == 0) { /* child */ if (!strcmp(argv[0], "not")) val = ">1"; else if (!strcmp(argv[0], "invalid")) val = "3"; else if (!strcmp(argv[0], "fail")) val = "5"; if (val) setenv("LVM_EXPECTED_EXIT_STATUS", val, 1); execvp(argv[1], &argv[1]); /* should not be accessible */ return FAILURE; } else 
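	/* Parent: wait for the child and translate its raw exit status via
	 * _finished() above: inverted for "not", always tolerated for "should",
	 * and required to be 3 or 5 for "invalid" and "fail" respectively. */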
{ /* parent */ waitpid(pid, &status, 0); if (!WIFEXITED(status)) { if (WIFSIGNALED(status)) fprintf(stderr, "Process %d died of signal %d.\n", pid, WTERMSIG(status)); /* did not exit correctly */ return FAILURE; } return _finished(argv[0], WEXITSTATUS(status), pid); } /* not accessible */ return FAILURE; } LVM2.2.02.176/test/lib/flavour-ndev-vanilla.sh0000644000000000000120000000006113176752421017436 0ustar rootwheelexport LVM_TEST_LOCKING=1 export LVM_TEST_LVM1=1 LVM2.2.02.176/test/lib/flavour-udev-lvmlockd-sanlock.sh0000644000000000000120000000025213176752421021264 0ustar rootwheelexport LVM_TEST_LOCKING=1 export LVM_TEST_LVMETAD=1 export LVM_TEST_LVMPOLLD=1 export LVM_TEST_LVMLOCKD=1 export LVM_TEST_LOCK_TYPE_SANLOCK=1 export LVM_TEST_DEVDIR=/dev LVM2.2.02.176/test/lib/inittest.sh0000644000000000000120000001341713176752421015256 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2011-2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA initskip() { test $# -eq 0 || echo "TEST SKIPPED:" "$@" exit 200 } # sanitize the environment LANG=C LC_ALL=C TZ=UTC # Put script name into variable, so it can used in external scripts TESTNAME=${0##*/} # Nice debug message PS4='#${BASH_SOURCE[0]##*/}:${LINENO}+ ' export TESTNAME PS4 LVM_TEST_BACKING_DEVICE=${LVM_TEST_BACKING_DEVICE-} LVM_TEST_DEVDIR=${LVM_TEST_DEVDIR-} LVM_TEST_NODEBUG=${LVM_TEST_NODEBUG-} LVM_TEST_LVM1=${LVM_TEST_LVM1-} # TODO: LVM_TEST_SHARED SHARED=${SHARED-} LVM_TEST_LVMETAD=${LVM_TEST_LVMETAD-} LVM_TEST_LVMLOCKD=${LVM_TEST_LVMLOCKD-} LVM_TEST_LVMLOCKD_TEST=${LVM_TEST_LVMLOCKD_TEST-} LVM_TEST_LVMPOLLD=${LVM_TEST_LVMPOLLD-} LVM_TEST_LOCK_TYPE_DLM=${LVM_TEST_LOCK_TYPE_DLM-} LVM_TEST_LOCK_TYPE_SANLOCK=${LVM_TEST_LOCK_TYPE_SANLOCK-} SKIP_WITHOUT_CLVMD=${SKIP_WITHOUT_CLVMD-} SKIP_WITH_CLVMD=${SKIP_WITH_CLVMD-} SKIP_WITHOUT_LVMETAD=${SKIP_WITHOUT_LVMETAD-} SKIP_WITH_LVMETAD=${SKIP_WITH_LVMETAD-} SKIP_WITH_LVMPOLLD=${SKIP_WITH_LVMPOLLD-} SKIP_WITH_LVMLOCKD=${SKIP_WITH_LVMLOCKD-} if test -n "$LVM_TEST_FLAVOUR"; then . "lib/flavour-$LVM_TEST_FLAVOUR" fi test -n "$SKIP_WITHOUT_CLVMD" && test "$LVM_TEST_LOCKING" -ne 3 && initskip test -n "$SKIP_WITH_CLVMD" && test "$LVM_TEST_LOCKING" -eq 3 && initskip test -n "$SKIP_WITHOUT_LVMETAD" && test -z "$LVM_TEST_LVMETAD" && initskip test -n "$SKIP_WITH_LVMETAD" && test -n "$LVM_TEST_LVMETAD" && initskip test -n "$SKIP_WITH_LVMPOLLD" && test -n "$LVM_TEST_LVMPOLLD" && initskip test -n "$SKIP_WITH_LVMLOCKD" && test -n "$LVM_TEST_LVMLOCKD" && initskip unset CDPATH export LVM_TEST_BACKING_DEVICE LVM_TEST_DEVDIR LVM_TEST_NODEBUG export LVM_TEST_LVMETAD LVM_TEST_LVMLOCKD LVM_TEST_LVMLOCKD_TEST export LVM_TEST_LVMPOLLD LVM_TEST_LOCK_TYPE_DLM LVM_TEST_LOCK_TYPE_SANLOCK # grab some common utilities . lib/utils TESTOLDPWD=$(pwd) COMMON_PREFIX="LVMTEST" PREFIX="${COMMON_PREFIX}$$" # Check we are not conflickting with some exiting setup dmsetup table | not grep "${PREFIX}[^0-9]" || die "DM table already has devices with prefix $PREFIX!" 
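# PREFIX is "LVMTEST<pid>", so the devices this test creates can be told apart
# from anything else on the host, e.g. a run with PID 1234 creates its PVs as
# $DM_DEV_DIR/mapper/LVMTEST1234pv1, LVMTEST1234pv2, ... (see prepare_devs in
# lib/aux.sh); teardown and the conflict check above both key on this prefix.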
if test -z "$LVM_TEST_DIR"; then LVM_TEST_DIR=$TMPDIR; fi TESTDIR=$(mkdtemp "${LVM_TEST_DIR:-/tmp}" "$PREFIX.XXXXXXXXXX") || \ die "failed to create temporary directory in ${LVM_TEST_DIR:-$TESTOLDPWD}" RUNNING_DMEVENTD=$(pgrep dmeventd || true) export TESTOLDPWD TESTDIR COMMON_PREFIX PREFIX RUNNING_DMEVENTD LVM_LOG_FILE_EPOCH=DEBUG LVM_LOG_FILE_MAX_LINES=${LVM_LOG_FILE_MAX_LINES-1000000} LVM_EXPECTED_EXIT_STATUS=1 export LVM_LOG_FILE_EPOCH LVM_LOG_FILE_MAX_LINES LVM_EXPECTED_EXIT_STATUS test -n "$BASH" && trap 'set +vx; STACKTRACE; set -vx' ERR trap 'aux teardown' EXIT # don't forget to clean up cd "$TESTDIR" mkdir lib # Setting up symlink from $i to $TESTDIR/lib test -n "${abs_top_builddir+varset}" && \ find "$abs_top_builddir/daemons/dmeventd/plugins/" -name '*.so' \ -exec ln -s -t lib "{}" + find "$TESTOLDPWD/lib" ! \( -name '*.sh' -o -name '*.[cdo]' \ -o -name '*~' \) -exec ln -s -t lib "{}" + DM_DEFAULT_NAME_MANGLING_MODE=none DM_DEV_DIR="$TESTDIR/dev" LVM_SYSTEM_DIR="$TESTDIR/etc" # abort on the internal dm errors in the tests (allowing test user override) DM_ABORT_ON_INTERNAL_ERRORS=${DM_ABORT_ON_INTERNAL_ERRORS:-1} export DM_DEFAULT_NAME_MANGLING_MODE DM_DEV_DIR LVM_SYSTEM_DIR DM_ABORT_ON_INTERNAL_ERRORS mkdir "$LVM_SYSTEM_DIR" "$DM_DEV_DIR" if test -n "$LVM_TEST_DEVDIR" ; then test -d "$LVM_TEST_DEVDIR" || die "Test device directory LVM_TEST_DEVDIR=\"$LVM_TEST_DEVDIR\" is not valid." DM_DEV_DIR=$LVM_TEST_DEVDIR else mknod "$DM_DEV_DIR/testnull" c 1 3 || die "mknod failed" echo >"$DM_DEV_DIR/testnull" || \ die "Filesystem does support devices in $DM_DEV_DIR (mounted with nodev?)" # dmsetup makes here needed control entry if still missing dmsetup version || \ die "Dmsetup in $DM_DEV_DIR can't report version?" fi echo "$TESTNAME" >TESTNAME # Require 50M of free space in testdir test $(df -k -P . | awk '/\// {print $4}') -gt 51200 || die "Testing requires more then 50M of free space in directory $TESTDIR!\n$(df -H)" echo "Kernel is $(uname -a)" # Report SELinux mode echo "Selinux mode is $(getenforce 2>/dev/null || echo not installed)." free -m || true df -h || true # Set vars from utils now that we have TESTDIR/PREFIX/... prepare_test_vars # Set strict shell mode # see: http://redsymbol.net/articles/unofficial-bash-strict-mode test -n "$BASH" && set -euE -o pipefail # Vars for harness echo "@TESTDIR=$TESTDIR" echo "@PREFIX=$PREFIX" if test -n "$LVM_TEST_LVMETAD" ; then export LVM_LVMETAD_SOCKET="$TESTDIR/lvmetad.socket" export LVM_LVMETAD_PIDFILE="$TESTDIR/lvmetad.pid" aux prepare_lvmetad else # lvmetad prepares its own lvmconf export LVM_LVMETAD_PIDFILE="$TESTDIR/non-existing-file" aux lvmconf aux prepare_clvmd fi test -n "$LVM_TEST_LVMPOLLD" && { export LVM_LVMPOLLD_SOCKET="$TESTDIR/lvmpolld.socket" export LVM_LVMPOLLD_PIDFILE="$TESTDIR/lvmpolld.pid" aux prepare_lvmpolld } if test -n "$LVM_TEST_LVMLOCKD" ; then if test -n "$LVM_TEST_LOCK_TYPE_SANLOCK" ; then aux lvmconf 'local/host_id = 1' fi export SHARED="--shared" fi # for check_lvmlockd_test, lvmlockd is restarted for each shell test. # for check_lvmlockd_{sanlock,dlm}, lvmlockd is started once by # aa-lvmlockd-{sanlock,dlm}-prepare.sh and left running for all shell tests. 
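# When it is (re)started here, aux prepare_lvmlockd picks the mode from
# LVM_TEST_LOCK_TYPE_SANLOCK / LVM_TEST_LOCK_TYPE_DLM, or starts
# "lvmlockd --test" when one of the LVM_TEST_LVMLOCKD_TEST_* variables is set
# (see prepare_lvmlockd in lib/aux.sh).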
if test -n "$LVM_TEST_LVMLOCKD_TEST" ; then aux prepare_lvmlockd fi echo "<======== Processing test: \"$TESTNAME\" ========>" set -vx LVM2.2.02.176/test/lib/flavour-udev-lvmlockd-dlm.sh0000644000000000000120000000024613176752421020411 0ustar rootwheelexport LVM_TEST_LOCKING=1 export LVM_TEST_LVMETAD=1 export LVM_TEST_LVMPOLLD=1 export LVM_TEST_LVMLOCKD=1 export LVM_TEST_LOCK_TYPE_DLM=1 export LVM_TEST_DEVDIR=/dev LVM2.2.02.176/test/lib/test-dlm-conf0000644000000000000120000000007213176752421015447 0ustar rootwheel# created by lvm test suite log_debug=1 enable_fencing=0 LVM2.2.02.176/test/lib/flavour-udev-lvmetad.sh0000644000000000000120000000012013176752421017447 0ustar rootwheelexport LVM_TEST_LOCKING=1 export LVM_TEST_LVMETAD=1 export LVM_TEST_DEVDIR=/dev LVM2.2.02.176/test/lib/test-sanlock-conf0000644000000000000120000000005313176752421016324 0ustar rootwheel# created by lvm test suite use_watchdog=0 LVM2.2.02.176/test/lib/utils.sh0000644000000000000120000001706013176752421014551 0ustar rootwheel#!/usr/bin/env bash # Copyright (C) 2011-2012 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA set -e MAX_TRIES=4 IFS_NL=' ' die() { rm -f debug.log echo -e "$@" >&2 return 1 } rand_bytes() { n=$1 chars="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" dev_rand="/dev/urandom" if test -r "$dev_rand"; then # Note: 256-length($chars) == 194; 3 copies of $chars is 186 + 8 = 194. head -c"$n" "$dev_rand" | tr -c "$chars" "01234567$chars$chars$chars" return fi cmds='date; date +%N; free; who -a; w; ps auxww; ps ef; netstat -n' data=$( (eval "$cmds") 2>&1 | gzip ) n_plus_50=$(( n + 50 )) # Ensure that $data has length at least 50+$n while :; do len=$(echo "$data" | wc -c) test "$n_plus_50" -le "$len" && break; data=$( (echo "$data"; eval "$cmds") 2>&1 | gzip ) done echo "$data" | dd bs=1 skip=50 count="$n" 2>/dev/null \ | tr -c "$chars" "01234567$chars$chars$chars" } mkdtemp() { case $# in 2) ;; *) die "Usage: mkdtemp DIR TEMPLATE";; esac destdir=$1 template=$2 test -d "$destdir" || die "DIR ('$destdir') does not exist." case "$template" in *XXXX) ;; *) die "Invalid template: $template (must have a suffix of at least 4 X's)";; esac fail=0 # First, try to use mktemp. d=$(env -u TMPDIR mktemp -d -t -p "$destdir" "$template" 2>/dev/null) || fail=1 # The resulting name must be in the specified directory. case "$d" in "${destdir}"*);; *) fail=1;; esac # It must have created the directory. test -d "$d" || fail=1 # It must have 0700 permissions. perms=$(ls -dgo "$d" 2>/dev/null) || fail=1 case "$perms" in drwx------*) ;; *) fail=1;; esac test $fail = 0 && { echo "$d"; return; } # If we reach this point, we'll have to create a directory manually. # Get a copy of the template without its suffix of X's. base_template=$(echo "$template" | sed 's/XX*$//') # Calculate how many X's we've just removed. nx=$(expr length "$template" - length "$base_template") err= i=1 while :; do X=$(rand_bytes "$nx") candidate_dir="$destdir/$base_template$X" err=$(mkdir -m 0700 "$candidate_dir" 2>&1) && \ { echo "$candidate_dir"; return; } test $MAX_TRIES -le $i && break; i=$(( i + 1 )) done die "$err" } # Like grep, just always print 1st. 
line grep1_() { awk -v pattern="${1}" 'NR==1 || $0~pattern' "${@:2}" } stacktrace() { trap - ERR # i=1 - ignoring innermost frame - it is always stacktrace function local i=1 n=${#BASH_LINENO[*]} # n-=1 - ignoring last frame as well - it is not interesting let n-=1 echo "## - $0:${BASH_LINENO[$((n-1))]}" while [[ $i -lt $n ]]; do echo "## $i ${FUNCNAME[$i]}() called from ${BASH_SOURCE[$((i+1))]}:${BASH_LINENO[$i]}" i=$(( i + 1 )) done } STACKTRACE() { trap - ERR local i stacktrace test "${LVM_TEST_PARALLEL:-0}" -eq 0 && test -z "$RUNNING_DMEVENTD" && \ test ! -f LOCAL_DMEVENTD && pgrep dmeventd >DPID 2>/dev/null && { echo "## ERROR: The test started dmeventd ($(< DPID)) unexpectedly." kill "$(< DPID)" } # Get backtraces from coredumps if which gdb &>/dev/null; then { echo bt full echo l echo quit } > gdb_commands.txt # Check for all cores newer then TESTNAME file # Assume users keep prefix 'core' # TODO: possibly better integrate with coredumpctl & systemd while IFS= read -r i; do bin=$(gdb -batch -c "$i" 2>&1 | grep "generated by" | \ sed -e "s,.*generated by \`\([^ ']*\).*,\1,") || continue echo "## Checking coredump: $i generated by $bin." gdb -batch -c "$i" -x gdb_commands.txt "$(which "$bin")" 2>/dev/null | \ sed -e "s,^,## GDB: ," || continue done < <(find . "$(dirname "$(sysctl -n kernel.core_pattern)")" \ "/var/lib/systemd/coredump/" -name 'core*' -newer TESTNAME 2>/dev/null || true) fi test -f SKIP_THIS_TEST && exit 200 test -z "$LVM_TEST_NODEBUG" && test -f TESTNAME && { local name local idx=0 for i in debug.log* ; do test -f "$i" || break # nothing is found (expands to debug.log*) name=${i##debug.log_} name=${name%%_*} test "$name" = "DEBUG" && { name="$name$idx" ; idx=$(( idx + 1 )) ; } echo "<======== Debug log $i ========>" sed -e "s,^,## $name: ," "$i" done if test -e strace.log ; then echo "<======== Strace debug log ========>" sed -e "s,^,## STRACE: ," strace.log fi dmsetup info -c | grep1_ "$PREFIX" > out if test "$(wc -l < out)" -gt 1 ; then echo "<======== Info ========>" sed -e "s,^,## DMINFO: ," out echo "<======== Active table ========>" dmsetup table | grep "$PREFIX" | sed -e "s,^,## DMTABLE: ," echo "<======== Inactive table ========>" dmsetup table --inactive | grep "$PREFIX" | sed -e "s,^,## DMITABLE: ," echo "<======== Status ========>" dmsetup status | grep "$PREFIX" | sed -e "s,^,## DMSTATUS: ," echo "<======== Tree ========>" dmsetup ls --tree | sed -e "s,^,## DMTREE: ," echo "<======== Recursive list of $DM_DEV_DIR ========>" ls -Rl --hide=shm --hide=bus --hide=snd --hide=input --hide=dri \ --hide=net --hide=hugepages --hide=mqueue --hide=pts \ "$DM_DEV_DIR" | sed -e "s,^,## LSLR: ," echo "<======== Udev DB content ========>" for i in /sys/block/dm-* /sys/block/loop* ; do udevadm info --query=all --path "$i" 2>/dev/null || true done | sed -e "s,^,## UDEV: ," fi echo "<======== Script file \"$(< TESTNAME)\" ========>" local script=$0 test -f "$script" || script="$TESTOLDPWD/$0" awk '{print "## Line:", NR, "\t", $0}' "$script" } } init_udev_transaction() { if test "$DM_UDEV_SYNCHRONISATION" = 1; then local cookie cookie=$(dmsetup udevcreatecookie) # Cookie is not generated if udev is not running! 
test -z "$cookie" || export DM_UDEV_COOKIE=$cookie fi } finish_udev_transaction() { if test "$DM_UDEV_SYNCHRONISATION" = 1 && test -n "$DM_UDEV_COOKIE" ; then dmsetup udevreleasecookie unset DM_UDEV_COOKIE fi } teardown_udev_cookies() { if test "$DM_UDEV_SYNCHRONISATION" = 1; then # Delete any cookies created more than 10 minutes ago # and not used in the last 10 minutes. # Log only non-zero semaphores count (dmsetup udevcomplete_all -y 10 | grep -v "^0 ") || true fi } dm_info() { should dmsetup info --noheadings -c -o "$@" } dm_table() { should dmsetup table "$@" } skip() { set +vx # debug off if test "$#" -eq 0; then stacktrace else echo "TEST SKIPPED:" "$@" fi touch SKIP_THIS_TEST exit 200 } get_devs() { local IFS=$IFS_NL DEVICES=( $(&2 return 1 } lvl() { lvs -a --noheadings "$@" } lvdevices() { get lv_devices "$@" } mirror_images_redundant() { local vg=$1 local lv="$vg/$2" lvs -a "$vg" -o+devices for i in $(lvdevices "$lv"); do echo "# $i:" lvdevices "$vg/$i" | sort | uniq done > check.tmp.all (grep -v ^# check.tmp.all || true) | sort | uniq -d > check.tmp test "$(wc -l < check.tmp)" -eq 0 || \ die "mirror images of $lv expected redundant, but are not:" \ "$(cat check.tmp.all)" } lv_err_list_() { (echo "$2" | not grep -m 1 -q "$1") || \ echo "$3 on [ $(echo "$2" | grep "$1" | cut -b3- | tr '\n' ' ')] " } lv_on_diff_() { declare -a xdevs=("${!1}") # pass in shell array local expect=( "${@:4}" ) # make an array starting from 4th args... local diff_e # Find diff between 2 shell arrays, print them as stdin files printf "%s\n" "${expect[@]}" | sort | uniq >_lv_on_diff1 printf "%s\n" "${xdevs[@]}" >_lv_on_diff2 diff_e=$(diff _lv_on_diff1 _lv_on_diff2) || die "LV $2/$3 $(lv_err_list_ "^>" "${diff_e}" found)$(lv_err_list_ "^<" "${diff_e}" "not found")." 
} # list devices for given LV lv_on() { local devs devs=( $(lvdevices "$1/$2" | sort | uniq ) ) lv_on_diff_ devs[@] "$@" } # list devices for given LV and all its subdevices lv_tree_on() { local devs # Get sorted list of devices devs=( $(get lv_tree_devices "$1" "$2") ) lv_on_diff_ devs[@] "$@" } # Test if all mimage_X LV legs are sitting on given ordered list of PVs # When LV is composed of imagetmp, such leg is decomposed so only # real _mimage LVs are always checked mirror_images_on() { local vg=$1 local lv=$2 shift 2 local mimages=() local line while IFS= read -r line ; do mimages+=( "$line" ) done < <( get lv_field_lv_ "$vg" lv_name -a | grep "${lv}_mimage_" ) for i in "${mimages[@]}"; do lv_on "$vg" "$i" "$1" shift done } mirror_log_on() { local vg=$1 local lv=$2 local where=$3 if test "$where" = "core"; then get lv_field "$vg/$lv" mirror_log | not grep mlog else lv_on "$vg" "${lv}_mlog" "$where" fi } lv_is_contiguous() { local lv="$1/$2" test "$(lvl --segments "$lv" | wc -l)" -eq 1 || \ die "LV $lv expected to be contiguous, but is not:" \ "$(lvl --segments "$lv")" } lv_is_clung() { local lv="$1/$2" test "$(lvdevices "$lv" | sort | uniq | wc -l)" -eq 1 || \ die "LV $lv expected to be clung, but is not:" \ "$(lvdevices "$lv" | sort | uniq)" } mirror_images_contiguous() { for i in $(lvdevices "$1/$2"); do lv_is_contiguous "$1" "$i" done } mirror_images_clung() { for i in $(lvdevices "$1/$2"); do lv_is_clung "$1" "$i" done } mirror() { mirror_nonredundant "$@" mirror_images_redundant "$1" "$2" } mirror_nonredundant() { local lv="$1/$2" local attr attr=$(get lv_field "$lv" attr) (echo "$attr" | grep "^......m...$" >/dev/null) || { if (echo "$attr" | grep "^o.........$" >/dev/null) && lvs -a | grep -F "[${2}_mimage" >/dev/null; then echo "TEST WARNING: $lv is a snapshot origin and looks like a mirror," echo "assuming it is actually a mirror" else die "$lv expected a mirror, but is not:" \ "$(lvs "$lv")" fi } test -z "$3" || mirror_log_on "$1" "$2" "$3" } mirror_legs() { local expect_legs=$3 test "$expect_legs" -eq "$(lvdevices "$1/$2" | wc -w)" } mirror_no_temporaries() { local vg=$1 local lv=$2 (lvl -o name "$vg" | grep "$lv" | not grep "tmp") || \ die "$lv has temporary mirror images unexpectedly:" \ "$(lvl "$vg" | grep "$lv")" } linear() { local lv="$1/$2" test "$(get lv_field "$lv" stripes -a)" -eq 1 || \ die "$lv expected linear, but is not:" \ "$(lvl "$lv" -o+devices)" } # in_sync # Works for "mirror" and "raid*" in_sync() { local a local b local c local idx local type local snap="" local lvm_name="$1/$2" local ignore_a=${3:-0} local dm_name="$1-$2" a=( $(dmsetup status "$dm_name") ) || \ die "Unable to get sync status of $1" if [ "${a[2]}" = "snapshot-origin" ]; then a=( $(dmsetup status "${dm_name}-real") ) || \ die "Unable to get sync status of $1" snap=": under snapshot" fi case "${a[2]}" in "raid") # 6th argument is the sync ratio for RAID idx=6 type=${a[3]} if [ "${a[$(( idx + 1 ))]}" != "idle" ]; then echo "$lvm_name ($type$snap) is not in-sync" return 1 fi ;; "mirror") # 4th Arg tells us how far to the sync ratio idx=$(( a[3] + 4 )) type=${a[2]} ;; *) die "Unable to get sync ratio for target type '${a[2]}'" ;; esac b=${a[$idx]%%/*} # split ratio x/y c=${a[$idx]##*/} if [ "$b" -eq 0 ] || [ "$b" != "$c" ]; then echo "$lvm_name ($type$snap) is not in-sync" return 1 fi if [[ ${a[$(( idx - 1 ))]} =~ a ]] ; then [ $ignore_a -eq 0 ] && \ die "$lvm_name ($type$snap) in-sync, but 'a' characters in health status" echo "$lvm_name ($type$snap) is not in-sync" [ $ignore_a -eq 1 ] && 
return 0 return 1 fi echo "$lvm_name ($type$snap) is in-sync" "${a[@]}" } active() { local lv="$1/$2" (get lv_field "$lv" attr | grep "^....a.....$" >/dev/null) || \ die "$lv expected active, but lvs says it's not:" \ "$(lvl "$lv" -o+devices)" dmsetup info "$1-$2" >/dev/null || die "$lv expected active, lvs thinks it is but there are no mappings!" } inactive() { local lv="$1/$2" (get lv_field "$lv" attr | grep "^....[-isd].....$" >/dev/null) || \ die "$lv expected inactive, but lvs says it's not:" \ "$(lvl "$lv" -o+devices)" not dmsetup info "$1-$2" 2>/dev/null || \ die "$lv expected inactive, lvs thinks it is but there are mappings!" } # Check for list of LVs from given VG lv_exists() { local vg=$1 declare -a list=() while [ $# -gt 1 ]; do shift list+=( "$vg/$1" ) done test "${#list[@]}" -gt 0 || list=( "$vg" ) lvl "${list[@]}" &>/dev/null || \ die "${list[@]}" "expected to exist, but does not!" } lv_not_exists() { local vg=$1 if test $# -le 1 ; then if lvl "$vg" &>/dev/null ; then die "$vg expected to not exist but it does!" fi else while [ $# -gt 1 ]; do shift not lvl "$vg/$1" &>/dev/null || die "$vg/$1 expected to not exist but it does!" done fi rm -f debug.log } pv_field() { local actual actual=$(get pv_field "$1" "$2" "${@:4}") test "$actual" = "$3" || \ die "pv_field: PV=\"$1\", field=\"$2\", actual=\"$actual\", expected=\"$3\"" } vg_field() { local actual actual=$(get vg_field "$1" "$2" "${@:4}") test "$actual" = "$3" || \ die "vg_field: vg=$1, field=\"$2\", actual=\"$actual\", expected=\"$3\"" } vg_attr_bit() { local actual local offset=$1 actual=$(get vg_field "$2" vg_attr "${@:4}") case "$offset" in perm*) offset=0 ;; resiz*) offset=1 ;; export*) offset=2 ;; partial) offset=3 ;; alloc*) offset=4 ;; cluster*) offset=5 ;; esac test "${actual:$offset:1}" = "$3" || \ die "vg_attr_bit: vg=$2, ${offset} bit of \"$actual\" is \"${actual:$offset:1}\", but expected \"$3\"" } lv_field() { local actual actual=$(get lv_field "$1" "$2" "${@:4}") test "$actual" = "$3" || \ die "lv_field: lv=$1, field=\"$2\", actual=\"$actual\", expected=\"$3\"" } lv_first_seg_field() { local actual actual=$(get lv_first_seg_field "$1" "$2" "${@:4}") test "$actual" = "$3" || \ die "lv_field: lv=$1, field=\"$2\", actual=\"$actual\", expected=\"$3\"" } lvh_field() { local actual actual=$(get lvh_field "$1" "$2" "${@:4}") test "$actual" = "$3" || \ die "lvh_field: lv=$1, field=\"$2\", actual=\"$actual\", expected=\"$3\"" } lva_field() { local actual actual=$(get lva_field "$1" "$2" "${@:4}") test "$actual" = "$3" || \ die "lva_field: lv=$1, field=\"$2\", actual=\"$actual\", expected=\"$3\"" } lv_attr_bit() { local actual local offset=$1 actual=$(get lv_field "$2" lv_attr "${@:4}") case "$offset" in type) offset=0 ;; perm*) offset=1 ;; alloc*) offset=2 ;; fixed*) offset=3 ;; state) offset=4 ;; open) offset=5 ;; target) offset=6 ;; zero) offset=7 ;; health) offset=8 ;; skip) offset=9 ;; esac test "${actual:$offset:1}" = "$3" || \ die "lv_attr_bit: lv=$2, ${offset} bit of \"$actual\" is \"${actual:$offset:1}\", but expected \"$3\"" } compare_fields() { local cmd1=$1 local obj1=$2 local field1=$3 local cmd2=$4 local obj2=$5 local field2=$6 local val1 local val2 val1=$("$cmd1" --noheadings -o "$field1" "$obj1") val2=$("$cmd2" --noheadings -o "$field2" "$obj2") test "$val1" = "$val2" || \ die "compare_fields $obj1($field1): $val1 $obj2($field2): $val2" } compare_vg_field() { local vg1=$1 local vg2=$2 local field=$3 local val1 local val2 val1=$(vgs --noheadings -o "$field" "$vg1") val2=$(vgs --noheadings -o "$field" 
"$vg2") test "$val1" = "$val2" || \ die "compare_vg_field: $vg1: $val1, $vg2: $val2" } pvlv_counts() { local local_vg=$1 local num_pvs=$2 local num_lvs=$3 local num_snaps=$4 lvs -o+devices "$local_vg" vg_field "$local_vg" pv_count "$num_pvs" vg_field "$local_vg" lv_count "$num_lvs" vg_field "$local_vg" snap_count "$num_snaps" } # Compare md5 check generated from get dev_md5sum dev_md5sum() { md5sum -c "md5.$1-$2" || \ (get lv_field "$1/$2" "name,size,seg_pe_ranges" die "LV $1/$2 has different MD5 check sum!") } sysfs() { # read maj min and also convert hex to decimal local maj local min local P="/sys/dev/block/$maj:$min/$2" local val maj=$(($(stat -L --printf=0x%t "$1"))) min=$(($(stat -L --printf=0x%T "$1"))) val=$(< "$P") || return 0 # no sysfs ? test "$val" -eq "$3" || \ die "$1: $P = $val differs from expected value $3!" } # check raid_leg_status $vg $lv "Aaaaa" raid_leg_status() { local st local val st=$(dmsetup status "$1-$2") val=$(echo "$st" | cut -d ' ' -f 6) test "$val" = "$3" || \ die "$1-$2 status $val != $3 ($st)" } grep_dmsetup() { dmsetup "$1" "$2" | tee out grep -q "${@:3}" out || die "Expected output \"" "${@:3}" "\" from dmsetup $1 not found!" } #set -x unset LVM_VALGRIND "$@" LVM2.2.02.176/test/lib/flavour-ndev-lvmetad-lvmpolld.sh0000644000000000000120000000011713176752421021275 0ustar rootwheelexport LVM_TEST_LOCKING=1 export LVM_TEST_LVMETAD=1 export LVM_TEST_LVMPOLLD=1 LVM2.2.02.176/test/lib/flavour-udev-lvmetad-lvmpolld.sh0000644000000000000120000000015313176752421021304 0ustar rootwheelexport LVM_TEST_LOCKING=1 export LVM_TEST_LVMETAD=1 export LVM_TEST_LVMPOLLD=1 export LVM_TEST_DEVDIR=/dev LVM2.2.02.176/test/lib/mke2fs.conf0000644000000000000120000000144713176752421015115 0ustar rootwheel[defaults] base_features = sparse_super,filetype,resize_inode,dir_index,ext_attr enable_periodic_fsck = 1 blocksize = 4096 inode_size = 256 inode_ratio = 16384 [fs_types] ext3 = { features = has_journal } ext4 = { features = has_journal,extent,huge_file,flex_bg,dir_nlink,extra_isize inode_size = 256 } ext4dev = { features = has_journal,extent,huge_file,flex_bg,dir_nlink,extra_isize inode_size = 256 options = test_fs=1 } small = { blocksize = 1024 inode_size = 128 inode_ratio = 4096 } floppy = { blocksize = 1024 inode_size = 128 inode_ratio = 8192 } news = { inode_ratio = 4096 } largefile = { inode_ratio = 1048576 blocksize = -1 } largefile4 = { inode_ratio = 4194304 blocksize = -1 } hurd = { blocksize = 4096 inode_size = 128 } LVM2.2.02.176/test/lib/runner.cpp0000644000000000000120000000320213176752421015063 0ustar rootwheel/* -*- C++ -*- copyright (c) 2014 Red Hat, Inc. * * This file is part of LVM2. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "brick-shelltest.h" int main(int argc, const char **argv) { try { return brick::shelltest::run( argc, argv, "LVM_TEST_FLAVOUR" ); } catch (std::exception const& e) { std::cout << "Exception: " << e.what() << "\n"; } return 1; } LVM2.2.02.176/test/api/0000755000000000000120000000000013176752421013054 5ustar rootwheelLVM2.2.02.176/test/api/vglist.c0000644000000000000120000000271213176752421014532 0ustar rootwheel/* * Copyright (C) 2009 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include "lvm2app.h" lvm_t handle; vg_t vg; static void start(void) { handle = lvm_init(NULL); if (!handle) { fprintf(stderr, "Unable to lvm_init\n"); abort(); } } static void done(int ok) { if (handle && lvm_errno(handle)) { fprintf(stderr, "LVM Error: %s\n", lvm_errmsg(handle)); ok = 0; } if (handle) lvm_quit(handle); if (!ok) abort(); } int main(int argc, char *argv[]) { lvm_str_list_t *str; int i = 0; struct dm_list *vgnames; struct dm_list *vgids; if (argc != 3) abort(); start(); vgnames = lvm_list_vg_names(handle); dm_list_iterate_items(str, vgnames) { assert(++i <= 1); assert(!strcmp(str->str, argv[1])); } assert(i == 1); done(1); i = 0; start(); vgids = lvm_list_vg_uuids(handle); dm_list_iterate_items(str, vgids) { assert(++i <= 1); assert(!strcmp(str->str, argv[2])); } assert(i == 1); done(1); return 0; } LVM2.2.02.176/test/api/pe_start.c0000644000000000000120000000216313176752421015043 0ustar rootwheel/* * Copyright (C) 2011 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. 
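vglist.c above asserts that lvm2app reports exactly one VG whose name and uuid match the arguments it is given; api/vglist.sh further below drives it, and roughly the same check can be expressed with plain CLI tools (editor's sketch):

aux prepare_vg 2
aux apitest vglist "$vg" "$(get vg_field "$vg" vg_uuid | sed -e s,-,,g)"

# approximately the same assertion via the CLI (only the test VG is visible
# inside the private LVM_SYSTEM_DIR/DM_DEV_DIR set up by the harness):
test "$(vgs --noheadings -o vg_name | tr -d ' ')" = "$vg"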
* * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #undef NDEBUG #include "lvm2app.h" #include "assert.h" int main(int argc, char *argv[]) { lvm_t handle; vg_t vg = NULL; pv_t pv; struct lvm_property_value v; handle = lvm_init(NULL); assert(handle); vg = lvm_vg_create(handle, argv[1]); assert(vg); if (lvm_vg_extend(vg, argv[2])) abort(); pv = lvm_pv_from_name(vg, argv[2]); assert(pv); v = lvm_pv_get_property(pv, "pe_start"); assert(v.is_valid); fprintf(stderr, "pe_start = %d\n", (int)v.value.integer); assert(v.value.integer == 2048 * 512); lvm_vg_close(vg); lvm_quit(handle); return 0; } LVM2.2.02.176/test/api/Makefile.in0000644000000000000120000000260413176752421015123 0ustar rootwheel# # Copyright (C) 2009-2012 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ TARGETS = ifeq ("@APPLIB@", "yes") TARGETS += test SOURCES = test.c TARGETS += \ lvtest.t \ vglist.t \ percent.t \ pe_start.t \ thin_percent.t \ vgtest.t SOURCES2 = \ lvtest.c \ vglist.c \ percent.c \ pe_start.c \ thin_percent.c \ vgtest.c endif include $(top_builddir)/make.tmpl DEFS += -D_REENTRANT DEPLIBS += $(top_builddir)/liblvm/liblvm2app.so $(top_builddir)/libdm/libdevmapper.so LDFLAGS += -L$(top_builddir)/liblvm -L$(top_builddir)/daemons/dmeventd LIBS += @LVM2APP_LIB@ $(DMEVENT_LIBS) -ldevmapper %.t: %.o $(DEPLIBS) $(CC) -o $@ $(<) $(CFLAGS) $(LDFLAGS) $(ELDFLAGS) $(LIBS) test: $(OBJECTS) $(DEPLIBS) $(CC) -o $@ $(OBJECTS) $(CFLAGS) $(LDFLAGS) $(ELDFLAGS) $(LIBS) $(READLINE_LIBS) Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status cd $(top_builddir) && $(SHELL) ./config.status test/api/Makefile LVM2.2.02.176/test/api/thin_percent.sh0000644000000000000120000000171713176752421016100 0ustar rootwheel#!/bin/sh # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMPOLLD=1 export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false} . lib/inittest aux have_thin 1 0 0 || skip aux prepare_devs 2 vgcreate -s 64k $vg $(cat DEVICES) lvcreate -L5M -T $vg/pool lvcreate -V1M -T $vg/pool -n thin dd if=/dev/urandom of="$DM_DEV_DIR/$vg/thin" count=2 bs=256K lvcreate -s $vg/thin -K -n snap dd if=/dev/urandom of="$DM_DEV_DIR/$vg/snap" count=3 bs=256K lvs -o+discards $vg aux apitest thin_percent $vg vgremove -ff $vg LVM2.2.02.176/test/api/percent.sh0000644000000000000120000000200113176752421015041 0ustar rootwheel#!/bin/sh # Copyright (C) 2010-2013 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. 
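The dd sizes in api/thin_percent.sh above are chosen so that the percentages asserted by thin_percent.c (further below in this listing) come out exact: 2x256K written to the 1M thin volume is 50% data_percent, 3x256K written to its snapshot is 75%, and together those writes allocate 1.25M of the 5M pool, i.e. 25%. The same numbers can be read back from the CLI (editor's sketch):

lvs -o lv_name,data_percent $vg
# expected, given the writes above:
#   pool   25.00
#   thin   50.00
#   snap   75.00
check lv_field $vg/thin data_percent "50.00"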
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMPOLLD=1 . lib/inittest aux kernel_at_least 2 6 33 || skip aux prepare_pvs 2 get_devs vgcreate -s 4k "$vg" "${DEVICES[@]}" lvcreate -aey -l 5 -n foo $vg lvcreate -s -n snap $vg/foo -l 3 -c 4k lvcreate -s -n snap2 $vg/foo -l 6 -c 4k dd if=/dev/zero of="$DM_DEV_DIR/$vg/snap2" count=1 bs=1024 oflag=direct # skip test with broken kernel check lv_field $vg/snap2 data_percent "50.00" || skip lvcreate -aey --type mirror -m 1 -n mirr $vg -l 1 --mirrorlog core lvs -a $vg aux apitest percent $vg vgremove -ff $vg LVM2.2.02.176/test/api/lvtest.c0000644000000000000120000000304113176752421014537 0ustar rootwheel/* * Copyright (C) 2010 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #undef NDEBUG #include "lvm2app.h" #include "assert.h" #define err(args...) \ do { fprintf(stderr, args); goto bad; } while (0) int main(int argc, char *argv[]) { lvm_t handle; vg_t vg; lv_t lv; int r = -1; if (!(handle = lvm_init(NULL))) return -1; if (!(vg = lvm_vg_open(handle, argv[1], "w", 0))) err("VG open %s failed.\n", argv[1]); if (!(lv = lvm_lv_from_name(vg, "test"))) err("LV test not found.\n"); if (lvm_lv_deactivate(lv)) err("LV test deactivation failed.\n"); if (lvm_lv_activate(lv)) err("LV test activation failed.\n"); if (lvm_lv_activate(lv)) err("LV test repeated activation failed.\n"); if (lvm_lv_rename(lv, "test1")) err("LV test rename to test1 failed.\n"); if (lvm_lv_rename(lv, "test2")) err("LV test1 rename to test2 failed.\n"); if (lvm_lv_rename(lv, "test")) err("LV test2 rename to test failed.\n"); if (lvm_vg_close(vg)) err("VG close failed.\n"); r = 0; bad: lvm_quit(handle); return r; } LVM2.2.02.176/test/api/vgtest.c0000644000000000000120000000771413176752421014545 0ustar rootwheel/* * Copyright (C) 2009 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* * Unit test case for vgcreate and related APIs. 
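lvtest.c above drives deactivation, activation (twice, to show repeated activation succeeds) and a rename round-trip through lvm2app; the equivalent CLI sequence a shell test would use for the same coverage (editor's sketch):

lvchange -an $vg/test       # lvm_lv_deactivate()
lvchange -ay $vg/test       # lvm_lv_activate()
lvchange -ay $vg/test       # repeated activation must also succeed
lvrename $vg test test1     # lvm_lv_rename() round trip: test -> test1 -> test2 -> test
lvrename $vg test1 test2
lvrename $vg test2 test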
* # gcc -g vgcreate.c -I../../liblvm -I../../include -L../../liblvm \ * -L../../libdm -ldevmapper -llvm2app * # export LD_LIBRARY_PATH=`pwd`/../../libdm:`pwd`/../../liblvm */ #include #include #include #include "lvm2app.h" lvm_t handle; vg_t vg; const char *vg_name; #define MAX_DEVICES 16 const char *device[MAX_DEVICES]; uint64_t size = 1024; #define vg_create(vg_name) \ printf("Creating VG %s\n", vg_name); \ vg = lvm_vg_create(handle, vg_name); \ if (!vg) { \ fprintf(stderr, "Error creating volume group %s\n", vg_name); \ goto bad; \ } #define vg_extend(vg, dev) \ printf("Extending VG %s by %s\n", vg_name, dev); \ status = lvm_vg_extend(vg, dev); \ if (status) { \ fprintf(stderr, "Error extending volume group %s " \ "with device %s\n", vg_name, dev); \ goto bad; \ } #define vg_commit(vg) \ printf("Committing VG %s to disk\n", vg_name); \ status = lvm_vg_write(vg); \ if (status) { \ fprintf(stderr, "Commit of volume group '%s' failed\n", \ lvm_vg_get_name(vg)); \ goto bad; \ } #define vg_open(vg_name, mode) \ printf("Opening VG %s %s\n", vg_name, mode); \ vg = lvm_vg_open(handle, vg_name, mode, 0); \ if (!vg) { \ fprintf(stderr, "Error opening volume group %s\n", vg_name); \ goto bad; \ } #define vg_close(vg) \ printf("Closing VG %s\n", vg_name); \ if (lvm_vg_close(vg)) { \ fprintf(stderr, "Error closing volume group %s\n", vg_name); \ goto bad; \ } #define vg_reduce(vg, dev) \ printf("Reducing VG %s by %s\n", vg_name, dev); \ status = lvm_vg_reduce(vg, dev); \ if (status) { \ fprintf(stderr, "Error reducing volume group %s " \ "by device %s\n", vg_name, dev); \ goto bad; \ } #define vg_remove(vg) \ printf("Removing VG %s from system\n", vg_name); \ status = lvm_vg_remove(vg); \ if (status) { \ fprintf(stderr, "Revmoval of volume group '%s' failed\n", \ vg_name); \ goto bad; \ } static int init_vgtest(int argc, char *argv[]) { int i; if (argc < 4) { fprintf(stderr, "Usage: %s [... ]", argv[0]); return -1; } vg_name = argv[1]; for(i=2; ilvseg, "discards"); assert(v.is_valid && v.is_string); assert(strcmp(v.value.string, "passdown") == 0); } v = lvm_lv_get_property(lv, "data_percent"); assert(v.is_valid); assert(v.value.integer == 25 * PERCENT_1); lv = lvm_lv_from_name(vg, "thin"); assert(lv); v = lvm_lv_get_property(lv, "data_percent"); assert(v.is_valid); assert(v.value.integer == 50 * PERCENT_1); lv = lvm_lv_from_name(vg, "snap"); assert(lv); v = lvm_lv_get_property(lv, "data_percent"); assert(v.is_valid); assert(v.value.integer == 75 * PERCENT_1); v = lvm_lv_get_property(lv, "snap_percent"); assert(v.is_valid); assert(v.value.integer == (uint64_t) DM_PERCENT_INVALID); v = lvm_lv_get_property(lv, "origin"); assert(v.is_valid); assert(strcmp(v.value.string, "thin") == 0); lvm_vg_close(vg); lvm_quit(handle); return 0; } LVM2.2.02.176/test/api/lvtest.sh0000644000000000000120000000122013176752421014724 0ustar rootwheel#!/bin/sh # Copyright (C) 2011 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMPOLLD=1 . 
lib/inittest aux prepare_vg 1 lvcreate -n test -l 5 $vg aux apitest lvtest $vg check lv_field $vg/test lv_name test vgremove -ff $vg LVM2.2.02.176/test/api/pytest.sh0000644000000000000120000000511013176752421014735 0ustar rootwheel#!/bin/bash # Copyright (C) 2012-2015 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMETAD=1 SKIP_WITH_CLVMD=1 . lib/inittest aux prepare_dmeventd # # TODO: # lvm2app is not yet capable to respect many lvm.conf options # since a lot of them is set in /tools/liblvmline # Until fixed - testing always runs with enabled monitoring # thus it needs dmeventd # # Example of using 'gdb' with python: # gdb -ex r --args python FULL_PATH/lvm2/test/api/python_lvm_unit.py -v TestLvm.test_lv_active_inactive #Locate the python binding library to use. if [[ -n "${abs_top_builddir+varset}" ]]; then python_lib=($(find "$abs_top_builddir" -name lvm.so)) if [[ ${#python_lib[*]} -ne 1 ]]; then if [[ ${#python_lib[*]} -gt 1 ]]; then # Unable to test python bindings if multiple libraries found: echo "Found left over lvm.so: ${python_lib[*]}" false else # Unable to test python bindings if library not available skip "lvm2-python-libs not built" fi fi PYTHONPATH=$(dirname "${python_lib[*]}"):${PYTHONPATH-} export PYTHONPATH elif rpm -q lvm2-python-libs &>/dev/null; then true else skip "lvm2-python-libs neither built nor installed" fi #If you change this change the unit test case too. aux prepare_pvs 6 #Setup which devices the unit test can use. PY_UNIT_PVS=$(cat DEVICES) export PY_UNIT_PVS #python_lvm_unit.py -v -f # Run individual tests for shorter error trace for i in \ lv_persistence \ config_find_bool \ config_override \ config_reload \ dupe_lv_create \ get_set_extend_size \ lv_active_inactive \ lv_property \ lv_rename \ lv_resize \ lv_seg \ lv_size \ lv_snapshot \ lv_suspend \ lv_tags \ percent_to_float \ pv_create \ pv_empty_listing \ pv_getters \ pv_life_cycle \ pv_lookup_from_vg \ pv_property \ pv_resize \ pv_segs \ scan \ version \ vg_from_pv_lookups \ vg_getters \ vg_get_name \ vg_get_set_prop \ vg_get_uuid \ vg_lv_name_validate \ vg_names \ vg_reduce \ vg_remove_restore \ vg_tags \ vg_uuids do python_lvm_unit.py -v TestLvm.test_$i rm -f debug.log_DEBUG* done # CHECKME: not for testing? #python_lvm_unit.py -v TestLvm.test_listing #python_lvm_unit.py -v TestLvm.test_pv_methods LVM2.2.02.176/test/api/test.c0000644000000000000120000006414513176752421014211 0ustar rootwheel/* * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved. * Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. 
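pytest.sh above runs every python-binding case in its own python_lvm_unit.py invocation; to reproduce a single case by hand the same environment has to be recreated first (editor's sketch, assuming exactly one lvm.so in the build tree and a prepared test directory containing the DEVICES file):

PYTHONPATH=$(dirname "$(find "$abs_top_builddir" -name lvm.so)") \
PY_UNIT_PVS=$(cat DEVICES) \
  python_lvm_unit.py -v TestLvm.test_pv_resize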
* * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include "configure.h" #include "lvm2app.h" #ifdef READLINE_SUPPORT #include #define MAX_ARGS 64 static int lvm_split(char *str, int *argc, char **argv, int max) { char *b = str, *e; *argc = 0; while (*b) { while (*b && isspace(*b)) b++; if ((!*b) || ((*argc == 0)&&(*b == '#'))) break; e = b; while (*e && !isspace(*e)) e++; argv[(*argc)++] = b; if (!*e) break; *e++ = '\0'; b = e; if (*argc == max) break; } if (*argc < max) argv[*argc] = NULL; return *argc; } static void _show_help(void) { printf("'lv_activate vgname lvname: " "Activate an LV\n"); printf("'lv_deactivate vgname lvname: " "Deactivate an LV\n"); printf("'vg_remove_lv vgname lvname': " "Remove a LV\n"); printf("'vg_create_lv_linear vgname lvname size_in_bytes': " "Create a linear LV\n"); printf("'scan_vgs': " "Scan the system for LVM metadata\n"); printf("'list_vg_names': " "List the names of the VGs that exist in the system\n"); printf("'list_vg_ids': " "List the uuids of the VGs that exist in the system\n"); printf("'vg_list_pvs vgname': " "List the PVs that exist in VG vgname\n"); printf("'pv_list_pvsegs pvname': " "List the PV segments that exist in PV pvname\n"); printf("'vg_list_lvs vgname': " "List the LVs that exist in VG vgname\n"); printf("'lv_list_lvsegs vgname lvname': " "List the LV segments that exist in LV vgname/lvname\n"); printf("'vgs_open': " "List the VGs that are currently open\n"); printf("'vgs': " "List all VGs known to the system\n"); printf("'vg_extend vgname device: " "Issue a lvm_vg_extend() API call on VG 'vgname'\n"); printf("'vg_reduce vgname device: " "Issue a lvm_vg_reduce() API call on VG 'vgname'\n"); printf("'vg_open vgname ['r' | 'w']': " "Issue a lvm_vg_open() API call on VG 'vgname'\n"); printf("'vg_close vgname': " "Issue a lvm_vg_close() API call on VG 'vgname'\n"); printf("'vg_create vgname: " "Issue a lvm_vg_create() to create VG 'vgname'\n"); printf("'vg_remove vgname: " "Issue a lvm_vg_remove() to remove VG 'vgname'\n"); printf("'config_reload': " "Issue a lvm_config_reload() API to reload LVM config\n"); printf("'config_override' device: " "Issue a lvm_config_override() with accept device filter\n"); printf("'vg_get_tags vgname': " "List the tags of a VG\n"); printf("'lv_get_property vgname lvname property_name': " "Display the value of LV property\n"); printf("'vg_get_property vgname property_name': " "Display the value of VG property\n"); printf("'pv_get_property pvname property_name': " "Display the value of PV property\n"); printf("'vg_set_property vgname property_name': " "Set the value of VG property\n"); printf("'lv_get_tags vgname lvname': " "List the tags of a LV\n"); printf("'vg_{add|remove}_tag vgname tag': " "Add/remove a tag from a VG\n"); printf("'lv_{add|remove}_tag vgname lvname tag': " "Add/remove a tag from a LV\n"); printf("'vgname_from_devname device': " "Lookup a vgname from a device name\n"); printf("'vgname_from_pvid pvid': " "Lookup a vgname from a pvid\n"); printf("'lv_from_uuid vgname lvuuid': " "Lookup an LV from an LV uuid\n"); printf("'lv_from_name vgname lvname': " "Lookup an LV from an LV name\n"); printf("'pv_from_uuid vgname pvuuid': " "Lookup an LV from an LV uuid\n"); printf("'pv_from_name vgname pvname': " "Lookup an LV from an LV name\n"); printf("'quit': exit the program\n"); } static struct 
dm_hash_table *_vgid_hash = NULL; static struct dm_hash_table *_vgname_hash = NULL; static struct dm_hash_table *_pvname_hash = NULL; static struct dm_hash_table *_lvname_hash = NULL; static void _hash_destroy_single(struct dm_hash_table **htable) { if (htable && *htable) { dm_hash_destroy(*htable); *htable = NULL; } } static void _hash_destroy(void) { _hash_destroy_single(&_vgname_hash); _hash_destroy_single(&_vgid_hash); _hash_destroy_single(&_pvname_hash); _hash_destroy_single(&_lvname_hash); } static int _hash_create(void) { if (!(_vgname_hash = dm_hash_create(128))) return 0; if (!(_pvname_hash = dm_hash_create(128))) { _hash_destroy_single(&_vgname_hash); return 0; } if (!(_lvname_hash = dm_hash_create(128))) { _hash_destroy_single(&_vgname_hash); _hash_destroy_single(&_pvname_hash); return 0; } if (!(_vgid_hash = dm_hash_create(128))) { _hash_destroy_single(&_vgname_hash); _hash_destroy_single(&_pvname_hash); _hash_destroy_single(&_lvname_hash); return 0; } return 1; } /* FIXME: this should be per vg */ static lv_t _lookup_lv_by_name(const char *name) { lv_t lv; if (!name) { printf ("Invalid LV name\n"); return NULL; } if (!(lv = dm_hash_lookup(_lvname_hash, name))) { printf ("Can't find %s in LVs - run vg_create_lv first\n", name); return NULL; } return lv; } static vg_t _lookup_vg_by_name(char **argv, int argc) { vg_t vg; if (argc < 2) { printf ("Please enter vg_name\n"); return NULL; } if (!(vg = dm_hash_lookup(_vgid_hash, argv[1])) && !(vg = dm_hash_lookup(_vgname_hash, argv[1]))) { printf ("Can't find %s in open VGs - run vg_open first\n", argv[1]); return NULL; } return vg; } static pv_t _lookup_pv_by_name(const char *name) { pv_t pv; if (!(pv = dm_hash_lookup(_pvname_hash, name))) { printf ("Can't find %s in open PVs - run vg_open first\n", name); return NULL; } return pv; } static void _add_lvs_to_lvname_hash(struct dm_list *lvs) { struct lvm_lv_list *lvl; dm_list_iterate_items(lvl, lvs) { /* Concatenate VG name with LV name */ dm_hash_insert(_lvname_hash, lvm_lv_get_name(lvl->lv), lvl->lv); } } static void _add_pvs_to_pvname_hash(struct dm_list *pvs) { struct lvm_pv_list *pvl; dm_list_iterate_items(pvl, pvs) { dm_hash_insert(_pvname_hash, lvm_pv_get_name(pvl->pv), pvl->pv); } } static void _remove_device_from_pvname_hash(struct dm_list *pvs, const char *name) { struct lvm_pv_list *pvl; dm_list_iterate_items(pvl, pvs) { if (!strncmp(lvm_pv_get_name(pvl->pv), name, strlen(name))) dm_hash_remove(_pvname_hash, name); } } static void _add_device_to_pvname_hash(struct dm_list *pvs, const char *name) { struct lvm_pv_list *pvl; dm_list_iterate_items(pvl, pvs) { if (!strncmp(lvm_pv_get_name(pvl->pv), name, strlen(name))) dm_hash_insert(_pvname_hash, name, pvl->pv); } } static void _vg_reduce(char **argv, int argc, lvm_t libh) { vg_t vg; struct dm_list *pvs; if (argc < 2) { printf ("Please enter vg_name\n"); return; } if (!(vg = dm_hash_lookup(_vgid_hash, argv[1])) && !(vg = dm_hash_lookup(_vgname_hash, argv[1]))) { printf ("VG not open\n"); return; } if (lvm_vg_reduce(vg, argv[2])) { printf("Error reducing %s by %s\n", argv[1], argv[2]); return; } printf("Success reducing vg %s by %s\n", argv[1], argv[2]); /* * Add the device into the hashes for lookups */ pvs = lvm_vg_list_pvs(vg); if (pvs && !dm_list_empty(pvs)) _remove_device_from_pvname_hash(pvs, argv[2]); } /* Print "Error" or "Success" depending on lvm status */ static int _lvm_status_to_pass_fail(int rc) { if (rc) printf("Error "); else printf("Success "); return rc; } static void _config_override(char **argv, int argc, lvm_t 
libh) { int rc; char tmp[64]; if (argc < 2) { printf ("Please enter device\n"); return; } snprintf(tmp, 63, "devices{filter=[\"a|%s|\", \"r|.*|\"]}", argv[1]); rc = lvm_config_override(libh, tmp); _lvm_status_to_pass_fail(rc); printf("overriding LVM configuration\n"); } static void _config_reload(char **argv, int argc, lvm_t libh) { int rc; rc = lvm_config_reload(libh); _lvm_status_to_pass_fail(rc); printf("reloading LVM configuration\n"); } static void _vg_extend(char **argv, int argc, lvm_t libh) { vg_t vg; struct dm_list *pvs; if (argc < 2) { printf ("Please enter vg_name\n"); return; } if (!(vg = dm_hash_lookup(_vgid_hash, argv[1])) && !(vg = dm_hash_lookup(_vgname_hash, argv[1]))) { printf ("VG not open\n"); return; } if (lvm_vg_extend(vg, argv[2])) { printf("Error extending %s with %s\n", argv[1], argv[2]); return; } printf("Success extending vg %s with %s\n", argv[1], argv[2]); /* * Add the device into the hashes for lookups */ pvs = lvm_vg_list_pvs(vg); if (pvs && !dm_list_empty(pvs)) _add_device_to_pvname_hash(pvs, argv[2]); } static void _vg_open(char **argv, int argc, lvm_t libh) { vg_t vg; struct dm_list *lvs; struct dm_list *pvs; if (argc < 2) { printf ("Please enter vg_name\n"); return; } if ((vg = dm_hash_lookup(_vgid_hash, argv[1])) || (vg = dm_hash_lookup(_vgname_hash, argv[1]))) { printf ("VG already open\n"); return; } if (argc < 3) vg = lvm_vg_open(libh, argv[1], "r", 0); else vg = lvm_vg_open(libh, argv[1], argv[2], 0); if (!vg || !lvm_vg_get_name(vg)) { printf("Error opening %s\n", argv[1]); return; } printf("Success opening vg %s\n", argv[1]); dm_hash_insert(_vgname_hash, lvm_vg_get_name(vg), vg); dm_hash_insert(_vgid_hash, lvm_vg_get_uuid(vg), vg); /* * Add the LVs and PVs into the hashes for lookups */ lvs = lvm_vg_list_lvs(vg); if (lvs && !dm_list_empty(lvs)) _add_lvs_to_lvname_hash(lvs); pvs = lvm_vg_list_pvs(vg); if (pvs && !dm_list_empty(pvs)) _add_pvs_to_pvname_hash(pvs); } /* Lookup the vg and remove it from the vgname and vgid hashes */ static vg_t _lookup_and_remove_vg(const char *vgname) { vg_t vg=NULL; if ((vg = dm_hash_lookup(_vgname_hash, vgname))) { dm_hash_remove(_vgid_hash, lvm_vg_get_uuid(vg)); dm_hash_remove(_vgname_hash, lvm_vg_get_name(vg)); } if (!vg && (vg = dm_hash_lookup(_vgid_hash, vgname))) { dm_hash_remove(_vgid_hash, lvm_vg_get_uuid(vg)); dm_hash_remove(_vgname_hash, lvm_vg_get_name(vg)); } return vg; } static void _vg_write(char **argv, int argc) { vg_t vg; int rc = 0; if (argc < 2) { printf ("Please enter vg_name\n"); return; } vg = _lookup_vg_by_name(argv, argc); if (!vg) { printf("Can't find vg_name %s\n", argv[1]); return; } rc = lvm_vg_write(vg); _lvm_status_to_pass_fail(rc); printf("writing VG %s\n", lvm_vg_get_name(vg)); } static void _vg_create(char **argv, int argc, lvm_t libh) { vg_t vg; if (argc < 2) { printf ("Please enter vg_name\n"); return; } vg = lvm_vg_create(libh, argv[1]); if (!vg || !lvm_vg_get_name(vg)) { printf("Error creating %s\n", argv[1]); return; } printf("Success creating vg %s\n", argv[1]); dm_hash_insert(_vgname_hash, lvm_vg_get_name(vg), vg); dm_hash_insert(_vgid_hash, lvm_vg_get_uuid(vg), vg); } static void _vg_remove(char **argv, int argc) { vg_t vg; int rc = 0; if (argc < 2) { printf ("Please enter vg_name\n"); return; } vg = _lookup_vg_by_name(argv, argc); if (!vg) { printf("Can't find vg_name %s\n", argv[1]); return; } rc = lvm_vg_remove(vg); _lvm_status_to_pass_fail(rc); printf("removing VG\n"); } static void _vg_close(char **argv, int argc) { vg_t vg; int rc = 0; if (argc < 2) { printf ("Please enter 
vg_name\n"); return; } vg = _lookup_and_remove_vg(argv[1]); if (!vg) { printf("Can't find vg_name %s\n", argv[1]); return; } rc = lvm_vg_close(vg); _lvm_status_to_pass_fail(rc); printf("closing VG\n"); } static void _show_one_vg(vg_t vg) { printf("%s (%s): sz=%"PRIu64", free=%"PRIu64", #pv=%"PRIu64 ", seq#=%"PRIu64"\n", lvm_vg_get_name(vg), lvm_vg_get_uuid(vg), lvm_vg_get_size(vg), lvm_vg_get_free_size(vg), lvm_vg_get_pv_count(vg), lvm_vg_get_seqno(vg)); } static void _print_pv(pv_t pv) { if (!pv) return; printf("%s (%s): size=%"PRIu64", free=%"PRIu64 ", dev_size=%"PRIu64", mda_count=%"PRIu64"\n", lvm_pv_get_name(pv), lvm_pv_get_uuid(pv), lvm_pv_get_size(pv), lvm_pv_get_free(pv), lvm_pv_get_dev_size(pv), lvm_pv_get_mda_count(pv)); } static void _print_lv(vg_t vg, lv_t lv) { if (!lv) return; printf("%s/%s (%s): size=%"PRIu64", %sACTIVE / %sSUSPENDED\n", lvm_vg_get_name(vg), lvm_lv_get_name(lv), lvm_lv_get_uuid(lv), lvm_lv_get_size(lv), lvm_lv_is_active(lv) ? "" : "IN", lvm_lv_is_suspended(lv) ? "" : "NOT "); } static void _list_open_vgs(void) { dm_hash_iter(_vgid_hash, (dm_hash_iterate_fn) _show_one_vg); } static void _pvs_in_vg(char **argv, int argc) { struct dm_list *pvs; struct lvm_pv_list *pvl; vg_t vg; if (!(vg = _lookup_vg_by_name(argv, argc))) return; pvs = lvm_vg_list_pvs(vg); if (!pvs || dm_list_empty(pvs)) { printf("No PVs in VG %s\n", lvm_vg_get_name(vg)); return; } printf("PVs in VG %s:\n", lvm_vg_get_name(vg)); dm_list_iterate_items(pvl, pvs) { _print_pv(pvl->pv); } } static void _print_property_value(const char *name, struct lvm_property_value v) { if (!v.is_valid) printf("%s = INVALID\n", name); else if (v.is_string) printf("%s = %s\n", name, v.value.string); else printf("%s = %"PRIu64"\n", name, v.value.integer); } static void _pvsegs_in_pv(char **argv, int argc) { struct dm_list *pvsegs; struct lvm_pvseg_list *pvl; pv_t pv; if (!(pv = _lookup_pv_by_name(argv[1]))) return; pvsegs = lvm_pv_list_pvsegs(pv); if (!pvsegs || dm_list_empty(pvsegs)) { printf("No PV segments in pv %s\n", argv[1]); return; } printf("PV segments in pv %s:\n", argv[1]); dm_list_iterate_items(pvl, pvsegs) { struct lvm_property_value v; v = lvm_pvseg_get_property(pvl->pvseg, "pvseg_start"); _print_property_value("pvseg_start", v); v = lvm_pvseg_get_property(pvl->pvseg, "pvseg_size"); _print_property_value("pvseg_size", v); } } static void _scan_vgs(lvm_t libh) { lvm_scan(libh); } static void _list_vg_names(lvm_t libh) { struct dm_list *list; struct lvm_str_list *strl; list = lvm_list_vg_names(libh); printf("VG names:\n"); dm_list_iterate_items(strl, list) { printf("%s\n", strl->str); } } static void _list_vg_ids(lvm_t libh) { struct dm_list *list; struct lvm_str_list *strl; list = lvm_list_vg_uuids(libh); printf("VG uuids:\n"); dm_list_iterate_items(strl, list) { printf("%s\n", strl->str); } } static void _display_tags(struct dm_list *list) { struct lvm_str_list *strl; if (dm_list_empty(list)) { printf("No tags exist\n"); return; } else if (!list) { printf("Error obtaining tags\n"); return; } dm_list_iterate_items(strl, list) { printf("%s\n", strl->str); } } static void _vg_get_tags(char **argv, int argc) { vg_t vg; if (!(vg = _lookup_vg_by_name(argv, argc))) return; printf("VG tags:\n"); _display_tags(lvm_vg_get_tags(vg)); } static void _vg_tag(char **argv, int argc, int add) { vg_t vg; if (argc < 3) { printf("Please enter vgname, tag\n"); return; } if (!(vg = _lookup_vg_by_name(argv, argc))) return; if (add && lvm_vg_add_tag(vg, argv[2])) printf("Error "); else if (!add && lvm_vg_remove_tag(vg, 
argv[2])){ printf("Error "); } else { printf("Success "); } printf("%s tag %s to VG %s\n", add ? "adding":"removing", argv[2], argv[1]); } static void _pv_get_property(char **argv, int argc) { pv_t pv; struct lvm_property_value v; if (argc < 3) { printf("Please enter pvname, field_id\n"); return; } if (!(pv = _lookup_pv_by_name(argv[1]))) return; v = lvm_pv_get_property(pv, argv[2]); _print_property_value(argv[2], v); } static void _vg_get_property(char **argv, int argc) { vg_t vg; struct lvm_property_value v; if (argc < 3) { printf("Please enter vgname, field_id\n"); return; } if (!(vg = _lookup_vg_by_name(argv, argc))) return; v = lvm_vg_get_property(vg, argv[2]); _print_property_value(argv[2], v); } static void _lv_get_property(char **argv, int argc) { lv_t lv; struct lvm_property_value v; if (argc < 4) { printf("Please enter vgname, lvname, field_id\n"); return; } if (!(lv = _lookup_lv_by_name(argv[2]))) return; v = lvm_lv_get_property(lv, argv[3]); _print_property_value(argv[3], v); } static void _vg_set_property(char **argv, int argc) { vg_t vg; struct lvm_property_value value; int rc; if (argc < 4) { printf("Please enter vgname, field_id, value\n"); return; } if (!(vg = _lookup_vg_by_name(argv, argc))) return; value = lvm_vg_get_property(vg, argv[2]); if (!value.is_valid) { printf("Error obtaining property value\n"); return; } if (value.is_string) value.value.string = argv[3]; else value.value.integer = atoi(argv[3]); rc = lvm_vg_set_property(vg, argv[2], &value); if (rc) printf("Error "); else printf("Success "); printf("setting value of property %s in VG %s\n", argv[2], argv[1]); } static void _lv_get_tags(char **argv, int argc) { lv_t lv; if (argc < 3) { printf("Please enter vgname, lvname\n"); return; } if (!(lv = _lookup_lv_by_name(argv[2]))) return; printf("LV tags:\n"); _display_tags(lvm_lv_get_tags(lv)); } static void _lv_tag(char **argv, int argc, int add) { lv_t lv; if (argc < 3) { printf("Please enter vgname, lvname\n"); return; } if (!(lv = _lookup_lv_by_name(argv[2]))) return; if (add && lvm_lv_add_tag(lv, argv[3])) printf("Error "); else if (!add && lvm_lv_remove_tag(lv, argv[3])){ printf("Error "); } else { printf("Success "); } printf("%s tag %s to LV %s\n", add ? 
"adding":"removing", argv[3], argv[2]); } static void _lv_from_uuid(char **argv, int argc) { vg_t vg; if (argc < 3) { printf("Please enter vgname, lv_uuid\n"); return; } if (!(vg = _lookup_vg_by_name(argv, argc))) return; _print_lv(vg, lvm_lv_from_uuid(vg, argv[2])); } static void _lv_from_name(char **argv, int argc) { vg_t vg; if (argc < 3) { printf("Please enter vgname, lv_uuid\n"); return; } if (!(vg = _lookup_vg_by_name(argv, argc))) return; _print_lv(vg, lvm_lv_from_name(vg, argv[2])); } static void _pv_from_uuid(char **argv, int argc) { vg_t vg; if (argc < 3) { printf("Please enter vgname, pv_uuid\n"); return; } if (!(vg = _lookup_vg_by_name(argv, argc))) return; _print_pv(lvm_pv_from_uuid(vg, argv[2])); } static void _pv_from_name(char **argv, int argc) { vg_t vg; if (argc < 3) { printf("Please enter vgname, pv_uuid\n"); return; } if (!(vg = _lookup_vg_by_name(argv, argc))) return; _print_pv(lvm_pv_from_name(vg, argv[2])); } static void _vgname_from_pvid(char **argv, int argc, lvm_t libh) { const char *vgname; if (argc < 1) { printf("Please enter pvid\n"); return; } if (!(vgname = lvm_vgname_from_pvid(libh, argv[1]))) { printf("Error "); } else { printf("Success "); } printf("looking up vgname=%s from PVID=%s\n", vgname, argv[1]); } static void _vgname_from_devname(char **argv, int argc, lvm_t libh) { const char *vgname; if (argc < 1) { printf("Please enter device\n"); return; } if (!(vgname = lvm_vgname_from_device(libh, argv[1]))) { printf("Error "); } else { printf("Success "); } printf("looking up vgname=%s from device name=%s\n", vgname, argv[1]); } static void _lvs_in_vg(char **argv, int argc) { struct dm_list *lvs; struct lvm_lv_list *lvl; vg_t vg; if (!(vg = _lookup_vg_by_name(argv, argc))) return; lvs = lvm_vg_list_lvs(vg); if (!lvs || dm_list_empty(lvs)) { printf("No LVs in VG %s\n", lvm_vg_get_name(vg)); return; } printf("LVs in VG %s:\n", lvm_vg_get_name(vg)); dm_list_iterate_items(lvl, lvs) { _print_lv(vg, lvl->lv); } } static void _lvsegs_in_lv(char **argv, int argc) { struct dm_list *lvsegs; struct lvm_lvseg_list *lvl; lv_t lv; if (!(lv = _lookup_lv_by_name(argv[2]))) return; lvsegs = lvm_lv_list_lvsegs(lv); if (!lvsegs || dm_list_empty(lvsegs)) { printf("No LV segments in lv %s\n", lvm_lv_get_name(lv)); return; } printf("LV segments in lv %s:\n", lvm_lv_get_name(lv)); dm_list_iterate_items(lvl, lvsegs) { struct lvm_property_value v; v = lvm_lvseg_get_property(lvl->lvseg, "segtype"); _print_property_value("segtype", v); v = lvm_lvseg_get_property(lvl->lvseg, "seg_start_pe"); _print_property_value("seg_start_pe", v); v = lvm_lvseg_get_property(lvl->lvseg, "seg_size"); _print_property_value("seg_size", v); v = lvm_lvseg_get_property(lvl->lvseg, "devices"); _print_property_value("devices", v); v = lvm_lvseg_get_property(lvl->lvseg, "seg_pe_ranges"); _print_property_value("seg_pe_ranges", v); } } static void _lv_deactivate(char **argv, int argc) { lv_t lv; int rc=0; if (argc < 3) { printf("Please enter vgname, lvname\n"); return; } if (!(lv = _lookup_lv_by_name(argv[2]))) return; rc = lvm_lv_deactivate(lv); _lvm_status_to_pass_fail(rc); printf("De-activating LV %s in VG %s\n", argv[2], argv[1]); } static void _lv_activate(char **argv, int argc) { lv_t lv; int rc=0; if (argc < 3) { printf("Please enter vgname, lvname\n"); return; } if (!(lv = _lookup_lv_by_name(argv[2]))) return; rc = lvm_lv_activate(lv); _lvm_status_to_pass_fail(rc); printf("activating LV %s in VG %s\n", argv[2], argv[1]); } static void _vg_remove_lv(char **argv, int argc) { lv_t lv; if (argc < 3) { 
printf("Please enter vgname, lvname\n"); return; } if (!(lv = _lookup_lv_by_name(argv[2]))) return; if (lvm_vg_remove_lv(lv)) printf("Error "); else { printf("Success "); dm_hash_remove(_lvname_hash, argv[2]); } printf("removing LV %s in VG %s\n", argv[2], argv[1]); } static void _vg_create_lv_linear(char **argv, int argc) { vg_t vg; lv_t lv; if (argc < 4) { printf("Please enter vgname, lvname, and size\n"); return; } if (!(vg = _lookup_vg_by_name(argv, argc))) return; lv = lvm_vg_create_lv_linear(vg, argv[2], atol(argv[3])); if (!lv) printf("Error "); else { printf("Success "); dm_hash_insert(_lvname_hash, argv[2], lv); } printf("creating LV %s in VG %s\n", argv[2], argv[1]); } static int lvmapi_test_shell(lvm_t libh) { int argc; char *input = NULL, *args[MAX_ARGS], **argv; _hash_create(); argc=0; while (1) { free(input); input = readline("liblvm> "); /* EOF */ if (!input) { printf("\n"); break; } /* empty line */ if (!*input) continue; argv = args; if (lvm_split(input, &argc, argv, MAX_ARGS) == MAX_ARGS) { printf("Too many arguments, sorry."); continue; } if (!strcmp(argv[0], "lvm")) { argv++; argc--; } if (!argc) continue; if (!strcmp(argv[0], "quit") || !strcmp(argv[0], "exit")) { printf("Exiting.\n"); break; } else if (!strcmp(argv[0], "?") || !strcmp(argv[0], "help")) { _show_help(); } else if (!strcmp(argv[0], "config_reload")) { _config_reload(argv, argc, libh); } else if (!strcmp(argv[0], "config_override")) { _config_override(argv, argc, libh); } else if (!strcmp(argv[0], "vg_extend")) { _vg_extend(argv, argc, libh); } else if (!strcmp(argv[0], "vg_reduce")) { _vg_reduce(argv, argc, libh); } else if (!strcmp(argv[0], "vg_write")) { _vg_write(argv, argc); } else if (!strcmp(argv[0], "vg_open")) { _vg_open(argv, argc, libh); } else if (!strcmp(argv[0], "vg_close")) { _vg_close(argv, argc); } else if (!strcmp(argv[0], "vg_create")) { _vg_create(argv, argc, libh); } else if (!strcmp(argv[0], "vg_remove")) { _vg_remove(argv, argc); } else if (!strcmp(argv[0], "lv_activate")) { _lv_activate(argv, argc); } else if (!strcmp(argv[0], "lv_deactivate")) { _lv_deactivate(argv, argc); } else if (!strcmp(argv[0], "vg_remove_lv")) { _vg_remove_lv(argv, argc); } else if (!strcmp(argv[0], "vgs_open")) { _list_open_vgs(); } else if (!strcmp(argv[0], "vg_list_pvs")) { _pvs_in_vg(argv, argc); } else if (!strcmp(argv[0], "pv_list_pvsegs")) { _pvsegs_in_pv(argv, argc); } else if (!strcmp(argv[0], "vg_list_lvs")) { _lvs_in_vg(argv, argc); } else if (!strcmp(argv[0], "lv_list_lvsegs")) { _lvsegs_in_lv(argv, argc); } else if (!strcmp(argv[0], "list_vg_names")) { _list_vg_names(libh); } else if (!strcmp(argv[0], "list_vg_ids")) { _list_vg_ids(libh); } else if (!strcmp(argv[0], "scan_vgs")) { _scan_vgs(libh); } else if (!strcmp(argv[0], "vg_create_lv_linear")) { _vg_create_lv_linear(argv, argc); } else if (!strcmp(argv[0], "vg_add_tag")) { _vg_tag(argv, argc, 1); } else if (!strcmp(argv[0], "vg_remove_tag")) { _vg_tag(argv, argc, 0); } else if (!strcmp(argv[0], "vg_get_tags")) { _vg_get_tags(argv, argc); } else if (!strcmp(argv[0], "lv_get_property")) { _lv_get_property(argv, argc); } else if (!strcmp(argv[0], "vg_get_property")) { _vg_get_property(argv, argc); } else if (!strcmp(argv[0], "pv_get_property")) { _pv_get_property(argv, argc); } else if (!strcmp(argv[0], "vg_set_property")) { _vg_set_property(argv, argc); } else if (!strcmp(argv[0], "lv_add_tag")) { _lv_tag(argv, argc, 1); } else if (!strcmp(argv[0], "lv_remove_tag")) { _lv_tag(argv, argc, 0); } else if (!strcmp(argv[0], "lv_get_tags")) { 
_lv_get_tags(argv, argc); } else if (!strcmp(argv[0], "vgname_from_devname")) { _vgname_from_devname(argv, argc, libh); } else if (!strcmp(argv[0], "vgname_from_pvid")) { _vgname_from_pvid(argv, argc, libh); } else if (!strcmp(argv[0], "lv_from_uuid")) { _lv_from_uuid(argv, argc); } else if (!strcmp(argv[0], "lv_from_name")) { _lv_from_name(argv, argc); } else if (!strcmp(argv[0], "pv_from_uuid")) { _pv_from_uuid(argv, argc); } else if (!strcmp(argv[0], "pv_from_name")) { _pv_from_name(argv, argc); } else { printf ("Unrecognized command %s\n", argv[0]); } } dm_hash_iter(_vgname_hash, (dm_hash_iterate_fn) lvm_vg_close); _hash_destroy(); free(input); return 0; } #else /* !READLINE_SUPPORT */ static int lvmapi_test_shell(lvm_t libh) { printf("Build without readline library, no interactive testing.\n"); return 1; } #endif int main (int argc, char *argv[]) { lvm_t libh; libh = lvm_init(NULL); if (!libh) { printf("Unable to open lvm library instance\n"); return 1; } printf("Library version: %s\n", lvm_library_get_version()); lvmapi_test_shell(libh); lvm_quit(libh); return 0; } LVM2.2.02.176/test/api/pe_start.sh0000644000000000000120000000115313176752421015231 0ustar rootwheel#!/bin/sh # Copyright (C) 2011 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_devs 2 aux apitest pe_start test_vg $dev1 not vgs test_vg not pvs $dev1 LVM2.2.02.176/test/api/percent.c0000644000000000000120000000305313176752421014661 0ustar rootwheel/* * Copyright (C) 2010 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #undef NDEBUG #include "lvm2app.h" #include "assert.h" int main(int argc, char *argv[]) { lvm_t handle; vg_t vg = NULL; lv_t lv; struct lvm_property_value v; struct lvm_property_value d; handle = lvm_init(NULL); assert(handle); vg = lvm_vg_open(handle, argv[1], "r", 0); assert(vg); lv = lvm_lv_from_name(vg, "snap"); assert(lv); v = lvm_lv_get_property(lv, "snap_percent"); assert(v.is_valid); assert(v.value.integer == PERCENT_0); lv = lvm_lv_from_name(vg, "mirr"); assert(lv); v = lvm_lv_get_property(lv, "copy_percent"); assert(v.is_valid); assert(v.value.integer == PERCENT_100); lv = lvm_lv_from_name(vg, "snap2"); assert(lv); v = lvm_lv_get_property(lv, "snap_percent"); assert(v.is_valid); assert(v.value.integer == 50 * PERCENT_1); d = lvm_lv_get_property(lv, "data_percent"); assert(d.is_valid); assert(d.value.integer == v.value.integer); lvm_vg_close(vg); lvm_quit(handle); return 0; } LVM2.2.02.176/test/api/vglist.sh0000644000000000000120000000111413176752421014715 0ustar rootwheel#!/bin/sh # Copyright (C) 2013 Red Hat, Inc. All rights reserved. 
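test.c above is built as the plain 'test' target in api/Makefile.in and provides the interactive 'liblvm>' shell; since it reads commands from stdin via readline, a scripted session can simply be piped in. A sketch (editor's addition, assuming the harness launches it the same way as the other apitest binaries):

aux apitest test <<EOF
vg_create $vg
vg_extend $vg $dev1
vg_list_pvs $vg
vg_close $vg
quit
EOF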
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_vg 2 aux apitest vglist $vg "$(get vg_field $vg vg_uuid | sed -e s,-,,g)" LVM2.2.02.176/test/api/python_lvm_unit.py0000755000000000000120000006325713176752421016704 0ustar rootwheel#!/usr/bin/env python # Copyright (C) 2012-2013 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA import unittest import random import string import lvm import os import itertools import sys if sys.version_info[0] > 2: long = int # Set of basic unit tests for the python bindings. # # *** WARNING *** # # This test tries to only modify configuration for the list of allowed # PVs, but an error in it could potentially cause data loss if run on a # production system. Therefore it is strongly advised that this unit test # not be run on a system that contains data of value. fh = None def l(txt): if os.environ.get('PY_UNIT_LOG') is not None: global fh if fh is None: fh = open('/tmp/lvm_py_unit_test_' + rs(10), "a") fh.write(txt + "\n") fh.flush() def rs(rand_len=10): """ Generate a random string """ return ''.join( random.choice(string.ascii_uppercase)for x in range(rand_len)) def _get_allowed_devices(): rc = os.environ.get('PY_UNIT_PVS') if rc is not None: rc = rc.splitlines() rc.sort() return rc class AllowedPVS(object): """ We are only allowed to muck with certain PV, filter to only the ones we can use. 
""" def __init__(self): self.handle = None self.pvs_all = None def __enter__(self): rc = [] allowed_dev = _get_allowed_devices() if allowed_dev: self.handle = lvm.listPvs() self.pvs_all = self.handle.open() for p in self.pvs_all: if p.getName() in allowed_dev: rc.append(p) #Sort them consistently rc.sort(key=lambda x: x.getName()) return rc def __exit__(self, t_type, value, traceback): if self.handle: self.pvs_all = None self.handle.close() class TestLvm(unittest.TestCase): VG_P = os.environ.get('PREFIX') @staticmethod def _get_pv_device_names(): rc = [] with AllowedPVS() as pvs: for p in pvs: rc.append(p.getName()) return rc @staticmethod def _create_thick_lv(device_list, name): vg = lvm.vgCreate(TestLvm.VG_P + "_" + name) for d in device_list: vg.extend(d) vg.createLvLinear(name, vg.getSize() / 2) vg.close() vg = None @staticmethod def _create_thin_pool(device_list, pool_name): vg = lvm.vgCreate(TestLvm.VG_P + "_" + pool_name) for d in device_list: vg.extend(d) vg.createLvThinpool( pool_name, vg.getSize() / 2, 0, 0, lvm.THIN_DISCARDS_PASSDOWN, 1) return vg @staticmethod def _create_thin_lv(pv_devices, name): thin_pool_name = 'thin_vg_pool_' + rs(4) vg = TestLvm._create_thin_pool(pv_devices, thin_pool_name) vg.createLvThin(thin_pool_name, name, vg.getSize() / 8) vg.close() vg = None @staticmethod def _vg_names(): rc = [] vg_names = lvm.listVgNames() for i in vg_names: if i[0:len(TestLvm.VG_P)] == TestLvm.VG_P: rc.append(i) return rc @staticmethod def _get_lv(lv_vol_type=None, lv_name=None): vg_name_list = TestLvm._vg_names() for vg_name in vg_name_list: vg = lvm.vgOpen(vg_name, "w") lvs = vg.listLVs() for lv in lvs: attr = lv.getAttr() if lv_vol_type or lv_name: if lv_vol_type is not None and attr[0] == lv_vol_type: return lv, vg elif lv_name is not None and lv_name == lv.getName(): return lv, vg else: return lv, vg vg.close() return None, None @staticmethod def _remove_vg(vg_name): vg = lvm.vgOpen(vg_name, 'w') pvs = vg.listPVs() pe_devices = [] #Remove old snapshots first, then lv for lv in vg.listLVs(): attr = lv.getAttr() if attr[0] == 's': lv.remove() lvs = vg.listLVs() #Now remove any thin lVs for lv in vg.listLVs(): attr = lv.getAttr() if attr[0] == 'V': lv.remove() #now remove the rest for lv in vg.listLVs(): name = lv.getName() #Don't remove the hidden ones if '_tmeta' not in name and '_tdata' not in name: lv.remove() for p in pvs: pe_devices.append(p.getName()) for pv in pe_devices[:-1]: vg.reduce(pv) vg.remove() vg.close() @staticmethod def _clean_up(): #Clear out the testing PVs, but only if they contain stuff #this unit test created for vg_n in TestLvm._vg_names(): TestLvm._remove_vg(vg_n) for d in TestLvm._get_pv_device_names(): lvm.pvRemove(d) lvm.pvCreate(d) def setUp(self): device_list = TestLvm._get_pv_device_names() #Make sure we have an adequate number of PVs to use self.assertTrue(len(device_list) >= 4) TestLvm._clean_up() def tearDown(self): TestLvm._clean_up() def test_pv_resize(self): with AllowedPVS() as pvs: pv = pvs[0] curr_size = pv.getSize() dev_size = pv.getDevSize() self.assertTrue(curr_size == dev_size) pv.resize(curr_size / 2) with AllowedPVS() as pvs: pv = pvs[0] resized_size = pv.getSize() self.assertTrue(resized_size != curr_size) pv.resize(dev_size) def test_pv_life_cycle(self): """ Test removing and re-creating a PV """ target_name = None with AllowedPVS() as pvs: pv = pvs[0] target_name = pv.getName() lvm.pvRemove(target_name) with AllowedPVS() as pvs: for p in pvs: self.assertTrue(p.getName() != target_name) lvm.pvCreate(target_name, 0) with 
AllowedPVS() as pvs: found = False for p in pvs: if p.getName() == target_name: found = True self.assertTrue(found) @staticmethod def test_pv_methods(): with AllowedPVS() as pvs: for p in pvs: p.getName() p.getUuid() p.getMdaCount() p.getSize() p.getDevSize() p.getFree() p = None def test_version(self): version = lvm.getVersion() self.assertNotEquals(version, None) self.assertEquals(type(version), str) self.assertTrue(len(version) > 0) def test_pv_getters(self): with AllowedPVS() as pvs: pv = pvs[0] self.assertEqual(type(pv.getName()), str) self.assertTrue(len(pv.getName()) > 0) self.assertEqual(type(pv.getUuid()), str) self.assertTrue(len(pv.getUuid()) > 0) self.assertTrue( type(pv.getMdaCount()) == int or type(pv.getMdaCount()) == long) self.assertTrue( type(pv.getSize()) == int or type(pv.getSize()) == long) self.assertTrue( type(pv.getDevSize()) == int or type(pv.getSize()) == long) self.assertTrue( type(pv.getFree()) == int or type(pv.getFree()) == long) def _test_prop(self, prop_obj, prop, var_type, settable): result = prop_obj.getProperty(prop) #If we have no string value we can get a None type back if result[0] is not None: self.assertEqual(type(result[0]), var_type) else: self.assertTrue(str == var_type) self.assertEqual(type(result[1]), bool) self.assertTrue(result[1] == settable) def test_pv_segs(self): with AllowedPVS() as pvs: pv = pvs[0] pv_segs = pv.listPVsegs() #LVsegs returns a tuple, (value, bool settable) #TODO: Test other properties of pv_seg for i in pv_segs: self._test_prop(i, 'pvseg_start', long, False) def test_pv_property(self): with AllowedPVS() as pvs: pv = pvs[0] self._test_prop(pv, 'pv_mda_count', long, False) def test_lv_property(self): lv_name = 'lv_test' TestLvm._create_thin_lv(TestLvm._get_pv_device_names(), lv_name) lv, vg = TestLvm._get_lv(None, lv_name) lv_seg_properties = [ ('chunk_size', long, False), ('devices', str, False), ('discards', str, False), ('region_size', long, False), ('segtype', str, False), ('seg_pe_ranges', str, False), ('seg_size', long, False), ('seg_size_pe', long, False), ('seg_start', long, False), ('seg_start_pe', long, False), ('seg_tags', str, False), ('stripes', long, False), ('stripe_size', long, False), ('thin_count', long, False), ('transaction_id', long, False), ('zero', long, False)] lv_properties = [ ('convert_lv', str, False), ('copy_percent', long, False), ('data_lv', str, False), ('lv_attr', str, False), ('lv_host', str, False), ('lv_kernel_major', long, False), ('lv_kernel_minor', long, False), ('lv_kernel_read_ahead', long, False), ('lv_major', long, False), ('lv_minor', long, False), ('lv_name', str, False), ('lv_path', str, False), ('lv_profile', str, False), ('lv_read_ahead', long, False), ('lv_size', long, False), ('lv_tags', str, False), ('lv_time', str, False), ('lv_uuid', str, False), ('metadata_lv', str, False), ('mirror_log', str, False), ('lv_modules', str, False), ('move_pv', str, False), ('origin', str, False), ('origin_size', long, False), ('pool_lv', str, False), ('raid_max_recovery_rate', long, False), ('raid_min_recovery_rate', long, False), ('raid_mismatch_count', long, False), ('raid_sync_action', str, False), ('raid_write_behind', long, False), ('seg_count', long, False), ('snap_percent', long, False), ('sync_percent', long, False)] # Generic test case, make sure we get what we expect for t in lv_properties: self._test_prop(lv, *t) segments = lv.listLVsegs() if segments and len(segments): for s in segments: for t in lv_seg_properties: self._test_prop(s, *t) # Test specific cases tag = 'hello_world' 
lv.addTag(tag) tags = lv.getProperty('lv_tags') self.assertTrue(tag in tags[0]) vg.close() def test_lv_tags(self): lv_name = 'lv_test' TestLvm._create_thin_lv(TestLvm._get_pv_device_names(), lv_name) lv, vg = TestLvm._get_lv(None, lv_name) self._test_tags(lv) vg.close() def test_lv_active_inactive(self): lv_name = 'lv_test' TestLvm._create_thin_lv(TestLvm._get_pv_device_names(), lv_name) lv, vg = TestLvm._get_lv(None, lv_name) lv.deactivate() self.assertTrue(lv.isActive() is False) lv.activate() self.assertTrue(lv.isActive() is True) vg.close() def test_lv_rename(self): lv_name = 'lv_test' TestLvm._create_thin_lv(TestLvm._get_pv_device_names(), lv_name) lv, vg = TestLvm._get_lv(None, lv_name) current_name = lv.getName() new_name = rs() lv.rename(new_name) self.assertEqual(lv.getName(), new_name) lv.rename(current_name) vg.close() def test_lv_persistence(self): # Make changes to the lv, close the vg and re-open to make sure that # the changes persist lv_name = 'lv_test_persist' TestLvm._create_thick_lv(TestLvm._get_pv_device_names(), lv_name) # Test rename lv, vg = TestLvm._get_lv(None, lv_name) current_name = lv.getName() new_name = rs() lv.rename(new_name) vg.close() vg = None lv, vg = TestLvm._get_lv(None, new_name) self.assertTrue(lv is not None) if lv and vg: lv.rename(lv_name) vg.close() vg = None # Test lv tag add tag = 'hello_world' lv, vg = TestLvm._get_lv(None, lv_name) lv.addTag(tag) vg.close() vg = None lv, vg = TestLvm._get_lv(None, lv_name) tags = lv.getTags() self.assertTrue(tag in tags) vg.close() vg = None # Test lv tag delete lv, vg = TestLvm._get_lv(None, lv_name) self.assertTrue(lv is not None and vg is not None) if lv and vg: tags = lv.getTags() for t in tags: lv.removeTag(t) vg.close() vg = None lv, vg = TestLvm._get_lv(None, lv_name) self.assertTrue(lv is not None and vg is not None) if lv and vg: tags = lv.getTags() if tags: self.assertEqual(len(tags), 0) vg.close() vg = None # Test lv deactivate lv, vg = TestLvm._get_lv(None, lv_name) self.assertTrue(lv is not None and vg is not None) if lv and vg: lv.deactivate() vg.close() vg = None lv, vg = TestLvm._get_lv(None, lv_name) self.assertTrue(lv is not None and vg is not None) if lv and vg: self.assertFalse(lv.isActive()) vg.close() vg = None # Test lv activate lv, vg = TestLvm._get_lv(None, lv_name) self.assertTrue(lv is not None and vg is not None) if lv and vg: lv.activate() vg.close() vg = None lv, vg = TestLvm._get_lv(None, lv_name) self.assertTrue(lv is not None and vg is not None) if lv and vg: self.assertTrue(lv.isActive()) vg.close() vg = None def test_lv_snapshot(self): thin_lv = 'thin_lv' thick_lv = 'thick_lv' device_names = TestLvm._get_pv_device_names() TestLvm._create_thin_lv(device_names[0:2], thin_lv) TestLvm._create_thick_lv(device_names[2:4], thick_lv) lv, vg = TestLvm._get_lv(None, thick_lv) # FIXME lv.snapshot('thick_snap_shot', 1024*1024) vg.close() # FIXME thick_ss, vg = TestLvm._get_lv(None, 'thick_snap_shot') # FIXME self.assertTrue(thick_ss is not None) # FIXME vg.close() thin_lv, vg = TestLvm._get_lv(None, thin_lv) thin_lv.snapshot('thin_snap_shot') vg.close() thin_ss, vg = TestLvm._get_lv(None, 'thin_snap_shot') self.assertTrue(thin_ss is not None) origin = thin_ss.getOrigin() self.assertTrue(thin_lv, origin) vg.close() def test_lv_suspend(self): lv_name = 'lv_test' TestLvm._create_thin_lv(TestLvm._get_pv_device_names(), lv_name) lv, vg = TestLvm._get_lv(None, lv_name) result = lv.isSuspended() self.assertTrue(type(result) == bool) vg.close() def test_lv_size(self): lv_name = 'lv_test' 
TestLvm._create_thin_lv(TestLvm._get_pv_device_names(), lv_name) lv, vg = TestLvm._get_lv(None, lv_name) result = lv.getSize() self.assertTrue(type(result) == int or type(result) == long) vg.close() def test_lv_resize(self): lv_name = 'lv_test' TestLvm._create_thin_lv(TestLvm._get_pv_device_names(), lv_name) lv, vg = TestLvm._get_lv(None, lv_name) curr_size = lv.getSize() lv.resize(curr_size + (1024 * 1024)) latest = lv.getSize() self.assertTrue(curr_size != latest) def test_lv_seg(self): lv_name = 'lv_test' TestLvm._create_thin_lv(TestLvm._get_pv_device_names(), lv_name) lv, vg = TestLvm._get_lv(None, lv_name) lv_segs = lv.listLVsegs() #LVsegs returns a tuple, (value, bool settable) #TODO: Test other properties of lv_seg for i in lv_segs: self._test_prop(i, 'seg_start_pe', long, False) vg.close() def test_get_set_extend_size(self): thick_lv = 'get_set_prop' device_names = TestLvm._get_pv_device_names() TestLvm._create_thick_lv(device_names[0:2], thick_lv) lv, vg = TestLvm._get_lv(None, thick_lv) new_extent = 1024 * 1024 * 4 self.assertFalse( vg.getExtentSize() != new_extent, "Cannot determine if it works if they are the same") vg.setExtentSize(new_extent) self.assertEqual(vg.getExtentSize(), new_extent) vg.close() def test_vg_get_set_prop(self): thick_lv = 'get_set_prop' device_names = TestLvm._get_pv_device_names() TestLvm._create_thick_lv(device_names[0:2], thick_lv) lv, vg = TestLvm._get_lv(None, thick_lv) self.assertTrue(vg is not None) if vg: vg_mda_copies = vg.getProperty('vg_mda_copies') vg.setProperty('vg_mda_copies', vg_mda_copies[0]) vg.close() def test_vg_remove_restore(self): #Store off the list of physical devices pv_devices = [] thick_lv = 'get_set_prop' device_names = TestLvm._get_pv_device_names() TestLvm._create_thick_lv(device_names[0:2], thick_lv) lv, vg = TestLvm._get_lv(None, thick_lv) vg_name = vg.getName() pvs = vg.listPVs() for p in pvs: pv_devices.append(p.getName()) vg.close() TestLvm._remove_vg(vg_name) self._create_thick_lv(pv_devices, thick_lv) def test_vg_names(self): vg = lvm.listVgNames() self.assertTrue(isinstance(vg, tuple)) def test_dupe_lv_create(self): """ Try to create a lv with the same name expecting a failure Note: This was causing a seg. 
fault previously """ thick_lv = 'dupe_name' device_names = TestLvm._get_pv_device_names() TestLvm._create_thick_lv(device_names[0:2], thick_lv) lv, vg = TestLvm._get_lv(None, thick_lv) self.assertTrue(vg is not None) if vg: lvs = vg.listLVs() if len(lvs): lv = lvs[0] lv_name = lv.getName() self.assertRaises( lvm.LibLVMError, vg.createLvLinear, lv_name, lv.getSize()) vg.close() def test_vg_uuids(self): device_names = TestLvm._get_pv_device_names() TestLvm._create_thin_lv(device_names[0:2], 'thin') TestLvm._create_thick_lv(device_names[2:4], 'thick') vgs_uuids = lvm.listVgUuids() self.assertTrue(len(vgs_uuids) > 0) self.assertTrue(isinstance(vgs_uuids, tuple)) vgs_uuids = list(vgs_uuids) vgs_names = lvm.listVgNames() for vg_name in vgs_names: vg = lvm.vgOpen(vg_name, "r") #TODO Write/fix BUG, vg uuid don't match between #lvm.listVgUuids and vg.getUuid() vg_uuid_search = vg.getUuid().replace('-', '') self.assertTrue(vg_uuid_search in vgs_uuids) vgs_uuids.remove(vg_uuid_search) vg.close() self.assertTrue(len(vgs_uuids) == 0) def test_pv_lookup_from_vg(self): device_names = TestLvm._get_pv_device_names() TestLvm._create_thin_lv(device_names[0:2], 'thin') TestLvm._create_thick_lv(device_names[2:4], 'thick') vg_names = TestLvm._vg_names() self.assertTrue(len(vg_names) > 0) for vg_name in vg_names: vg = lvm.vgOpen(vg_name, 'w') pvs = vg.listPVs() for p in pvs: name = p.getName() uuid = p.getUuid() pv_name_lookup = vg.pvFromName(name) pv_uuid_lookup = vg.pvFromUuid(uuid) self.assertTrue( pv_name_lookup.getName() == pv_uuid_lookup.getName()) self.assertTrue( pv_name_lookup.getUuid() == pv_uuid_lookup.getUuid()) self.assertTrue(name == pv_name_lookup.getName()) self.assertTrue(uuid == pv_uuid_lookup.getUuid()) pv_name_lookup = None pv_uuid_lookup = None p = None pvs = None vg.close() def test_percent_to_float(self): self.assertEqual(lvm.percentToFloat(0), 0.0) self.assertEqual(lvm.percentToFloat(1000000), 1.0) self.assertEqual(lvm.percentToFloat(1000000 / 2), 0.5) def test_scan(self): self.assertEqual(lvm.scan(), None) def test_config_reload(self): self.assertEqual(lvm.configReload(), None) def test_config_override(self): self.assertEquals(lvm.configOverride("global.test = 1"), None) def test_config_find_bool(self): either_or = lvm.configFindBool("global/fallback_to_local_locking") self.assertTrue(type(either_or) == bool) self.assertTrue(lvm.configFindBool("global/locking_type")) def test_vg_from_pv_lookups(self): device_names = TestLvm._get_pv_device_names() TestLvm._create_thin_lv(device_names[0:2], 'thin') TestLvm._create_thick_lv(device_names[2:4], 'thick') vgname_list = TestLvm._vg_names() self.assertTrue(len(vgname_list) > 0) for vg_name in vgname_list: vg = lvm.vgOpen(vg_name, 'r') vg_name = vg.getName() pv_list = vg.listPVs() for pv in pv_list: vg_name_from_pv = lvm.vgNameFromPvid(pv.getUuid()) self.assertEquals(vg_name, vg_name_from_pv) self.assertEqual(vg_name, lvm.vgNameFromDevice(pv.getName())) vg.close() def test_vg_get_name(self): device_names = TestLvm._get_pv_device_names() TestLvm._create_thin_lv(device_names[0:2], 'thin') TestLvm._create_thick_lv(device_names[2:4], 'thick') vgname_list = TestLvm._vg_names() self.assertTrue(len(vgname_list) > 0) for vg_name in vgname_list: vg = lvm.vgOpen(vg_name, 'r') self.assertEqual(vg.getName(), vg_name) vg.close() def test_vg_get_uuid(self): device_names = TestLvm._get_pv_device_names() TestLvm._create_thin_lv(device_names[0:2], 'thin') TestLvm._create_thick_lv(device_names[2:4], 'thick') vgname_list = TestLvm._vg_names() 
self.assertTrue(len(vgname_list) > 0) for vg_name in vgname_list: vg = lvm.vgOpen(vg_name, 'r') uuid = vg.getUuid() self.assertNotEqual(uuid, None) self.assertTrue(len(uuid) > 0) vg.close() RETURN_NUMERIC = [ "getSeqno", "getSize", "getFreeSize", "getFreeSize", "getExtentSize", "getExtentCount", "getFreeExtentCount", "getPvCount", "getMaxPv", "getMaxLv"] def test_vg_getters(self): device_names = TestLvm._get_pv_device_names() TestLvm._create_thin_lv(device_names[0:2], 'thin') TestLvm._create_thick_lv(device_names[2:4], 'thick') vg_name_list = TestLvm._vg_names() self.assertTrue(len(vg_name_list) > 0) for vg_name in vg_name_list: vg = lvm.vgOpen(vg_name, 'r') self.assertTrue(type(vg.isClustered()) == bool) self.assertTrue(type(vg.isExported()) == bool) self.assertTrue(type(vg.isPartial()) == bool) #Loop through the list invoking the method for method_name in TestLvm.RETURN_NUMERIC: method = getattr(vg, method_name) result = method() self.assertTrue(type(result) == int or type(result) == long) vg.close() def _test_tags(self, tag_obj): existing_tags = tag_obj.getTags() self.assertTrue(type(existing_tags) == tuple) num_tags = random.randint(2, 40) created_tags = [] for i in range(num_tags): tag_name = rs(random.randint(1, 128)) tag_obj.addTag(tag_name) created_tags.append(tag_name) tags = tag_obj.getTags() self.assertTrue(len(existing_tags) + len(created_tags) == len(tags)) num_remove = len(created_tags) for i in range(num_remove): tag_to_remove = created_tags[ random.randint(0, len(created_tags) - 1)] created_tags.remove(tag_to_remove) tag_obj.removeTag(tag_to_remove) current_tags = tag_obj.getTags() self.assertFalse(tag_to_remove in current_tags) current_tags = tag_obj.getTags() self.assertTrue(len(current_tags) == len(existing_tags)) for e in existing_tags: self.assertTrue(e in current_tags) def test_vg_tags(self): device_names = TestLvm._get_pv_device_names() i = 0 for d in device_names: if i % 2 == 0: TestLvm._create_thin_lv([d], "thin_lv%d" % i) else: TestLvm._create_thick_lv([d], "thick_lv%d" % i) i += 1 for vg_name in TestLvm._vg_names(): vg = lvm.vgOpen(vg_name, 'w') self._test_tags(vg) vg.close() @staticmethod def test_listing(): env = os.environ for k, v in env.items(): l("%s:%s" % (k, v)) with lvm.listPvs() as pvs: for p in pvs: l('pv= %s' % p.getName()) l('Checking for VG') for v in lvm.listVgNames(): l('vg= %s' % v) def test_pv_empty_listing(self): #We had a bug where we would seg. fault if we had no PVs. 
l('testPVemptylisting entry') device_names = TestLvm._get_pv_device_names() for d in device_names: l("Removing %s" % d) lvm.pvRemove(d) count = 0 with lvm.listPvs() as pvs: for p in pvs: count += 1 l('pv= %s' % p.getName()) self.assertTrue(count == 0) for d in device_names: lvm.pvCreate(d) def test_pv_create(self): size = [0, 1024 * 1024 * 4] pvmeta_copies = [0, 1, 2] pvmeta_size = [0, 255, 512, 1024] data_alignment = [0, 2048, 4096] zero = [0, 1] device_names = TestLvm._get_pv_device_names() for d in device_names: lvm.pvRemove(d) d = device_names[0] #Test some error cases self.assertRaises(TypeError, lvm.pvCreate, None) self.assertRaises(lvm.LibLVMError, lvm.pvCreate, '') self.assertRaises(lvm.LibLVMError, lvm.pvCreate, d, 4) self.assertRaises(lvm.LibLVMError, lvm.pvCreate, d, 0, 4) self.assertRaises(lvm.LibLVMError, lvm.pvCreate, d, 0, 0, 0, 2 ** 34) self.assertRaises( lvm.LibLVMError, lvm.pvCreate, d, 0, 0, 0, 4096, 2 ** 34) #Try a number of combinations and permutations for s in size: for copies in pvmeta_copies: for pv_size in pvmeta_size: for align in data_alignment: for z in zero: lvm.pvCreate(d, s, copies, pv_size, align, align, z) lvm.pvRemove(d) #Restore for d in device_names: lvm.pvCreate(d) def test_vg_reduce(self): # Test the case where we try to reduce a vg where the last PV has # no metadata copies. In this case the reduce should fail. vg_name = TestLvm.VG_P + 'reduce_test' device_names = TestLvm._get_pv_device_names() for d in device_names: lvm.pvRemove(d) lvm.pvCreate(device_names[0], 0, 0) # Size all, pvmetadatacopies 0 lvm.pvCreate(device_names[1]) lvm.pvCreate(device_names[2]) lvm.pvCreate(device_names[3]) vg = lvm.vgCreate(vg_name) vg.extend(device_names[3]) vg.extend(device_names[2]) vg.extend(device_names[1]) vg.extend(device_names[0]) vg.close() vg = None vg = lvm.vgOpen(vg_name, 'w') vg.reduce(device_names[3]) vg.reduce(device_names[2]) self.assertRaises(lvm.LibLVMError, vg.reduce, device_names[1]) vg.close() vg = None vg = lvm.vgOpen(vg_name, 'w') vg.remove() vg.close() @staticmethod def _test_valid_names(method): sample = 'azAZ09._-+' method('x' * 127) method('.X') method('..X') for i in range(1, 7): tests = (''.join(i) for i in itertools.product(sample, repeat=i)) for t in tests: if t == '.' or t == '..': t += 'X' elif t.startswith('-'): t = 'H' + t method(t) def _test_bad_names(self, method, dupe_name): # Test for duplicate name self.assertRaises(lvm.LibLVMError, method, dupe_name) # Test for too long a name self.assertRaises(lvm.LibLVMError, method, ('x' * 128)) # Test empty self.assertRaises(lvm.LibLVMError, method, '') # Invalid characters self.assertRaises(lvm.LibLVMError, method, '&invalid^char') # Cannot start with .. and no following characters self.assertRaises(lvm.LibLVMError, method, '..') # Cannot start with . 
and no following characters self.assertRaises(lvm.LibLVMError, method, '.') # Cannot start with a hyphen self.assertRaises(lvm.LibLVMError, method, '-not_good') def _lv_reserved_names(self, method): prefixes = ['snapshot', 'pvmove'] reserved = [ '_mlog', '_mimage', '_pmspare', '_rimage', '_rmeta', '_vorigin', '_tdata', '_tmeta'] for p in prefixes: self.assertRaises(lvm.LibLVMError, method, p + rs(3)) for r in reserved: self.assertRaises(lvm.LibLVMError, method, rs(3) + r + rs(1)) self.assertRaises(lvm.LibLVMError, method, r + rs(1)) def test_vg_lv_name_validate(self): lv_name = 'vg_lv_name_validate' TestLvm._create_thin_lv(TestLvm._get_pv_device_names(), lv_name) lv, vg = TestLvm._get_lv(None, lv_name) self._test_bad_names(lvm.vgNameValidate, vg.getName()) self._test_bad_names(vg.lvNameValidate, lv.getName()) # Test good values TestLvm._test_valid_names(lvm.vgNameValidate) TestLvm._test_valid_names(vg.lvNameValidate) self._lv_reserved_names(vg.lvNameValidate) vg.close() if __name__ == "__main__": unittest.main() LVM2.2.02.176/test/api/dbustest.sh0000644000000000000120000000215213176752421015245 0ustar rootwheel#!/bin/sh # Copyright (C) 2016 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SKIP_WITH_LVMLOCKD=1 SKIP_WITH_CLVMD=1 . lib/inittest aux prepare_pvs 6 # We need the lvmdbusd.profile for the daemon to utilize JSON # output mkdir -p "$TESTDIR/etc/profile" cp -v "$TESTOLDPWD/lib/lvmdbusd.profile" "$TESTDIR/etc/profile/" # Need to set this up so that the lvmdbusd service knows which # binary to be running, which should be the one we just built LVM_BINARY=$(which lvm 2>/dev/null) export LVM_BINARY # skip if we don't have our own lvmetad... if test -z "${installed_testsuite+varset}"; then (echo "$LVM_BINARY" | grep -q "$abs_builddir") || skip fi aux prepare_lvmdbusd "$TESTOLDPWD/dbus/lvmdbustest.py" -v LVM2.2.02.176/test/api/vgtest.sh0000644000000000000120000000114313176752421014723 0ustar rootwheel#!/bin/sh # Copyright (C) 2008 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # tests lvm2app library # SKIP_WITH_LVMLOCKD=1 SKIP_WITH_LVMPOLLD=1 . lib/inittest aux prepare_pvs 2 aux apitest vgtest $vg1 "$dev1" "$dev2" LVM2.2.02.176/doc/0000755000000000000120000000000013176752421012071 5ustar rootwheelLVM2.2.02.176/doc/pvmove_outline.txt0000644000000000000120000000333413176752421015710 0ustar rootwheelLet's say we have an LV, made up of three segments of different PV's, I've also added in the device major:minor as this will be useful later: +-----------------------------+ | PV1 | PV2 | PV3 | 254:3 +----------+---------+--------+ Now our hero decides to PV move PV2 to PV4: 1. Suspend our LV (254:3), this starts queueing all io, and flushes all pending io. 
Once the suspend has completed we are free to change the mapping table. 2. Set up *another* (254:4) device with the mapping table of our LV. 3. Load a new mapping table into (254:3) that has identity targets for parts that aren't moving, and a mirror target for parts that are. 4. Unsuspend (254:3) So now we have: destination of copy +--------------------->--------------+ | | +-----------------------------+ + -----------+ | Identity | mirror | Ident. | 254:3 | PV4 | +----------+---------+--------+ +------------+ | | | \/ \/ \/ +-----------------------------+ | PV1 | PV2 | PV3 | 254:4 +----------+---------+--------+ Any writes to segment2 of the LV get intercepted by the mirror target who checks that that chunk has been copied to the new destination, if it hasn't it queues the initial copy and defers the current io until it has finished. Then the current io is written to *both* PV2 and the PV4. 5. When the copying has completed 254:3 is suspended/pending flushed. 6. 254:4 is taken down 7. metadata is updated on disk 8. 254:3 has new mapping table loaded: +-----------------------------+ | PV1 | PV4 | PV3 | 254:3 +----------+---------+--------+ LVM2.2.02.176/doc/kernel/0000755000000000000120000000000013176752421013351 5ustar rootwheelLVM2.2.02.176/doc/kernel/flakey.txt0000644000000000000120000000355313176752421015373 0ustar rootwheeldm-flakey ========= This target is the same as the linear target except that it exhibits unreliable behaviour periodically. It's been found useful in simulating failing devices for testing purposes. Starting from the time the table is loaded, the device is available for seconds, then exhibits unreliable behaviour for seconds, and then this cycle repeats. Also, consider using this in combination with the dm-delay target too, which can delay reads and writes and/or send them to different underlying devices. Table parameters ---------------- \ [ []] Mandatory parameters: : Full pathname to the underlying block-device, or a "major:minor" device-number. : Starting sector within the device. : Number of seconds device is available. : Number of seconds device returns errors. Optional feature parameters: If no feature parameters are present, during the periods of unreliability, all I/O returns errors. drop_writes: All write I/O is silently ignored. Read I/O is handled correctly. corrupt_bio_byte : During , replace of the data of each matching bio with . : The offset of the byte to replace. Counting starts at 1, to replace the first byte. : Either 'r' to corrupt reads or 'w' to corrupt writes. 'w' is incompatible with drop_writes. : The value (from 0-255) to write. : Perform the replacement only if bio->bi_opf has all the selected flags set. Examples: corrupt_bio_byte 32 r 1 0 - replaces the 32nd byte of READ bios with the value 1 corrupt_bio_byte 224 w 0 32 - replaces the 224th byte of REQ_META (=32) bios with the value 0 LVM2.2.02.176/doc/kernel/verity.txt0000644000000000000120000001755013176752421015444 0ustar rootwheeldm-verity ========== Device-Mapper's "verity" target provides transparent integrity checking of block devices using a cryptographic digest provided by the kernel crypto API. This target is read-only. Construction Parameters ======================= [<#opt_params> ] This is the type of the on-disk hash format. 0 is the original format used in the Chromium OS. The salt is appended when hashing, digests are stored continuously and the rest of the block is padded with zeroes. 1 is the current format that should be used for new devices. 
The salt is prepended when hashing and each digest is padded with zeroes to the power of two. This is the device containing data, the integrity of which needs to be checked. It may be specified as a path, like /dev/sdaX, or a device number, :. This is the device that supplies the hash tree data. It may be specified similarly to the device path and may be the same device. If the same device is used, the hash_start should be outside the configured dm-verity device. The block size on a data device in bytes. Each block corresponds to one digest on the hash device. The size of a hash block in bytes. The number of data blocks on the data device. Additional blocks are inaccessible. You can place hashes to the same partition as data, in this case hashes are placed after . This is the offset, in -blocks, from the start of hash_dev to the root block of the hash tree. The cryptographic hash algorithm used for this device. This should be the name of the algorithm, like "sha1". The hexadecimal encoding of the cryptographic hash of the root hash block and the salt. This hash should be trusted as there is no other authenticity beyond this point. The hexadecimal encoding of the salt value. <#opt_params> Number of optional parameters. If there are no optional parameters, the optional paramaters section can be skipped or #opt_params can be zero. Otherwise #opt_params is the number of following arguments. Example of optional parameters section: 1 ignore_corruption ignore_corruption Log corrupted blocks, but allow read operations to proceed normally. restart_on_corruption Restart the system when a corrupted block is discovered. This option is not compatible with ignore_corruption and requires user space support to avoid restart loops. ignore_zero_blocks Do not verify blocks that are expected to contain zeroes and always return zeroes instead. This may be useful if the partition contains unused blocks that are not guaranteed to contain zeroes. use_fec_from_device Use forward error correction (FEC) to recover from corruption if hash verification fails. Use encoding data from the specified device. This may be the same device where data and hash blocks reside, in which case fec_start must be outside data and hash areas. If the encoding data covers additional metadata, it must be accessible on the hash device after the hash blocks. Note: block sizes for data and hash devices must match. Also, if the verity is encrypted the should be too. fec_roots Number of generator roots. This equals to the number of parity bytes in the encoding data. For example, in RS(M, N) encoding, the number of roots is M-N. fec_blocks The number of encoding data blocks on the FEC device. The block size for the FEC device is . fec_start This is the offset, in blocks, from the start of the FEC device to the beginning of the encoding data. Theory of operation =================== dm-verity is meant to be set up as part of a verified boot path. This may be anything ranging from a boot using tboot or trustedgrub to just booting from a known-good device (like a USB drive or CD). When a dm-verity device is configured, it is expected that the caller has been authenticated in some way (cryptographic signatures, etc). After instantiation, all hashes will be verified on-demand during disk access. If they cannot be verified up to the root node of the tree, the root hash, then the I/O will fail. This should detect tampering with any data on the device and the hash data. 
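As an illustrative aside (not part of the original text): once a verity mapping is active, the on-demand checking can be exercised simply by reading through the device and querying the target status; the Status section below defines the V/C values that are reported. A minimal sketch, assuming a mapping named "vroot" has already been set up as in the Example section below:

# Read every block so each one is verified against the hash tree;
# with the default behaviour the read fails at the first bad block.
dd if=/dev/mapper/vroot of=/dev/null bs=1M || echo "verification failure detected"

# Ask the target whether all checks so far have passed
# (the status line ends in V while valid, C once corruption was seen).
dmsetup status vroot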
Cryptographic hashes are used to assert the integrity of the device on a per-block basis. This allows for a lightweight hash computation on first read into the page cache. Block hashes are stored linearly, aligned to the nearest block size. If forward error correction (FEC) support is enabled any recovery of corrupted data will be verified using the cryptographic hash of the corresponding data. This is why combining error correction with integrity checking is essential. Hash Tree --------- Each node in the tree is a cryptographic hash. If it is a leaf node, the hash of some data block on disk is calculated. If it is an intermediary node, the hash of a number of child nodes is calculated. Each entry in the tree is a collection of neighboring nodes that fit in one block. The number is determined based on block_size and the size of the selected cryptographic digest algorithm. The hashes are linearly-ordered in this entry and any unaligned trailing space is ignored but included when calculating the parent node. The tree looks something like: alg = sha256, num_blocks = 32768, block_size = 4096 [ root ] / . . . \ [entry_0] [entry_1] / . . . \ . . . \ [entry_0_0] . . . [entry_0_127] . . . . [entry_1_127] / ... \ / . . . \ / \ blk_0 ... blk_127 blk_16256 blk_16383 blk_32640 . . . blk_32767 On-disk format ============== The verity kernel code does not read the verity metadata on-disk header. It only reads the hash blocks which directly follow the header. It is expected that a user-space tool will verify the integrity of the verity header. Alternatively, the header can be omitted and the dmsetup parameters can be passed via the kernel command-line in a rooted chain of trust where the command-line is verified. Directly following the header (and with sector number padded to the next hash block boundary) are the hash blocks which are stored a depth at a time (starting from the root), sorted in order of increasing index. The full specification of kernel parameters and on-disk metadata format is available at the cryptsetup project's wiki page https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity Status ====== V (for Valid) is returned if every check performed so far was valid. If any check failed, C (for Corruption) is returned. Example ======= Set up a device: # dmsetup create vroot --readonly --table \ "0 2097152 verity 1 /dev/sda1 /dev/sda2 4096 4096 262144 1 sha256 "\ "4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076 "\ "1234000000000000000000000000000000000000000000000000000000000000" A command line tool veritysetup is available to compute or verify the hash tree or activate the kernel device. This is available from the cryptsetup upstream repository https://gitlab.com/cryptsetup/cryptsetup/ (as a libcryptsetup extension). Create hash on the device: # veritysetup format /dev/sda1 /dev/sda2 ... Root hash: 4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076 Activate the device: # veritysetup create vroot /dev/sda1 /dev/sda2 \ 4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076 LVM2.2.02.176/doc/kernel/snapshot.txt0000644000000000000120000001525413176752421015760 0ustar rootwheelDevice-mapper snapshot support ============================== Device-mapper allows you, without massive data copying: *) To create snapshots of any block device i.e. mountable, saved states of the block device which are also writable without interfering with the original content; *) To create device "forks", i.e. multiple different versions of the same data stream. 
*) To merge a snapshot of a block device back into the snapshot's origin device. In the first two cases, dm copies only the chunks of data that get changed and uses a separate copy-on-write (COW) block device for storage. For snapshot merge the contents of the COW storage are merged back into the origin device. There are three dm targets available: snapshot, snapshot-origin, and snapshot-merge. *) snapshot-origin which will normally have one or more snapshots based on it. Reads will be mapped directly to the backing device. For each write, the original data will be saved in the of each snapshot to keep its visible content unchanged, at least until the fills up. *) snapshot A snapshot of the block device is created. Changed chunks of sectors will be stored on the . Writes will only go to the . Reads will come from the or from for unchanged data. will often be smaller than the origin and if it fills up the snapshot will become useless and be disabled, returning errors. So it is important to monitor the amount of free space and expand the before it fills up. is P (Persistent) or N (Not persistent - will not survive after reboot). O (Overflow) can be added as a persistent store option to allow userspace to advertise its support for seeing "Overflow" in the snapshot status. So supported store types are "P", "PO" and "N". The difference between persistent and transient is with transient snapshots less metadata must be saved on disk - they can be kept in memory by the kernel. * snapshot-merge takes the same table arguments as the snapshot target except it only works with persistent snapshots. This target assumes the role of the "snapshot-origin" target and must not be loaded if the "snapshot-origin" is still present for . Creates a merging snapshot that takes control of the changed chunks stored in the of an existing snapshot, through a handover procedure, and merges these chunks back into the . Once merging has started (in the background) the may be opened and the merge will continue while I/O is flowing to it. Changes to the are deferred until the merging snapshot's corresponding chunk(s) have been merged. Once merging has started the snapshot device, associated with the "snapshot" target, will return -EIO when accessed. How snapshot is used by LVM2 ============================ When you create the first LVM2 snapshot of a volume, four dm devices are used: 1) a device containing the original mapping table of the source volume; 2) a device used as the ; 3) a "snapshot" device, combining #1 and #2, which is the visible snapshot volume; 4) the "original" volume (which uses the device number used by the original source volume), whose table is replaced by a "snapshot-origin" mapping from device #1. 
A fixed naming scheme is used, so with the following commands: lvcreate -L 1G -n base volumeGroup lvcreate -L 100M --snapshot -n snap volumeGroup/base we'll have this situation (with volumes in above order): # dmsetup table|grep volumeGroup volumeGroup-base-real: 0 2097152 linear 8:19 384 volumeGroup-snap-cow: 0 204800 linear 8:19 2097536 volumeGroup-snap: 0 2097152 snapshot 254:11 254:12 P 16 volumeGroup-base: 0 2097152 snapshot-origin 254:11 # ls -lL /dev/mapper/volumeGroup-* brw------- 1 root root 254, 11 29 ago 18:15 /dev/mapper/volumeGroup-base-real brw------- 1 root root 254, 12 29 ago 18:15 /dev/mapper/volumeGroup-snap-cow brw------- 1 root root 254, 13 29 ago 18:15 /dev/mapper/volumeGroup-snap brw------- 1 root root 254, 10 29 ago 18:14 /dev/mapper/volumeGroup-base How snapshot-merge is used by LVM2 ================================== A merging snapshot assumes the role of the "snapshot-origin" while merging. As such the "snapshot-origin" is replaced with "snapshot-merge". The "-real" device is not changed and the "-cow" device is renamed to -cow to aid LVM2's cleanup of the merging snapshot after it completes. The "snapshot" that hands over its COW device to the "snapshot-merge" is deactivated (unless using lvchange --refresh); but if it is left active it will simply return I/O errors. A snapshot will merge into its origin with the following command: lvconvert --merge volumeGroup/snap we'll now have this situation: # dmsetup table|grep volumeGroup volumeGroup-base-real: 0 2097152 linear 8:19 384 volumeGroup-base-cow: 0 204800 linear 8:19 2097536 volumeGroup-base: 0 2097152 snapshot-merge 254:11 254:12 P 16 # ls -lL /dev/mapper/volumeGroup-* brw------- 1 root root 254, 11 29 ago 18:15 /dev/mapper/volumeGroup-base-real brw------- 1 root root 254, 12 29 ago 18:16 /dev/mapper/volumeGroup-base-cow brw------- 1 root root 254, 10 29 ago 18:16 /dev/mapper/volumeGroup-base How to determine when a merging is complete =========================================== The snapshot-merge and snapshot status lines end with: / Both and include both data and metadata. During merging, the number of sectors allocated gets smaller and smaller. Merging has finished when the number of sectors holding data is zero, in other words == . Here is a practical example (using a hybrid of lvm and dmsetup commands): # lvs LV VG Attr LSize Origin Snap% Move Log Copy% Convert base volumeGroup owi-a- 4.00g snap volumeGroup swi-a- 1.00g base 18.97 # dmsetup status volumeGroup-snap 0 8388608 snapshot 397896/2097152 1560 ^^^^ metadata sectors # lvconvert --merge -b volumeGroup/snap Merging of volume snap started. # lvs volumeGroup/snap LV VG Attr LSize Origin Snap% Move Log Copy% Convert base volumeGroup Owi-a- 4.00g 17.23 # dmsetup status volumeGroup-base 0 8388608 snapshot-merge 281688/2097152 1104 # dmsetup status volumeGroup-base 0 8388608 snapshot-merge 180480/2097152 712 # dmsetup status volumeGroup-base 0 8388608 snapshot-merge 16/2097152 16 Merging has finished. # lvs LV VG Attr LSize Origin Snap% Move Log Copy% Convert base volumeGroup owi-a- 4.00g LVM2.2.02.176/doc/kernel/linear.txt0000644000000000000120000000263113176752421015366 0ustar rootwheeldm-linear ========= Device-Mapper's "linear" target maps a linear range of the Device-Mapper device onto a linear range of another device. This is the basic building block of logical volume managers. Parameters: : Full pathname to the underlying block-device, or a "major:minor" device-number. : Starting sector within the device. 
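(The parameter placeholders appear to have been dropped from this copy; from the two descriptions above they are the underlying device, given as a path or major:minor pair, followed by the starting sector within it, most likely written as <dev path> <offset> in the original.) As an additional illustrative sketch with an assumed device name, mapping a 1 MiB slice that begins 2 MiB into /dev/sdb:

[[
#!/bin/sh
# Expose 2048 sectors (1 MiB) of /dev/sdb, starting at sector 4096 (2 MiB),
# as /dev/mapper/slice
echo "0 2048 linear /dev/sdb 4096" | dmsetup create slice
]]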
Example scripts =============== [[ #!/bin/sh # Create an identity mapping for a device echo "0 `blockdev --getsz $1` linear $1 0" | dmsetup create identity ]] [[ #!/bin/sh # Join 2 devices together size1=`blockdev --getsz $1` size2=`blockdev --getsz $2` echo "0 $size1 linear $1 0 $size1 $size2 linear $2 0" | dmsetup create joined ]] [[ #!/usr/bin/perl -w # Split a device into 4M chunks and then join them together in reverse order. my $name = "reverse"; my $extent_size = 4 * 1024 * 2; my $dev = $ARGV[0]; my $table = ""; my $count = 0; if (!defined($dev)) { die("Please specify a device.\n"); } my $dev_size = `blockdev --getsz $dev`; my $extents = int($dev_size / $extent_size) - (($dev_size % $extent_size) ? 1 : 0); while ($extents > 0) { my $this_start = $count * $extent_size; $extents--; $count++; my $this_offset = $extents * $extent_size; $table .= "$this_start $extent_size linear $dev $this_offset\n"; } `echo \"$table\" | dmsetup create $name`; ]] LVM2.2.02.176/doc/kernel/zoned.txt0000644000000000000120000001523013176752421015232 0ustar rootwheeldm-zoned ======== The dm-zoned device mapper target exposes a zoned block device (ZBC and ZAC compliant devices) as a regular block device without any write pattern constraints. In effect, it implements a drive-managed zoned block device which hides from the user (a file system or an application doing raw block device accesses) the sequential write constraints of host-managed zoned block devices and can mitigate the potential device-side performance degradation due to excessive random writes on host-aware zoned block devices. For a more detailed description of the zoned block device models and their constraints see (for SCSI devices): http://www.t10.org/drafts.htm#ZBC_Family and (for ATA devices): http://www.t13.org/Documents/UploadedDocuments/docs2015/di537r05-Zoned_Device_ATA_Command_Set_ZAC.pdf The dm-zoned implementation is simple and minimizes system overhead (CPU and memory usage as well as storage capacity loss). For a 10TB host-managed disk with 256 MB zones, dm-zoned memory usage per disk instance is at most 4.5 MB and as little as 5 zones will be used internally for storing metadata and performaing reclaim operations. dm-zoned target devices are formatted and checked using the dmzadm utility available at: https://github.com/hgst/dm-zoned-tools Algorithm ========= dm-zoned implements an on-disk buffering scheme to handle non-sequential write accesses to the sequential zones of a zoned block device. Conventional zones are used for caching as well as for storing internal metadata. The zones of the device are separated into 2 types: 1) Metadata zones: these are conventional zones used to store metadata. Metadata zones are not reported as useable capacity to the user. 2) Data zones: all remaining zones, the vast majority of which will be sequential zones used exclusively to store user data. The conventional zones of the device may be used also for buffering user random writes. Data in these zones may be directly mapped to the conventional zone, but later moved to a sequential zone so that the conventional zone can be reused for buffering incoming random writes. dm-zoned exposes a logical device with a sector size of 4096 bytes, irrespective of the physical sector size of the backend zoned block device being used. This allows reducing the amount of metadata needed to manage valid blocks (blocks written). 
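As a quick illustrative check (not in the original text), the 4096-byte logical sector size can be observed once a dm-zoned device has been created as described in the Usage section below; the mapping name dmz-sdb and backing disk /dev/sdb are assumptions here:

# Logical sector size of the dm-zoned device vs. its backing zoned disk
blockdev --getss /dev/mapper/dmz-sdb   # expected to report 4096
blockdev --getss /dev/sdb              # the drive's own logical sector size, often 512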
The on-disk metadata format is as follows: 1) The first block of the first conventional zone found contains the super block which describes the on disk amount and position of metadata blocks. 2) Following the super block, a set of blocks is used to describe the mapping of the logical device blocks. The mapping is done per chunk of blocks, with the chunk size equal to the zoned block device size. The mapping table is indexed by chunk number and each mapping entry indicates the zone number of the device storing the chunk of data. Each mapping entry may also indicate if the zone number of a conventional zone used to buffer random modification to the data zone. 3) A set of blocks used to store bitmaps indicating the validity of blocks in the data zones follows the mapping table. A valid block is defined as a block that was written and not discarded. For a buffered data chunk, a block is always valid only in the data zone mapping the chunk or in the buffer zone of the chunk. For a logical chunk mapped to a conventional zone, all write operations are processed by directly writing to the zone. If the mapping zone is a sequential zone, the write operation is processed directly only if the write offset within the logical chunk is equal to the write pointer offset within of the sequential data zone (i.e. the write operation is aligned on the zone write pointer). Otherwise, write operations are processed indirectly using a buffer zone. In that case, an unused conventional zone is allocated and assigned to the chunk being accessed. Writing a block to the buffer zone of a chunk will automatically invalidate the same block in the sequential zone mapping the chunk. If all blocks of the sequential zone become invalid, the zone is freed and the chunk buffer zone becomes the primary zone mapping the chunk, resulting in native random write performance similar to a regular block device. Read operations are processed according to the block validity information provided by the bitmaps. Valid blocks are read either from the sequential zone mapping a chunk, or if the chunk is buffered, from the buffer zone assigned. If the accessed chunk has no mapping, or the accessed blocks are invalid, the read buffer is zeroed and the read operation terminated. After some time, the limited number of convnetional zones available may be exhausted (all used to map chunks or buffer sequential zones) and unaligned writes to unbuffered chunks become impossible. To avoid this situation, a reclaim process regularly scans used conventional zones and tries to reclaim the least recently used zones by copying the valid blocks of the buffer zone to a free sequential zone. Once the copy completes, the chunk mapping is updated to point to the sequential zone and the buffer zone freed for reuse. Metadata Protection =================== To protect metadata against corruption in case of sudden power loss or system crash, 2 sets of metadata zones are used. One set, the primary set, is used as the main metadata region, while the secondary set is used as a staging area. Modified metadata is first written to the secondary set and validated by updating the super block in the secondary set, a generation counter is used to indicate that this set contains the newest metadata. Once this operation completes, in place of metadata block updates can be done in the primary metadata set. This ensures that one of the set is always consistent (all modifications committed or none at all). Flush operations are used as a commit point. 
Upon reception of a flush request, metadata modification activity is temporarily blocked (for both incoming BIO processing and reclaim process) and all dirty metadata blocks are staged and updated. Normal operation is then resumed. Flushing metadata thus only temporarily delays write and discard requests. Read requests can be processed concurrently while metadata flush is being executed. Usage ===== A zoned block device must first be formatted using the dmzadm tool. This will analyze the device zone configuration, determine where to place the metadata sets on the device and initialize the metadata sets. Ex: dmzadm --format /dev/sdxx For a formatted device, the target can be created normally with the dmsetup utility. The only parameter that dm-zoned requires is the underlying zoned block device name. Ex: echo "0 `blockdev --getsize ${dev}` zoned ${dev}" | dmsetup create dmz-`basename ${dev}` LVM2.2.02.176/doc/kernel/statistics.txt0000644000000000000120000001705113176752421016310 0ustar rootwheelDM statistics ============= Device Mapper supports the collection of I/O statistics on user-defined regions of a DM device. If no regions are defined no statistics are collected so there isn't any performance impact. Only bio-based DM devices are currently supported. Each user-defined region specifies a starting sector, length and step. Individual statistics will be collected for each step-sized area within the range specified. The I/O statistics counters for each step-sized area of a region are in the same format as /sys/block/*/stat or /proc/diskstats (see: Documentation/iostats.txt). But two extra counters (12 and 13) are provided: total time spent reading and writing. When the histogram argument is used, the 14th parameter is reported that represents the histogram of latencies. All these counters may be accessed by sending the @stats_print message to the appropriate DM device via dmsetup. The reported times are in milliseconds and the granularity depends on the kernel ticks. When the option precise_timestamps is used, the reported times are in nanoseconds. Each region has a corresponding unique identifier, which we call a region_id, that is assigned when the region is created. The region_id must be supplied when querying statistics about the region, deleting the region, etc. Unique region_ids enable multiple userspace programs to request and process statistics for the same DM device without stepping on each other's data. The creation of DM statistics will allocate memory via kmalloc or fallback to using vmalloc space. At most, 1/4 of the overall system memory may be allocated by DM statistics. The admin can see how much memory is used by reading /sys/module/dm_mod/parameters/stats_current_allocated_bytes Messages ======== @stats_create [ ...] [ []] Create a new region and return the region_id. "-" - whole device "+" - a range of 512-byte sectors starting with . "" - the range is subdivided into areas each containing sectors. "/" - the range is subdivided into the specified number of areas. The number of optional arguments The following optional arguments are supported precise_timestamps - use precise timer with nanosecond resolution instead of the "jiffies" variable. When this argument is used, the resulting times are in nanoseconds instead of milliseconds. Precise timestamps are a little bit slower to obtain than jiffies-based timestamps. histogram:n1,n2,n3,n4,... - collect histogram of latencies. The numbers n1, n2, etc are times that represent the boundaries of the histogram. 
If precise_timestamps is not used, the times are in milliseconds, otherwise they are in nanoseconds. For each range, the kernel will report the number of requests that completed within this range. For example, if we use "histogram:10,20,30", the kernel will report four numbers a:b:c:d. a is the number of requests that took 0-10 ms to complete, b is the number of requests that took 10-20 ms to complete, c is the number of requests that took 20-30 ms to complete and d is the number of requests that took more than 30 ms to complete. An optional parameter. A name that uniquely identifies the userspace owner of the range. This groups ranges together so that userspace programs can identify the ranges they created and ignore those created by others. The kernel returns this string back in the output of @stats_list message, but it doesn't use it for anything else. If we omit the number of optional arguments, program id must not be a number, otherwise it would be interpreted as the number of optional arguments. An optional parameter. A word that provides auxiliary data that is useful to the client program that created the range. The kernel returns this string back in the output of @stats_list message, but it doesn't use this value for anything. @stats_delete Delete the region with the specified id. region_id returned from @stats_create @stats_clear Clear all the counters except the in-flight i/o counters. region_id returned from @stats_create @stats_list [] List all regions registered with @stats_create. An optional parameter. If this parameter is specified, only matching regions are returned. If it is not specified, all regions are returned. Output format: : + precise_timestamps histogram:n1,n2,n3,... The strings "precise_timestamps" and "histogram" are printed only if they were specified when creating the region. @stats_print [ ] Print counters for each step-sized area of a region. region_id returned from @stats_create The index of the starting line in the output. If omitted, all lines are returned. The number of lines to include in the output. If omitted, all lines are returned. Output format for each step-sized area of a region: + counters The first 11 counters have the same meaning as /sys/block/*/stat or /proc/diskstats. Please refer to Documentation/iostats.txt for details. 1. the number of reads completed 2. the number of reads merged 3. the number of sectors read 4. the number of milliseconds spent reading 5. the number of writes completed 6. the number of writes merged 7. the number of sectors written 8. the number of milliseconds spent writing 9. the number of I/Os currently in progress 10. the number of milliseconds spent doing I/Os 11. the weighted number of milliseconds spent doing I/Os Additional counters: 12. the total time spent reading in milliseconds 13. the total time spent writing in milliseconds @stats_print_clear [ ] Atomically print and then clear all the counters except the in-flight i/o counters. Useful when the client consuming the statistics does not want to lose any statistics (those updated between printing and clearing). region_id returned from @stats_create The index of the starting line in the output. If omitted, all lines are printed and then cleared. The number of lines to process. If omitted, all lines are printed and then cleared. @stats_set_aux Store auxiliary data aux_data for the specified region. region_id returned from @stats_create The string that identifies data which is useful to the client program that created the range. 
The kernel returns this string back in the output of @stats_list message, but it doesn't use this value for anything. Examples ======== Subdivide the DM device 'vol' into 100 pieces and start collecting statistics on them: dmsetup message vol 0 @stats_create - /100 Set the auxiliary data string to "foo bar baz" (the escape for each space must also be escaped, otherwise the shell will consume them): dmsetup message vol 0 @stats_set_aux 0 foo\\ bar\\ baz List the statistics: dmsetup message vol 0 @stats_list Print the statistics: dmsetup message vol 0 @stats_print 0 Delete the statistics: dmsetup message vol 0 @stats_delete 0 LVM2.2.02.176/doc/kernel/log-writes.txt0000644000000000000120000001137113176752421016211 0ustar rootwheeldm-log-writes ============= This target takes 2 devices, one to pass all IO to normally, and one to log all of the write operations to. This is intended for file system developers wishing to verify the integrity of metadata or data as the file system is written to. There is a log_write_entry written for every WRITE request and the target is able to take arbitrary data from userspace to insert into the log. The data that is in the WRITE requests is copied into the log to make the replay happen exactly as it happened originally. Log Ordering ============ We log things in order of completion once we are sure the write is no longer in cache. This means that normal WRITE requests are not actually logged until the next REQ_PREFLUSH request. This is to make it easier for userspace to replay the log in a way that correlates to what is on disk and not what is in cache, to make it easier to detect improper waiting/flushing. This works by attaching all WRITE requests to a list once the write completes. Once we see a REQ_PREFLUSH request we splice this list onto the request and once the FLUSH request completes we log all of the WRITEs and then the FLUSH. Only completed WRITEs, at the time the REQ_PREFLUSH is issued, are added in order to simulate the worst case scenario with regard to power failures. Consider the following example (W means write, C means complete): W1,W2,W3,C3,C2,Wflush,C1,Cflush The log would show the following W3,W2,flush,W1.... Again this is to simulate what is actually on disk, this allows us to detect cases where a power failure at a particular point in time would create an inconsistent file system. Any REQ_FUA requests bypass this flushing mechanism and are logged as soon as they complete as those requests will obviously bypass the device cache. Any REQ_DISCARD requests are treated like WRITE requests. Otherwise we would have all the DISCARD requests, and then the WRITE requests and then the FLUSH request. Consider the following example: WRITE block 1, DISCARD block 1, FLUSH If we logged DISCARD when it completed, the replay would look like this DISCARD 1, WRITE 1, FLUSH which isn't quite what happened and wouldn't be caught during the log replay. Target interface ================ i) Constructor log-writes dev_path : Device that all of the IO will go to normally. log_dev_path : Device where the log entries are written to. ii) Status <#logged entries> #logged entries : Number of logged entries highest allocated sector : Highest allocated sector iii) Messages mark You can use a dmsetup message to set an arbitrary mark in a log. 
For example, say you want to fsck a file system after every write, but first you need to replay up to the mkfs so that you are fsck'ing something reasonable. You would do something like this:

  mkfs.btrfs -f /dev/mapper/log
  dmsetup message log 0 mark mkfs

This would allow you to replay the log up to the mkfs mark and then replay from that point on, doing the fsck check in the interval that you want.

Every log has a mark at the end labeled "dm-log-writes-end".

Userspace component
===================

There is a userspace tool that will replay the log for you in various ways. It can be found here: https://github.com/josefbacik/log-writes

Example usage
=============

Say you want to test fsync on your file system. You would do something like this:

  TABLE="0 $(blockdev --getsz /dev/sdb) log-writes /dev/sdb /dev/sdc"
  dmsetup create log --table "$TABLE"
  mkfs.btrfs -f /dev/mapper/log
  dmsetup message log 0 mark mkfs
  mount /dev/mapper/log /mnt/btrfs-test
  <some test that does fsync at the end>
  dmsetup message log 0 mark fsync
  md5sum /mnt/btrfs-test/foo
  umount /mnt/btrfs-test
  dmsetup remove log
  replay-log --log /dev/sdc --replay /dev/sdb --end-mark fsync
  mount /dev/sdb /mnt/btrfs-test
  md5sum /mnt/btrfs-test/foo

Another option is to do a complicated file system operation and verify the file system is consistent during the entire operation. You could do this with:

  TABLE="0 $(blockdev --getsz /dev/sdb) log-writes /dev/sdb /dev/sdc"
  dmsetup create log --table "$TABLE"
  mkfs.btrfs -f /dev/mapper/log
  dmsetup message log 0 mark mkfs
  mount /dev/mapper/log /mnt/btrfs-test
  btrfs filesystem balance /mnt/btrfs-test
  umount /mnt/btrfs-test
  dmsetup remove log
  replay-log --log /dev/sdc --replay /dev/sdb --end-mark mkfs
  btrfsck /dev/sdb
  replay-log --log /dev/sdc --replay /dev/sdb --start-mark mkfs \
    --fsck "btrfsck /dev/sdb" --check fua

And that will replay the log until it sees a FUA request, run the fsck command, and if the fsck passes it will replay to the next FUA, until it is completed or the fsck command exits abnormally.
LVM2.2.02.176/doc/kernel/delay.txt0000644000000000000120000000132413176752421015210 0ustar rootwheeldm-delay
========

Device-Mapper's "delay" target delays reads and/or writes and maps them to different devices.

Parameters:
    <device> <offset> <delay> [<write device> <write offset> <write delay>]

With separate write parameters, the first set is only used for reads.
Offsets are specified in sectors.
Delays are specified in milliseconds.

Example scripts
===============
[[
#!/bin/sh
# Create device delaying rw operation for 500ms
echo "0 `blockdev --getsz $1` delay $1 0 500" | dmsetup create delayed
]]

[[
#!/bin/sh
# Create device delaying only write operation for 500ms and
# splitting reads and writes to different devices $1 $2
echo "0 `blockdev --getsz $1` delay $1 0 0 $2 0 500" | dmsetup create delayed
]]
LVM2.2.02.176/doc/kernel/log.txt0000644000000000000120000000453513176752421014702 0ustar rootwheelDevice-Mapper Logging
=====================

The device-mapper logging code is used by some of the device-mapper RAID targets to track regions of the disk that are not consistent. A region (or portion of the address space) of the disk may be inconsistent because a RAID stripe is currently being operated on or a machine died while the region was being altered. In the case of mirrors, a region would be considered dirty/inconsistent while you are writing to it because the writes need to be replicated for all the legs of the mirror and may not reach the legs at the same time. Once all writes are complete, the region is considered clean again.
There is a generic logging interface that the device-mapper RAID implementations use to perform logging operations (see dm_dirty_log_type in include/linux/dm-dirty-log.h). Various different logging implementations are available and provide different capabilities. The list includes: Type Files ==== ===== disk drivers/md/dm-log.c core drivers/md/dm-log.c userspace drivers/md/dm-log-userspace* include/linux/dm-log-userspace.h The "disk" log type ------------------- This log implementation commits the log state to disk. This way, the logging state survives reboots/crashes. The "core" log type ------------------- This log implementation keeps the log state in memory. The log state will not survive a reboot or crash, but there may be a small boost in performance. This method can also be used if no storage device is available for storing log state. The "userspace" log type ------------------------ This log type simply provides a way to export the log API to userspace, so log implementations can be done there. This is done by forwarding most logging requests to userspace, where a daemon receives and processes the request. The structure used for communication between kernel and userspace are located in include/linux/dm-log-userspace.h. Due to the frequency, diversity, and 2-way communication nature of the exchanges between kernel and userspace, 'connector' is used as the interface for communication. There are currently two userspace log implementations that leverage this framework - "clustered-disk" and "clustered-core". These implementations provide a cluster-coherent log for shared-storage. Device-mapper mirroring can be used in a shared-storage environment when the cluster log implementations are employed. LVM2.2.02.176/doc/kernel/cache.txt0000644000000000000120000003117013176752421015157 0ustar rootwheelIntroduction ============ dm-cache is a device mapper target written by Joe Thornber, Heinz Mauelshagen, and Mike Snitzer. It aims to improve performance of a block device (eg, a spindle) by dynamically migrating some of its data to a faster, smaller device (eg, an SSD). This device-mapper solution allows us to insert this caching at different levels of the dm stack, for instance above the data device for a thin-provisioning pool. Caching solutions that are integrated more closely with the virtual memory system should give better performance. The target reuses the metadata library used in the thin-provisioning library. The decision as to what data to migrate and when is left to a plug-in policy module. Several of these have been written as we experiment, and we hope other people will contribute others for specific io scenarios (eg. a vm image server). Glossary ======== Migration - Movement of the primary copy of a logical block from one device to the other. Promotion - Migration from slow device to fast device. Demotion - Migration from fast device to slow device. The origin device always contains a copy of the logical block, which may be out of date or kept in sync with the copy on the cache device (depending on policy). Design ====== Sub-devices ----------- The target is constructed by passing three devices to it (along with other parameters detailed later): 1. An origin device - the big, slow one. 2. A cache device - the small, fast one. 3. A small metadata device - records which blocks are in the cache, which are dirty, and extra hints for use by the policy object. 
This information could be put on the cache device, but having it separate allows the volume manager to configure it differently, e.g. as a mirror for extra robustness. This metadata device may only be used by a single cache device. Fixed block size ---------------- The origin is divided up into blocks of a fixed size. This block size is configurable when you first create the cache. Typically we've been using block sizes of 256KB - 1024KB. The block size must be between 64 (32KB) and 2097152 (1GB) and a multiple of 64 (32KB). Having a fixed block size simplifies the target a lot. But it is something of a compromise. For instance, a small part of a block may be getting hit a lot, yet the whole block will be promoted to the cache. So large block sizes are bad because they waste cache space. And small block sizes are bad because they increase the amount of metadata (both in core and on disk). Cache operating modes --------------------- The cache has three operating modes: writeback, writethrough and passthrough. If writeback, the default, is selected then a write to a block that is cached will go only to the cache and the block will be marked dirty in the metadata. If writethrough is selected then a write to a cached block will not complete until it has hit both the origin and cache devices. Clean blocks should remain clean. If passthrough is selected, useful when the cache contents are not known to be coherent with the origin device, then all reads are served from the origin device (all reads miss the cache) and all writes are forwarded to the origin device; additionally, write hits cause cache block invalidates. To enable passthrough mode the cache must be clean. Passthrough mode allows a cache device to be activated without having to worry about coherency. Coherency that exists is maintained, although the cache will gradually cool as writes take place. If the coherency of the cache can later be verified, or established through use of the "invalidate_cblocks" message, the cache device can be transitioned to writethrough or writeback mode while still warm. Otherwise, the cache contents can be discarded prior to transitioning to the desired operating mode. A simple cleaner policy is provided, which will clean (write back) all dirty blocks in a cache. Useful for decommissioning a cache or when shrinking a cache. Shrinking the cache's fast device requires all cache blocks, in the area of the cache being removed, to be clean. If the area being removed from the cache still contains dirty blocks the resize will fail. Care must be taken to never reduce the volume used for the cache's fast device until the cache is clean. This is of particular importance if writeback mode is used. Writethrough and passthrough modes already maintain a clean cache. Future support to partially clean the cache, above a specified threshold, will allow for keeping the cache warm and in writeback mode during resize. Migration throttling -------------------- Migrating data between the origin and cache device uses bandwidth. The user can set a throttle to prevent more than a certain amount of migration occurring at any one time. Currently we're not taking any account of normal io traffic going to the devices. More work needs doing here to avoid migrating during those peak io moments. For the time being, a message "migration_threshold <#sectors>" can be used to set the maximum number of sectors being migrated, the default being 204800 sectors (or 100MB). 
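As a small illustration of that message (not part of the original text; the cache device name 'my_cache' is the one used in the examples later in this file and is otherwise arbitrary), the throttle could be halved from its default like so:

  # Sketch: lower the migration throttle to 102400 sectors (50MB) on a
  # cache device assumed to be named 'my_cache'.
  dmsetup message my_cache 0 migration_threshold 102400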
Updating on-disk metadata ------------------------- On-disk metadata is committed every time a FLUSH or FUA bio is written. If no such requests are made then commits will occur every second. This means the cache behaves like a physical disk that has a volatile write cache. If power is lost you may lose some recent writes. The metadata should always be consistent in spite of any crash. The 'dirty' state for a cache block changes far too frequently for us to keep updating it on the fly. So we treat it as a hint. In normal operation it will be written when the dm device is suspended. If the system crashes all cache blocks will be assumed dirty when restarted. Per-block policy hints ---------------------- Policy plug-ins can store a chunk of data per cache block. It's up to the policy how big this chunk is, but it should be kept small. Like the dirty flags this data is lost if there's a crash so a safe fallback value should always be possible. For instance, the 'mq' policy, which is currently the default policy, uses this facility to store the hit count of the cache blocks. If there's a crash this information will be lost, which means the cache may be less efficient until those hit counts are regenerated. Policy hints affect performance, not correctness. Policy messaging ---------------- Policies will have different tunables, specific to each one, so we need a generic way of getting and setting these. Device-mapper messages are used. Refer to cache-policies.txt. Discard bitset resolution ------------------------- We can avoid copying data during migration if we know the block has been discarded. A prime example of this is when mkfs discards the whole block device. We store a bitset tracking the discard state of blocks. However, we allow this bitset to have a different block size from the cache blocks. This is because we need to track the discard state for all of the origin device (compare with the dirty bitset which is just for the smaller cache device). Target interface ================ Constructor ----------- cache <#feature args> []* <#policy args> [policy args]* metadata dev : fast device holding the persistent metadata cache dev : fast device holding cached data blocks origin dev : slow device holding original data blocks block size : cache unit size in sectors #feature args : number of feature arguments passed feature args : writethrough or passthrough (The default is writeback.) policy : the replacement policy to use #policy args : an even number of arguments corresponding to key/value pairs passed to the policy policy args : key/value pairs passed to the policy E.g. 'sequential_threshold 1024' See cache-policies.txt for details. Optional feature arguments are: writethrough : write through caching that prohibits cache block content from being different from origin block content. Without this argument, the default behaviour is to write back cache block contents later for performance reasons, so they may differ from the corresponding origin blocks. passthrough : a degraded mode useful for various cache coherency situations (e.g., rolling back snapshots of underlying storage). Reads and writes always go to the origin. If a write goes to a cached origin block, then the cache block is invalidated. To enable passthrough mode the cache must be clean. metadata2 : use version 2 of the metadata. This stores the dirty bits in a separate btree, which improves speed of shutting down the cache. A policy called 'default' is always registered. 
This is an alias for the policy we currently think is giving best all round performance. As the default policy could vary between kernels, if you are relying on the characteristics of a specific policy, always request it by name. Status ------ <#used metadata blocks>/<#total metadata blocks> <#used cache blocks>/<#total cache blocks> <#read hits> <#read misses> <#write hits> <#write misses> <#demotions> <#promotions> <#dirty> <#features> * <#core args> * <#policy args> * metadata block size : Fixed block size for each metadata block in sectors #used metadata blocks : Number of metadata blocks used #total metadata blocks : Total number of metadata blocks cache block size : Configurable block size for the cache device in sectors #used cache blocks : Number of blocks resident in the cache #total cache blocks : Total number of cache blocks #read hits : Number of times a READ bio has been mapped to the cache #read misses : Number of times a READ bio has been mapped to the origin #write hits : Number of times a WRITE bio has been mapped to the cache #write misses : Number of times a WRITE bio has been mapped to the origin #demotions : Number of times a block has been removed from the cache #promotions : Number of times a block has been moved to the cache #dirty : Number of blocks in the cache that differ from the origin #feature args : Number of feature args to follow feature args : 'writethrough' (optional) #core args : Number of core arguments (must be even) core args : Key/value pairs for tuning the core e.g. migration_threshold policy name : Name of the policy #policy args : Number of policy arguments to follow (must be even) policy args : Key/value pairs e.g. sequential_threshold cache metadata mode : ro if read-only, rw if read-write In serious cases where even a read-only mode is deemed unsafe no further I/O will be permitted and the status will just contain the string 'Fail'. The userspace recovery tools should then be used. needs_check : 'needs_check' if set, '-' if not set A metadata operation has failed, resulting in the needs_check flag being set in the metadata's superblock. The metadata device must be deactivated and checked/repaired before the cache can be made fully operational again. '-' indicates needs_check is not set. Messages -------- Policies will have different tunables, specific to each one, so we need a generic way of getting and setting these. Device-mapper messages are used. (A sysfs interface would also be possible.) The message format is: E.g. dmsetup message my_cache 0 sequential_threshold 1024 Invalidation is removing an entry from the cache without writing it back. Cache blocks can be invalidated via the invalidate_cblocks message, which takes an arbitrary number of cblock ranges. Each cblock range's end value is "one past the end", meaning 5-10 expresses a range of values from 5 to 9. Each cblock must be expressed as a decimal value, in the future a variant message that takes cblock ranges expressed in hexadecimal may be needed to better support efficient invalidation of larger caches. The cache must be in passthrough mode when invalidate_cblocks is used. invalidate_cblocks [|-]* E.g. 
dmsetup message my_cache 0 invalidate_cblocks 2345 3456-4567 5678-6789 Examples ======== The test suite can be found here: https://github.com/jthornber/device-mapper-test-suite dmsetup create my_cache --table '0 41943040 cache /dev/mapper/metadata \ /dev/mapper/ssd /dev/mapper/origin 512 1 writeback default 0' dmsetup create my_cache --table '0 41943040 cache /dev/mapper/metadata \ /dev/mapper/ssd /dev/mapper/origin 1024 1 writeback \ mq 4 sequential_threshold 1024 random_threshold 8' LVM2.2.02.176/doc/kernel/zero.txt0000644000000000000120000000322613176752421015074 0ustar rootwheeldm-zero ======= Device-Mapper's "zero" target provides a block-device that always returns zero'd data on reads and silently drops writes. This is similar behavior to /dev/zero, but as a block-device instead of a character-device. Dm-zero has no target-specific parameters. One very interesting use of dm-zero is for creating "sparse" devices in conjunction with dm-snapshot. A sparse device reports a device-size larger than the amount of actual storage space available for that device. A user can write data anywhere within the sparse device and read it back like a normal device. Reads to previously unwritten areas will return a zero'd buffer. When enough data has been written to fill up the actual storage space, the sparse device is deactivated. This can be very useful for testing device and filesystem limitations. To create a sparse device, start by creating a dm-zero device that's the desired size of the sparse device. For this example, we'll assume a 10TB sparse device. TEN_TERABYTES=`expr 10 \* 1024 \* 1024 \* 1024 \* 2` # 10 TB in sectors echo "0 $TEN_TERABYTES zero" | dmsetup create zero1 Then create a snapshot of the zero device, using any available block-device as the COW device. The size of the COW device will determine the amount of real space available to the sparse device. For this example, we'll assume /dev/sdb1 is an available 10GB partition. echo "0 $TEN_TERABYTES snapshot /dev/mapper/zero1 /dev/sdb1 p 128" | \ dmsetup create sparse1 This will create a 10TB sparse device called /dev/mapper/sparse1 that has 10GB of actual storage space available. If more than 10GB of data is written to this device, it will start returning I/O errors. LVM2.2.02.176/doc/kernel/thin-provisioning.txt0000644000000000000120000003446113176752421017610 0ustar rootwheelIntroduction ============ This document describes a collection of device-mapper targets that between them implement thin-provisioning and snapshots. The main highlight of this implementation, compared to the previous implementation of snapshots, is that it allows many virtual devices to be stored on the same data volume. This simplifies administration and allows the sharing of data between volumes, thus reducing disk usage. Another significant feature is support for an arbitrary depth of recursive snapshots (snapshots of snapshots of snapshots ...). The previous implementation of snapshots did this by chaining together lookup tables, and so performance was O(depth). This new implementation uses a single data structure to avoid this degradation with depth. Fragmentation may still be an issue, however, in some scenarios. Metadata is stored on a separate device from data, giving the administrator some freedom, for example to: - Improve metadata resilience by storing metadata on a mirrored volume but data on a non-mirrored one. - Improve performance by storing the metadata on SSD. Status ====== These targets are very much still in the EXPERIMENTAL state. 
Please do not yet rely on them in production. But do experiment and offer us feedback. Different use cases will have different performance characteristics, for example due to fragmentation of the data volume. If you find this software is not performing as expected please mail dm-devel@redhat.com with details and we'll try our best to improve things for you. Userspace tools for checking and repairing the metadata are under development. Cookbook ======== This section describes some quick recipes for using thin provisioning. They use the dmsetup program to control the device-mapper driver directly. End users will be advised to use a higher-level volume manager such as LVM2 once support has been added. Pool device ----------- The pool device ties together the metadata volume and the data volume. It maps I/O linearly to the data volume and updates the metadata via two mechanisms: - Function calls from the thin targets - Device-mapper 'messages' from userspace which control the creation of new virtual devices amongst other things. Setting up a fresh pool device ------------------------------ Setting up a pool device requires a valid metadata device, and a data device. If you do not have an existing metadata device you can make one by zeroing the first 4k to indicate empty metadata. dd if=/dev/zero of=$metadata_dev bs=4096 count=1 The amount of metadata you need will vary according to how many blocks are shared between thin devices (i.e. through snapshots). If you have less sharing than average you'll need a larger-than-average metadata device. As a guide, we suggest you calculate the number of bytes to use in the metadata device as 48 * $data_dev_size / $data_block_size but round it up to 2MB if the answer is smaller. If you're creating large numbers of snapshots which are recording large amounts of change, you may find you need to increase this. The largest size supported is 16GB: If the device is larger, a warning will be issued and the excess space will not be used. Reloading a pool table ---------------------- You may reload a pool's table, indeed this is how the pool is resized if it runs out of space. (N.B. While specifying a different metadata device when reloading is not forbidden at the moment, things will go wrong if it does not route I/O to exactly the same on-disk location as previously.) Using an existing pool device ----------------------------- dmsetup create pool \ --table "0 20971520 thin-pool $metadata_dev $data_dev \ $data_block_size $low_water_mark" $data_block_size gives the smallest unit of disk space that can be allocated at a time expressed in units of 512-byte sectors. $data_block_size must be between 128 (64KB) and 2097152 (1GB) and a multiple of 128 (64KB). $data_block_size cannot be changed after the thin-pool is created. People primarily interested in thin provisioning may want to use a value such as 1024 (512KB). People doing lots of snapshotting may want a smaller value such as 128 (64KB). If you are not zeroing newly-allocated data, a larger $data_block_size in the region of 256000 (128MB) is suggested. $low_water_mark is expressed in blocks of size $data_block_size. If free space on the data device drops below this level then a dm event will be triggered which a userspace daemon should catch allowing it to extend the pool device. Only one such event will be sent. Resuming a device with a new table itself triggers an event so the userspace daemon can use this to detect a situation where a new table already exceeds the threshold. 
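Relating back to the metadata sizing guidance earlier in this section, here is a minimal shell sketch (not from the original document; variable names are illustrative and the result is only a rough estimate) of the suggested metadata device size:

  # Rough sketch of the "48 * $data_dev_size / $data_block_size" guideline
  # above, with the 2MB floor applied; $data_dev is an illustrative variable.
  data_dev_size=$(blockdev --getsz "$data_dev")   # data device size in 512-byte sectors
  data_block_size=128                             # chosen block size in sectors (64KB)
  meta_bytes=$(( 48 * data_dev_size / data_block_size ))
  min_bytes=$(( 2 * 1024 * 1024 ))
  [ "$meta_bytes" -lt "$min_bytes" ] && meta_bytes=$min_bytes
  echo "suggested metadata device size: $meta_bytes bytes"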
A low water mark for the metadata device is maintained in the kernel and will trigger a dm event if free space on the metadata device drops below it. Updating on-disk metadata ------------------------- On-disk metadata is committed every time a FLUSH or FUA bio is written. If no such requests are made then commits will occur every second. This means the thin-provisioning target behaves like a physical disk that has a volatile write cache. If power is lost you may lose some recent writes. The metadata should always be consistent in spite of any crash. If data space is exhausted the pool will either error or queue IO according to the configuration (see: error_if_no_space). If metadata space is exhausted or a metadata operation fails: the pool will error IO until the pool is taken offline and repair is performed to 1) fix any potential inconsistencies and 2) clear the flag that imposes repair. Once the pool's metadata device is repaired it may be resized, which will allow the pool to return to normal operation. Note that if a pool is flagged as needing repair, the pool's data and metadata devices cannot be resized until repair is performed. It should also be noted that when the pool's metadata space is exhausted the current metadata transaction is aborted. Given that the pool will cache IO whose completion may have already been acknowledged to upper IO layers (e.g. filesystem) it is strongly suggested that consistency checks (e.g. fsck) be performed on those layers when repair of the pool is required. Thin provisioning ----------------- i) Creating a new thinly-provisioned volume. To create a new thinly- provisioned volume you must send a message to an active pool device, /dev/mapper/pool in this example. dmsetup message /dev/mapper/pool 0 "create_thin 0" Here '0' is an identifier for the volume, a 24-bit number. It's up to the caller to allocate and manage these identifiers. If the identifier is already in use, the message will fail with -EEXIST. ii) Using a thinly-provisioned volume. Thinly-provisioned volumes are activated using the 'thin' target: dmsetup create thin --table "0 2097152 thin /dev/mapper/pool 0" The last parameter is the identifier for the thinp device. Internal snapshots ------------------ i) Creating an internal snapshot. Snapshots are created with another message to the pool. N.B. If the origin device that you wish to snapshot is active, you must suspend it before creating the snapshot to avoid corruption. This is NOT enforced at the moment, so please be careful! dmsetup suspend /dev/mapper/thin dmsetup message /dev/mapper/pool 0 "create_snap 1 0" dmsetup resume /dev/mapper/thin Here '1' is the identifier for the volume, a 24-bit number. '0' is the identifier for the origin device. ii) Using an internal snapshot. Once created, the user doesn't have to worry about any connection between the origin and the snapshot. Indeed the snapshot is no different from any other thinly-provisioned device and can be snapshotted itself via the same method. It's perfectly legal to have only one of them active, and there's no ordering requirement on activating or removing them both. (This differs from conventional device-mapper snapshots.) Activate it exactly the same way as any other thinly-provisioned volume: dmsetup create snap --table "0 2097152 thin /dev/mapper/pool 1" External snapshots ------------------ You can use an external _read only_ device as an origin for a thinly-provisioned volume. Any read to an unprovisioned area of the thin device will be passed through to the origin. 
Writes trigger the allocation of new blocks as usual. One use case for this is VM hosts that want to run guests on thinly-provisioned volumes but have the base image on another device (possibly shared between many VMs). You must not write to the origin device if you use this technique! Of course, you may write to the thin device and take internal snapshots of the thin volume. i) Creating a snapshot of an external device This is the same as creating a thin device. You don't mention the origin at this stage. dmsetup message /dev/mapper/pool 0 "create_thin 0" ii) Using a snapshot of an external device. Append an extra parameter to the thin target specifying the origin: dmsetup create snap --table "0 2097152 thin /dev/mapper/pool 0 /dev/image" N.B. All descendants (internal snapshots) of this snapshot require the same extra origin parameter. Deactivation ------------ All devices using a pool must be deactivated before the pool itself can be. dmsetup remove thin dmsetup remove snap dmsetup remove pool Reference ========= 'thin-pool' target ------------------ i) Constructor thin-pool \ [ []*] Optional feature arguments: skip_block_zeroing: Skip the zeroing of newly-provisioned blocks. ignore_discard: Disable discard support. no_discard_passdown: Don't pass discards down to the underlying data device, but just remove the mapping. read_only: Don't allow any changes to be made to the pool metadata. error_if_no_space: Error IOs, instead of queueing, if no space. Data block size must be between 64KB (128 sectors) and 1GB (2097152 sectors) inclusive. ii) Status / / [no_]discard_passdown ro|rw transaction id: A 64-bit number used by userspace to help synchronise with metadata from volume managers. used data blocks / total data blocks If the number of free blocks drops below the pool's low water mark a dm event will be sent to userspace. This event is edge-triggered and it will occur only once after each resume so volume manager writers should register for the event and then check the target's status. held metadata root: The location, in blocks, of the metadata root that has been 'held' for userspace read access. '-' indicates there is no held root. discard_passdown|no_discard_passdown Whether or not discards are actually being passed down to the underlying device. When this is enabled when loading the table, it can get disabled if the underlying device doesn't support it. ro|rw|out_of_data_space If the pool encounters certain types of device failures it will drop into a read-only metadata mode in which no changes to the pool metadata (like allocating new blocks) are permitted. In serious cases where even a read-only mode is deemed unsafe no further I/O will be permitted and the status will just contain the string 'Fail'. The userspace recovery tools should then be used. error_if_no_space|queue_if_no_space If the pool runs out of data or metadata space, the pool will either queue or error the IO destined to the data device. The default is to queue the IO until more space is added or the 'no_space_timeout' expires. The 'no_space_timeout' dm-thin-pool module parameter can be used to change this timeout -- it defaults to 60 seconds but may be disabled using a value of 0. needs_check A metadata operation has failed, resulting in the needs_check flag being set in the metadata's superblock. The metadata device must be deactivated and checked/repaired before the thin-pool can be made fully operational again. '-' indicates needs_check is not set. iii) Messages create_thin Create a new thinly-provisioned device. 
<dev id> is an arbitrary unique 24-bit identifier chosen by the caller.

    create_snap <dev id> <origin id>
	Create a new snapshot of another thinly-provisioned device.
	<dev id> is an arbitrary unique 24-bit identifier chosen by the caller.
	<origin id> is the identifier of the thinly-provisioned device
	of which the new device will be a snapshot.

    delete <dev id>
	Deletes a thin device. Irreversible.

    set_transaction_id <current id> <new id>
	Userland volume managers, such as LVM, need a way to synchronise
	their external metadata with the internal metadata of the pool
	target. The thin-pool target offers to store an arbitrary 64-bit
	transaction id and return it on the target's status line. To avoid
	races you must provide what you think the current transaction id is
	when you change it with this compare-and-swap message.

    reserve_metadata_snap
	Reserve a copy of the data mapping btree for use by userland.
	This allows userland to inspect the mappings as they were when this
	message was executed. Use the pool's status command to get the root
	block associated with the metadata snapshot.

    release_metadata_snap
	Release a previously reserved copy of the data mapping btree.

'thin' target
-------------

i) Constructor

    thin <pool dev> <dev id> [<external origin dev>]

    pool dev:
	the thin-pool device, e.g. /dev/mapper/my_pool or 253:0

    dev id:
	the internal device identifier of the device to be activated.

    external origin dev:
	an optional block device outside the pool to be treated as a
	read-only snapshot origin: reads to unprovisioned areas of the thin
	target will be mapped to this device.

The pool doesn't store any size against the thin devices. If you load a thin target that is smaller than you've been using previously, then you'll have no access to blocks mapped beyond the end. If you load a target that is bigger than before, then extra blocks will be provisioned as and when needed.

ii) Status

    <nr mapped sectors> <highest mapped sector>

    If the pool has encountered device errors and failed, the status will
    just contain the string 'Fail'. The userspace recovery tools should
    then be used.
LVM2.2.02.176/doc/kernel/crypt.txt0000644000000000000120000001402113176752421015251 0ustar rootwheeldm-crypt
=========

Device-Mapper's "crypt" target provides transparent encryption of block devices using the kernel crypto API.

For a more detailed description of supported parameters see:
https://gitlab.com/cryptsetup/cryptsetup/wikis/DMCrypt

Parameters:
    <cipher> <key> <iv_offset> <device path> \
    <offset> [<#opt_params> <opt_params>]

<cipher>
    Encryption cipher, encryption mode and Initial Vector (IV) generator.

    The cipher specifications format is:
	cipher[:keycount]-chainmode-ivmode[:ivopts]
    Examples:
	aes-cbc-essiv:sha256
	aes-xts-plain64
	serpent-xts-plain64

    Cipher format also supports direct specification with the kernel crypto
    API format (selected by the capi: prefix). The IV specification is the
    same as for the first format type. This format is mainly used for
    specification of authenticated modes.

    The crypto API cipher specifications format is:
	capi:cipher_api_spec-ivmode[:ivopts]
    Examples:
	capi:cbc(aes)-essiv:sha256
	capi:xts(aes)-plain64
    Examples of authenticated modes:
	capi:gcm(aes)-random
	capi:authenc(hmac(sha256),xts(aes))-random
	capi:rfc7539(chacha20,poly1305)-random

    /proc/crypto contains a list of currently loaded crypto modes.

<key>
    Key used for encryption. It is encoded either as a hexadecimal number
    or it can be passed as <key_string> prefixed with a single colon
    character (':') for keys residing in the kernel keyring service.
    You can only use key sizes that are valid for the selected cipher in
    combination with the selected iv mode.
    Note that for some iv modes the key string can contain additional keys
    (for example an IV seed) so the key contains more parts concatenated
    into a single string.

<key_string>
    The kernel keyring key is identified by a string in the following format:
    <key_size>:<key_type>:<key_description>.

<key_size>
    The encryption key size in bytes. The kernel key payload size must
    match the value passed in <key_size>.

<key_type>
    Either 'logon' or 'user' kernel key type.

<key_description>
    The kernel keyring key description the crypt target should look for
    when loading a key of <key_type>.

<keycount>
    Multi-key compatibility mode. You can define <keycount> keys and then
    sectors are encrypted according to their offsets (sector 0 uses key0;
    sector 1 uses key1 etc.). <keycount> must be a power of two.

<iv_offset>
    The IV offset is a sector count that is added to the sector number
    before creating the IV.

<device path>
    This is the device that is going to be used as backend and contains the
    encrypted data. You can specify it as a path like /dev/xxx or a device
    number <major>:<minor>.

<offset>
    Starting sector within the device where the encrypted data begins.

<#opt_params>
    Number of optional parameters. If there are no optional parameters, the
    optional parameters section can be skipped or #opt_params can be zero.
    Otherwise #opt_params is the number of following arguments.

    Example of optional parameters section:
	3 allow_discards same_cpu_crypt submit_from_crypt_cpus

allow_discards
    Block discard requests (a.k.a. TRIM) are passed through the crypt
    device. The default is to ignore discard requests.

    WARNING: Assess the specific security risks carefully before enabling
    this option. For example, allowing discards on encrypted devices may
    lead to the leak of information about the ciphertext device (filesystem
    type, used space etc.) if the discarded blocks can be located easily on
    the device later.

same_cpu_crypt
    Perform encryption using the same cpu that IO was submitted on. The
    default is to use an unbound workqueue so that encryption work is
    automatically balanced between available CPUs.

submit_from_crypt_cpus
    Disable offloading writes to a separate thread after encryption. There
    are some situations where offloading write bios from the encryption
    threads to a single thread degrades performance significantly. The
    default is to offload write bios to the same thread because it benefits
    CFQ to have writes submitted using the same context.

integrity:<bytes>:<type>
    The device requires additional <bytes> of metadata per sector, stored in
    a per-bio integrity structure. This metadata must be provided by an
    underlying dm-integrity target.

    The <type> can be "none" if the metadata is used only for a persistent IV.

    For Authenticated Encryption with Additional Data (AEAD) the <type> is
    "aead". An AEAD mode additionally calculates and verifies integrity for
    the encrypted device. The additional space is then used for storing the
    authentication tag (and persistent IV if needed).

sector_size:<bytes>
    Use <bytes> as the encryption unit instead of 512-byte sectors. This
    option can be in the range 512 - 4096 bytes and must be a power of two.
    The virtual device will announce this size as a minimal IO and logical
    sector.

iv_large_sectors
    IV generators will use the sector number counted in <sector_size> units
    instead of default 512-byte sectors.

    For example, if <sector_size> is 4096 bytes, the plain64 IV for the
    second sector will be 8 (without the flag) and 1 if iv_large_sectors is
    present. The <iv_offset> must be a multiple of <sector_size> (in
    512-byte units) if this flag is specified.
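In addition to the example scripts below, here is a hedged sketch (not from the original document) of a table line that combines optional parameters; the key is a dummy placeholder and $1 stands for a backing device whose size is assumed to be a multiple of the 4096-byte encryption sector:

[[
#!/bin/sh
# Illustrative sketch only: AES-128-XTS with discards allowed and a
# 4096-byte encryption sector size (2 optional parameters follow the offset).
dmsetup create crypt_opt --table "0 `blockdev --getsz $1` crypt aes-xts-plain64 babebabebabebabebabebabebabebabecafecafecafecafecafecafecafecafe 0 $1 0 2 allow_discards sector_size:4096"
]]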
Example scripts =============== LUKS (Linux Unified Key Setup) is now the preferred way to set up disk encryption with dm-crypt using the 'cryptsetup' utility, see https://gitlab.com/cryptsetup/cryptsetup [[ #!/bin/sh # Create a crypt device using dmsetup dmsetup create crypt1 --table "0 `blockdev --getsz $1` crypt aes-cbc-essiv:sha256 babebabebabebabebabebabebabebabe 0 $1 0" ]] [[ #!/bin/sh # Create a crypt device using dmsetup when encryption key is stored in keyring service dmsetup create crypt2 --table "0 `blockdev --getsize $1` crypt aes-cbc-essiv:sha256 :32:logon:my_prefix:my_key 0 $1 0" ]] [[ #!/bin/sh # Create a crypt device using cryptsetup and LUKS header with default cipher cryptsetup luksFormat $1 cryptsetup luksOpen $1 crypt1 ]] LVM2.2.02.176/doc/kernel/kcopyd.txt0000644000000000000120000000355513176752421015413 0ustar rootwheelkcopyd ====== Kcopyd provides the ability to copy a range of sectors from one block-device to one or more other block-devices, with an asynchronous completion notification. It is used by dm-snapshot and dm-mirror. Users of kcopyd must first create a client and indicate how many memory pages to set aside for their copy jobs. This is done with a call to kcopyd_client_create(). int kcopyd_client_create(unsigned int num_pages, struct kcopyd_client **result); To start a copy job, the user must set up io_region structures to describe the source and destinations of the copy. Each io_region indicates a block-device along with the starting sector and size of the region. The source of the copy is given as one io_region structure, and the destinations of the copy are given as an array of io_region structures. struct io_region { struct block_device *bdev; sector_t sector; sector_t count; }; To start the copy, the user calls kcopyd_copy(), passing in the client pointer, pointers to the source and destination io_regions, the name of a completion callback routine, and a pointer to some context data for the copy. int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from, unsigned int num_dests, struct io_region *dests, unsigned int flags, kcopyd_notify_fn fn, void *context); typedef void (*kcopyd_notify_fn)(int read_err, unsigned int write_err, void *context); When the copy completes, kcopyd will call the user's completion routine, passing back the user's context pointer. It will also indicate if a read or write error occurred during the copy. When a user is done with all their copy jobs, they should call kcopyd_client_destroy() to delete the kcopyd client, which will release the associated memory pages. void kcopyd_client_destroy(struct kcopyd_client *kc); LVM2.2.02.176/doc/kernel/switch.txt0000644000000000000120000001264113176752421015417 0ustar rootwheeldm-switch ========= The device-mapper switch target creates a device that supports an arbitrary mapping of fixed-size regions of I/O across a fixed set of paths. The path used for any specific region can be switched dynamically by sending the target a message. It maps I/O to underlying block devices efficiently when there is a large number of fixed-sized address regions but there is no simple pattern that would allow for a compact representation of the mapping such as dm-stripe. Background ---------- Dell EqualLogic and some other iSCSI storage arrays use a distributed frameless architecture. In this architecture, the storage group consists of a number of distinct storage arrays ("members") each having independent controllers, disk storage and network adapters. 
When a LUN is created it is spread across multiple members. The details of the spreading are hidden from initiators connected to this storage system. The storage group exposes a single target discovery portal, no matter how many members are being used. When iSCSI sessions are created, each session is connected to an eth port on a single member. Data to a LUN can be sent on any iSCSI session, and if the blocks being accessed are stored on another member the I/O will be forwarded as required. This forwarding is invisible to the initiator. The storage layout is also dynamic, and the blocks stored on disk may be moved from member to member as needed to balance the load. This architecture simplifies the management and configuration of both the storage group and initiators. In a multipathing configuration, it is possible to set up multiple iSCSI sessions to use multiple network interfaces on both the host and target to take advantage of the increased network bandwidth. An initiator could use a simple round robin algorithm to send I/O across all paths and let the storage array members forward it as necessary, but there is a performance advantage to sending data directly to the correct member. A device-mapper table already lets you map different regions of a device onto different targets. However in this architecture the LUN is spread with an address region size on the order of 10s of MBs, which means the resulting table could have more than a million entries and consume far too much memory. Using this device-mapper switch target we can now build a two-layer device hierarchy: Upper Tier - Determine which array member the I/O should be sent to. Lower Tier - Load balance amongst paths to a particular member. The lower tier consists of a single dm multipath device for each member. Each of these multipath devices contains the set of paths directly to the array member in one priority group, and leverages existing path selectors to load balance amongst these paths. We also build a non-preferred priority group containing paths to other array members for failover reasons. The upper tier consists of a single dm-switch device. This device uses a bitmap to look up the location of the I/O and choose the appropriate lower tier device to route the I/O. By using a bitmap we are able to use 4 bits for each address range in a 16 member group (which is very large for us). This is a much denser representation than the dm table b-tree can achieve. Construction Parameters ======================= [...] [ ]+ The number of paths across which to distribute the I/O. The number of 512-byte sectors in a region. Each region can be redirected to any of the available paths. The number of optional arguments. Currently, no optional arguments are supported and so this must be zero. The block device that represents a specific path to the device. The offset of the start of data on the specific (in units of 512-byte sectors). This number is added to the sector number when forwarding the request to the specific path. Typically it is zero. Messages ======== set_region_mappings : []: []:... Modify the region table by specifying which regions are redirected to which paths. The region number (region size was specified in constructor parameters). If index is omitted, the next region (previous index + 1) is used. Expressed in hexadecimal (WITHOUT any prefix like 0x). The path number in the range 0 ... ( - 1). Expressed in hexadecimal (WITHOUT any prefix like 0x). R, This parameter allows repetitive patterns to be loaded quickly. 
and are hexadecimal numbers. The last mappings are repeated in the next slots. Status ====== No status line is reported. Example ======= Assume that you have volumes vg1/switch0 vg1/switch1 vg1/switch2 with the same size. Create a switch device with 64kB region size: dmsetup create switch --table "0 `blockdev --getsz /dev/vg1/switch0` switch 3 128 0 /dev/vg1/switch0 0 /dev/vg1/switch1 0 /dev/vg1/switch2 0" Set mappings for the first 7 entries to point to devices switch0, switch1, switch2, switch0, switch1, switch2, switch1: dmsetup message switch 0 set_region_mappings 0:0 :1 :2 :0 :1 :2 :1 Set repetitive mapping. This command: dmsetup message switch 0 set_region_mappings 1000:1 :2 R2,10 is equivalent to: dmsetup message switch 0 set_region_mappings 1000:1 :2 :1 :2 :1 :2 :1 :2 \ :1 :2 :1 :2 :1 :2 :1 :2 :1 :2 LVM2.2.02.176/doc/kernel/persistent-data.txt0000644000000000000120000000562413176752421017230 0ustar rootwheelIntroduction ============ The more-sophisticated device-mapper targets require complex metadata that is managed in kernel. In late 2010 we were seeing that various different targets were rolling their own data structures, for example: - Mikulas Patocka's multisnap implementation - Heinz Mauelshagen's thin provisioning target - Another btree-based caching target posted to dm-devel - Another multi-snapshot target based on a design of Daniel Phillips Maintaining these data structures takes a lot of work, so if possible we'd like to reduce the number. The persistent-data library is an attempt to provide a re-usable framework for people who want to store metadata in device-mapper targets. It's currently used by the thin-provisioning target and an upcoming hierarchical storage target. Overview ======== The main documentation is in the header files which can all be found under drivers/md/persistent-data. The block manager ----------------- dm-block-manager.[hc] This provides access to the data on disk in fixed sized-blocks. There is a read/write locking interface to prevent concurrent accesses, and keep data that is being used in the cache. Clients of persistent-data are unlikely to use this directly. The transaction manager ----------------------- dm-transaction-manager.[hc] This restricts access to blocks and enforces copy-on-write semantics. The only way you can get hold of a writable block through the transaction manager is by shadowing an existing block (ie. doing copy-on-write) or allocating a fresh one. Shadowing is elided within the same transaction so performance is reasonable. The commit method ensures that all data is flushed before it writes the superblock. On power failure your metadata will be as it was when last committed. The Space Maps -------------- dm-space-map.h dm-space-map-metadata.[hc] dm-space-map-disk.[hc] On-disk data structures that keep track of reference counts of blocks. Also acts as the allocator of new blocks. Currently two implementations: a simpler one for managing blocks on a different device (eg. thinly-provisioned data blocks); and one for managing the metadata space. The latter is complicated by the need to store its own data within the space it's managing. The data structures ------------------- dm-btree.[hc] dm-btree-remove.c dm-btree-spine.c dm-btree-internal.h Currently there is only one data structure, a hierarchical btree. There are plans to add more. For example, something with an array-like interface would see a lot of use. The btree is 'hierarchical' in that you can define it to be composed of nested btrees, and take multiple keys. 
For example, the thin-provisioning target uses a btree with two levels of nesting. The first maps a device id to a mapping tree, and that in turn maps a virtual block to a physical block. Values stored in the btrees can have arbitrary size. Keys are always 64bits, although nesting allows you to use multiple keys. LVM2.2.02.176/doc/kernel/era.txt0000644000000000000120000000610713176752421014665 0ustar rootwheelIntroduction ============ dm-era is a target that behaves similar to the linear target. In addition it keeps track of which blocks were written within a user defined period of time called an 'era'. Each era target instance maintains the current era as a monotonically increasing 32-bit counter. Use cases include tracking changed blocks for backup software, and partially invalidating the contents of a cache to restore cache coherency after rolling back a vendor snapshot. Constructor =========== era metadata dev : fast device holding the persistent metadata origin dev : device holding data blocks that may change block size : block size of origin data device, granularity that is tracked by the target Messages ======== None of the dm messages take any arguments. checkpoint ---------- Possibly move to a new era. You shouldn't assume the era has incremented. After sending this message, you should check the current era via the status line. take_metadata_snap ------------------ Create a clone of the metadata, to allow a userland process to read it. drop_metadata_snap ------------------ Drop the metadata snapshot. Status ====== <#used metadata blocks>/<#total metadata blocks> metadata block size : Fixed block size for each metadata block in sectors #used metadata blocks : Number of metadata blocks used #total metadata blocks : Total number of metadata blocks current era : The current era held metadata root : The location, in blocks, of the metadata root that has been 'held' for userspace read access. '-' indicates there is no held root Detailed use case ================= The scenario of invalidating a cache when rolling back a vendor snapshot was the primary use case when developing this target: Taking a vendor snapshot ------------------------ - Send a checkpoint message to the era target - Make a note of the current era in its status line - Take vendor snapshot (the era and snapshot should be forever associated now). Rolling back to an vendor snapshot ---------------------------------- - Cache enters passthrough mode (see: dm-cache's docs in cache.txt) - Rollback vendor storage - Take metadata snapshot - Ascertain which blocks have been written since the snapshot was taken by checking each block's era - Invalidate those blocks in the caching software - Cache returns to writeback/writethrough mode Memory usage ============ The target uses a bitset to record writes in the current era. It also has a spare bitset ready for switching over to a new era. Other than that it uses a few 4k blocks for updating metadata. (4 * nr_blocks) bytes + buffers Resilience ========== Metadata is updated on disk before a write to a previously unwritten block is performed. As such dm-era should not be effected by a hard crash such as power failure. Userland tools ============== Userland tools are found in the increasingly poorly named thin-provisioning-tools project: https://github.com/jthornber/thin-provisioning-tools LVM2.2.02.176/doc/kernel/striped.txt0000644000000000000120000000343713176752421015573 0ustar rootwheeldm-stripe ========= Device-Mapper's "striped" target is used to create a striped (i.e. 
RAID-0) device across one or more underlying devices. Data is written in "chunks", with consecutive chunks rotating among the underlying devices. This can potentially provide improved I/O throughput by utilizing several physical devices in parallel. Parameters: [ ]+ : Number of underlying devices. : Size of each chunk of data. Must be at least as large as the system's PAGE_SIZE. : Full pathname to the underlying block-device, or a "major:minor" device-number. : Starting sector within the device. One or more underlying devices can be specified. The striped device size must be a multiple of the chunk size multiplied by the number of underlying devices. Example scripts =============== [[ #!/usr/bin/perl -w # Create a striped device across any number of underlying devices. The device # will be called "stripe_dev" and have a chunk-size of 128k. my $chunk_size = 128 * 2; my $dev_name = "stripe_dev"; my $num_devs = @ARGV; my @devs = @ARGV; my ($min_dev_size, $stripe_dev_size, $i); if (!$num_devs) { die("Specify at least one device\n"); } $min_dev_size = `blockdev --getsz $devs[0]`; for ($i = 1; $i < $num_devs; $i++) { my $this_size = `blockdev --getsz $devs[$i]`; $min_dev_size = ($min_dev_size < $this_size) ? $min_dev_size : $this_size; } $stripe_dev_size = $min_dev_size * $num_devs; $stripe_dev_size -= $stripe_dev_size % ($chunk_size * $num_devs); $table = "0 $stripe_dev_size striped $num_devs $chunk_size"; for ($i = 0; $i < $num_devs; $i++) { $table .= " $devs[$i] 0"; } `echo $table | dmsetup create $dev_name`; ]] LVM2.2.02.176/doc/kernel/io.txt0000644000000000000120000000634213176752421014526 0ustar rootwheeldm-io ===== Dm-io provides synchronous and asynchronous I/O services. There are three types of I/O services available, and each type has a sync and an async version. The user must set up an io_region structure to describe the desired location of the I/O. Each io_region indicates a block-device along with the starting sector and size of the region. struct io_region { struct block_device *bdev; sector_t sector; sector_t count; }; Dm-io can read from one io_region or write to one or more io_regions. Writes to multiple regions are specified by an array of io_region structures. The first I/O service type takes a list of memory pages as the data buffer for the I/O, along with an offset into the first page. struct page_list { struct page_list *next; struct page *page; }; int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw, struct page_list *pl, unsigned int offset, unsigned long *error_bits); int dm_io_async(unsigned int num_regions, struct io_region *where, int rw, struct page_list *pl, unsigned int offset, io_notify_fn fn, void *context); The second I/O service type takes an array of bio vectors as the data buffer for the I/O. This service can be handy if the caller has a pre-assembled bio, but wants to direct different portions of the bio to different devices. int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw, struct bio_vec *bvec, unsigned long *error_bits); int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw, struct bio_vec *bvec, io_notify_fn fn, void *context); The third I/O service type takes a pointer to a vmalloc'd memory buffer as the data buffer for the I/O. This service can be handy if the caller needs to do I/O to a large region but doesn't want to allocate a large number of individual memory pages. 
int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw, void *data, unsigned long *error_bits); int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw, void *data, io_notify_fn fn, void *context); Callers of the asynchronous I/O services must include the name of a completion callback routine and a pointer to some context data for the I/O. typedef void (*io_notify_fn)(unsigned long error, void *context); The "error" parameter in this callback, as well as the "*error" parameter in all of the synchronous versions, is a bitset (instead of a simple error value). In the case of an write-I/O to multiple regions, this bitset allows dm-io to indicate success or failure on each individual region. Before using any of the dm-io services, the user should call dm_io_get() and specify the number of pages they expect to perform I/O on concurrently. Dm-io will attempt to resize its mempool to make sure enough pages are always available in order to avoid unnecessary waiting while performing I/O. When the user is finished using the dm-io services, they should call dm_io_put() and specify the same number of pages that were given on the dm_io_get() call. LVM2.2.02.176/doc/kernel/service-time.txt0000644000000000000120000000625213176752421016513 0ustar rootwheeldm-service-time =============== dm-service-time is a path selector module for device-mapper targets, which selects a path with the shortest estimated service time for the incoming I/O. The service time for each path is estimated by dividing the total size of in-flight I/Os on a path with the performance value of the path. The performance value is a relative throughput value among all paths in a path-group, and it can be specified as a table argument. The path selector name is 'service-time'. Table parameters for each path: [ []] : The number of I/Os to dispatch using the selected path before switching to the next path. If not given, internal default is used. To check the default value, see the activated table. : The relative throughput value of the path among all paths in the path-group. The valid range is 0-100. If not given, minimum value '1' is used. If '0' is given, the path isn't selected while other paths having a positive value are available. Status for each path: \ : 'A' if the path is active, 'F' if the path is failed. : The number of path failures. : The size of in-flight I/Os on the path. : The relative throughput value of the path among all paths in the path-group. Algorithm ========= dm-service-time adds the I/O size to 'in-flight-size' when the I/O is dispatched and subtracts when completed. Basically, dm-service-time selects a path having minimum service time which is calculated by: ('in-flight-size' + 'size-of-incoming-io') / 'relative_throughput' However, some optimizations below are used to reduce the calculation as much as possible. 1. If the paths have the same 'relative_throughput', skip the division and just compare the 'in-flight-size'. 2. If the paths have the same 'in-flight-size', skip the division and just compare the 'relative_throughput'. 3. If some paths have non-zero 'relative_throughput' and others have zero 'relative_throughput', ignore those paths with zero 'relative_throughput'. If such optimizations can't be applied, calculate service time, and compare service time. If calculated service time is equal, the path having maximum 'relative_throughput' may be better. So compare 'relative_throughput' then. 
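As a worked illustration of the selection formula (the numbers here are hypothetical, not taken from the module): suppose a 64 KiB I/O arrives and two paths are available. Path A has 128 KiB in flight with relative_throughput 1, and path B has 512 KiB in flight with relative_throughput 4. The estimated service times are (128 + 64) / 1 = 192 for path A and (512 + 64) / 4 = 144 for path B, so path B is selected even though it has more I/O in flight.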
Examples ======== In case that 2 paths (sda and sdb) are used with repeat_count == 128 and sda has an average throughput 1GB/s and sdb has 4GB/s, 'relative_throughput' value may be '1' for sda and '4' for sdb. # echo "0 10 multipath 0 0 1 1 service-time 0 2 2 8:0 128 1 8:16 128 4" \ dmsetup create test # # dmsetup table test: 0 10 multipath 0 0 1 1 service-time 0 2 2 8:0 128 1 8:16 128 4 # # dmsetup status test: 0 10 multipath 2 0 0 0 1 1 E 0 2 2 8:0 A 0 0 1 8:16 A 0 0 4 Or '2' for sda and '8' for sdb would be also true. # echo "0 10 multipath 0 0 1 1 service-time 0 2 2 8:0 128 2 8:16 128 8" \ dmsetup create test # # dmsetup table test: 0 10 multipath 0 0 1 1 service-time 0 2 2 8:0 128 2 8:16 128 8 # # dmsetup status test: 0 10 multipath 2 0 0 0 1 1 E 0 2 2 8:0 A 0 0 2 8:16 A 0 0 8 LVM2.2.02.176/doc/kernel/raid.txt0000644000000000000120000003710713176752421015041 0ustar rootwheeldm-raid ======= The device-mapper RAID (dm-raid) target provides a bridge from DM to MD. It allows the MD RAID drivers to be accessed using a device-mapper interface. Mapping Table Interface ----------------------- The target is named "raid" and it accepts the following parameters: <#raid_params> \ <#raid_devs> [.. ] : raid0 RAID0 striping (no resilience) raid1 RAID1 mirroring raid4 RAID4 with dedicated last parity disk raid5_n RAID5 with dedicated last parity disk supporting takeover Same as raid4 -Transitory layout raid5_la RAID5 left asymmetric - rotating parity 0 with data continuation raid5_ra RAID5 right asymmetric - rotating parity N with data continuation raid5_ls RAID5 left symmetric - rotating parity 0 with data restart raid5_rs RAID5 right symmetric - rotating parity N with data restart raid6_zr RAID6 zero restart - rotating parity zero (left-to-right) with data restart raid6_nr RAID6 N restart - rotating parity N (right-to-left) with data restart raid6_nc RAID6 N continue - rotating parity N (right-to-left) with data continuation raid6_n_6 RAID6 with dedicate parity disks - parity and Q-syndrome on the last 2 disks; layout for takeover from/to raid4/raid5_n raid6_la_6 Same as "raid_la" plus dedicated last Q-syndrome disk - layout for takeover from raid5_la from/to raid6 raid6_ra_6 Same as "raid5_ra" dedicated last Q-syndrome disk - layout for takeover from raid5_ra from/to raid6 raid6_ls_6 Same as "raid5_ls" dedicated last Q-syndrome disk - layout for takeover from raid5_ls from/to raid6 raid6_rs_6 Same as "raid5_rs" dedicated last Q-syndrome disk - layout for takeover from raid5_rs from/to raid6 raid10 Various RAID10 inspired algorithms chosen by additional params (see raid10_format and raid10_copies below) - RAID10: Striped Mirrors (aka 'Striping on top of mirrors') - RAID1E: Integrated Adjacent Stripe Mirroring - RAID1E: Integrated Offset Stripe Mirroring - and other similar RAID10 variants Reference: Chapter 4 of http://www.snia.org/sites/default/files/SNIA_DDF_Technical_Position_v2.0.pdf <#raid_params>: The number of parameters that follow. consists of Mandatory parameters: : Chunk size in sectors. This parameter is often known as "stripe size". It is the only mandatory parameter and is placed first. followed by optional parameters (in any order): [sync|nosync] Force or prevent RAID initialization. [rebuild ] Rebuild drive number 'idx' (first drive is 0). [daemon_sleep ] Interval between runs of the bitmap daemon that clear bits. A longer interval means less bitmap I/O but resyncing after a failure is likely to take longer. 
[min_recovery_rate ] Throttle RAID initialization [max_recovery_rate ] Throttle RAID initialization [write_mostly ] Mark drive index 'idx' write-mostly. [max_write_behind ] See '--write-behind=' (man mdadm) [stripe_cache ] Stripe cache size (RAID 4/5/6 only) [region_size ] The region_size multiplied by the number of regions is the logical size of the array. The bitmap records the device synchronisation state for each region. [raid10_copies <# copies>] [raid10_format ] These two options are used to alter the default layout of a RAID10 configuration. The number of copies is can be specified, but the default is 2. There are also three variations to how the copies are laid down - the default is "near". Near copies are what most people think of with respect to mirroring. If these options are left unspecified, or 'raid10_copies 2' and/or 'raid10_format near' are given, then the layouts for 2, 3 and 4 devices are: 2 drives 3 drives 4 drives -------- ---------- -------------- A1 A1 A1 A1 A2 A1 A1 A2 A2 A2 A2 A2 A3 A3 A3 A3 A4 A4 A3 A3 A4 A4 A5 A5 A5 A6 A6 A4 A4 A5 A6 A6 A7 A7 A8 A8 .. .. .. .. .. .. .. .. .. The 2-device layout is equivalent 2-way RAID1. The 4-device layout is what a traditional RAID10 would look like. The 3-device layout is what might be called a 'RAID1E - Integrated Adjacent Stripe Mirroring'. If 'raid10_copies 2' and 'raid10_format far', then the layouts for 2, 3 and 4 devices are: 2 drives 3 drives 4 drives -------- -------------- -------------------- A1 A2 A1 A2 A3 A1 A2 A3 A4 A3 A4 A4 A5 A6 A5 A6 A7 A8 A5 A6 A7 A8 A9 A9 A10 A11 A12 .. .. .. .. .. .. .. .. .. A2 A1 A3 A1 A2 A2 A1 A4 A3 A4 A3 A6 A4 A5 A6 A5 A8 A7 A6 A5 A9 A7 A8 A10 A9 A12 A11 .. .. .. .. .. .. .. .. .. If 'raid10_copies 2' and 'raid10_format offset', then the layouts for 2, 3 and 4 devices are: 2 drives 3 drives 4 drives -------- ------------ ----------------- A1 A2 A1 A2 A3 A1 A2 A3 A4 A2 A1 A3 A1 A2 A2 A1 A4 A3 A3 A4 A4 A5 A6 A5 A6 A7 A8 A4 A3 A6 A4 A5 A6 A5 A8 A7 A5 A6 A7 A8 A9 A9 A10 A11 A12 A6 A5 A9 A7 A8 A10 A9 A12 A11 .. .. .. .. .. .. .. .. .. Here we see layouts closely akin to 'RAID1E - Integrated Offset Stripe Mirroring'. [delta_disks ] The delta_disks option value (-251 < N < +251) triggers device removal (negative value) or device addition (positive value) to any reshape supporting raid levels 4/5/6 and 10. RAID levels 4/5/6 allow for addition of devices (metadata and data device tuple), raid10_near and raid10_offset only allow for device addition. raid10_far does not support any reshaping at all. A minimum of devices have to be kept to enforce resilience, which is 3 devices for raid4/5 and 4 devices for raid6. [data_offset ] This option value defines the offset into each data device where the data starts. This is used to provide out-of-place reshaping space to avoid writing over data whilst changing the layout of stripes, hence an interruption/crash may happen at any time without the risk of losing data. E.g. when adding devices to an existing raid set during forward reshaping, the out-of-place space will be allocated at the beginning of each raid device. The kernel raid4/5/6/10 MD personalities supporting such device addition will read the data from the existing first stripes (those with smaller number of stripes) starting at data_offset to fill up a new stripe with the larger number of stripes, calculate the redundancy blocks (CRC/Q-syndrome) and write that new stripe to offset 0. Same will be applied to all N-1 other new stripes. This out-of-place scheme is used to change the RAID type (i.e. 
the allocation algorithm) as well, e.g. changing from raid5_ls to raid5_n. [journal_dev ] This option adds a journal device to raid4/5/6 raid sets and uses it to close the 'write hole' caused by the non-atomic updates to the component devices which can cause data loss during recovery. The journal device is used as writethrough thus causing writes to be throttled versus non-journaled raid4/5/6 sets. Takeover/reshape is not possible with a raid4/5/6 journal device; it has to be deconfigured before requesting these. [journal_mode ] This option sets the caching mode on journaled raid4/5/6 raid sets (see 'journal_dev ' above) to 'writethrough' or 'writeback'. If 'writeback' is selected the journal device has to be resilient and must not suffer from the 'write hole' problem itself (e.g. use raid1 or raid10) to avoid a single point of failure. <#raid_devs>: The number of devices composing the array. Each device consists of two entries. The first is the device containing the metadata (if any); the second is the one containing the data. A Maximum of 64 metadata/data device entries are supported up to target version 1.8.0. 1.9.0 supports up to 253 which is enforced by the used MD kernel runtime. If a drive has failed or is missing at creation time, a '-' can be given for both the metadata and data drives for a given position. Example Tables -------------- # RAID4 - 4 data drives, 1 parity (no metadata devices) # No metadata devices specified to hold superblock/bitmap info # Chunk size of 1MiB # (Lines separated for easy reading) 0 1960893648 raid \ raid4 1 2048 \ 5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81 # RAID4 - 4 data drives, 1 parity (with metadata devices) # Chunk size of 1MiB, force RAID initialization, # min recovery rate at 20 kiB/sec/disk 0 1960893648 raid \ raid4 4 2048 sync min_recovery_rate 20 \ 5 8:17 8:18 8:33 8:34 8:49 8:50 8:65 8:66 8:81 8:82 Status Output ------------- 'dmsetup table' displays the table used to construct the mapping. The optional parameters are always printed in the order listed above with "sync" or "nosync" always output ahead of the other arguments, regardless of the order used when originally loading the table. Arguments that can be repeated are ordered by value. 'dmsetup status' yields information on the state and health of the array. The output is as follows (normally a single line, but expanded here for clarity): 1: raid \ 2: <#devices> \ 3: Line 1 is the standard output produced by device-mapper. Line 2 & 3 are produced by the raid target and are best explained by example: 0 1960893648 raid raid4 5 AAAAA 2/490221568 init 0 Here we can see the RAID type is raid4, there are 5 devices - all of which are 'A'live, and the array is 2/490221568 complete with its initial recovery. Here is a fuller description of the individual fields: Same as the used to create the array. One char for each device, indicating: 'A' = alive and in-sync, 'a' = alive but not in-sync, 'D' = dead/failed. The ratio indicating how much of the array has undergone the process described by 'sync_action'. If the 'sync_action' is "check" or "repair", then the process of "resync" or "recover" can be considered complete. One of the following possible states: idle - No synchronization action is being performed. frozen - The current action has been halted. resync - Array is undergoing its initial synchronization or is resynchronizing after an unclean shutdown (possibly aided by a bitmap). recover - A device in the array is being rebuilt or replaced. 
check - A user-initiated full check of the array is being performed. All blocks are read and checked for consistency. The number of discrepancies found are recorded in . No changes are made to the array by this action. repair - The same as "check", but discrepancies are corrected. reshape - The array is undergoing a reshape. The number of discrepancies found between mirror copies in RAID1/10 or wrong parity values found in RAID4/5/6. This value is valid only after a "check" of the array is performed. A healthy array has a 'mismatch_cnt' of 0. The current data offset to the start of the user data on each component device of a raid set (see the respective raid parameter to support out-of-place reshaping). 'A' - active write-through journal device. 'a' - active write-back journal device. 'D' - dead journal device. '-' - no journal device. Message Interface ----------------- The dm-raid target will accept certain actions through the 'message' interface. ('man dmsetup' for more information on the message interface.) These actions include: "idle" - Halt the current sync action. "frozen" - Freeze the current sync action. "resync" - Initiate/continue a resync. "recover"- Initiate/continue a recover process. "check" - Initiate a check (i.e. a "scrub") of the array. "repair" - Initiate a repair of the array. Discard Support --------------- The implementation of discard support among hardware vendors varies. When a block is discarded, some storage devices will return zeroes when the block is read. These devices set the 'discard_zeroes_data' attribute. Other devices will return random data. Confusingly, some devices that advertise 'discard_zeroes_data' will not reliably return zeroes when discarded blocks are read! Since RAID 4/5/6 uses blocks from a number of devices to calculate parity blocks and (for performance reasons) relies on 'discard_zeroes_data' being reliable, it is important that the devices be consistent. Blocks may be discarded in the middle of a RAID 4/5/6 stripe and if subsequent read results are not consistent, the parity blocks may be calculated differently at any time; making the parity blocks useless for redundancy. It is important to understand how your hardware behaves with discards if you are going to enable discards with RAID 4/5/6. Since the behavior of storage devices is unreliable in this respect, even when reporting 'discard_zeroes_data', by default RAID 4/5/6 discard support is disabled -- this ensures data integrity at the expense of losing some performance. Storage devices that properly support 'discard_zeroes_data' are increasingly whitelisted in the kernel and can thus be trusted. For trusted devices, the following dm-raid module parameter can be set to safely enable discard support for RAID 4/5/6: 'devices_handle_discards_safely' Version History --------------- 1.0.0 Initial version. Support for RAID 4/5/6 1.1.0 Added support for RAID 1 1.2.0 Handle creation of arrays that contain failed devices. 1.3.0 Added support for RAID 10 1.3.1 Allow device replacement/rebuild for RAID 10 1.3.2 Fix/improve redundancy checking for RAID10 1.4.0 Non-functional change. Removes arg from mapping function. 1.4.1 RAID10 fix redundancy validation checks (commit 55ebbb5). 1.4.2 Add RAID10 "far" and "offset" algorithm support. 1.5.0 Add message interface to allow manipulation of the sync_action. New status (STATUSTYPE_INFO) fields: sync_action and mismatch_cnt. 1.5.1 Add ability to restore transiently failed devices on resume. 
1.5.2 'mismatch_cnt' is zero unless [last_]sync_action is "check". 1.6.0 Add discard support (and devices_handle_discard_safely module param). 1.7.0 Add support for MD RAID0 mappings. 1.8.0 Explicitly check for compatible flags in the superblock metadata and reject to start the raid set if any are set by a newer target version, thus avoiding data corruption on a raid set with a reshape in progress. 1.9.0 Add support for RAID level takeover/reshape/region size and set size reduction. 1.9.1 Fix activation of existing RAID 4/10 mapped devices 1.9.2 Don't emit '- -' on the status table line in case the constructor fails reading a superblock. Correctly emit 'maj:min1 maj:min2' and 'D' on the status line. If '- -' is passed into the constructor, emit '- -' on the table line and '-' as the status line health character. 1.10.0 Add support for raid4/5/6 journal device 1.10.1 Fix data corruption on reshape request 1.11.0 Fix table line argument order (wrong raid10_copies/raid10_format sequence) 1.11.1 Add raid4/5/6 journal write-back support via journal_mode option LVM2.2.02.176/doc/kernel/uevent.txt0000644000000000000120000000513213176752421015421 0ustar rootwheelThe device-mapper uevent code adds the capability to device-mapper to create and send kobject uevents (uevents). Previously device-mapper events were only available through the ioctl interface. The advantage of the uevents interface is the event contains environment attributes providing increased context for the event avoiding the need to query the state of the device-mapper device after the event is received. There are two functions currently for device-mapper events. The first function listed creates the event and the second function sends the event(s). void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti, const char *path, unsigned nr_valid_paths) void dm_send_uevents(struct list_head *events, struct kobject *kobj) The variables added to the uevent environment are: Variable Name: DM_TARGET Uevent Action(s): KOBJ_CHANGE Type: string Description: Value: Name of device-mapper target that generated the event. Variable Name: DM_ACTION Uevent Action(s): KOBJ_CHANGE Type: string Description: Value: Device-mapper specific action that caused the uevent action. PATH_FAILED - A path has failed. PATH_REINSTATED - A path has been reinstated. Variable Name: DM_SEQNUM Uevent Action(s): KOBJ_CHANGE Type: unsigned integer Description: A sequence number for this specific device-mapper device. Value: Valid unsigned integer range. Variable Name: DM_PATH Uevent Action(s): KOBJ_CHANGE Type: string Description: Major and minor number of the path device pertaining to this event. Value: Path name in the form of "Major:Minor" Variable Name: DM_NR_VALID_PATHS Uevent Action(s): KOBJ_CHANGE Type: unsigned integer Description: Value: Valid unsigned integer range. Variable Name: DM_NAME Uevent Action(s): KOBJ_CHANGE Type: string Description: Name of the device-mapper device. Value: Name Variable Name: DM_UUID Uevent Action(s): KOBJ_CHANGE Type: string Description: UUID of the device-mapper device. Value: UUID. (Empty string if there isn't one.) An example of the uevents generated as captured by udevmonitor is shown below. 1.) Path failure. UEVENT[1192521009.711215] change@/block/dm-3 ACTION=change DEVPATH=/block/dm-3 SUBSYSTEM=block DM_TARGET=multipath DM_ACTION=PATH_FAILED DM_SEQNUM=1 DM_PATH=8:32 DM_NR_VALID_PATHS=0 DM_NAME=mpath2 DM_UUID=mpath-35333333000002328 MINOR=3 MAJOR=253 SEQNUM=1130 2.) Path reinstate. 
UEVENT[1192521132.989927] change@/block/dm-3 ACTION=change DEVPATH=/block/dm-3 SUBSYSTEM=block DM_TARGET=multipath DM_ACTION=PATH_REINSTATED DM_SEQNUM=2 DM_PATH=8:32 DM_NR_VALID_PATHS=1 DM_NAME=mpath2 DM_UUID=mpath-35333333000002328 MINOR=3 MAJOR=253 SEQNUM=1131 LVM2.2.02.176/doc/kernel/cache-policies.txt0000644000000000000120000001103613176752421016763 0ustar rootwheelGuidance for writing policies ============================= Try to keep transactionality out of it. The core is careful to avoid asking about anything that is migrating. This is a pain, but makes it easier to write the policies. Mappings are loaded into the policy at construction time. Every bio that is mapped by the target is referred to the policy. The policy can return a simple HIT or MISS or issue a migration. Currently there's no way for the policy to issue background work, e.g. to start writing back dirty blocks that are going to be evicted soon. Because we map bios, rather than requests it's easy for the policy to get fooled by many small bios. For this reason the core target issues periodic ticks to the policy. It's suggested that the policy doesn't update states (eg, hit counts) for a block more than once for each tick. The core ticks by watching bios complete, and so trying to see when the io scheduler has let the ios run. Overview of supplied cache replacement policies =============================================== multiqueue (mq) --------------- This policy is now an alias for smq (see below). The following tunables are accepted, but have no effect: 'sequential_threshold <#nr_sequential_ios>' 'random_threshold <#nr_random_ios>' 'read_promote_adjustment ' 'write_promote_adjustment ' 'discard_promote_adjustment ' Stochastic multiqueue (smq) --------------------------- This policy is the default. The stochastic multi-queue (smq) policy addresses some of the problems with the multiqueue (mq) policy. The smq policy (vs mq) offers the promise of less memory utilization, improved performance and increased adaptability in the face of changing workloads. smq also does not have any cumbersome tuning knobs. Users may switch from "mq" to "smq" simply by appropriately reloading a DM table that is using the cache target. Doing so will cause all of the mq policy's hints to be dropped. Also, performance of the cache may degrade slightly until smq recalculates the origin device's hotspots that should be cached. Memory usage: The mq policy used a lot of memory; 88 bytes per cache block on a 64 bit machine. smq uses 28bit indexes to implement it's data structures rather than pointers. It avoids storing an explicit hit count for each block. It has a 'hotspot' queue, rather than a pre-cache, which uses a quarter of the entries (each hotspot block covers a larger area than a single cache block). All this means smq uses ~25bytes per cache block. Still a lot of memory, but a substantial improvement nontheless. Level balancing: mq placed entries in different levels of the multiqueue structures based on their hit count (~ln(hit count)). This meant the bottom levels generally had the most entries, and the top ones had very few. Having unbalanced levels like this reduced the efficacy of the multiqueue. smq does not maintain a hit count, instead it swaps hit entries with the least recently used entry from the level above. The overall ordering being a side effect of this stochastic process. With this scheme we can decide how many entries occupy each multiqueue level, resulting in better promotion/demotion decisions. 
Adaptability: The mq policy maintained a hit count for each cache block. For a different block to get promoted to the cache it's hit count has to exceed the lowest currently in the cache. This meant it could take a long time for the cache to adapt between varying IO patterns. smq doesn't maintain hit counts, so a lot of this problem just goes away. In addition it tracks performance of the hotspot queue, which is used to decide which blocks to promote. If the hotspot queue is performing badly then it starts moving entries more quickly between levels. This lets it adapt to new IO patterns very quickly. Performance: Testing smq shows substantially better performance than mq. cleaner ------- The cleaner writes back all dirty blocks in a cache to decommission it. Examples ======== The syntax for a table is: cache <#feature_args> []* <#policy_args> []* The syntax to send a message using the dmsetup command is: dmsetup message 0 sequential_threshold 1024 dmsetup message 0 random_threshold 8 Using dmsetup: dmsetup create blah --table "0 268435456 cache /dev/sdb /dev/sdc \ /dev/sdd 512 0 mq 4 sequential_threshold 1024 random_threshold 8" creates a 128GB large mapped device named 'blah' with the sequential threshold set to 1024 and the random_threshold set to 8. LVM2.2.02.176/doc/kernel/integrity.txt0000644000000000000120000002100313176752421016124 0ustar rootwheelThe dm-integrity target emulates a block device that has additional per-sector tags that can be used for storing integrity information. A general problem with storing integrity tags with every sector is that writing the sector and the integrity tag must be atomic - i.e. in case of crash, either both sector and integrity tag or none of them is written. To guarantee write atomicity, the dm-integrity target uses journal, it writes sector data and integrity tags into a journal, commits the journal and then copies the data and integrity tags to their respective location. The dm-integrity target can be used with the dm-crypt target - in this situation the dm-crypt target creates the integrity data and passes them to the dm-integrity target via bio_integrity_payload attached to the bio. In this mode, the dm-crypt and dm-integrity targets provide authenticated disk encryption - if the attacker modifies the encrypted device, an I/O error is returned instead of random data. The dm-integrity target can also be used as a standalone target, in this mode it calculates and verifies the integrity tag internally. In this mode, the dm-integrity target can be used to detect silent data corruption on the disk or in the I/O path. When loading the target for the first time, the kernel driver will format the device. But it will only format the device if the superblock contains zeroes. If the superblock is neither valid nor zeroed, the dm-integrity target can't be loaded. To use the target for the first time: 1. overwrite the superblock with zeroes 2. load the dm-integrity target with one-sector size, the kernel driver will format the device 3. unload the dm-integrity target 4. read the "provided_data_sectors" value from the superblock 5. load the dm-integrity target with the the target size "provided_data_sectors" 6. if you want to use dm-integrity with dm-crypt, load the dm-crypt target with the size "provided_data_sectors" Target arguments: 1. the underlying block device 2. the number of reserved sector at the beginning of the device - the dm-integrity won't read of write these sectors 3. 
the size of the integrity tag (if "-" is used, the size is taken from the internal-hash algorithm) 4. mode: D - direct writes (without journal) - in this mode, journaling is not used and data sectors and integrity tags are written separately. In case of crash, it is possible that the data and integrity tag doesn't match. J - journaled writes - data and integrity tags are written to the journal and atomicity is guaranteed. In case of crash, either both data and tag or none of them are written. The journaled mode degrades write throughput twice because the data have to be written twice. R - recovery mode - in this mode, journal is not replayed, checksums are not checked and writes to the device are not allowed. This mode is useful for data recovery if the device cannot be activated in any of the other standard modes. 5. the number of additional arguments Additional arguments: journal_sectors:number The size of journal, this argument is used only if formatting the device. If the device is already formatted, the value from the superblock is used. interleave_sectors:number The number of interleaved sectors. This values is rounded down to a power of two. If the device is already formatted, the value from the superblock is used. buffer_sectors:number The number of sectors in one buffer. The value is rounded down to a power of two. The tag area is accessed using buffers, the buffer size is configurable. The large buffer size means that the I/O size will be larger, but there could be less I/Os issued. journal_watermark:number The journal watermark in percents. When the size of the journal exceeds this watermark, the thread that flushes the journal will be started. commit_time:number Commit time in milliseconds. When this time passes, the journal is written. The journal is also written immediatelly if the FLUSH request is received. internal_hash:algorithm(:key) (the key is optional) Use internal hash or crc. When this argument is used, the dm-integrity target won't accept integrity tags from the upper target, but it will automatically generate and verify the integrity tags. You can use a crc algorithm (such as crc32), then integrity target will protect the data against accidental corruption. You can also use a hmac algorithm (for example "hmac(sha256):0123456789abcdef"), in this mode it will provide cryptographic authentication of the data without encryption. When this argument is not used, the integrity tags are accepted from an upper layer target, such as dm-crypt. The upper layer target should check the validity of the integrity tags. journal_crypt:algorithm(:key) (the key is optional) Encrypt the journal using given algorithm to make sure that the attacker can't read the journal. You can use a block cipher here (such as "cbc(aes)") or a stream cipher (for example "chacha20", "salsa20", "ctr(aes)" or "ecb(arc4)"). The journal contains history of last writes to the block device, an attacker reading the journal could see the last sector nubmers that were written. From the sector numbers, the attacker can infer the size of files that were written. To protect against this situation, you can encrypt the journal. journal_mac:algorithm(:key) (the key is optional) Protect sector numbers in the journal from accidental or malicious modification. To protect against accidental modification, use a crc algorithm, to protect against malicious modification, use a hmac algorithm with a key. 
This option is not needed when using internal-hash because in this mode, the integrity of journal entries is checked when replaying the journal. Thus, modified sector number would be detected at this stage. block_size:number The size of a data block in bytes. The larger the block size the less overhead there is for per-block integrity metadata. Supported values are 512, 1024, 2048 and 4096 bytes. If not specified the default block size is 512 bytes. The journal mode (D/J), buffer_sectors, journal_watermark, commit_time can be changed when reloading the target (load an inactive table and swap the tables with suspend and resume). The other arguments should not be changed when reloading the target because the layout of disk data depend on them and the reloaded target would be non-functional. The layout of the formatted block device: * reserved sectors (they are not used by this target, they can be used for storing LUKS metadata or for other purpose), the size of the reserved area is specified in the target arguments * superblock (4kiB) * magic string - identifies that the device was formatted * version * log2(interleave sectors) * integrity tag size * the number of journal sections * provided data sectors - the number of sectors that this target provides (i.e. the size of the device minus the size of all metadata and padding). The user of this target should not send bios that access data beyond the "provided data sectors" limit. * flags - a flag is set if journal_mac is used * journal The journal is divided into sections, each section contains: * metadata area (4kiB), it contains journal entries every journal entry contains: * logical sector (specifies where the data and tag should be written) * last 8 bytes of data * integrity tag (the size is specified in the superblock) every metadata sector ends with * mac (8-bytes), all the macs in 8 metadata sectors form a 64-byte value. It is used to store hmac of sector numbers in the journal section, to protect against a possibility that the attacker tampers with sector numbers in the journal. * commit id * data area (the size is variable; it depends on how many journal entries fit into the metadata area) every sector in the data area contains: * data (504 bytes of data, the last 8 bytes are stored in the journal entry) * commit id To test if the whole journal section was written correctly, every 512-byte sector of the journal ends with 8-byte commit id. If the commit id matches on all sectors in a journal section, then it is assumed that the section was written correctly. If the commit id doesn't match, the section was written partially and it should not be replayed. * one or more runs of interleaved tags and data. Each run contains: * tag area - it contains integrity tags. There is one tag for each sector in the data area * data area - it contains data sectors. The number of data sectors in one run must be a power of two. log2 of this value is stored in the superblock. LVM2.2.02.176/doc/kernel/queue-length.txt0000644000000000000120000000230213176752421016512 0ustar rootwheeldm-queue-length =============== dm-queue-length is a path selector module for device-mapper targets, which selects a path with the least number of in-flight I/Os. The path selector name is 'queue-length'. Table parameters for each path: [] : The number of I/Os to dispatch using the selected path before switching to the next path. If not given, internal default is used. To check the default value, see the activated table. 
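For instance (the multipath map name 'mpatha' is hypothetical), the value in effect can be read back from the loaded table, where each path device in the output is followed by its repeat_count:

    dmsetup table mpatha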
Status for each path: : 'A' if the path is active, 'F' if the path is failed. : The number of path failures. : The number of in-flight I/Os on the path. Algorithm ========= dm-queue-length increments/decrements 'in-flight' when an I/O is dispatched/completed respectively. dm-queue-length selects a path with the minimum 'in-flight'. Examples ======== In case that 2 paths (sda and sdb) are used with repeat_count == 128. # echo "0 10 multipath 0 0 1 1 queue-length 0 2 1 8:0 128 8:16 128" \ dmsetup create test # # dmsetup table test: 0 10 multipath 0 0 1 1 queue-length 0 2 1 8:0 128 8:16 128 # # dmsetup status test: 0 10 multipath 2 0 0 0 1 1 E 0 2 1 8:0 A 0 0 8:16 A 0 0 LVM2.2.02.176/doc/lvm2-raid.txt0000644000000000000120000005474113176752421014442 0ustar rootwheel======================= = LVM RAID Design Doc = ======================= ############################# # Chapter 1: User-Interface # ############################# ***************** CREATING A RAID DEVICE ****************** 01: lvcreate --type \ 02: [--regionsize ] \ 03: [-i/--stripes <#>] [-I,--stripesize ] \ 04: [-m/--mirrors <#>] \ 05: [--[min|max]recoveryrate ] \ 06: [--stripecache ] \ 07: [--writemostly ] \ 08: [--maxwritebehind ] \ 09: [[no]sync] \ 10: \ 11: [devices] Line 01: I don't intend for there to be shorthand options for specifying the segment type. The available RAID types are: "raid0" - Stripe [NOT IMPLEMENTED] "raid1" - should replace DM Mirroring "raid10" - striped mirrors, [NOT IMPLEMENTED] "raid4" - RAID4 "raid5" - Same as "raid5_ls" (Same default as MD) "raid5_la" - RAID5 Rotating parity 0 with data continuation "raid5_ra" - RAID5 Rotating parity N with data continuation "raid5_ls" - RAID5 Rotating parity 0 with data restart "raid5_rs" - RAID5 Rotating parity N with data restart "raid6" - Same as "raid6_zr" "raid6_zr" - RAID6 Rotating parity 0 with data restart "raid6_nr" - RAID6 Rotating parity N with data restart "raid6_nc" - RAID6 Rotating parity N with data continuation The exception to 'no shorthand options' will be where the RAID implementations can displace traditional tagets. This is the case with 'mirror' and 'raid1'. In this case, "mirror_segtype_default" - found under the "global" section in lvm.conf - can be set to "mirror" or "raid1". The segment type inferred when the '-m' option is used will be taken from this setting. The default segment types can be overridden on the command line by using the '--type' argument. Line 02: Region size is relevant for all RAID types. It defines the granularity for which the bitmap will track the active areas of disk. The default is currently 4MiB. I see no reason to change this unless it is a problem for MD performance. MD does impose a restriction of 2^21 regions for a given device, however. This means two things: 1) we should never need a metadata area larger than 8kiB+sizeof(superblock)+bitmap_offset (IOW, pretty small) and 2) the region size will have to be upwardly revised if the device is larger than 8TiB (assuming defaults). Line 03/04: The '-m/--mirrors' option is only relevant to RAID1 and will be used just like it is today for DM mirroring. For all other RAID types, -i/--stripes and -I/--stripesize are relevant. The former will specify the number of data devices that will be used for striping. For example, if the user specifies '--type raid0 -i 3', then 3 devices are needed. If the user specifies '--type raid6 -i 3', then 5 devices are needed. The -I/--stripesize may be confusing to MD users, as they use the term "chunksize". 
I think they will adapt without issue and I don't wish to create a conflict with the term "chunksize" that we use for snapshots. Line 05/06/07: I'm still not clear on how to specify these options. Some are easier than others. '--writemostly' is particularly hard because it involves specifying which devices shall be 'write-mostly' and thus, also have 'max-write-behind' applied to them. It has been suggested that a '--readmostly'/'--readfavored' or similar option could be introduced as a way to specify a primary disk vs. specifying all the non-primary disks via '--writemostly'. I like this idea, but haven't come up with a good name yet. Thus, these will remain unimplemented until future specification. Line 09/10/11: These are familiar. Further creation related ideas: Today, you can specify '--type mirror' without an '-m/--mirrors' argument necessary. The number of devices defaults to two (and the log defaults to 'disk'). A similar thing should happen with the RAID types. All of them should default to having two data devices unless otherwise specified. This would mean a total number of 2 devices for RAID 0/1, 3 devices for RAID 4/5, and 4 devices for RAID 6/10. ***************** CONVERTING A RAID DEVICE ****************** 01: lvconvert [--type ] \ 02: [-R/--regionsize ] \ 03: [-i/--stripes <#>] [-I,--stripesize ] \ 04: [-m/--mirrors <#>] \ 05: [--merge] 06: [--splitmirrors <#> [--trackchanges]] \ 07: [--replace ] \ 08: [--[min|max]recoveryrate ] \ 09: [--stripecache ] \ 10: [--writemostly ] \ 11: [--maxwritebehind ] \ 12: vg/lv 13: [devices] lvconvert should work exactly as it does now when dealing with mirrors - even if(when) we switch to MD RAID1. Of course, there are no plans to allow the presense of the metadata area to be configurable (e.g. --corelog). It will be simple enough to detect if the LV being up/down-converted is new or old-style mirroring. If we choose to use MD RAID0 as well, it will be possible to change the number of stripes and the stripesize. It is therefore conceivable to see something like, 'lvconvert -i +1 vg/lv'. Line 01: It is possible to change the RAID type of an LV - even if that LV is already a RAID device of a different type. For example, you could change from RAID4 to RAID5 or RAID5 to RAID6. Line 02/03/04: These are familiar options - all of which would now be available as options for change. (However, it'd be nice if we didn't have regionsize in there. It's simple on the kernel side, but is just an extra - often unecessary - parameter to many functions in the LVM codebase.) Line 05: This option is used to merge an LV back into a RAID1 array - provided it was split for temporary read-only use by '--splitmirrors 1 --trackchanges'. Line 06: The '--splitmirrors <#>' argument should be familiar from the "mirror" segment type. It allows RAID1 images to be split from the array to form a new LV. Either the original LV or the split LV - or both - could become a linear LV as a result. If the '--trackchanges' argument is specified in addition to '--splitmirrors', an LV will be split from the array. It will be read-only. This operation does not change the original array - except that it uses an empty slot to hold the position of the split LV which it expects to return in the future (see the '--merge' argument). It tracks any changes that occur to the array while the slot is kept in reserve. If the LV is merged back into the array, only the changes are resync'ed to the returning image. 
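A sketch of that split/merge cycle, assuming a RAID1 LV 'vg/lv' (the LV name is arbitrary, and which image gets split - and therefore the resulting '_rimage_<N>' name - depends on the array):

    lvconvert --splitmirrors 1 --trackchanges vg/lv
    # ... use the read-only split image, e.g. vg/lv_rimage_1, for a backup ...
    lvconvert --merge vg/lv_rimage_1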
Repeating the 'lvconvert' operation without the '--trackchanges' option will complete the split of the LV permanently. Line 07: This option allows the user to specify a sub_lv (e.g. a mirror image) or a particular device for replacement. The device (or all the devices in the sub_lv) will be removed and replaced with different devices from the VG. Line 08/09/10/11: It should be possible to alter these parameters of a RAID device. As with lvcreate, however, I'm not entirely certain how to best define some of these. We don't need all the capabilities at once though, so it isn't a pressing issue. Line 12: The LV to operate on. Line 13: Devices that are to be used to satisfy the conversion request. If the operation removes devices or splits a mirror, then the devices specified form the list of candidates for removal. If the operation adds or replaces devices, then the devices specified form the list of candidates for allocation. ############################################### # Chapter 2: LVM RAID internal representation # ############################################### The internal representation is somewhat like mirroring, but with alterations for the different metadata components. LVM mirroring has a single log LV, but RAID will have one for each data device. Because of this, I've added a new 'areas' list to the 'struct lv_segment' - 'meta_areas'. There is exactly a one-to-one relationship between 'areas' and 'meta_areas'. The 'areas' array still holds the data sub-lv's (similar to mirroring), while the 'meta_areas' array holds the metadata sub-lv's (akin to the mirroring log device). The sub_lvs will be named '%s_rimage_%d' instead of '%s_mimage_%d' as it is for mirroring, and '%s_rmeta_%d' instead of '%s_mlog'. Thus, you can imagine an LV named 'foo' with the following layout: foo [foo's lv_segment] | |-> foo_rimage_0 (areas[0]) | [foo_rimage_0's lv_segment] |-> foo_rimage_1 (areas[1]) | [foo_rimage_1's lv_segment] | |-> foo_rmeta_0 (meta_areas[0]) | [foo_rmeta_0's lv_segment] |-> foo_rmeta_1 (meta_areas[1]) | [foo_rmeta_1's lv_segment] LVM Meta-data format ==================== The RAID format will need to be able to store parameters that are unique to RAID and unique to specific RAID sub-devices. It will be modeled after that of mirroring. Here is an example of the mirroring layout: lv { id = "agL1vP-1B8Z-5vnB-41cS-lhBJ-Gcvz-dh3L3H" status = ["READ", "WRITE", "VISIBLE"] flags = [] segment_count = 1 segment1 { start_extent = 0 extent_count = 125 # 500 Megabytes type = "mirror" mirror_count = 2 mirror_log = "lv_mlog" region_size = 1024 mirrors = [ "lv_mimage_0", 0, "lv_mimage_1", 0 ] } } The real trick is dealing with the metadata devices. Mirroring has an entry, 'mirror_log', in the top-level segment. This won't work for RAID because there is a one-to-one mapping between the data devices and the metadata devices. The mirror devices are layed-out in sub-device/le pairs. The 'le' parameter is redundant since it will always be zero. So for RAID, I have simple put the metadata and data devices in pairs without the 'le' parameter. RAID metadata: lv { id = "EnpqAM-5PEg-i9wB-5amn-P116-1T8k-nS3GfD" status = ["READ", "WRITE", "VISIBLE"] flags = [] segment_count = 1 segment1 { start_extent = 0 extent_count = 125 # 500 Megabytes type = "raid1" device_count = 2 region_size = 1024 raids = [ "lv_rmeta_0", "lv_rimage_0", "lv_rmeta_1", "lv_rimage_1", ] } } The metadata also must be capable of representing the various tunables. We already have a good example for one from mirroring, region_size. 
'max_write_behind', 'stripe_cache', and '[min|max]_recovery_rate' could also be handled in this way. However, 'write_mostly' cannot be handled in this way, because it is a characteristic associated with the sub_lvs, not the array as a whole. In these cases, the status field of the sub-lv's themselves will hold these flags - the meaning being only useful in the larger context. ############################################## # Chapter 3: LVM RAID implementation details # ############################################## New Segment Type(s) =================== I've created a new file 'lib/raid/raid.c' that will handle the various different RAID types. While there will be a unique segment type for each RAID variant, they will all share a common backend - segtype_handler functions and segtype->flags = SEG_RAID. I'm also adding a new field to 'struct segment_type', parity_devs. For every segment_type except RAID4/5/6, this will be 0. This field facilitates in allocation and size calculations. For example, the lvcreate for RAID5 would look something like: ~> lvcreate --type raid5 -L 30G -i 3 -n my_raid5 my_vg or ~> lvcreate --type raid5 -n my_raid5 my_vg /dev/sd[bcdef]1 In the former case, the stripe count (3) and device size are computed, and then 'segtype->parity_devs' extra devices are allocated of the same size. In the latter case, the number of PVs is determined and 'segtype->parity_devs' is subtracted off to determine the number of stripes. This should also work in the case of RAID10 and doing things in this manor should not affect the way size is calculated via the area_multiple. Allocation ========== When a RAID device is created, metadata LVs must be created along with the data LVs that will ultimately compose the top-level RAID array. For the foreseeable future, the metadata LVs must reside on the same device as (or at least one of the devices that compose) the data LV. We use this property to simplify the allocation process. Rather than allocating for the data LVs and then asking for a small chunk of space on the same device (or the other way around), we simply ask for the aggregate size of the data LV plus the metadata LV. Once we have the space allocated, we divide it between the metadata and data LVs. This also greatly simplifies the process of finding parallel space for all the data LVs that will compose the RAID array. When a RAID device is resized, we will not need to take the metadata LV into account, because it will already be present. Apart from the metadata areas, the other unique characteristic of RAID devices is the parity device count. The number of parity devices does nothing to the calculation of size-per-device. The 'area_multiple' means nothing here. The parity devices will simply be the same size as all the other devices and will also require a metadata LV (i.e. it is treated no differently than the other devices). Therefore, to allocate space for RAID devices, we need to know two things: 1) how many parity devices are required and 2) does an allocated area need to be split out for the metadata LVs after finding the space to fill the request. We simply add these two fields to the 'alloc_handle' data structure as, 'parity_count' and 'alloc_and_split_meta'. These two fields get set in '_alloc_init'. The 'segtype->parity_devs' holds the number of parity drives and can be directly copied to 'ah->parity_count' and 'alloc_and_split_meta' is set when a RAID segtype is detected and 'metadata_area_count' has been specified. 
With these two variables set, we can calculate how many allocated areas we need. Also, in the routines that find the actual space, they stop not when they have found ah->area_count but when they have found (ah->area_count + ah->parity_count). Conversion ========== RAID -> RAID, adding images --------------------------- When adding images to a RAID array, metadata and data components must be added as a pair. It is best to perform as many operations as possible before writing new LVM metadata. This allows us to error-out without having to unwind any changes. It also makes things easier if the machine should crash during a conversion operation. Thus, the actions performed when adding a new image are: 1) Allocate the required number of metadata/data pairs using the method describe above in 'Allocation' (i.e. find the metadata/data space as one unit and split the space between them after found - this keeps them together on the same device). 2) Form the metadata/data LVs from the allocated space (leave them visible) - setting required RAID_[IMAGE | META] flags as appropriate. 3) Write the LVM metadata 4) Activate and clear the metadata LVs. The clearing of the metadata requires the LVM metadata be written (step 3) and is a requirement before adding the new metadata LVs to the array. If the metadata is not cleared, it carry residual superblock state from a previous array the device may have been part of. 5) Deactivate new sub-LVs and set them "hidden". 6) expand the 'first_seg(raid_lv)->areas' and '->meta_areas' array for inclusion of the new sub-LVs 7) Add new sub-LVs and update 'first_seg(raid_lv)->area_count' 8) Commit new LVM metadata Failure during any of these steps will not affect the original RAID array. In the worst scenario, the user may have to remove the new sub-LVs that did not yet make it into the array. RAID -> RAID, removing images ----------------------------- To remove images from a RAID, the metadata/data LV pairs must be removed together. This is pretty straight-forward, but one place where RAID really differs from the "mirror" segment type is how the resulting "holes" are filled. When a device is removed from a "mirror" segment type, it is identified, moved to the end of the 'mirrored_seg->areas' array, and then removed. This action causes the other images to shift down and fill the position of the device which was removed. While "raid1" could be handled in this way, the other RAID types could not be - it would corrupt the ordering of the data on the array. Thus, when a device is removed from a RAID array, the corresponding metadata/data sub-LVs are removed from the 'raid_seg->meta_areas' and 'raid_seg->areas' arrays. The slot in these 'lv_segment_area' arrays are set to 'AREA_UNASSIGNED'. RAID is perfectly happy to construct a DM table mapping with '- -' if it comes across area assigned in such a way. The pair of dashes is a valid way to tell the RAID kernel target that the slot should be considered empty. So, we can remove devices from a RAID array without affecting the correct operation of the RAID. (It also becomes easy to replace the empty slots properly if a spare device is available.) In the case of RAID1 device removal, the empty slot can be safely eliminated. This is done by shifting the higher indexed devices down to fill the slot. Even the names of the images will be renamed to properly reflect their index in the array. Unlike the "mirror" segment type, you will never have an image named "*_rimage_1" occupying the index position 0. 
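For example (LV and device names are illustrative), a 3-way raid1 LV could be reduced to 2-way while naming the image to drop as the candidate for removal:

    lvconvert -m 1 vg/my_raid1 /dev/sdc1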
As with adding images, removing images holds off on commiting LVM metadata until all possible changes have been made. This reduces the likelyhood of bad intermediate stages being left due to a failure of operation or machine crash. RAID1 '--splitmirrors', '--trackchanges', and '--merge' operations ------------------------------------------------------------------ This suite of operations is only available to the "raid1" segment type. Splitting an image from a RAID1 array is almost identical to the removal of an image described above. However, the metadata LV associated with the split image is removed and the data LV is kept and promoted to a top-level device. (i.e. It is made visible and stripped of its RAID_IMAGE status flags.) When the '--trackchanges' option is given along with the '--splitmirrors' argument, the metadata LV is left as part of the original array. The data LV is set as 'VISIBLE' and read-only (~LVM_WRITE). When the array DM table is being created, it notices the read-only, VISIBLE nature of the sub-LV and puts in the '- -' sentinel. Only a single image can be split from the mirror and the name of the sub-LV cannot be changed. Unlike '--splitmirrors' on its own, the '--name' argument must not be specified. Therefore, the name of the newly split LV will remain the same '_rimage_', where 'N' is the index of the slot in the array for which it is associated. When an LV which was split from a RAID1 array with the '--trackchanges' option is merged back into the array, its read/write status is restored and it is set as "hidden" again. Recycling the array (suspend/resume) restores the sub-LV to its position in the array and begins the process of sync'ing the changes that were made since the time it was split from the array. RAID device replacement with '--replace' ---------------------------------------- This option is available to all RAID segment types. The '--replace' option can be used to remove a particular device from a RAID logical volume and replace it with a different one in one action (CLI command). The device device to be removed is specified as the argument to the '--replace' option. This option can be specified more than once in a single command, allowing multiple devices to be replaced at the same time - provided the RAID logical volume has the necessary redundancy to allow the action. The devices to be used as replacements can also be specified in the command; similar to the way allocatable devices are specified during an up-convert. Example> lvconvert --replace /dev/sdd1 --replace /dev/sde1 vg/lv /dev/sd[bc]1 RAID '--repair' --------------- This 'lvconvert' option is available to all RAID segment types and is described under "RAID Fault Handling". RAID Fault Handling =================== RAID is not like traditional LVM mirroring (i.e. the "mirror" segment type). LVM mirroring required failed devices to be removed or the logical volume would simply hang. RAID arrays can keep on running with failed devices. In fact, for RAID types other than RAID1 removing a device would mean substituting an error target or converting to a lower level RAID (e.g. RAID6 -> RAID5, or RAID4/5 to RAID0). Therefore, rather than removing a failed device unconditionally, the user has a couple of options to choose from. The automated response to a device failure is handled according to the user's preference defined in lvm.conf:activation.raid_fault_policy. The options are: # "warn" - Use the system log to warn the user that a device in the RAID # logical volume has failed. 
It is left to the user to run # 'lvconvert --repair' manually to remove or replace the failed # device. As long as the number of failed devices does not # exceed the redundancy of the logical volume (1 device for # raid4/5, 2 for raid6, etc) the logical volume will remain # usable. # # "remove" - NOT CURRENTLY IMPLEMENTED OR DOCUMENTED IN example.conf.in. # Remove the failed device and reduce the RAID logical volume # accordingly. If a single device dies in a 3-way mirror, # remove it and reduce the mirror to 2-way. If a single device # dies in a RAID 4/5 logical volume, reshape it to a striped # volume, etc - RAID 6 -> RAID 4/5 -> RAID 0. If devices # cannot be removed for lack of redundancy, fail. # THIS OPTION CANNOT YET BE IMPLEMENTED BECAUSE RESHAPE IS NOT # YET SUPPORTED IN linux/drivers/md/dm-raid.c. The superblock # does not yet hold enough information to support reshaping. # # "allocate" - Attempt to use any extra physical volumes in the volume # group as spares and replace faulty devices. If manual intervention is taken, either in response to the automated solution's "warn" mode or simply because dmeventd hadn't run, then the user can call 'lvconvert --repair vg/lv' and follow the prompts. They will be prompted whether or not to replace the device and cause a full recovery of the failed device. If replacement is chosen via the manual method or "allocate" is the policy taken by the automated response, then 'lvconvert --replace' is the mechanism used to attempt the replacement of the failed device. 'vgreduce --removemissing' is ineffectual at repairing RAID logical volumes. It will remove the failed device, but the RAID logical volume will simply continue to operate with an sub-LV. The user should clear the failed device with 'lvconvert --repair'. LVM2.2.02.176/doc/tagging.txt0000644000000000000120000001234313176752421014255 0ustar rootwheelTagging aims ============ 1) Ability to attach an unordered list of tags to LVM metadata objects. 2) Ability to add or remove tags easily. 3) Ability to select LVM objects for processing according to presence/absence of specific tags. 4) Ability to control through the config file which VGs/LVs are activated on different machines using names or tags. 5) Ability to overlay settings from different config files e.g. override some settings in a global config file locally. Clarifications ============== 1) Tag character set: A-Za-z0-9_+.- Can't start with hyphen & max length is 128 (NAME_LEN). 2) LVM object types that can be tagged: VG, LV, LV segment PV - tags are stored in VG metadata so disappear when PV becomes orphaned Snapshots can't be tagged, but their origin may be. 3) A tag can be used in place of any command line LVM object reference that accepts (a) a list of objects; or (b) a single object as long as the tag expands to a single object. This is not supported everywhere yet. Duplicate arguments in a list after argument expansion may get removed retaining the first copy of each argument. 4) Wherever there may be ambiguity of argument type, a tag must be prefixed by '@'; elsewhere an '@' prefix is optional. 5) LVM1 objects cannot be tagged, as the disk format doesn't support it. 6) Tags can be added or removed with --addtag or --deltag. Config file Extensions ====================== To define host tags in config file: tags { # Set a tag with the hostname hosttags = 1 tag1 { } tag2 { # If no exact match, tag is not set. 
LVM2.2.02.176/doc/tagging.txt0000644000000000000120000001234313176752421014255 0ustar rootwheelTagging aims
============

1) Ability to attach an unordered list of tags to LVM metadata objects.
2) Ability to add or remove tags easily.
3) Ability to select LVM objects for processing according to presence/absence
   of specific tags.
4) Ability to control through the config file which VGs/LVs are activated on
   different machines using names or tags.
5) Ability to overlay settings from different config files e.g. override some
   settings in a global config file locally.

Clarifications
==============

1) Tag character set: A-Za-z0-9_+.-
   Can't start with hyphen & max length is 128 (NAME_LEN).
2) LVM object types that can be tagged:
     VG, LV, LV segment
     PV - tags are stored in VG metadata so disappear when PV becomes orphaned
   Snapshots can't be tagged, but their origin may be.
3) A tag can be used in place of any command line LVM object reference that
   accepts (a) a list of objects; or (b) a single object as long as the tag
   expands to a single object.  This is not supported everywhere yet.
   Duplicate arguments in a list after argument expansion may get removed,
   retaining the first copy of each argument.
4) Wherever there may be ambiguity of argument type, a tag must be prefixed
   by '@'; elsewhere an '@' prefix is optional.
5) LVM1 objects cannot be tagged, as the disk format doesn't support it.
6) Tags can be added or removed with --addtag or --deltag.

Config file Extensions
======================

To define host tags in the config file:

  tags {
      # Set a tag with the hostname
      hosttags = 1

      tag1 { }

      tag2 {
          # If no exact match, tag is not set.
          host_list = [ "hostname", "dbase" ]
      }
  }

Activation config file example
==============================

  activation {
      volume_list = [ "vg1/lvol0", "@database" ]
  }

Matches against vgname, vgname/lvname or @tag set in *metadata*.
@* matches exactly against *any* tag set on the host.

The VG or LV only gets activated if a metadata tag matches.  The default if
there is no match is not to activate.

If volume_list is not present and any tags are defined on the host, then it
only activates if a host tag matches a metadata tag.

If volume_list is not present and no tags are defined on the host, then it
does activate.

Multiple config files
=====================

(a) lvm.conf
(b) lvm_<tag>.conf

At startup, load lvm.conf.  Process tag settings.  If any host tags were
defined, load lvm_<tag>.conf for each tag, if present.

When searching for a specific config file entry, the search order is (b) then
(a), stopping at the first match.  Within (b), use the reverse of the order in
which the tags were set, so the file for the most recently set tag is searched
first.  New tags set in (b) *do* trigger additional config file loads.

Usage Examples
==============

1) Simple activation control via metadata with static config files

lvm.conf: (Identical on every machine - global settings)

  tags {
      hosttags = 1
  }

From any machine in the cluster, add db1 to the list of machines that
activate vg1/lvol2:

  lvchange --addtag @db1 vg1/lvol2

(followed by lvchange -ay to actually activate it)

2) Multiple hosts.

Activate vg1 only on the database hosts, db1 and db2.
Activate vg2 only on the fileserver host fs1.
Activate nothing initially on the fileserver backup host fsb1, but be
prepared for it to take over from fs1.

Option (i) - centralised admin, static configuration replicated between hosts

  # Add @database tag to vg1's metadata
  vgchange --addtag @database vg1
  # Add @fileserver tag to vg2's metadata
  vgchange --addtag @fileserver vg2

lvm.conf: (Identical on every machine)

  tags {
      database {
          host_list = [ "db1", "db2" ]
      }
      fileserver {
          host_list = [ "fs1" ]
      }
      fileserverbackup {
          host_list = [ "fsb1" ]
      }
  }

  activation {
      # Only activate if host has a tag that matches a metadata tag
      volume_list = [ "@*" ]
  }

In the event of the fileserver host going down, vg2 can be brought up on fsb1
by running *on any node* 'vgchange --addtag @fileserverbackup vg2' followed by
'vgchange -ay vg2'

Option (ii) - localised admin & configuration (i.e.
each host holds *locally* which classes of volumes to activate) # Add @database tag to vg1's metadata vgchange --addtag @database vg1 # Add @fileserver tag to vg2's metadata vgchange --addtag @fileserver vg2 lvm.conf: (Identical on every machine - global settings) tags { hosttags = 1 } lvm_db1.conf: (only needs to be on db1 - could be symlink to lvm_db.conf) activation { volume_list = [ "@database" ] } lvm_db2.conf: (only needs to be on db2 - could be symlink to lvm_db.conf) activation { volume_list = [ "@database" ] } lvm_fs1.conf: (only needs to be on fs1 - could be symlink to lvm_fs.conf) activation { volume_list = [ "@fileserver" ] } If fileserver goes down, to bring a spare machine fsb1 in as fileserver, create lvm_fsb1.conf on fsb1 (or symlink to lvm_fs.conf): activation { volume_list = [ "@fileserver" ] } and run 'vgchange -ay vg2' or 'vgchange -ay @fileserver' LVM2.2.02.176/doc/lvm_fault_handling.txt0000644000000000000120000002607213176752421016476 0ustar rootwheelLVM device fault handling ========================= Introduction ------------ This document is to serve as the definitive source for information regarding the policies and procedures surrounding device failures in LVM. It codifies LVM's responses to device failures as well as the responsibilities of administrators. Device failures can be permanent or transient. A permanent failure is one where a device becomes inaccessible and will never be revived. A transient failure is a failure that can be recovered from (e.g. a power failure, intermittent network outage, block relocation, etc). The policies for handling both types of failures is described herein. Users need to be aware that there are two implementations of RAID1 in LVM. The first is defined by the "mirror" segment type. The second is defined by the "raid1" segment type. The characteristics of each of these are defined in lvm.conf under 'mirror_segtype_default' - the configuration setting used to identify the default RAID1 implementation used for LVM operations. Available Operations During a Device Failure -------------------------------------------- When there is a device failure, LVM behaves somewhat differently because only a subset of the available devices will be found for the particular volume group. The number of operations available to the administrator is diminished. It is not possible to create new logical volumes while PVs cannot be accessed, for example. Operations that create, convert, or resize logical volumes are disallowed, such as: - lvcreate - lvresize - lvreduce - lvextend - lvconvert (unless '--repair' is used) Operations that activate, deactivate, remove, report, or repair logical volumes are allowed, such as: - lvremove - vgremove (will remove all LVs, but not the VG until consistent) - pvs - vgs - lvs - lvchange -a [yn] - vgchange -a [yn] Operations specific to the handling of failed devices are allowed and are as follows: - 'vgreduce --removemissing ': This action is designed to remove the reference of a failed device from the LVM metadata stored on the remaining devices. If there are (portions of) logical volumes on the failed devices, the ability of the operation to proceed will depend on the type of logical volumes found. If an image (i.e leg or side) of a mirror is located on the device, that image/leg of the mirror is eliminated along with the failed device. The result of such a mirror reduction could be a no-longer-redundant linear device. 
If a linear, stripe, or snapshot device is located on the failed device, the
command will not proceed without a '--force' option.  The result of using the
'--force' option is the entire removal and complete loss of the non-redundant
logical volume.

If an image or metadata area of a RAID logical volume is on the failed
device, the affected sub-LV is replaced with an error target device, and it
shows up as such in 'lvs' output.  RAID logical volumes cannot be completely
repaired by vgreduce - 'lvconvert --repair' (listed below) must be used.

Once this operation is complete on volume groups not containing RAID logical
volumes, the volume group will again have a complete and consistent view of
the devices it contains.  Thus, all operations will be permitted - including
creation, conversion, and resizing operations.

It is currently the preferred method to call 'lvconvert --repair' on the
individual logical volumes to repair them, followed by
'vgreduce --removemissing' to extract the failed physical volume's
representation from the volume group.

- 'lvconvert --repair <VG>/<LV>':
This action is designed specifically to operate on individual logical
volumes.  If, for example, a failed device happened to contain the images of
four distinct mirrors, it would be necessary to run 'lvconvert --repair' on
each of them.  The ultimate result is to leave the faulty device in the
volume group, but have no logical volumes referencing it.  (This allows
'vgreduce --removemissing' to remove the physical volumes cleanly.)  In
addition to removing mirror or RAID images that reside on failed devices,
'lvconvert --repair' can also replace the failed device if there are spare
devices available in the volume group.  If run from the command line, the
user is prompted whether to simply remove the failed portions of the mirror
or to also allocate a replacement.  Optionally, the '--use-policies' flag can
be specified, which will cause the operation not to prompt the user but
instead respect the policies outlined in the LVM configuration file -
usually, /etc/lvm/lvm.conf.

Once this operation is complete, the logical volumes will be consistent.
However, the volume group will still be inconsistent - due to the
referenced-but-missing device/PV - and operations will still be restricted to
the aforementioned actions until either the device is restored or
'vgreduce --removemissing' is run.

Device Revival (transient failures):
------------------------------------
During a device failure, the above section describes what limitations a user
can expect.  However, if the device returns after a period of time, what to
expect will depend on what has happened during the time period when the
device was failed.  If no automated actions (described below) or user actions
were necessary or performed, then no change in operations or logical volume
layout will occur.

However, if an automated action or one of the aforementioned repair commands
was manually run, the returning device will be perceived as having stale LVM
metadata.  In this case, the user can expect to see a warning concerning
inconsistent metadata.  The metadata on the returning device will be
automatically replaced with the latest copy of the LVM metadata - restoring
consistency.  Note, while most LVM commands will automatically update the
metadata on a restored device, the following possible exceptions exist:
- pvs (when it does not read/update VG metadata)

Automated Target Response to Failures:
--------------------------------------
The only LVM target types (i.e. "personalities") that have an automated
response to failures are the mirror and RAID logical volumes.  The other
target types (linear, stripe, snapshot, etc) will simply propagate the
failure.  [A snapshot becomes invalid if its underlying device fails, but the
origin will remain valid - presuming the origin device has not failed.]

Starting with the "mirror" segment type, there are three types of errors that
a mirror can suffer - read, write, and resynchronization errors.  Each is
described in depth below.

Mirror read failures:
If a mirror is 'in-sync' (i.e. all images have been initialized and are
identical), a read failure will only produce a warning.  Data is simply
pulled from one of the other images and the fault is recorded.  Sometimes -
like in the case of bad block relocation - read errors can be recovered from
by the storage hardware.  Therefore, it is up to the user to decide whether
to reconfigure the mirror and remove the device that caused the error.
Managing the composition of a mirror is done with 'lvconvert' and removing a
device from a volume group can be done with 'vgreduce'.

If a mirror is not 'in-sync', a read failure will produce an I/O error.  This
error will propagate all the way up to the applications above the logical
volume (e.g. the file system).  No automatic intervention will take place in
this case either.  It is up to the user to decide what can be done/salvaged
in this scenario.  If the user is confident that the images of the mirror are
the same (or they are willing to simply attempt to retrieve whatever data
they can), 'lvconvert' can be used to eliminate the failed image and proceed.

Mirror resynchronization errors:
A resynchronization error is one that occurs when trying to initialize all
mirror images to be the same.  It can happen due to a failure to read the
primary image (the image considered to have the 'good' data), or due to a
failure to write the secondary images.  This type of failure only produces a
warning, and it is up to the user to take action in this case.  If the error
is transient, the user can simply reactivate the mirrored logical volume to
make another attempt at resynchronization.  If attempts to finish
resynchronization fail, 'lvconvert' can be used to remove the faulty device
from the mirror.

TODO...
Some sort of response to this type of error could be automated.  Since this
document is the definitive source for how to handle device failures, the
process should be defined here.  If the process is defined but not
implemented, it should be noted as such.  One idea might be to make a single
attempt to suspend/resume the mirror in an attempt to redo the sync operation
that failed.  On the other hand, if there is a permanent failure, it may
simply be best to wait for the user or the automated response that is sure to
follow from a write failure.
...TODO

Mirror write failures:
When a write error occurs on a mirror constituent device, an attempt to
handle the failure is automatically made.  This is done by calling
'lvconvert --repair --use-policies'.  The policies implied by this command
are set in the LVM configuration file.  They are:

- mirror_log_fault_policy:  This defines what action should be taken if the
  device containing the log fails.  The available options are "remove" and
  "allocate".  Either of these options will cause the faulty log device to be
  removed from the mirror.  The "allocate" policy will attempt the further
  action of trying to replace the failed disk log by using space that might
  be available in the volume group.  If the allocation fails (or the "remove"
  policy is specified), the mirror log will be maintained in memory.  Should
  the machine be rebooted or the logical volume deactivated, a complete
  resynchronization of the mirror will be necessary upon the following
  activation - such is the nature of a mirror with a 'core' log.  The default
  policy for handling log failures is "allocate".  The service disruption
  incurred by replacing the failed log is negligible, while the benefits of
  having a persistent log are pronounced.

- mirror_image_fault_policy:  This defines what action should be taken if a
  device containing an image fails.  Again, the available options are
  "remove" and "allocate".  Both of these options will cause the faulty image
  device to be removed - adjusting the logical volume accordingly.  For
  example, if one image of a 2-way mirror fails, the mirror will be converted
  to a linear device.  If one image of a 3-way mirror fails, the mirror will
  be converted to a 2-way mirror.  The "allocate" policy takes the further
  action of trying to replace the failed image using space that is available
  in the volume group.  Replacing a failed mirror image will incur the cost
  of resynchronizing - degrading the performance of the mirror.  The default
  policy for handling an image failure is "remove".  This allows the mirror
  to still function, but gives the administrator the choice of when to incur
  the extra performance costs of replacing the failed image.
RAID logical volume device failures are handled differently from the "mirror"
segment type.  Discussion of this can be found in lvm2-raid.txt.
LVM2.2.02.176/doc/lvmetad_design.txt0000644000000000000120000002435013176752421015623 0ustar rootwheelThe design of LVMetaD
=====================

Invocation and setup
--------------------

The daemon should be started automatically by the first LVM command issued on
the system, when needed.  The usage of the daemon should be configurable in
lvm.conf, probably with its own section.  Say

  lvmetad {
      enabled = 1 # default
      autostart = 1 # default
      socket = "/path/to/socket" # defaults to /var/run/lvmetad or such
  }

Library integration
-------------------

When a command needs to access metadata, it currently needs to perform a scan
of the physical devices available in the system.  This is a possibly quite
expensive operation, especially if many devices are attached to the system.
In most cases, LVM needs a complete image of the system's PVs to operate
correctly, so all devices need to be read, to at least determine presence
(and content) of a PV label.  Additional IO is done to obtain or write
metadata areas, but this is only marginally related and addressed by Dave's
metadata-balancing work.

In the existing scanning code, a cache layer exists, under
lib/cache/lvmcache.[hc].  This layer keeps a textual copy of the metadata for
a given volume group, in a format_text form, as a character string.  We can
plug the lvmetad interface in at this level: in lvmcache_get_vg, which is
responsible for looking up metadata in a local cache, we can, if the metadata
is not available in the local cache, query lvmetad.  Under normal
circumstances, when a VG is not cached yet, this operation fails and prompts
the caller to perform a scan.  Under the lvmetad-enabled scenario, this would
never happen and the fall-through would only be activated when lvmetad is
disabled, which would lead to the local cache being populated as usual
through a locally executed scan.  Therefore, existing stand-alone (i.e.
no lvmetad) functionality of the tools would be not compromised by adding lvmetad. With lvmetad enabled, however, significant portions of the code would be short-circuited. Scanning -------- Initially (at least), the lvmetad will be not allowed to read disks: it will rely on an external program to provide the metadata. In the ideal case, this will be triggered by udev. The role of lvmetad is then to collect and maintain an accurate (up to the data it has received) image of the VGs available in the system. I imagine we could extend the pvscan command (or add a new one, say lvmetad_client, if pvscan is found to be inappropriate): $ pvscan --cache /dev/foo $ pvscan --cache --remove /dev/foo These commands would simply read the label and the MDA (if applicable) from the given PV and feed that data to the running lvmetad, using lvmetad_{add,remove}_pv (see lvmetad_client.h). We however need to ensure a couple of things here: 1) only LVM commands ever touch PV labels and VG metadata 2) when a device is added or removed, udev fires a rule to notify lvmetad While the latter is straightforward, there are issues with the first. We *might* want to invoke the dreaded "watch" udev rule in this case, however it ends up being implemented. Of course, we can also rely on the sysadmin to be reasonable and not write over existing LVM metadata without first telling LVM to let go of the respective device(s). Even if we simply ignore the problem, metadata write should fail in these cases, so the admin should be unable to do substantial damage to the system. If there were active LVs on top of the vanished PV, they are in trouble no matter what happens there. Incremental scan ---------------- There are some new issues arising with the "udev" scan mode. Namely, the devices of a volume group will be appearing one by one. The behaviour in this case will be very similar to the current behaviour when devices are missing: the volume group, until *all* its physical volumes have been discovered and announced by udev, will be in a state with some of its devices flagged as MISSING_PV. This means that the volume group will be, for most purposes, read-only until it is complete and LVs residing on yet-unknown PVs won't activate without --partial. Under usual circumstances, this is not a problem and the current code for dealing with MISSING_PVs should be adequate. However, the code for reading volume groups from disks will need to be adapted, since it currently does not work incrementally. Such support will need to track metadata-less PVs that have been encountered so far and to provide a way to update an existing volume group. When the first PV with metadata of a given VG is encountered, the VG is created in lvmetad (probably in the form of "struct volume_group") and it is assigned any previously cached metadata-less PVs it is referencing. Any PVs that were not yet encountered will be marked as MISSING_PV in the "struct volume_group". Upon scanning a new PV, if it belongs to any already-known volume group, this PV is checked for consistency with the already cached metadata (in a case of mismatch, the VG needs to be recovered or declared conflicted), and is subsequently unmarked MISSING_PV. Care need be taken not to unmark MISSING_PV on PVs that have this flag in their persistent metadata, though. The most problematic aspect of the whole design may be orphan PVs. At any given point, a metadata-less PV may appear orphaned, if a PV of its VG with metadata has not been scanned yet. 
Eventually, we will have to decide that this PV is really an orphan and enable its usage for creating or extending VGs. In practice, the decision might be governed by a timeout or assumed immediately -- the former case is a little safer, the latter is probably more transparent. I am not very keen on using timeouts and we can probably assume that the admin won't blindly try to re-use devices in a way that would trip up LVM in this respect. I would be in favour of just assuming that metadata-less VGs with no known referencing VGs are orphans -- after all, this is the same approach as we use today. The metadata balancing support may stress this a bit more than the usual contemporary setups do, though. Automatic activation -------------------- It may also be prudent to provide a command that will block until a volume group is complete, so that scripts can reliably activate/mount LVs and such. Of course, some PVs may never appear, so a timeout is necessary. Again, this is something not handled by current tools, but may become more important in future. It probably does not need to be implemented right away though. The other aspect of the progressive VG assembly is automatic activation. The currently only problem with that is that we would like to avoid having activation code in lvmetad, so we would prefer to fire up an event of some sort and let someone else handle the activation and whatnot. Cluster support --------------- When working in a cluster, clvmd integration will be necessary: clvmd will need to instruct lvmetad to re-read metadata as appropriate due to writes on remote hosts. Overall, this is not hard, but the devil is in the details. I would possibly disable lvmetad for clustered volume groups in the first phase and only proceed when the local mode is robust and well tested. With lvmlockd, lvmetad state is kept up to date by flagging either an individual VG as "invalid", or the global state as "invalid". When either the VG or the global state are read, this invalid flag is returned along with the data. The client command can check for this invalid state and decide to read the information from disk rather than use the stale cached data. After the latest data is read from disk, the command may choose to send it to lvmetad to update the cache. lvmlockd uses version numbers embedded in its VG and global locks to detect when cached data becomes invalid, and it then tells lvmetad to set the related invalid flag. dct, 2015-06-23 Protocol & co. -------------- I expect a simple text-based protocol executed on top of an Unix Domain Socket to be the communication interface for lvmetad. Ideally, the requests and replies will be well-formed "config file" style strings, so we can re-use existing parsing infrastructure. Since we already have two daemons, I would probably look into factoring some common code for daemon-y things, like sockets, communication (including thread management) and maybe logging and re-using it in all the daemons (clvmd, dmeventd and lvmetad). This shared infrastructure should live under daemons/common, and the existing daemons shall be gradually migrated to the shared code. Future extensions ----------------- The above should basically cover the use of lvmetad as a cache-only daemon. Writes could still be executed locally, and the new metadata version can be provided to lvmetad through the socket the usual way. This is fairly natural and in my opinion reasonable. The lvmetad acts like a cache that will hold metadata, no more no less. 
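Before moving on to possible extensions, it may help to make the "config
file" style protocol mentioned under "Protocol & co." a little more concrete.
A request/response exchange could look roughly like the following sketch
(purely illustrative - the field names are invented here and do not define
the actual wire format):

  request = "vg_lookup"
  uuid = "<VG uuid>"

  response = "OK"
  vg {
      name = "vg0"
      # full format_text metadata would follow here
  }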
Above this, there is a couple of things that could be worked on later, when the above basic design is finished and implemented. _Metadata writing_: We may want to support writing new metadata through lvmetad. This may or may not be a better design, but the write itself should be more or less orthogonal to the rest of the story outlined above. _Locking_: Other than directing metadata writes through lvmetad, one could conceivably also track VG/LV locking through the same. _Clustering_: A deeper integration of lvmetad with clvmd might be possible and maybe desirable. Since clvmd communicates over the network with other clvmd instances, this could be extended to metadata exchange between lvmetad's, further cutting down scanning costs. This would combine well with the write-through-lvmetad approach. Testing ------- Since (at least bare-bones) lvmetad has no disk interaction and is fed metadata externally, it should be very amenable to automated testing. We need to provide a client that can feed arbitrary, synthetic metadata to the daemon and request the data back, providing reasonable (nearly unit-level) testing infrastructure. Battle plan & code layout ========================= - config_tree from lib/config needs to move to libdm/ - daemon/common *client* code can go to libdm/ as well (say libdm/libdm-daemon.{h,c} or such) - daemon/common *server* code stays, is built in daemon/ toplevel as a static library, say libdaemon-common.a - daemon/lvmetad *client* code goes to lib/lvmetad - daemon/lvmetad *server* code stays (links in daemon/libdaemon_common.a) LVM2.2.02.176/doc/udev_assembly.txt0000644000000000000120000000773613176752421015511 0ustar rootwheelAutomatic device assembly by udev ================================= We want to asynchronously assemble and activate devices as their components become available. Eventually, the complete storage stack should be covered, including: multipath, cryptsetup, LVM, mdadm. Each of these can be addressed more or less separately. The general plan of action is to simply provide udev rules for each of the device "type": for MD component devices, PVs, LUKS/crypto volumes and for multipathed SCSI devices. There's no compelling reason to have a daemon do these things: all systems that actually need to assemble multiple devices into a single entity already either support incremental assembly or will do so shortly. Whenever in this document we talk about udev rules, these may include helper programs that implement a multi-step process. In many cases, it can be expected that the functionality can be implemented in couple lines of shell (or couple hundred of C). Multipath --------- For multipath, we will need to rely on SCSI IDs for now, until we have a better scheme of things, since multipath devices can't be identified until the second path appears, and unfortunately we need to decide whether a device is multipath when the *first* path appears. Anyway, the multipath folks need to sort this out, but it shouldn't bee too hard. Just bring up multipathing on anything that appears and is set up for multipathing. LVM --- For LVM, the crucial piece of the puzzle is lvmetad, which allows us to build up VGs from PVs as they appear, and at the same time collect information on what is already available. A command, pvscan --cache is expected to be used to implement udev rules. It is relatively easy to make this command print out a list of VGs (and possibly LVs) that have been made available by adding any particular device to the set of visible devices. 
In other words, udev says "hey, /dev/sdb just appeared", calls pvscan
--cache, which talks to lvmetad, which says "cool, that makes vg0 complete".
Pvscan takes this info and prints it out, and the udev rule can then somehow
decide whether anything needs to be done about this "vg0".  Presumably a
table of devices that need to be activated automatically is made available
somewhere in /etc (probably just a simple list of volume groups or logical
volumes, given by name or UUID, globbing possible).  The udev rule can then
consult this file.
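As a rough sketch of what such a rule might look like (hypothetical rule and
file names; the matching keys shown rely on blkid having identified the
device as an LVM PV, and the real rules shipped with distributions are more
involved):

  # /etc/udev/rules.d/69-lvm-pvscan.rules (hypothetical)
  ACTION=="add|change", SUBSYSTEM=="block", ENV{ID_FS_TYPE}=="LVM2_member", \
      RUN+="/sbin/lvm pvscan --cache $env{DEVNAME}"

The rule's helper could then compare the VGs reported complete by pvscan
against the site-local list mentioned above (e.g. /etc/lvm/activate.list,
hypothetical) and run 'vgchange -ay <VG>' for any match.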
Cryptsetup
----------

This may be the trickiest of the lot: the obvious hurdle here is that crypto
volumes need to somehow obtain a key (passphrase, physical token or such),
meaning there is interactivity involved.  On the upside, dm-crypt is a 1:1
system: one encrypted device results in one decrypted device, so no assembly
or notification needs to be done.  While interactivity is a challenge, there
are at least partial solutions around.  (TODO: Milan should probably
elaborate here.)

(For LUKS devices, these can probably be detected automatically.  I suppose
that non-LUKS devices can be looked up in crypttab by the rule, to decide
what is the appropriate action to take.)

MD
--

Fortunately, MD (namely mdadm) already comes with a mechanism for incremental
assembly (mdadm -I or such).  We can assume that this fits with the rest of
the stack nicely.

Filesystem &c. discovery
========================

Considering other requirements that exist for storage systems (namely
large-scale storage deployments), it is absolutely not feasible to have the
system hunt automatically for filesystems based on their UUIDs.  In a number
of cases, this could mean activating tens of thousands of volumes.  On small
systems, asking for all volumes to be brought up automatically is probably
the best route anyway, and once all storage devices are activated, scanning
for filesystems is no different from today.

In effect, no action is required on this count: only filesystems that are
available on already active devices can be mounted by their UUID.  Activating
volumes by naming a filesystem UUID is useless, since to read the UUID the
volume needs to be active first.
LVM2.2.02.176/doc/example_cmdlib.c0000644000000000000120000000206313176752421015203 0ustar rootwheel/*
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "lvm2cmd.h"

#include <stdio.h>

/* All output gets passed to this function line-by-line */
void test_log_fn(int level, const char *file, int line, int dm_errno,
		 const char *format)
{
	/* Extract and process output here rather than printing it */

	if (level != 4)
		return;

	printf("%s\n", format);
	return;
}

int main(int argc, char **argv)
{
	void *handle;
	int r;

	lvm2_log_fn(test_log_fn);

	handle = lvm2_init();

	lvm2_log_level(handle, 1);
	r = lvm2_run(handle, "vgs --noheadings vg1");

	/* More commands here */

	lvm2_exit(handle);

	return r;
}
LVM2.2.02.176/doc/testing.txt0000644000000000000120000000226413176752421014313 0ustar rootwheelHere's how I test new LVM2 builds without interfering with the stable LVM2
that is running the LVs on my development box.

1) Create a set of loopback devices.

2) Create a new directory to contain the LVM2 configuration files for this
   setup.  (I use /etc/lvm_loops)

3) Write a suitable lvm.conf file; this goes in the directory you just
   created.  eg, my /etc/lvm_loops/lvm.conf looks like:

     log {
         file="/tmp/lvm2_loop.log"
         level=9
         verbose=0
         overwrite=1
     }

     devices {
         scan = "/dev"
         filter = ["a/loop/", "r/.*/"]
     }

   The important thing to note is the devices section, which makes sure that
   only the loopback devices are considered for LVM2 operations.

4) When you want to use this test setup, just set the environment variable
   LVM_SYSTEM_DIR to point to your config directory (/etc/lvm_loops in my
   case).

5) It's a good idea to do a vgscan to initialise the filters:

     export LVM_SYSTEM_DIR=/etc/lvm_loops
     ./lvm vgscan

   where ./lvm is the new build of LVM2 that I'm trying out.

6) Test away.  Make sure that you are explicit about which lvm executable you
   want to execute (eg, ./lvm if you are in LVM2/tools).
LVM2.2.02.176/doc/lvmpolld_overview.txt0000644000000000000120000000633513176752421016420 0ustar rootwheelLVM poll daemon overview
========================

(last updated: 2015-05-09)

The LVM poll daemon (lvmpolld) is an alternative to the classical lvm2
polling mechanisms.  The motivation behind lvmpolld was to create a
persistent system service that would be more durable and transparent.  It is
particularly suited to systemd-enabled distributions.

Before lvmpolld, any background polling process originating in an lvm2
command initiated inside the cgroup of a systemd service could get killed if
the main process (service) in that cgroup exited.  That could lead to
premature termination of the lvm2 polling process.

Also, without lvmpolld there was no way to detect that a polling process
monitoring a specific operation was already in progress, making it
undesirable to start another one with exactly the same task.  lvmpolld is
able to detect such duplicate requests and does not spawn redundant
processes.

lvmpolld is primarily targeted at systems with systemd as the init process.
For systems without systemd there is no need to install lvmpolld, because the
problem described in the second paragraph does not arise.  You can still
benefit from avoiding duplicate polling processes being spawned, but without
systemd lvmpolld can't easily be run on-demand (activated by a socket
maintained by systemd).

lvmpolld implements shutdown-on-idle and can shut down automatically when
idle for a requested time; 60 seconds is the recommended default.  This
behaviour can be turned off if found useless.

Data structures
---------------

a) Logical Volume (struct lvmpolld_lv)

Each operation is identified by an LV.  The internal identifier within
lvmpolld is the full LV uuid (vg_uuid+lv_uuid) prefixed with LVM_SYSTEM_DIR
if set by the client.  Such a full identifier may look like:

"/etc/lvm/lvm.confWFd2dU67S8Av29IcJCnYzqQirdfElnxzhCdzEh7EJrfCn9R1TIQjIj58weUZDre4"

or, without LVM_SYSTEM_DIR being set explicitly:

"WFd2dU67S8Av29IcJCnYzqQirdfElnxzhCdzEh7EJrfCn9R1TIQjIj58weUZDre4"

The LV carries various metadata about the polling operation.  The most
significant are:

  VG name
  LV name
  polling interval (usually --interval passed to the lvm2 command, or the
    default from the lvm2 configuration)
  operation type (one of: pvmove, convert, merge, thin_merge)
  LVM_SYSTEM_DIR (if set, this is also passed among the environment variables
    of the lvpoll command spawned by lvmpolld)

b) LV stores (struct lvmpolld_store)

lvmpolld uses two stores for Logical Volumes (struct lvmpolld_lv).  One store
is for polling operations in-progress.
These operations are as of now: PV move, mirror up-conversion, classical snapshot merge, thin snapshot merge. The second store is suited only for pvmove --abort operations in-progress. Both stores are independent and identical LVs (pvmove /dev/sda3 and pvmove --abort /dev/sda3) can be run concurently from lvmpolld point of view (on lvm2 side the consistency is guaranteed by lvm2 locking mechanism). Locking order ------------- There are two types of locks in lvmpolld. Each store has own store lock and each LV has own lv lock. Locking order is: 1) store lock 2) LV lock Each LV has to be inside a store. When daemon requires to take both locks it has to take a store lock first and LV lock has to be taken afterwards (after the appropriate store lock where the LV is being stored :)) LVM2.2.02.176/acinclude.m40000644000000000000120000001703313176752421013521 0ustar rootwheeldnl AC_GCC_VERSION dnl check for compiler version dnl sets COMPILER_VERSION and GCC_VERSION AC_DEFUN([AC_CC_VERSION], [ AC_MSG_CHECKING([C compiler version]) COMPILER_VERSION=`$CC -v 2>&1 | grep version` case "$COMPILER_VERSION" in *gcc*) dnl Ok, how to turn $3 into the real $3 GCC_VERSION=`echo $COMPILER_VERSION | \ sed -e 's/[[^ ]]*\ [[^ ]]*\ \([[^ ]]*\)\ .*/\1/'` ;; *) GCC_VERSION=unknown ;; esac AC_MSG_RESULT($GCC_VERSION) ]) dnl AC_TRY_CCFLAG([CCFLAG], [VAR], [ACTION-IF-WORKS], [ACTION-IF-FAILS]) dnl check if $CC supports a given flag AC_DEFUN([AC_TRY_CCFLAG], [ AC_REQUIRE([AC_PROG_CC]) ac_save_CFLAGS=$CFLAGS CFLAGS=$1 AC_CACHE_CHECK([whether $CC accepts $1 flag], [ac_cv_flag_$2], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM()], [AS_VAR_SET([ac_cv_flag_$2], [yes])], [AS_VAR_SET([ac_cv_flag_$2], [no])])]) CFLAGS=$ac_save_CFLAGS $2=AS_VAR_GET([ac_cv_flag_$2]) if test "$2" = yes; then ifelse([$3], [], [:], [$3]) else ifelse([$4], [], [:], [$4]) fi ]) dnl AC_IF_YES([TEST-FOR-YES], [ACTION-IF-TRUE], [ACTION-IF-FALSE]) dnl AS_IF() abstraction, checks shell variable for 'yes' AC_DEFUN([AC_IF_YES], [AS_IF([test $$1 = yes], [$2], [$3])]) dnl AC_TRY_LDFLAGS([LDFLAGS], [VAR], [ACTION-IF-WORKS], [ACTION-IF-FAILS]) dnl check if $CC supports given ld flags AC_DEFUN([AC_TRY_LDFLAGS], [ AC_REQUIRE([AC_PROG_CC]) ac_save_LDFLAGS=$LDFLAGS LDFLAGS=$1 AC_CACHE_CHECK([whether $CC accepts $1 ld flags], [ac_cv_flag_$2], [AC_LINK_IFELSE([AC_LANG_PROGRAM()], [AS_VAR_SET([ac_cv_flag_$2], [yes])], [AS_VAR_SET([ac_cv_flag_$2], [no])])]) LDFLAGS=$ac_save_LDFLAGS $2=AS_VAR_GET([ac_cv_flag_$2]) if test "$2" = yes; then ifelse([$3], [], [:], [$3]) else ifelse([$4], [], [:], [$4]) fi ]) # =========================================================================== # http://www.gnu.org/software/autoconf-archive/ax_gcc_builtin.html # =========================================================================== # # SYNOPSIS # # AX_GCC_BUILTIN(BUILTIN) # # DESCRIPTION # # This macro checks if the compiler supports one of GCC's built-in # functions; many other compilers also provide those same built-ins. # # The BUILTIN parameter is the name of the built-in function. # # If BUILTIN is supported define HAVE_. Keep in mind that since # builtins usually start with two underscores they will be copied over # into the HAVE_ definition (e.g. HAVE___BUILTIN_EXPECT for # __builtin_expect()). # # The macro caches its result in the ax_cv_have_ variable (e.g. # ax_cv_have___builtin_expect). 
# # The macro currently supports the following built-in functions: # # __builtin_assume_aligned # __builtin_bswap16 # __builtin_bswap32 # __builtin_bswap64 # __builtin_choose_expr # __builtin___clear_cache # __builtin_clrsb # __builtin_clrsbl # __builtin_clrsbll # __builtin_clz # __builtin_clzl # __builtin_clzll # __builtin_complex # __builtin_constant_p # __builtin_ctz # __builtin_ctzl # __builtin_ctzll # __builtin_expect # __builtin_ffs # __builtin_ffsl # __builtin_ffsll # __builtin_fpclassify # __builtin_huge_val # __builtin_huge_valf # __builtin_huge_vall # __builtin_inf # __builtin_infd128 # __builtin_infd32 # __builtin_infd64 # __builtin_inff # __builtin_infl # __builtin_isinf_sign # __builtin_nan # __builtin_nand128 # __builtin_nand32 # __builtin_nand64 # __builtin_nanf # __builtin_nanl # __builtin_nans # __builtin_nansf # __builtin_nansl # __builtin_object_size # __builtin_parity # __builtin_parityl # __builtin_parityll # __builtin_popcount # __builtin_popcountl # __builtin_popcountll # __builtin_powi # __builtin_powif # __builtin_powil # __builtin_prefetch # __builtin_trap # __builtin_types_compatible_p # __builtin_unreachable # # Unsuppored built-ins will be tested with an empty parameter set and the # result of the check might be wrong or meaningless so use with care. # # LICENSE # # Copyright (c) 2013 Gabriele Svelto # # Copying and distribution of this file, with or without modification, are # permitted in any medium without royalty provided the copyright notice # and this notice are preserved. This file is offered as-is, without any # warranty. #serial 3 AC_DEFUN([AX_GCC_BUILTIN], [ AS_VAR_PUSHDEF([ac_var], [ax_cv_have_$1]) AC_CACHE_CHECK([for $1], [ac_var], [ AC_LINK_IFELSE([AC_LANG_PROGRAM([], [ m4_case([$1], [__builtin_assume_aligned], [$1("", 0)], [__builtin_bswap16], [$1(0)], [__builtin_bswap32], [$1(0)], [__builtin_bswap64], [$1(0)], [__builtin_choose_expr], [$1(0, 0, 0)], [__builtin___clear_cache], [$1("", "")], [__builtin_clrsb], [$1(0)], [__builtin_clrsbl], [$1(0)], [__builtin_clrsbll], [$1(0)], [__builtin_clz], [$1(0)], [__builtin_clzl], [$1(0)], [__builtin_clzll], [$1(0)], [__builtin_complex], [$1(0.0, 0.0)], [__builtin_constant_p], [$1(0)], [__builtin_ctz], [$1(0)], [__builtin_ctzl], [$1(0)], [__builtin_ctzll], [$1(0)], [__builtin_expect], [$1(0, 0)], [__builtin_ffs], [$1(0)], [__builtin_ffsl], [$1(0)], [__builtin_ffsll], [$1(0)], [__builtin_fpclassify], [$1(0, 1, 2, 3, 4, 0.0)], [__builtin_huge_val], [$1()], [__builtin_huge_valf], [$1()], [__builtin_huge_vall], [$1()], [__builtin_inf], [$1()], [__builtin_infd128], [$1()], [__builtin_infd32], [$1()], [__builtin_infd64], [$1()], [__builtin_inff], [$1()], [__builtin_infl], [$1()], [__builtin_isinf_sign], [$1(0.0)], [__builtin_nan], [$1("")], [__builtin_nand128], [$1("")], [__builtin_nand32], [$1("")], [__builtin_nand64], [$1("")], [__builtin_nanf], [$1("")], [__builtin_nanl], [$1("")], [__builtin_nans], [$1("")], [__builtin_nansf], [$1("")], [__builtin_nansl], [$1("")], [__builtin_object_size], [$1("", 0)], [__builtin_parity], [$1(0)], [__builtin_parityl], [$1(0)], [__builtin_parityll], [$1(0)], [__builtin_popcount], [$1(0)], [__builtin_popcountl], [$1(0)], [__builtin_popcountll], [$1(0)], [__builtin_powi], [$1(0, 0)], [__builtin_powif], [$1(0, 0)], [__builtin_powil], [$1(0, 0)], [__builtin_prefetch], [$1("")], [__builtin_trap], [$1()], [__builtin_types_compatible_p], [$1(int, int)], [__builtin_unreachable], [$1()], [m4_warn([syntax], [Unsupported built-in $1, the test may fail]) $1()] ) ])], [AS_VAR_SET([ac_var], 
[yes])], [AS_VAR_SET([ac_var], [no])]) ]) AS_IF([test yes = AS_VAR_GET([ac_var])], [AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_$1), 1, [Define to 1 if the system has the `$1' built-in function])], []) AS_VAR_POPDEF([ac_var]) ]) LVM2.2.02.176/make.tmpl.in0000644000000000000120000004024613176752421013552 0ustar rootwheel# @configure_input@ # # Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved. # Copyright (C) 2004-2014 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA SHELL = @SHELL@ @SET_MAKE@ # Allow environment to override any built-in default value for CC. # If there is a built-in default, CC is NOT set to @CC@ here. CC ?= @CC@ # If $(CC) holds the usual built-in default value of 'cc' then replace it with # the configured value. # (To avoid this and force the use of 'cc' from the environment, supply its # full path.) ifeq ($(CC), cc) CC = @CC@ endif RANLIB = @RANLIB@ INSTALL = @INSTALL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ LCOV = @LCOV@ GENHTML = @GENHTML@ LN_S = @LN_S@ SED = @SED@ CFLOW_CMD = @CFLOW_CMD@ AWK = @AWK@ CHMOD = @CHMOD@ EGREP = @EGREP@ GREP = @GREP@ SORT = @SORT@ WC = @WC@ AR = @AR@ RM = rm -f PYTHON2 = @PYTHON2@ PYTHON3 = @PYTHON3@ PYCOMPILE = $(top_srcdir)/autoconf/py-compile LIBS = @LIBS@ # Extra libraries always linked with static binaries STATIC_LIBS = $(SELINUX_LIBS) $(UDEV_LIBS) $(BLKID_LIBS) DEFS += @DEFS@ # FIXME set this only where it's needed, not globally? 
CFLAGS ?= @COPTIMISE_FLAG@ @CFLAGS@ LDFLAGS ?= @LDFLAGS@ CLDFLAGS += @CLDFLAGS@ ELDFLAGS += @ELDFLAGS@ LDDEPS += @LDDEPS@ LIB_SUFFIX = @LIB_SUFFIX@ LVMINTERNAL_LIBS = -llvm-internal $(DMEVENT_LIBS) $(DAEMON_LIBS) $(SYSTEMD_LIBS) $(UDEV_LIBS) $(DL_LIBS) $(BLKID_LIBS) DL_LIBS = @DL_LIBS@ RT_LIBS = @RT_LIBS@ M_LIBS = @M_LIBS@ PTHREAD_LIBS = @PTHREAD_LIBS@ READLINE_LIBS = @READLINE_LIBS@ SELINUX_LIBS = @SELINUX_LIBS@ UDEV_CFLAGS = @UDEV_CFLAGS@ UDEV_LIBS = @UDEV_LIBS@ BLKID_CFLAGS = @BLKID_CFLAGS@ BLKID_LIBS = @BLKID_LIBS@ SYSTEMD_LIBS = @SYSTEMD_LIBS@ VALGRIND_CFLAGS = @VALGRIND_CFLAGS@ TESTING = @TESTING@ # Setup directory variables prefix = @prefix@ exec_prefix = @exec_prefix@ udev_prefix = @udev_prefix@ sysconfdir = @sysconfdir@ rootdir = $(DESTDIR)/ bindir = $(DESTDIR)@bindir@ confdir = $(DESTDIR)@CONFDIR@/lvm profiledir = $(confdir)/@DEFAULT_PROFILE_SUBDIR@ includedir = $(DESTDIR)@includedir@ libdir = $(DESTDIR)@libdir@ libexecdir = $(DESTDIR)@libexecdir@ usrlibdir = $(DESTDIR)@usrlibdir@ sbindir = $(DESTDIR)@sbindir@ usrsbindir = $(DESTDIR)@usrsbindir@ datarootdir = @datarootdir@ datadir = $(DESTDIR)@datadir@ infodir = $(DESTDIR)@infodir@ mandir = $(DESTDIR)@mandir@ localedir = $(DESTDIR)@localedir@ staticdir = $(DESTDIR)@STATICDIR@ udevdir = $(DESTDIR)@udevdir@ pkgconfigdir = $(usrlibdir)/pkgconfig initdir = $(DESTDIR)$(sysconfdir)/rc.d/init.d dbusconfdir = $(DESTDIR)$(sysconfdir)/dbus-1/system.d dbusservicedir = $(datadir)/dbus-1/system-services systemd_unit_dir = $(DESTDIR)@systemdsystemunitdir@ systemd_generator_dir = $(DESTDIR)$(SYSTEMD_GENERATOR_DIR) systemd_dir = $(DESTDIR)@systemdutildir@ tmpfiles_dir = $(DESTDIR)@tmpfilesdir@ ocf_scriptdir = $(DESTDIR)@OCFDIR@ pythonprefix = $(DESTDIR)$(prefix) # N.B. No $(DESTDIR) prefix here. python2dir = @PYTHON2DIR@ python3dir = @PYTHON3DIR@ USRLIB_RELPATH = $(shell echo $(abspath $(usrlibdir) $(libdir)) | \ $(AWK) -f $(top_srcdir)/scripts/relpath.awk) SYSTEMD_GENERATOR_DIR = @systemdutildir@/system-generators DEFAULT_SYS_DIR = @DEFAULT_SYS_DIR@ DEFAULT_ARCHIVE_DIR = $(DEFAULT_SYS_DIR)/@DEFAULT_ARCHIVE_SUBDIR@ DEFAULT_BACKUP_DIR = $(DEFAULT_SYS_DIR)/@DEFAULT_BACKUP_SUBDIR@ DEFAULT_CACHE_DIR = $(DEFAULT_SYS_DIR)/@DEFAULT_CACHE_SUBDIR@ DEFAULT_PROFILE_DIR = $(DEFAULT_SYS_DIR)/@DEFAULT_PROFILE_SUBDIR@ DEFAULT_LOCK_DIR = @DEFAULT_LOCK_DIR@ DEFAULT_RUN_DIR = @DEFAULT_RUN_DIR@ DEFAULT_PID_DIR = @DEFAULT_PID_DIR@ DEFAULT_MANGLING = @MANGLING@ # Setup vpath search paths for some suffixes vpath %.c $(srcdir) vpath %.cpp $(srcdir) vpath %.in $(srcdir) vpath %.po $(srcdir) vpath %.exported_symbols $(srcdir) interface = @interface@ interfacebuilddir = $(top_builddir)/libdm/$(interface) rpmbuilddir = $(abs_top_builddir)/build # The number of jobs to run, if blank, defaults to the make standard ifndef MAKEFLAGS MAKEFLAGS = @JOBS@ endif # Handle installation of files ifeq ("@WRITE_INSTALL@", "yes") # leaving defaults M_INSTALL_SCRIPT = M_INSTALL_DATA = -m 644 else M_INSTALL_PROGRAM = -m 555 M_INSTALL_DATA = -m 444 endif INSTALL_PROGRAM = $(INSTALL) $(M_INSTALL_PROGRAM) $(STRIP) INSTALL_DATA = $(INSTALL) -p $(M_INSTALL_DATA) INSTALL_WDATA = $(INSTALL) -p -m 644 INSTALL_DIR = $(INSTALL) -m 755 -d INSTALL_ROOT_DIR = $(INSTALL) -m 700 -d INSTALL_ROOT_DATA = $(INSTALL) -m 600 INSTALL_SCRIPT = $(INSTALL) -p $(M_INSTALL_PROGRAM) .SUFFIXES: .SUFFIXES: .c .cpp .d .o .so .a .po .pot .mo .dylib ifeq ("$(notdir $(CC))", "gcc") WFLAGS +=\ -Wall\ -Wcast-align\ -Wfloat-equal\ -Wformat-security\ -Winline\ -Wmissing-format-attribute\ -Wmissing-include-dirs\ 
-Wmissing-noreturn\ -Wpointer-arith\ -Wredundant-decls\ -Wshadow\ -Wundef\ -Wwrite-strings WCFLAGS +=\ -Wmissing-declarations\ -Wmissing-prototypes\ -Wnested-externs\ -Wold-style-definition\ -Wstrict-prototypes\ -Wuninitialized ifeq ("@HAVE_WJUMP@", "yes") WCFLAGS += -Wjump-misses-init endif ifeq ("@HAVE_WCLOBBERED@", "yes") WFLAGS +=\ -Wclobbered\ -Wempty-body\ -Wignored-qualifiers\ -Wlogical-op\ -Wtype-limits WCFLAGS +=\ -Wmissing-parameter-type\ -Wold-style-declaration\ -Woverride-init endif ifeq ("@HAVE_WSYNCNAND@", "yes") WFLAGS += -Wsync-nand endif endif ifneq ("@STATIC_LINK@", "yes") ifeq ("@HAVE_PIE@", "yes") ifeq ("@HAVE_FULL_RELRO@", "yes") EXTRA_EXEC_CFLAGS += -fPIE EXTRA_EXEC_LDFLAGS += -Wl,-z,relro,-z,now -pie -fPIE CLDFLAGS += -Wl,-z,relro endif endif endif #WFLAGS += -W -Wno-sign-compare -Wno-unused-parameter -Wno-missing-field-initializers #WFLAGS += -Wsign-compare -Wunused-parameter -Wmissing-field-initializers #WFLAGS += -Wconversion -Wbad-function-cast -Wcast-qual -Waggregate-return -Wpacked #WFLAGS += -pedantic -std=gnu99 #DEFS += -DDEBUG_CRC32 # # Avoid recursive extension of CFLAGS # by checking whether CFLAGS already has fPIC string # ifeq (,$(findstring fPIC,$(CFLAGS))) CFLAGS += -fPIC ifeq ("@DEBUG@", "yes") ifeq (,$(findstring -g,$(CFLAGS))) CFLAGS += -g endif CFLAGS += -fno-omit-frame-pointer DEFS += -DDEBUG # memory debugging is not thread-safe yet ifneq ("@BUILD_DMEVENTD@", "yes") ifneq ("@BUILD_DMFILEMAPD@", "yes") ifneq ("@BUILD_LVMLOCKD@", "yes") ifneq ("@BUILD_LVMPOLLD@", "yes") ifneq ("@BUILD_LVMETAD@", "yes") ifeq ("@CLVMD@", "none") DEFS += -DDEBUG_MEM endif endif endif endif endif endif endif # end of fPIC protection endif LDFLAGS += -L$(top_builddir)/libdm -L$(top_builddir)/lib CLDFLAGS += -L$(top_builddir)/libdm -L$(top_builddir)/lib DAEMON_LIBS = -ldaemonclient LDFLAGS += -L$(top_builddir)/libdaemon/client CLDFLAGS += -L$(top_builddir)/libdaemon/client ifeq ("@BUILD_DMEVENTD@", "yes") DMEVENT_LIBS = -ldevmapper-event LDFLAGS += -L$(top_builddir)/daemons/dmeventd CLDFLAGS += -L$(top_builddir)/daemons/dmeventd endif # Combination of DEBUG_POOL and DEBUG_ENFORCE_POOL_LOCKING is not suppored. #DEFS += -DDEBUG_POOL # Default pool locking is using the crc checksum. With mprotect memory # enforcing compilation faulty memory write could be easily found. #DEFS += -DDEBUG_ENFORCE_POOL_LOCKING #DEFS += -DBOUNDS_CHECK # LVM is not supposed to use mmap while devices are suspended. # This code causes a core dump if gets called. #DEFS += -DDEBUG_MEMLOCK #CFLAGS += -pg #LDFLAGS += -pg STRIP= #STRIP = -s LVM_VERSION := $(shell cat $(top_srcdir)/VERSION) LIB_VERSION_LVM := $(shell $(AWK) -F '.' '{printf "%s.%s",$$1,$$2}' $(top_srcdir)/VERSION) LIB_VERSION_DM := $(shell $(AWK) -F '.' '{printf "%s.%s",$$1,$$2}' $(top_srcdir)/VERSION_DM) LIB_VERSION_APP := $(shell $(AWK) -F '[(). 
]' '{printf "%s.%s",$$1,$$4}' $(top_srcdir)/VERSION) INCLUDES += -I$(srcdir) -I$(top_builddir)/include INC_LNS = $(top_builddir)/include/.symlinks_created DEPS = $(top_builddir)/make.tmpl $(top_srcdir)/VERSION \ $(top_builddir)/Makefile $(INC_LNS) OBJECTS = $(SOURCES:%.c=%.o) $(CXXSOURCES:%.cpp=%.o) POTFILES = $(SOURCES:%.c=%.pot) .PHONY: all pofile distclean clean cleandir cflow device-mapper .PHONY: install install_cluster install_device-mapper install_lvm2 .PHONY: install_dbus_service .PHONY: install_lib_shared install_dm_plugin install_lvm2_plugin .PHONY: install_ocf install_systemd_generators install_all_man all_man man help .PHONY: python_bindings install_python_bindings .PHONY: $(SUBDIRS) $(SUBDIRS.install) $(SUBDIRS.clean) $(SUBDIRS.distclean) .PHONY: $(SUBDIRS.pofile) $(SUBDIRS.install_cluster) $(SUBDIRS.cflow) .PHONY: $(SUBDIRS.device-mapper) $(SUBDIRS.install-device-mapper) .PHONY: $(SUBDIRS.generate) generate SUBDIRS.device-mapper := $(SUBDIRS:=.device-mapper) SUBDIRS.install := $(SUBDIRS:=.install) SUBDIRS.install_cluster := $(SUBDIRS:=.install_cluster) SUBDIRS.install_device-mapper := $(SUBDIRS:=.install_device-mapper) SUBDIRS.install_lvm2 := $(SUBDIRS:=.install_lvm2) SUBDIRS.install_ocf := $(SUBDIRS:=.install_ocf) SUBDIRS.pofile := $(SUBDIRS:=.pofile) SUBDIRS.cflow := $(SUBDIRS:=.cflow) SUBDIRS.clean := $(SUBDIRS:=.clean) SUBDIRS.distclean := $(SUBDIRS:=.distclean) TARGETS += $(LIB_SHARED) $(LIB_STATIC) all: $(SUBDIRS) $(TARGETS) install: all $(SUBDIRS.install) install_cluster: all $(SUBDIRS.install_cluster) install_device-mapper: $(SUBDIRS.install_device-mapper) install_lvm2: $(SUBDIRS.install_lvm2) install_ocf: $(SUBDIRS.install_ocf) cflow: $(SUBDIRS.cflow) $(SUBDIRS): $(SUBDIRS.device-mapper) $(MAKE) -C $@ $(SUBDIRS.device-mapper): $(MAKE) -C $(@:.device-mapper=) device-mapper $(SUBDIRS.install): $(SUBDIRS) $(MAKE) -C $(@:.install=) install $(SUBDIRS.install_cluster): $(SUBDIRS) $(MAKE) -C $(@:.install_cluster=) install_cluster $(SUBDIRS.install_device-mapper): device-mapper $(MAKE) -C $(@:.install_device-mapper=) install_device-mapper $(SUBDIRS.install_lvm2): $(SUBDIRS) $(MAKE) -C $(@:.install_lvm2=) install_lvm2 $(SUBDIRS.install_ocf): $(MAKE) -C $(@:.install_ocf=) install_ocf $(SUBDIRS.clean): -$(MAKE) -C $(@:.clean=) clean $(SUBDIRS.distclean): -$(MAKE) -C $(@:.distclean=) distclean $(SUBDIRS.cflow): $(MAKE) -C $(@:.cflow=) cflow ifeq ("@INTL@", "yes") pofile: $(SUBDIRS.pofile) $(POTFILES) $(SUBDIRS.pofile): $(MAKE) -C $(@:.pofile=) pofile endif $(SUBDIRS.generate): $(MAKE) -C $(@:.generate=) generate ifneq ("$(CFLOW_LIST_TARGET)", "") CLEAN_CFLOW += $(CFLOW_LIST_TARGET) $(CFLOW_LIST_TARGET): $(CFLOW_LIST) echo "CFLOW_SOURCES += $(addprefix \ \$$(top_srcdir)$(subst $(top_srcdir),,$(srcdir))/, $(CFLOW_LIST))" > $@ cflow: $(CFLOW_LIST_TARGET) endif ifneq ("$(CFLOW_TARGET)", "") CLEAN_CFLOW += \ $(CFLOW_TARGET).cflow \ $(CFLOW_TARGET).xref \ $(CFLOW_TARGET).tree \ $(CFLOW_TARGET).rtree \ $(CFLOW_TARGET).rxref ifneq ("$(CFLOW_CMD)", "") CFLOW_FLAGS +=\ --cpp="$(CC) -E" \ --symbol _ISbit:wrapper \ --symbol __attribute__:wrapper \ --symbol __const__:wrapper \ --symbol __const:type \ --symbol __restrict:type \ --symbol __extension__:wrapper \ --symbol __nonnull:wrapper \ --symbol __nothrow__:wrapper \ --symbol __pure__:wrapper \ --symbol __REDIRECT:wrapper \ --symbol __REDIRECT_NTH:wrapper \ --symbol __wur:wrapper \ -I$(top_srcdir)/libdm \ -I$(top_srcdir)/libdm/ioctl \ -I$(top_srcdir)/daemons/dmeventd/plugins/lvm2/ \ $(INCLUDES) $(DEFS) $(CFLOW_TARGET).cflow: 
$(CFLOW_SOURCES) $(CFLOW_CMD) -o$@ $(CFLOW_FLAGS) $(CFLOW_SOURCES) $(CFLOW_TARGET).rxref: $(CFLOW_SOURCES) $(CFLOW_CMD) -o$@ $(CFLOW_FLAGS) -r --omit-arguments $(CFLOW_SOURCES) $(CFLOW_TARGET).tree: $(CFLOW_SOURCES) $(CFLOW_CMD) -o$@ $(CFLOW_FLAGS) --omit-arguments -T -b $(CFLOW_SOURCES) $(CFLOW_TARGET).xref: $(CFLOW_SOURCES) $(CFLOW_CMD) -o$@ $(CFLOW_FLAGS) --omit-arguments -x $(CFLOW_SOURCES) #$(CFLOW_TARGET).rtree: $(CFLOW_SOURCES) # $(CFLOW_CMD) -o$@ $(CFLOW_FLAGS) -r --omit-arguments -T -b $(CFLOW_SOURCES) cflow: $(CFLOW_TARGET).cflow $(CFLOW_TARGET).tree $(CFLOW_TARGET).rxref $(CFLOW_TARGET).xref #$(CFLOW_TARGET).rtree endif endif .LIBPATTERNS = lib%.so lib%.a %.o: %.c $(CC) -c $(INCLUDES) $(VALGRIND_CFLAGS) $(PROGS_CFLAGS) $(DEFS) $(DEFS_$@) $(WFLAGS) $(WCFLAGS) $(CFLAGS) $(CFLAGS_$@) $< -o $@ %.o: %.cpp $(CXX) -c $(INCLUDES) $(VALGRIND_CFLAGS) $(DEFS) $(DEFS_$@) $(WFLAGS) $(CXXFLAGS) $(CXXFLAGS_$@) $< -o $@ %.pot: %.c Makefile $(CC) -E $(INCLUDES) $(VALGRIND_CFLAGS) $(PROGS_CFLAGS) -include $(top_builddir)/include/pogen.h $(DEFS) $(WFLAGS) $(CFLAGS) $< >$@ %.so: %.o $(CC) -c $(CFLAGS) $(CLDFLAGS) $< $(LIBS) -o $@ ifneq (,$(LIB_SHARED)) TARGETS += $(LIB_SHARED).$(LIB_VERSION) $(LIB_SHARED).$(LIB_VERSION): $(OBJECTS) $(LDDEPS) ifeq ("@LIB_SUFFIX@","so") $(CC) -shared -Wl,-soname,$(notdir $@) \ $(CFLAGS) $(CLDFLAGS) $(OBJECTS) $(LIBS) -o $@ endif ifeq ("@LIB_SUFFIX@","dylib") $(CC) -dynamiclib -dylib_current_version,$(LIB_VERSION) \ $(CFLAGS) $(CLDFLAGS) $(OBJECTS) $(LIBS) -o $@ endif $(LIB_SHARED): $(LIB_SHARED).$(LIB_VERSION) $(LN_S) -f $(> $@; \ [ -s $@ ] || $(RM) $@ %.mo: %.po $(MSGFMT) -o $@ $< CLEAN_TARGETS += \ $(SOURCES:%.c=%.d) $(SOURCES:%.c=%.gcno) $(SOURCES:%.c=%.gcda) \ $(SOURCES2:%.c=%.o) $(SOURCES2:%.c=%.d) $(SOURCES2:%.c=%.gcno) $(SOURCES2:%.c=%.gcda) \ $(POTFILES) $(CLEAN_CFLOW) cleandir: ifneq (,$(firstword $(CLEAN_DIRS))) $(RM) -r $(CLEAN_DIRS) endif $(RM) $(OBJECTS) $(TARGETS) $(CLEAN_TARGETS) core clean: $(SUBDIRS.clean) cleandir distclean: cleandir $(SUBDIRS.distclean) ifneq (,$(firstword $(DISTCLEAN_DIRS))) $(RM) -r $(DISTCLEAN_DIRS) endif $(RM) $(DISTCLEAN_TARGETS) Makefile .exported_symbols_generated: $(EXPORTED_HEADER) .exported_symbols $(DEPS) set -e; \ ( cat $(srcdir)/.exported_symbols; \ if test -n "$(EXPORTED_HEADER)"; then \ $(CC) -E -P $(INCLUDES) $(DEFS) $(EXPORTED_HEADER) | \ $(SED) -ne "/^typedef|}/!s/.*[ *]\($(EXPORTED_FN_PREFIX)_[a-z0-9_]*\)(.*/\1/p"; \ fi \ ) > $@ EXPORTED_UC := $(shell echo $(EXPORTED_FN_PREFIX) | tr '[a-z]' '[A-Z]') EXPORTED_SYMBOLS := $(wildcard $(srcdir)/.exported_symbols.Base $(srcdir)/.exported_symbols.$(EXPORTED_UC)_[0-9_]*[0-9]) .export.sym: .exported_symbols_generated $(EXPORTED_SYMBOLS) ifeq (,$(firstword $(EXPORTED_SYMBOLS))) set -e; (echo "Base {"; echo " global:";\ $(SED) "s/^/ /;s/$$/;/" $<;\ echo "};";\ echo "Local {"; echo " local:"; echo " *;"; echo "};";\ ) > $@ else set -e;\ R=$$($(SORT) $^ | uniq -u);\ test -z "$$R" || { echo "Mismatch between symbols in shared library and lists in .exported_symbols.* files: $$R"; false; } ;\ ( for i in $$(echo $(EXPORTED_SYMBOLS) | tr ' ' '\n' | $(SORT) -rnt_ -k5 ); do\ echo "$${i##*.} {"; echo " global:";\ $(SED) "s/^/ /;s/$$/;/" $$i;\ echo "};";\ done;\ echo "Local {"; echo " local:"; echo " *;"; echo "};";\ ) > $@ endif ifeq ("@USE_TRACKING@","yes") ifeq (,$(findstring $(MAKECMDGOALS),cscope.out cflow clean distclean lcov \ help check check_local check_cluster check_lvmetad check_lvmpolld)) ifdef SOURCES -include $(SOURCES:.c=.d) $(CXXSOURCES:.cpp=.d) endif ifdef SOURCES2 
-include $(SOURCES2:.c=.d) endif endif endif LVM2.2.02.176/COPYING0000644000000000000120000004311313176752421012361 0ustar rootwheel GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. 
(Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. 
If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. 
If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. LVM2.2.02.176/python/0000755000000000000120000000000013176752421012645 5ustar rootwheelLVM2.2.02.176/python/Makefile.in0000644000000000000120000000246613176752421014722 0ustar rootwheel# # Copyright (C) 2011-2016 Red Hat, Inc. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU Lesser General Public License v.2.1. # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ TARGETS = .liblvm_built include $(top_builddir)/make.tmpl .liblvm_built: liblvm_python.c ifeq ("@PYTHON2_BINDINGS@", "yes") $(PYTHON2) setup.py build endif ifeq ("@PYTHON3_BINDINGS@", "yes") $(PYTHON3) setup.py build endif touch $@ liblvm_python.c: $(LN_S) $(srcdir)/liblvm.c $@ install_python_bindings: $(TARGETS) ifeq ("@PYTHON2_BINDINGS@", "yes") $(PYTHON2) setup.py install --skip-build --prefix $(pythonprefix) endif ifeq ("@PYTHON3_BINDINGS@", "yes") $(PYTHON3) setup.py install --skip-build --prefix $(pythonprefix) endif install_lvm2: install_python_bindings install: install_lvm2 .PHONY: install_python_bindings .INTERMEDIATE: liblvm_python.c clean: $(RM) -r build distclean: clean CLEAN_TARGETS += liblvm_python.c DISTCLEAN_TARGETS += setup.py LVM2.2.02.176/python/example.py0000644000000000000120000000637413176752421014664 0ustar rootwheel# # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 2.1 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see . # #----------------------------- # Python example code: #----------------------------- import lvm # Note: This example will create a logical unit, tag it and # delete it, don't run this on production box! 
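# As a condensed sketch of the flow the functions below demonstrate
# (the helper name and its parameters are hypothetical and it is never
# called; the real code, with error handling and cleanup, follows):
def _sketch_create_tag_delete(vg_name, size):
    vg = lvm.vgOpen(vg_name, 'w')                            # open the VG read/write
    lv = vg.createLvLinear('python_lvm_ok_to_delete', size)  # create a linear LV
    lv.addTag('Demo_tag')                                    # tag it
    lv.remove()                                              # and delete it again
    vg.close()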
#Dump information about PV def print_pv(pv): print('PV name: ', pv.getName(), ' ID: ', pv.getUuid(), 'Size: ', pv.getSize()) #Dump some information about a specific volume group def print_vg(vg_name): #Open read only vg = lvm.vgOpen(vg_name, 'r') print('Volume group:', vg_name, 'Size: ', vg.getSize()) #Retrieve a list of Physical volumes for this volume group pv_list = vg.listPVs() #Print out the physical volumes for p in pv_list: print_pv(p) #Get a list of logical volumes in this volume group lv_list = vg.listLVs() if len(lv_list): for l in lv_list: print('LV name: ', l.getName(), ' ID: ', l.getUuid()) else: print('No logical volumes present!') vg.close() #Returns the name of a vg with space available def find_vg_with_free_space(): free_space = 0 rc = None vg_names = lvm.listVgNames() for v in vg_names: vg = lvm.vgOpen(v, 'r') c_free = vg.getFreeSize() if c_free > free_space: free_space = c_free rc = v vg.close() return rc #Walk through the volume groups and fine one with space in which we can #create a new logical volume def create_delete_logical_volume(): vg_name = find_vg_with_free_space() print('Using volume group ', vg_name, ' for example') if vg_name: vg = lvm.vgOpen(vg_name, 'w') lv = vg.createLvLinear('python_lvm_ok_to_delete', vg.getFreeSize()) if lv: print('New lv, id= ', lv.getUuid()) #Create a tag lv.addTag('Demo_tag') #Get the tags tags = lv.getTags() for t in tags: #Remove tag lv.removeTag(t) lv.deactivate() #Try to rename lv.rename("python_lvm_renamed") print('LV name= ', lv.getName()) lv.remove() vg.close() else: print('No free space available to create demo lv!') if __name__ == '__main__': #What version print('lvm version=', lvm.getVersion()) #Get a list of volume group names vg_names = lvm.listVgNames() #For each volume group display some information about each of them for vg_i in vg_names: print_vg(vg_i) #Demo creating a logical volume create_delete_logical_volume() LVM2.2.02.176/python/setup.py.in0000644000000000000120000000245313176752421014770 0ustar rootwheel# # Copyright (C) 2012 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 2.1 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see . from distutils.core import setup, Extension liblvm = Extension('lvm', sources = ['liblvm_python.c'], libraries= ['lvm2app'], library_dirs= ['@top_builddir@/liblvm'], include_dirs= ['@top_builddir@/include']) setup (name='lvm', version=@LVM_VERSION@, description='Python bindings for liblvm2', license="LGPLv2+", maintainer='LVM2 maintainers', maintainer_email='linux-lvm@redhat.com', url='http://sourceware.org/lvm2/', ext_modules=[liblvm], ) LVM2.2.02.176/python/liblvm.c0000644000000000000120000013700013176752421014277 0ustar rootwheel/* * Liblvm -- Python interface to LVM2 API. * * Copyright (C) 2010, 2013 Red Hat, Inc. All rights reserved. 
* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 2.1 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * You should have received a copy of the GNU Lesser General Public License * along with this program. If not, see . * * Authors: Lars Sjostrom (lars sjostrom redhat com) * Andy Grover (agrover redhat com) * Tony Asleson (tasleson redhat com) */ #include #include "lvm2app.h" #include "defaults.h" #if PY_MAJOR_VERSION >= 3 #define IS_PY3K #define PYINTTYPE_CHECK PyLong_Check #define PYINTTYPE_ASLONG PyLong_AsLong #define PYINTTYPE_FROMLONG PyLong_FromLong #define PYSTRYPE_CHECK PyUnicode_Check #define PYSTRTYPE_ASSTRING PyUnicode_AsUTF8 #define PYSTRTYPE_FROMSTRING PyUnicode_FromString #else #define PYINTTYPE_CHECK PyInt_Check #define PYINTTYPE_ASLONG PyInt_AsLong #define PYINTTYPE_FROMLONG PyInt_FromLong #define PYSTRYPE_CHECK PyString_Check #define PYSTRTYPE_ASSTRING PyString_AsString #define PYSTRTYPE_FROMSTRING PyString_FromString #endif static lvm_t _libh; typedef struct { PyObject_HEAD vg_t vg; /* vg handle */ lvm_t libh_copy; } vgobject; typedef struct { PyObject_HEAD struct dm_list *pvslist; lvm_t libh_copy; } pvslistobject; typedef struct { PyObject_HEAD lv_t lv; /* lv handle */ vgobject *parent_vgobj; } lvobject; typedef struct { PyObject_HEAD pv_t pv; /* pv handle */ vgobject *parent_vgobj; pvslistobject *parent_pvslistobj; } pvobject; typedef struct { PyObject_HEAD lvseg_t lv_seg; /* lv segment handle */ lvobject *parent_lvobj; } lvsegobject; typedef struct { PyObject_HEAD pvseg_t pv_seg; /* pv segment handle */ pvobject *parent_pvobj; } pvsegobject; static PyTypeObject _LibLVMvgType; static PyTypeObject _LibLVMlvType; static PyTypeObject _LibLVMpvlistType; static PyTypeObject _LibLVMpvType; static PyTypeObject _LibLVMlvsegType; static PyTypeObject _LibLVMpvsegType; static PyObject *_LibLVMError; #define LVM_VALID(ptr) \ do { \ if (!_libh) { \ _libh = lvm_init(NULL); \ } \ if (ptr && _libh) { \ if (ptr != _libh) { \ PyErr_SetString(PyExc_UnboundLocalError, "LVM handle reference stale"); \ return NULL; \ } \ } else if (!_libh) { \ PyErr_SetString(PyExc_UnboundLocalError, "LVM handle invalid"); \ return NULL; \ } \ } while (0) /** * Ensure that we initialize all the bits to a sane state. 
*/ static pvobject *_create_py_pv(void) { pvobject * pvobj = PyObject_New(pvobject, &_LibLVMpvType); if (pvobj) { pvobj->pv = NULL; pvobj->parent_vgobj = NULL; pvobj->parent_pvslistobj = NULL; } return pvobj; } static vgobject *_create_py_vg(void) { vgobject *vgobj = PyObject_New(vgobject, &_LibLVMvgType); if (vgobj) { vgobj->vg = NULL; vgobj->libh_copy = _libh; } return vgobj; } static pvslistobject *_create_py_pvlist(void) { pvslistobject *pvlistobj = PyObject_New(pvslistobject, &_LibLVMpvlistType); if (pvlistobj) { pvlistobj->pvslist = NULL; pvlistobj->libh_copy = _libh; } return pvlistobj; } static lvobject *_create_py_lv(vgobject *parent, lv_t lv) { lvobject * lvobj = PyObject_New(lvobject, &_LibLVMlvType); if (lvobj) { lvobj->parent_vgobj = parent; Py_INCREF(lvobj->parent_vgobj); lvobj->lv = lv; } return lvobj; } static PyObject *_liblvm_get_last_error(void) { PyObject *info; const char *msg = NULL; LVM_VALID(NULL); if (!(info = PyTuple_New(2))) return NULL; PyTuple_SetItem(info, 0, PYINTTYPE_FROMLONG((long) lvm_errno(_libh))); msg = lvm_errmsg(_libh); PyTuple_SetItem(info, 1, ((msg) ? PYSTRTYPE_FROMSTRING(msg) : PYSTRTYPE_FROMSTRING("Memory error while retrieving error message"))); return info; } static PyObject *_liblvm_library_get_version(void) { return Py_BuildValue("s", lvm_library_get_version()); } static const char _gc_doc[] = "Garbage collect the C library"; static PyObject *_liblvm_lvm_gc(void) { if (_libh) { lvm_quit(_libh); _libh = NULL; } Py_INCREF(Py_None); return Py_None; } static PyObject *_liblvm_lvm_list_vg_names(void) { struct dm_list *vgnames; struct lvm_str_list *strl; PyObject * pytuple; int i = 0; LVM_VALID(NULL); if (!(vgnames = lvm_list_vg_names(_libh))) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } if (!(pytuple = PyTuple_New(dm_list_size(vgnames)))) return NULL; dm_list_iterate_items(strl, vgnames) { PyTuple_SET_ITEM(pytuple, i, PYSTRTYPE_FROMSTRING(strl->str)); i++; } return pytuple; } static PyObject *_liblvm_lvm_list_vg_uuids(void) { struct dm_list *uuids; struct lvm_str_list *strl; PyObject * pytuple; int i = 0; LVM_VALID(NULL); if (!(uuids = lvm_list_vg_uuids(_libh))) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } if (!(pytuple = PyTuple_New(dm_list_size(uuids)))) return NULL; dm_list_iterate_items(strl, uuids) { PyTuple_SET_ITEM(pytuple, i, PYSTRTYPE_FROMSTRING(strl->str)); i++; } return pytuple; } static PyObject *_liblvm_lvm_pvlist_get(pvslistobject *pvsobj) { struct lvm_pv_list *pvl; PyObject * pytuple; pvobject * pvobj; int i = 0; /* unlike other LVM api calls, if there are no results, we get NULL */ pvsobj->pvslist = lvm_list_pvs(_libh); if (!pvsobj->pvslist) return Py_BuildValue("()"); if (!(pytuple = PyTuple_New(dm_list_size(pvsobj->pvslist)))) return NULL; dm_list_iterate_items(pvl, pvsobj->pvslist) { /* Create and initialize the object */ if (!(pvobj = _create_py_pv())) { Py_DECREF(pytuple); return NULL; } /* We don't have a parent vg object to be concerned about */ pvobj->parent_vgobj = NULL; pvobj->parent_pvslistobj = pvsobj; Py_INCREF(pvobj->parent_pvslistobj); pvobj->pv = pvl->pv; PyTuple_SET_ITEM(pytuple, i, (PyObject *) pvobj); i++; } return pytuple; } static PyObject *_liblvm_lvm_pvlist_put(pvslistobject *self) { if (self->pvslist) { if (lvm_list_pvs_free(self->pvslist)) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } self->pvslist = NULL; Py_INCREF(Py_None); return Py_None; } return NULL; } static PyObject *_liblvm_pvlist_dealloc(pvslistobject *self) { if 
(self->pvslist) _liblvm_lvm_pvlist_put(self); PyObject_Del(self); Py_INCREF(Py_None); return Py_None; } static PyObject *_liblvm_lvm_list_pvs(void) { LVM_VALID(NULL); return (PyObject *)_create_py_pvlist(); } static PyObject *_liblvm_lvm_pv_remove(PyObject *self, PyObject *arg) { const char *pv_name; LVM_VALID(NULL); if (!PyArg_ParseTuple(arg, "s", &pv_name)) return NULL; if (lvm_pv_remove(_libh, pv_name) == -1) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } Py_INCREF(Py_None); return Py_None; } static int _set_pv_numeric_prop(pv_create_params_t pv_params, const char *name, unsigned long long value) { struct lvm_property_value prop_value = { .is_integer = 1, .value.integer = value, }; return lvm_pv_params_set_property(pv_params, name, &prop_value); } #define SET_PV_PROP(params, name, value) \ do { \ if (_set_pv_numeric_prop(params, name, value) == -1) \ goto error; \ } while(0)\ static PyObject *_liblvm_lvm_pv_create(PyObject *self, PyObject *arg) { const char *pv_name; unsigned long long size = 0; unsigned long long pvmetadatacopies = DEFAULT_PVMETADATACOPIES; unsigned long long pvmetadatasize = DEFAULT_PVMETADATASIZE; unsigned long long data_alignment = 0; unsigned long long data_alignment_offset = 0; unsigned long long zero = 1; pv_create_params_t pv_params = NULL; LVM_VALID(NULL); if (!PyArg_ParseTuple(arg, "s|KKKKKK", &pv_name, &size, &pvmetadatacopies, &pvmetadatasize, &data_alignment, &data_alignment_offset, &zero)) return NULL; pv_params = lvm_pv_params_create(_libh, pv_name); if (!pv_params) { goto error; } SET_PV_PROP(pv_params, "size", size); SET_PV_PROP(pv_params, "pvmetadatacopies", pvmetadatacopies); SET_PV_PROP(pv_params, "pvmetadatasize", pvmetadatasize); SET_PV_PROP(pv_params, "data_alignment", data_alignment); SET_PV_PROP(pv_params, "data_alignment_offset", data_alignment_offset); SET_PV_PROP(pv_params, "zero", zero); if (lvm_pv_create_adv(pv_params)) { goto error; } Py_INCREF(Py_None); return Py_None; error: PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } static PyObject *_liblvm_lvm_percent_to_float(PyObject *self, PyObject *arg) { double converted; int percent; LVM_VALID(NULL); if (!PyArg_ParseTuple(arg, "i", &percent)) return NULL; converted = lvm_percent_to_float(percent); return Py_BuildValue("d", converted); } static PyObject *_liblvm_lvm_vg_name_validate(PyObject *self, PyObject *arg) { const char *name; LVM_VALID(NULL); if (!PyArg_ParseTuple(arg, "s", &name)) return NULL; if (lvm_vg_name_validate(_libh, name) < 0) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } Py_INCREF(Py_None); return Py_None; } static PyObject *_liblvm_lvm_vgname_from_pvid(PyObject *self, PyObject *arg) { const char *pvid; const char *vgname; LVM_VALID(NULL); if (!PyArg_ParseTuple(arg, "s", &pvid)) return NULL; if (!(vgname = lvm_vgname_from_pvid(_libh, pvid))) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } return Py_BuildValue("s", vgname); } static PyObject *_liblvm_lvm_vgname_from_device(PyObject *self, PyObject *arg) { const char *device; const char *vgname; LVM_VALID(NULL); if (!PyArg_ParseTuple(arg, "s", &device)) return NULL; if (!(vgname = lvm_vgname_from_device(_libh, device))) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } return Py_BuildValue("s", vgname); } static PyObject *_liblvm_lvm_config_find_bool(PyObject *self, PyObject *arg) { const char *config; int rval; PyObject *rc; LVM_VALID(NULL); if (!PyArg_ParseTuple(arg, "s", &config)) return NULL; if 
((rval = lvm_config_find_bool(_libh, config, -10)) == -10) { /* Retrieving error information yields no error in this case */ PyErr_Format(PyExc_ValueError, "config path not found"); return NULL; } rc = (rval) ? Py_True: Py_False; Py_INCREF(rc); return rc; } static PyObject *_liblvm_lvm_config_reload(void) { LVM_VALID(NULL); if (lvm_config_reload(_libh) == -1) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } Py_INCREF(Py_None); return Py_None; } static PyObject *_liblvm_lvm_scan(void) { LVM_VALID(NULL); if (lvm_scan(_libh) == -1) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } Py_INCREF(Py_None); return Py_None; } static PyObject *_liblvm_lvm_config_override(PyObject *self, PyObject *arg) { const char *config; LVM_VALID(NULL); if (!PyArg_ParseTuple(arg, "s", &config)) return NULL; if (lvm_config_override(_libh, config) == -1) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } Py_INCREF(Py_None); return Py_None; } /* ---------------------------------------------------------------------- * VG object initialization/deallocation */ static PyObject *_liblvm_lvm_vg_open(PyObject *self, PyObject *args) { const char *vgname; const char *mode = NULL; vgobject *vgobj; LVM_VALID(NULL); if (!PyArg_ParseTuple(args, "s|s", &vgname, &mode)) return NULL; if (mode == NULL) mode = "r"; if (!(vgobj = _create_py_vg())) return NULL; if (!(vgobj->vg = lvm_vg_open(_libh, vgname, mode, 0))) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); Py_DECREF(vgobj); return NULL; } return (PyObject *)vgobj; } static PyObject *_liblvm_lvm_vg_create(PyObject *self, PyObject *args) { const char *vgname; vgobject *vgobj; LVM_VALID(NULL); if (!PyArg_ParseTuple(args, "s", &vgname)) return NULL; if (!(vgobj = _create_py_vg())) return NULL; if (!(vgobj->vg = lvm_vg_create(_libh, vgname))) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); Py_DECREF(vgobj); return NULL; } return (PyObject *)vgobj; } static void liblvm_vg_dealloc(vgobject *self) { /* if already closed, don't reclose it */ if (self->vg != NULL) { lvm_vg_close(self->vg); self->vg = NULL; self->libh_copy = NULL; } PyObject_Del(self); } /* VG Methods */ #define VG_VALID(vgobject) \ do { \ if (!vgobject || !vgobject->vg) { \ PyErr_SetString(PyExc_UnboundLocalError, "VG object invalid"); \ return NULL; \ } \ LVM_VALID(vgobject->libh_copy); \ } while (0) #define PVSLIST_VALID(pvslistobject) \ do { \ if (!pvslistobject || !pvslistobject->pvslist) { \ PyErr_SetString(PyExc_UnboundLocalError, "PVS object invalid"); \ return NULL; \ } \ LVM_VALID(pvslistobject->libh_copy); \ } while (0) static PyObject *_liblvm_lvm_vg_close(vgobject *self) { /* if already closed, don't reclose it */ if (self->vg) { if (lvm_vg_close(self->vg) == -1) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } self->vg = NULL; self->libh_copy = NULL; } Py_INCREF(Py_None); return Py_None; } static PyObject *_liblvm_lvm_vg_get_name(vgobject *self) { VG_VALID(self); return Py_BuildValue("s", lvm_vg_get_name(self->vg)); } static PyObject *_liblvm_lvm_vg_get_uuid(vgobject *self) { VG_VALID(self); return Py_BuildValue("s", lvm_vg_get_uuid(self->vg)); } static PyObject *_liblvm_lvm_vg_remove(vgobject *self) { VG_VALID(self); if (lvm_vg_remove(self->vg) == -1) goto error; if (lvm_vg_write(self->vg) == -1) goto error; /* Not much you can do with a vg that is removed so close it */ return _liblvm_lvm_vg_close(self); error: PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } static 
PyObject *_liblvm_lvm_vg_extend(vgobject *self, PyObject *args) { const char *device; VG_VALID(self); if (!PyArg_ParseTuple(args, "s", &device)) { return NULL; } if (lvm_vg_extend(self->vg, device) == -1) goto error; if (lvm_vg_write(self->vg) == -1) goto error; Py_INCREF(Py_None); return Py_None; error: PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } static PyObject *_liblvm_lvm_vg_reduce(vgobject *self, PyObject *args) { const char *device; VG_VALID(self); if (!PyArg_ParseTuple(args, "s", &device)) return NULL; if (lvm_vg_reduce(self->vg, device) == -1) goto error; if (lvm_vg_write(self->vg) == -1) goto error; Py_INCREF(Py_None); return Py_None; error: PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } static PyObject *_liblvm_lvm_vg_add_tag(vgobject *self, PyObject *args) { const char *tag; int rval; VG_VALID(self); if (!PyArg_ParseTuple(args, "s", &tag)) { return NULL; } if ((rval = lvm_vg_add_tag(self->vg, tag)) == -1) goto error; if (lvm_vg_write(self->vg) == -1) goto error; return Py_BuildValue("i", rval); error: PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } static PyObject *_liblvm_lvm_vg_remove_tag(vgobject *self, PyObject *args) { const char *tag; VG_VALID(self); if (!PyArg_ParseTuple(args, "s", &tag)) return NULL; if (lvm_vg_remove_tag(self->vg, tag) == -1) goto error; if (lvm_vg_write(self->vg) == -1) goto error; Py_INCREF(Py_None); return Py_None; error: PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } static PyObject *_liblvm_lvm_vg_is_clustered(vgobject *self) { PyObject *rval; VG_VALID(self); rval = ( lvm_vg_is_clustered(self->vg) == 1) ? Py_True : Py_False; Py_INCREF(rval); return rval; } static PyObject *_liblvm_lvm_vg_is_exported(vgobject *self) { PyObject *rval; VG_VALID(self); rval = ( lvm_vg_is_exported(self->vg) == 1) ? Py_True : Py_False; Py_INCREF(rval); return rval; } static PyObject *_liblvm_lvm_vg_is_partial(vgobject *self) { PyObject *rval; VG_VALID(self); rval = ( lvm_vg_is_partial(self->vg) == 1) ? 
Py_True : Py_False; Py_INCREF(rval); return rval; } static PyObject *_liblvm_lvm_vg_get_seqno(vgobject *self) { VG_VALID(self); return Py_BuildValue("K", (unsigned long long)lvm_vg_get_seqno(self->vg)); } static PyObject *_liblvm_lvm_vg_get_size(vgobject *self) { VG_VALID(self); return Py_BuildValue("K", (unsigned long long)lvm_vg_get_size(self->vg)); } static PyObject *_liblvm_lvm_vg_get_free_size(vgobject *self) { VG_VALID(self); return Py_BuildValue("K", (unsigned long long)lvm_vg_get_free_size(self->vg)); } static PyObject *_liblvm_lvm_vg_get_extent_size(vgobject *self) { VG_VALID(self); return Py_BuildValue("K", (unsigned long long)lvm_vg_get_extent_size(self->vg)); } static PyObject *_liblvm_lvm_vg_get_extent_count(vgobject *self) { VG_VALID(self); return Py_BuildValue("K", (unsigned long long)lvm_vg_get_extent_count(self->vg)); } static PyObject *_liblvm_lvm_vg_get_free_extent_count(vgobject *self) { VG_VALID(self); return Py_BuildValue("K", (unsigned long long)lvm_vg_get_free_extent_count(self->vg)); } /* Builds a python tuple ([string|number], bool) from a struct lvm_property_value */ static PyObject *get_property(struct lvm_property_value *prop) { PyObject *pytuple; PyObject *setable; if (!prop->is_valid) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } if (!(pytuple = PyTuple_New(2))) return NULL; if (prop->is_integer) { if (prop->is_signed) { PyTuple_SET_ITEM(pytuple, 0, Py_BuildValue("L", prop->value.signed_integer)); } else { PyTuple_SET_ITEM(pytuple, 0, Py_BuildValue("K", prop->value.integer)); } } else { if ( prop->value.string ) { PyTuple_SET_ITEM(pytuple, 0, PYSTRTYPE_FROMSTRING(prop->value.string)); } else { PyTuple_SET_ITEM(pytuple, 0, Py_None); } } if (prop->is_settable) setable = Py_True; else setable = Py_False; Py_INCREF(setable); PyTuple_SET_ITEM(pytuple, 1, setable); return pytuple; } /* This will return a tuple of (value, bool) with the value being a string or integer and bool indicating if property is settable */ static PyObject *_liblvm_lvm_vg_get_property(vgobject *self, PyObject *args) { const char *name; struct lvm_property_value prop_value; VG_VALID(self); if (!PyArg_ParseTuple(args, "s", &name)) return NULL; prop_value = lvm_vg_get_property(self->vg, name); return get_property(&prop_value); } static PyObject *_liblvm_lvm_vg_set_property(vgobject *self, PyObject *args) { const char *property_name = NULL; PyObject *variant_type_arg = NULL; struct lvm_property_value lvm_property; char *string_value = NULL; int temp_py_int; unsigned long long temp_py_long; VG_VALID(self); if (!PyArg_ParseTuple(args, "sO", &property_name, &variant_type_arg)) return NULL; lvm_property = lvm_vg_get_property(self->vg, property_name); if (!lvm_property.is_valid) goto lvmerror; if (PYSTRYPE_CHECK(variant_type_arg)) { if (!lvm_property.is_string) { PyErr_Format(PyExc_ValueError, "Property requires string value"); goto bail; } if (!(string_value = PYSTRTYPE_ASSTRING(variant_type_arg))) { PyErr_NoMemory(); goto bail; } lvm_property.value.string = string_value; } else { if (!lvm_property.is_integer) { PyErr_Format(PyExc_ValueError, "Property requires numeric value"); goto bail; } if (PYINTTYPE_CHECK(variant_type_arg)) { temp_py_int = PYINTTYPE_ASLONG(variant_type_arg); /* -1 could be valid, need to see if an exception was gen. 
*/ if (temp_py_int == -1 && PyErr_Occurred()) goto bail; if (temp_py_int < 0) { PyErr_Format(PyExc_ValueError, "Positive integers only!"); goto bail; } lvm_property.value.integer = temp_py_int; } else if (PyObject_IsInstance(variant_type_arg, (PyObject*)&PyLong_Type)){ /* If PyLong_AsUnsignedLongLong function fails an OverflowError is * raised and (unsigned long long)-1 is returned */ if ((temp_py_long = PyLong_AsUnsignedLongLong(variant_type_arg)) == ~0ULL) goto bail; lvm_property.value.integer = temp_py_long; } else { PyErr_Format(PyExc_ValueError, "supported value types are numeric and string"); goto bail; } } if (lvm_vg_set_property(self->vg, property_name, &lvm_property) == -1) goto lvmerror; if (lvm_vg_write(self->vg) == -1) goto lvmerror; Py_INCREF(Py_None); return Py_None; lvmerror: PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); bail: return NULL; } static PyObject *_liblvm_lvm_vg_get_pv_count(vgobject *self) { VG_VALID(self); return Py_BuildValue("K", (unsigned long long)lvm_vg_get_pv_count(self->vg)); } static PyObject *_liblvm_lvm_vg_get_max_pv(vgobject *self) { VG_VALID(self); return Py_BuildValue("K", (unsigned long long)lvm_vg_get_max_pv(self->vg)); } static PyObject *_liblvm_lvm_vg_get_max_lv(vgobject *self) { VG_VALID(self); return Py_BuildValue("K", (unsigned long long)lvm_vg_get_max_lv(self->vg)); } static PyObject *_liblvm_lvm_vg_set_extent_size(vgobject *self, PyObject *args) { unsigned int new_size; VG_VALID(self); if (!PyArg_ParseTuple(args, "I", &new_size)) return NULL; if (lvm_vg_set_extent_size(self->vg, new_size) == -1) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } Py_INCREF(Py_None); return Py_None; } static PyObject *_liblvm_lvm_vg_list_lvs(vgobject *self) { struct dm_list *lvs; struct lvm_lv_list *lvl; PyObject * pytuple; lvobject * lvobj; int i = 0; VG_VALID(self); /* unlike other LVM api calls, if there are no results, we get NULL */ if (!(lvs = lvm_vg_list_lvs(self->vg))) return Py_BuildValue("()"); if (!(pytuple = PyTuple_New(dm_list_size(lvs)))) return NULL; dm_list_iterate_items(lvl, lvs) { /* Create and initialize the object */ if (!(lvobj = _create_py_lv(self, lvl->lv))) { Py_DECREF(pytuple); return NULL; } PyTuple_SET_ITEM(pytuple, i, (PyObject *) lvobj); i++; } return pytuple; } static PyObject *_liblvm_lvm_vg_get_tags(vgobject *self) { struct dm_list *tagsl; struct lvm_str_list *strl; PyObject * pytuple; int i = 0; VG_VALID(self); if (!(tagsl = lvm_vg_get_tags(self->vg))) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } if (!(pytuple = PyTuple_New(dm_list_size(tagsl)))) return NULL; dm_list_iterate_items(strl, tagsl) { PyTuple_SET_ITEM(pytuple, i, PYSTRTYPE_FROMSTRING(strl->str)); i++; } return pytuple; } static PyObject *_liblvm_lvm_vg_create_lv_linear(vgobject *self, PyObject *args) { const char *vgname; unsigned long long size; lv_t lv; VG_VALID(self); if (!PyArg_ParseTuple(args, "sK", &vgname, &size)) return NULL; if (!(lv = lvm_vg_create_lv_linear(self->vg, vgname, size))) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } return (PyObject *)_create_py_lv(self, lv); } static PyObject *_liblvm_lvm_vg_create_lv_thinpool(vgobject *self, PyObject *args) { unsigned long long size = 0; unsigned long long meta_size = 0; const char *pool_name; unsigned long chunk_size = 0; int skip_zero = 0; lvm_thin_discards_t discard = LVM_THIN_DISCARDS_PASSDOWN; lv_t lv; lv_create_params_t lvp = NULL; struct lvm_property_value prop_value; VG_VALID(self); if (!PyArg_ParseTuple(args, 
"sK|kKii", &pool_name, &size, &chunk_size, &meta_size, &discard, &skip_zero)) return NULL; if (!(lvp = lvm_lv_params_create_thin_pool(self->vg, pool_name, size, chunk_size, meta_size, discard))) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } if (skip_zero) { prop_value = lvm_lv_params_get_property(lvp, "skip_zero"); if (prop_value.is_valid) { prop_value.value.integer = 1; if (lvm_lv_params_set_property(lvp, "skip_zero", &prop_value) == -1) { goto error; } } } if (!(lv = lvm_lv_create(lvp))) { goto error; } return (PyObject *)_create_py_lv(self, lv); error: PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } static PyObject *_liblvm_lvm_vg_create_lv_thin(vgobject *self, PyObject *args) { const char *pool_name; const char *lv_name; unsigned long long size = 0; lv_t lv; lv_create_params_t lvp = NULL; VG_VALID(self); if (!PyArg_ParseTuple(args, "ssK", &pool_name, &lv_name, &size)) return NULL; if (!(lvp = lvm_lv_params_create_thin(self->vg, pool_name, lv_name,size))) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } if (!(lv = lvm_lv_create(lvp))) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } return (PyObject *)_create_py_lv(self, lv); } static void liblvm_lv_dealloc(lvobject *self) { /* We can dealloc an object that didn't get fully created */ if (self->parent_vgobj) { Py_DECREF(self->parent_vgobj); } PyObject_Del(self); } static PyObject *_liblvm_lvm_vg_list_pvs(vgobject *self) { struct dm_list *pvs; struct lvm_pv_list *pvl; PyObject * pytuple; pvobject * pvobj; int i = 0; VG_VALID(self); /* unlike other LVM api calls, if there are no results, we get NULL */ if (!(pvs = lvm_vg_list_pvs(self->vg))) return Py_BuildValue("()"); if (!(pytuple = PyTuple_New(dm_list_size(pvs)))) return NULL; dm_list_iterate_items(pvl, pvs) { /* Create and initialize the object */ if (!(pvobj = _create_py_pv())) { Py_DECREF(pytuple); return NULL; } pvobj->parent_vgobj = self; Py_INCREF(pvobj->parent_vgobj); pvobj->pv = pvl->pv; PyTuple_SET_ITEM(pytuple, i, (PyObject *) pvobj); i++; } return pytuple; } typedef lv_t (*lv_fetch_by_N)(vg_t vg, const char *id); typedef pv_t (*pv_fetch_by_N)(vg_t vg, const char *id); static PyObject *_liblvm_lvm_lv_from_N(vgobject *self, PyObject *arg, lv_fetch_by_N method) { const char *id; lv_t lv = NULL; VG_VALID(self); if (!PyArg_ParseTuple(arg, "s", &id)) return NULL; if (!(lv = method(self->vg, id))) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } return (PyObject *)_create_py_lv(self, lv); } static PyObject *_liblvm_lvm_lv_from_name(vgobject *self, PyObject *arg) { return _liblvm_lvm_lv_from_N(self, arg, lvm_lv_from_name); } static PyObject *_liblvm_lvm_lv_from_uuid(vgobject *self, PyObject *arg) { return _liblvm_lvm_lv_from_N(self, arg, lvm_lv_from_uuid); } static PyObject *_liblvm_lvm_lv_name_validate(vgobject *self, PyObject *args) { const char *name; VG_VALID(self); if (!PyArg_ParseTuple(args, "s", &name)) return NULL; if (lvm_lv_name_validate(self->vg, name) < 0) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } Py_INCREF(Py_None); return Py_None; } static PyObject *_liblvm_lvm_pv_from_N(vgobject *self, PyObject *arg, pv_fetch_by_N method) { const char *id; pvobject *rc; pv_t pv = NULL; VG_VALID(self); if (!PyArg_ParseTuple(arg, "s", &id)) return NULL; if (!(pv = method(self->vg, id))) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } if (!(rc = _create_py_pv())) return NULL; Py_INCREF(self); rc->pv = pv; 
return (PyObject *)rc; } static PyObject *_liblvm_lvm_pv_from_name(vgobject *self, PyObject *arg) { return _liblvm_lvm_pv_from_N(self, arg, lvm_pv_from_name); } static PyObject *_liblvm_lvm_pv_from_uuid(vgobject *self, PyObject *arg) { return _liblvm_lvm_pv_from_N(self, arg, lvm_pv_from_uuid); } static void _liblvm_pv_dealloc(pvobject *self) { if (self->parent_vgobj) { Py_DECREF(self->parent_vgobj); } if (self->parent_pvslistobj) { Py_DECREF(self->parent_pvslistobj); } self->parent_vgobj = NULL; self->parent_pvslistobj = NULL; PyObject_Del(self); } /* LV Methods */ #define LV_VALID(lvobject) \ do { \ if (!lvobject || !lvobject->lv) { \ PyErr_SetString(PyExc_UnboundLocalError, "LV object invalid"); \ return NULL; \ }\ VG_VALID(lvobject->parent_vgobj); \ } while (0) static PyObject *_liblvm_lvm_lv_get_attr(lvobject *self) { LV_VALID(self); return Py_BuildValue("s", lvm_lv_get_attr(self->lv)); } static PyObject *_liblvm_lvm_lv_get_origin(lvobject *self) { LV_VALID(self); return Py_BuildValue("s", lvm_lv_get_origin(self->lv)); } static PyObject *_liblvm_lvm_lv_get_name(lvobject *self) { LV_VALID(self); return Py_BuildValue("s", lvm_lv_get_name(self->lv)); } static PyObject *_liblvm_lvm_lv_get_uuid(lvobject *self) { LV_VALID(self); return Py_BuildValue("s", lvm_lv_get_uuid(self->lv)); } static PyObject *_liblvm_lvm_lv_activate(lvobject *self) { LV_VALID(self); if (lvm_lv_activate(self->lv) == -1) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } Py_INCREF(Py_None); return Py_None; } static PyObject *_liblvm_lvm_lv_deactivate(lvobject *self) { LV_VALID(self); if (lvm_lv_deactivate(self->lv) == -1) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } Py_INCREF(Py_None); return Py_None; } static PyObject *_liblvm_lvm_vg_remove_lv(lvobject *self) { LV_VALID(self); if (lvm_vg_remove_lv(self->lv) == -1) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } self->lv = NULL; Py_INCREF(Py_None); return Py_None; } /* This will return a tuple of (value, bool) with the value being a string or integer and bool indicating if property is settable */ static PyObject * _liblvm_lvm_lv_get_property(lvobject *self, PyObject *args) { const char *name; struct lvm_property_value prop_value; LV_VALID(self); if (!PyArg_ParseTuple(args, "s", &name)) return NULL; prop_value = lvm_lv_get_property(self->lv, name); return get_property(&prop_value); } static PyObject *_liblvm_lvm_lv_get_size(lvobject *self) { LV_VALID(self); return Py_BuildValue("K", (unsigned long long)lvm_lv_get_size(self->lv)); } static PyObject *_liblvm_lvm_lv_is_active(lvobject *self) { PyObject *rval; LV_VALID(self); rval = (lvm_lv_is_active(self->lv) == 1) ? Py_True : Py_False; Py_INCREF(rval); return rval; } static PyObject *_liblvm_lvm_lv_is_suspended(lvobject *self) { PyObject *rval; LV_VALID(self); rval = (lvm_lv_is_suspended(self->lv) == 1) ? 
Py_True : Py_False; Py_INCREF(rval); return rval; } static PyObject *_liblvm_lvm_lv_add_tag(lvobject *self, PyObject *args) { const char *tag; LV_VALID(self); if (!PyArg_ParseTuple(args, "s", &tag)) return NULL; if (lvm_lv_add_tag(self->lv, tag) == -1) goto error; if (lvm_vg_write(self->parent_vgobj->vg) == -1) goto error; Py_INCREF(Py_None); return Py_None; error: PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } static PyObject *_liblvm_lvm_lv_remove_tag(lvobject *self, PyObject *args) { const char *tag; LV_VALID(self); if (!PyArg_ParseTuple(args, "s", &tag)) return NULL; if (lvm_lv_remove_tag(self->lv, tag) == -1) goto error; if (lvm_vg_write(self->parent_vgobj->vg) == -1) goto error; Py_INCREF(Py_None); return Py_None; error: PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } static PyObject *_liblvm_lvm_lv_get_tags(lvobject *self) { struct dm_list *tagsl; struct lvm_str_list *strl; PyObject * pytuple; int i = 0; LV_VALID(self); if (!(tagsl = lvm_lv_get_tags(self->lv))) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } if (!(pytuple = PyTuple_New(dm_list_size(tagsl)))) return NULL; dm_list_iterate_items(strl, tagsl) { PyTuple_SET_ITEM(pytuple, i, PYSTRTYPE_FROMSTRING(strl->str)); i++; } return pytuple; } static PyObject *_liblvm_lvm_lv_rename(lvobject *self, PyObject *args) { const char *new_name; LV_VALID(self); if (!PyArg_ParseTuple(args, "s", &new_name)) return NULL; if (lvm_lv_rename(self->lv, new_name) == -1) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } Py_INCREF(Py_None); return Py_None; } static PyObject *_liblvm_lvm_lv_resize(lvobject *self, PyObject *args) { unsigned long long new_size; LV_VALID(self); if (!PyArg_ParseTuple(args, "K", &new_size)) return NULL; if (lvm_lv_resize(self->lv, new_size) == -1) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } Py_INCREF(Py_None); return Py_None; } static PyObject *_liblvm_lvm_lv_list_lvsegs(lvobject *self) { struct dm_list *lvsegs; lvseg_list_t *lvsegl; PyObject * pytuple; lvsegobject *lvsegobj; int i = 0; LV_VALID(self); if (!(lvsegs = lvm_lv_list_lvsegs(self->lv))) return Py_BuildValue("()"); if (!(pytuple = PyTuple_New(dm_list_size(lvsegs)))) return NULL; dm_list_iterate_items(lvsegl, lvsegs) { /* Create and initialize the object */ if (!(lvsegobj = PyObject_New(lvsegobject, &_LibLVMlvsegType))) { Py_DECREF(pytuple); return NULL; } lvsegobj->parent_lvobj = self; Py_INCREF(lvsegobj->parent_lvobj); lvsegobj->lv_seg = lvsegl->lvseg; PyTuple_SET_ITEM(pytuple, i, (PyObject *) lvsegobj); i++; } return pytuple; } static PyObject *_liblvm_lvm_lv_snapshot(lvobject *self, PyObject *args) { const char *snap_name; unsigned long long size = 0; lv_t lv; lv_create_params_t lvp = NULL; LV_VALID(self); if (!PyArg_ParseTuple(args, "s|K", &snap_name, &size)) return NULL; if (!(lvp = lvm_lv_params_create_snapshot(self->lv, snap_name, size))) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } if (!(lv = lvm_lv_create(lvp))) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } return (PyObject *)_create_py_lv(self->parent_vgobj, lv); } /* PV Methods */ #define PV_VALID(pvobject) \ do { \ if (!pvobject || !pvobject->pv) { \ PyErr_SetString(PyExc_UnboundLocalError, "PV object invalid"); \ return NULL; \ } \ if (pvobject->parent_vgobj) { \ VG_VALID(pvobject->parent_vgobj); \ } \ if (pvobject->parent_pvslistobj) { \ PVSLIST_VALID(pvobject->parent_pvslistobj); \ } \ } while (0) static PyObject 
*_liblvm_lvm_pv_get_name(pvobject *self) { PV_VALID(self); return Py_BuildValue("s", lvm_pv_get_name(self->pv)); } static PyObject *_liblvm_lvm_pv_get_uuid(pvobject *self) { PV_VALID(self); return Py_BuildValue("s", lvm_pv_get_uuid(self->pv)); } static PyObject *_liblvm_lvm_pv_get_mda_count(pvobject *self) { PV_VALID(self); return Py_BuildValue("K", (unsigned long long)lvm_pv_get_mda_count(self->pv)); } static PyObject *_liblvm_lvm_pv_get_property(pvobject *self, PyObject *args) { const char *name; struct lvm_property_value prop_value; PV_VALID(self); if (!PyArg_ParseTuple(args, "s", &name)) return NULL; prop_value = lvm_pv_get_property(self->pv, name); return get_property(&prop_value); } static PyObject *_liblvm_lvm_pv_get_dev_size(pvobject *self) { PV_VALID(self); return Py_BuildValue("K", (unsigned long long)lvm_pv_get_dev_size(self->pv)); } static PyObject *_liblvm_lvm_pv_get_size(pvobject *self) { PV_VALID(self); return Py_BuildValue("K", (unsigned long long)lvm_pv_get_size(self->pv)); } static PyObject *_liblvm_lvm_pv_get_free(pvobject *self) { PV_VALID(self); return Py_BuildValue("K", (unsigned long long)lvm_pv_get_free(self->pv)); } static PyObject *_liblvm_lvm_pv_resize(pvobject *self, PyObject *args) { unsigned long long new_size; PV_VALID(self); if (!PyArg_ParseTuple(args, "K", &new_size)) return NULL; if (lvm_pv_resize(self->pv, new_size) == -1) { PyErr_SetObject(_LibLVMError, _liblvm_get_last_error()); return NULL; } Py_INCREF(Py_None); return Py_None; } static PyObject *_liblvm_lvm_pv_list_pvsegs(pvobject *self) { struct dm_list *pvsegs; pvseg_list_t *pvsegl; PyObject *pytuple; pvsegobject *pvsegobj; int i = 0; PV_VALID(self); if (!(pvsegs = lvm_pv_list_pvsegs(self->pv))) return Py_BuildValue("()"); if (!(pytuple = PyTuple_New(dm_list_size(pvsegs)))) return NULL; dm_list_iterate_items(pvsegl, pvsegs) { /* Create and initialize the object */ if (!(pvsegobj = PyObject_New(pvsegobject, &_LibLVMpvsegType))) { Py_DECREF(pytuple); return NULL; } pvsegobj->parent_pvobj = self; Py_INCREF(pvsegobj->parent_pvobj); pvsegobj->pv_seg = pvsegl->pvseg; PyTuple_SET_ITEM(pytuple, i, (PyObject *) pvsegobj); i++; } return pytuple; } /* LV seg methods */ /* * No way to close/destroy an lvseg, just need to make sure parents are * still good */ #define LVSEG_VALID(lvsegobject) \ do { \ if ( !lvsegobject || !lvsegobject->parent_lvobj ) { \ PyErr_SetString(PyExc_UnboundLocalError, "LV segment object invalid"); \ return NULL; \ } \ LV_VALID(lvsegobject->parent_lvobj); \ } while(0) static void _liblvm_lvseg_dealloc(lvsegobject *self) { Py_DECREF(self->parent_lvobj); PyObject_Del(self); } static PyObject *_liblvm_lvm_lvseg_get_property(lvsegobject *self, PyObject *args) { const char *name; struct lvm_property_value prop_value; LVSEG_VALID(self); if (!PyArg_ParseTuple(args, "s", &name)) return NULL; prop_value = lvm_lvseg_get_property(self->lv_seg, name); return get_property(&prop_value); } /* PV seg methods */ /* * No way to close/destroy a pvseg, just need to make sure parents are * still good */ #define PVSEG_VALID(pvsegobject) \ do { \ if (!pvsegobject || !pvsegobject->parent_pvobj) { \ PyErr_SetString(PyExc_UnboundLocalError, "PV segment object invalid"); \ return NULL; \ } \ PV_VALID(pvsegobject->parent_pvobj); \ } while(0) static void _liblvm_pvseg_dealloc(pvsegobject *self) { Py_DECREF(self->parent_pvobj); PyObject_Del(self); } static PyObject *_liblvm_lvm_pvseg_get_property(pvsegobject *self, PyObject *args) { const char *name; struct lvm_property_value prop_value; PVSEG_VALID(self); if 
(!PyArg_ParseTuple(args, "s", &name)) return NULL; prop_value = lvm_pvseg_get_property(self->pv_seg, name); return get_property(&prop_value); } /* ---------------------------------------------------------------------- * Method tables and other bureaucracy */ static PyMethodDef _Liblvm_methods[] = { /* LVM methods */ { "getVersion", (PyCFunction)_liblvm_library_get_version, METH_NOARGS }, { "gc", (PyCFunction)_liblvm_lvm_gc, METH_NOARGS, _gc_doc }, { "vgOpen", (PyCFunction)_liblvm_lvm_vg_open, METH_VARARGS }, { "vgCreate", (PyCFunction)_liblvm_lvm_vg_create, METH_VARARGS }, { "configFindBool", (PyCFunction)_liblvm_lvm_config_find_bool, METH_VARARGS }, { "configReload", (PyCFunction)_liblvm_lvm_config_reload, METH_NOARGS }, { "configOverride", (PyCFunction)_liblvm_lvm_config_override, METH_VARARGS }, { "scan", (PyCFunction)_liblvm_lvm_scan, METH_NOARGS }, { "listVgNames", (PyCFunction)_liblvm_lvm_list_vg_names, METH_NOARGS }, { "listVgUuids", (PyCFunction)_liblvm_lvm_list_vg_uuids, METH_NOARGS }, { "listPvs", (PyCFunction)_liblvm_lvm_list_pvs, METH_NOARGS }, { "pvCreate", (PyCFunction)_liblvm_lvm_pv_create, METH_VARARGS }, { "pvRemove", (PyCFunction)_liblvm_lvm_pv_remove, METH_VARARGS }, { "percentToFloat", (PyCFunction)_liblvm_lvm_percent_to_float, METH_VARARGS }, { "vgNameValidate", (PyCFunction)_liblvm_lvm_vg_name_validate, METH_VARARGS }, { "vgNameFromPvid", (PyCFunction)_liblvm_lvm_vgname_from_pvid, METH_VARARGS }, { "vgNameFromDevice", (PyCFunction)_liblvm_lvm_vgname_from_device, METH_VARARGS }, { NULL, NULL } /* sentinel */ }; static PyMethodDef _liblvm_vg_methods[] = { /* vg methods */ { "getName", (PyCFunction)_liblvm_lvm_vg_get_name, METH_NOARGS }, { "getUuid", (PyCFunction)_liblvm_lvm_vg_get_uuid, METH_NOARGS }, { "close", (PyCFunction)_liblvm_lvm_vg_close, METH_NOARGS }, { "remove", (PyCFunction)_liblvm_lvm_vg_remove, METH_NOARGS }, { "extend", (PyCFunction)_liblvm_lvm_vg_extend, METH_VARARGS }, { "reduce", (PyCFunction)_liblvm_lvm_vg_reduce, METH_VARARGS }, { "addTag", (PyCFunction)_liblvm_lvm_vg_add_tag, METH_VARARGS }, { "removeTag", (PyCFunction)_liblvm_lvm_vg_remove_tag, METH_VARARGS }, { "setExtentSize", (PyCFunction)_liblvm_lvm_vg_set_extent_size, METH_VARARGS }, { "isClustered", (PyCFunction)_liblvm_lvm_vg_is_clustered, METH_NOARGS }, { "isExported", (PyCFunction)_liblvm_lvm_vg_is_exported, METH_NOARGS }, { "isPartial", (PyCFunction)_liblvm_lvm_vg_is_partial, METH_NOARGS }, { "getSeqno", (PyCFunction)_liblvm_lvm_vg_get_seqno, METH_NOARGS }, { "getSize", (PyCFunction)_liblvm_lvm_vg_get_size, METH_NOARGS }, { "getFreeSize", (PyCFunction)_liblvm_lvm_vg_get_free_size, METH_NOARGS }, { "getExtentSize", (PyCFunction)_liblvm_lvm_vg_get_extent_size, METH_NOARGS }, { "getExtentCount", (PyCFunction)_liblvm_lvm_vg_get_extent_count, METH_NOARGS }, { "getFreeExtentCount", (PyCFunction)_liblvm_lvm_vg_get_free_extent_count, METH_NOARGS }, { "getProperty", (PyCFunction)_liblvm_lvm_vg_get_property, METH_VARARGS }, { "setProperty", (PyCFunction)_liblvm_lvm_vg_set_property, METH_VARARGS }, { "getPvCount", (PyCFunction)_liblvm_lvm_vg_get_pv_count, METH_NOARGS }, { "getMaxPv", (PyCFunction)_liblvm_lvm_vg_get_max_pv, METH_NOARGS }, { "getMaxLv", (PyCFunction)_liblvm_lvm_vg_get_max_lv, METH_NOARGS }, { "listLVs", (PyCFunction)_liblvm_lvm_vg_list_lvs, METH_NOARGS }, { "listPVs", (PyCFunction)_liblvm_lvm_vg_list_pvs, METH_NOARGS }, { "lvFromName", (PyCFunction)_liblvm_lvm_lv_from_name, METH_VARARGS }, { "lvFromUuid", (PyCFunction)_liblvm_lvm_lv_from_uuid, METH_VARARGS }, { "lvNameValidate", 
(PyCFunction)_liblvm_lvm_lv_name_validate, METH_VARARGS }, { "pvFromName", (PyCFunction)_liblvm_lvm_pv_from_name, METH_VARARGS }, { "pvFromUuid", (PyCFunction)_liblvm_lvm_pv_from_uuid, METH_VARARGS }, { "getTags", (PyCFunction)_liblvm_lvm_vg_get_tags, METH_NOARGS }, { "createLvLinear", (PyCFunction)_liblvm_lvm_vg_create_lv_linear, METH_VARARGS }, { "createLvThinpool", (PyCFunction)_liblvm_lvm_vg_create_lv_thinpool, METH_VARARGS }, { "createLvThin", (PyCFunction)_liblvm_lvm_vg_create_lv_thin, METH_VARARGS }, { NULL, NULL } /* sentinel */ }; static PyMethodDef _liblvm_lv_methods[] = { /* lv methods */ { "getAttr", (PyCFunction)_liblvm_lvm_lv_get_attr, METH_NOARGS }, { "getName", (PyCFunction)_liblvm_lvm_lv_get_name, METH_NOARGS }, { "getOrigin", (PyCFunction)_liblvm_lvm_lv_get_origin, METH_NOARGS }, { "getUuid", (PyCFunction)_liblvm_lvm_lv_get_uuid, METH_NOARGS }, { "activate", (PyCFunction)_liblvm_lvm_lv_activate, METH_NOARGS }, { "deactivate", (PyCFunction)_liblvm_lvm_lv_deactivate, METH_NOARGS }, { "remove", (PyCFunction)_liblvm_lvm_vg_remove_lv, METH_NOARGS }, { "getProperty", (PyCFunction)_liblvm_lvm_lv_get_property, METH_VARARGS }, { "getSize", (PyCFunction)_liblvm_lvm_lv_get_size, METH_NOARGS }, { "isActive", (PyCFunction)_liblvm_lvm_lv_is_active, METH_NOARGS }, { "isSuspended", (PyCFunction)_liblvm_lvm_lv_is_suspended, METH_NOARGS }, { "addTag", (PyCFunction)_liblvm_lvm_lv_add_tag, METH_VARARGS }, { "removeTag", (PyCFunction)_liblvm_lvm_lv_remove_tag, METH_VARARGS }, { "getTags", (PyCFunction)_liblvm_lvm_lv_get_tags, METH_NOARGS }, { "rename", (PyCFunction)_liblvm_lvm_lv_rename, METH_VARARGS }, { "resize", (PyCFunction)_liblvm_lvm_lv_resize, METH_VARARGS }, { "listLVsegs", (PyCFunction)_liblvm_lvm_lv_list_lvsegs, METH_NOARGS }, { "snapshot", (PyCFunction)_liblvm_lvm_lv_snapshot, METH_VARARGS }, { NULL, NULL } /* sentinel */ }; static PyMethodDef _liblvm_pv_list_methods[] = { /* pv list methods */ { "__enter__", (PyCFunction)_liblvm_lvm_pvlist_get, METH_VARARGS }, { "__exit__", (PyCFunction)_liblvm_lvm_pvlist_put, METH_VARARGS }, { "open", (PyCFunction)_liblvm_lvm_pvlist_get, METH_VARARGS }, { "close", (PyCFunction)_liblvm_lvm_pvlist_put, METH_VARARGS }, { NULL, NULL } }; static PyMethodDef _liblvm_pv_methods[] = { /* pv methods */ { "getName", (PyCFunction)_liblvm_lvm_pv_get_name, METH_NOARGS }, { "getUuid", (PyCFunction)_liblvm_lvm_pv_get_uuid, METH_NOARGS }, { "getMdaCount", (PyCFunction)_liblvm_lvm_pv_get_mda_count, METH_NOARGS }, { "getProperty", (PyCFunction)_liblvm_lvm_pv_get_property, METH_VARARGS }, { "getSize", (PyCFunction)_liblvm_lvm_pv_get_size, METH_NOARGS }, { "getDevSize", (PyCFunction)_liblvm_lvm_pv_get_dev_size, METH_NOARGS }, { "getFree", (PyCFunction)_liblvm_lvm_pv_get_free, METH_NOARGS }, { "resize", (PyCFunction)_liblvm_lvm_pv_resize, METH_VARARGS }, { "listPVsegs", (PyCFunction)_liblvm_lvm_pv_list_pvsegs, METH_NOARGS }, { NULL, NULL } /* sentinel */ }; static PyMethodDef _liblvm_lvseg_methods[] = { { "getProperty", (PyCFunction)_liblvm_lvm_lvseg_get_property, METH_VARARGS }, { NULL, NULL } /* sentinel */ }; static PyMethodDef _liblvm_pvseg_methods[] = { { "getProperty", (PyCFunction)_liblvm_lvm_pvseg_get_property, METH_VARARGS }, { NULL, NULL } /* sentinel */ }; static PyTypeObject _LibLVMvgType = { PyVarObject_HEAD_INIT(&PyType_Type, 0) .tp_name = "lvm.Liblvm_vg", .tp_basicsize = sizeof(vgobject), .tp_new = PyType_GenericNew, .tp_dealloc = (destructor)liblvm_vg_dealloc, .tp_flags = Py_TPFLAGS_DEFAULT, .tp_doc = "LVM Volume Group object", .tp_methods = 
_liblvm_vg_methods, }; static PyTypeObject _LibLVMlvType = { PyVarObject_HEAD_INIT(&PyType_Type, 0) .tp_name = "lvm.Liblvm_lv", .tp_basicsize = sizeof(lvobject), .tp_new = PyType_GenericNew, .tp_dealloc = (destructor)liblvm_lv_dealloc, .tp_flags = Py_TPFLAGS_DEFAULT, .tp_doc = "LVM Logical Volume object", .tp_methods = _liblvm_lv_methods, }; static PyTypeObject _LibLVMpvlistType = { PyVarObject_HEAD_INIT(&PyType_Type, 0) .tp_name = "lvm.Liblvm_pvlist", .tp_basicsize = sizeof(pvslistobject), .tp_new = PyType_GenericNew, .tp_dealloc = (destructor)_liblvm_pvlist_dealloc, .tp_flags = Py_TPFLAGS_DEFAULT, .tp_doc = "LVM Physical Volume list object", .tp_methods = _liblvm_pv_list_methods, }; static PyTypeObject _LibLVMpvType = { PyVarObject_HEAD_INIT(&PyType_Type, 0) .tp_name = "lvm.Liblvm_pv", .tp_basicsize = sizeof(pvobject), .tp_new = PyType_GenericNew, .tp_dealloc = (destructor)_liblvm_pv_dealloc, .tp_flags = Py_TPFLAGS_DEFAULT, .tp_doc = "LVM Physical Volume object", .tp_methods = _liblvm_pv_methods, }; static PyTypeObject _LibLVMlvsegType = { PyVarObject_HEAD_INIT(&PyType_Type, 0) .tp_name = "lvm.Liblvm_lvseg", .tp_basicsize = sizeof(lvsegobject), .tp_new = PyType_GenericNew, .tp_dealloc = (destructor)_liblvm_lvseg_dealloc, .tp_flags = Py_TPFLAGS_DEFAULT, .tp_doc = "LVM Logical Volume Segment object", .tp_methods = _liblvm_lvseg_methods, }; static PyTypeObject _LibLVMpvsegType = { PyVarObject_HEAD_INIT(&PyType_Type, 0) .tp_name = "lvm.Liblvm_pvseg", .tp_basicsize = sizeof(pvsegobject), .tp_new = PyType_GenericNew, .tp_dealloc = (destructor)_liblvm_pvseg_dealloc, .tp_flags = Py_TPFLAGS_DEFAULT, .tp_doc = "LVM Physical Volume Segment object", .tp_methods = _liblvm_pvseg_methods, }; static void _liblvm_cleanup(void) { if (_libh) { lvm_quit(_libh); _libh = NULL; } } #ifdef IS_PY3K static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "lvm", "Liblvm module", -1, _Liblvm_methods, NULL, NULL, NULL, NULL }; #define MODINITERROR return NULL PyObject * PyInit_lvm(void) #else #define MODINITERROR return PyMODINIT_FUNC initlvm(void); PyMODINIT_FUNC initlvm(void) #endif { PyObject *m; if (PyType_Ready(&_LibLVMvgType) < 0) MODINITERROR; if (PyType_Ready(&_LibLVMlvType) < 0) MODINITERROR; if (PyType_Ready(&_LibLVMpvType) < 0) MODINITERROR; if (PyType_Ready(&_LibLVMlvsegType) < 0) MODINITERROR; if (PyType_Ready(&_LibLVMpvsegType) < 0) MODINITERROR; if (PyType_Ready(&_LibLVMpvlistType) < 0) MODINITERROR; #ifdef IS_PY3K m = PyModule_Create(&moduledef); #else m = Py_InitModule3("lvm", _Liblvm_methods, "Liblvm module"); #endif if (m == NULL) MODINITERROR; if (PyModule_AddIntConstant(m, "THIN_DISCARDS_IGNORE", LVM_THIN_DISCARDS_IGNORE) < 0) MODINITERROR; if (PyModule_AddIntConstant(m, "THIN_DISCARDS_NO_PASSDOWN", LVM_THIN_DISCARDS_NO_PASSDOWN) < 0) MODINITERROR; if (PyModule_AddIntConstant(m, "THIN_DISCARDS_PASSDOWN", LVM_THIN_DISCARDS_PASSDOWN) < 0) MODINITERROR; if ((_LibLVMError = PyErr_NewException((char*)"lvm.LibLVMError", NULL, NULL))) { /* Each call to PyModule_AddObject decrefs it; compensate: */ Py_INCREF(_LibLVMError); Py_INCREF(_LibLVMError); PyModule_AddObject(m, "error", _LibLVMError); PyModule_AddObject(m, "LibLVMError", _LibLVMError); } PyErr_Warn(PyExc_DeprecationWarning, "Python API is deprecated, use D-Bus API instead."); Py_AtExit(_liblvm_cleanup); #ifdef IS_PY3K return m; #endif } LVM2.2.02.176/INSTALL0000644000000000000120000000167113176752421012362 0ustar rootwheelInstallation ============ 1) Generate custom makefiles. Run the 'configure' script from the top directory. 
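   For example, a typical invocation might look like this (the options shown
   here are purely illustrative):

   ./configure --prefix=/usr --enable-cmdlib --enable-udev_sync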
If you don't want to include the LVM1 backwards-compatibility code use: ./configure --with-lvm1=none To separate the LVM1 support into a shared library loaded by lvm.conf use: ./configure --with-lvm1=shared Use ./configure --help to see other options. 2) Build and install. Run 'make' from the top directory to build everything you configured. Run 'make install' to build and install everything you configured. If you only want the device-mapper libraries and tools use 'make device-mapper' or 'make install_device-mapper'. 3) If using LVM2, create a configuration file. The tools will work fine without a configuration file being present, but you ought to review the example file in doc/example.conf. Please also refer to the WHATS_NEW file and the manual pages for the individual commands. LVM2.2.02.176/configure.in0000644000000000000120000024111513176752421013641 0ustar rootwheel############################################################################### ## Copyright (C) 2000-2004 Sistina Software, Inc. All rights reserved. ## Copyright (C) 2004-2016 Red Hat, Inc. All rights reserved. ## ## This copyrighted material is made available to anyone wishing to use, ## modify, copy, or redistribute it subject to the terms and conditions ## of the GNU General Public License v.2. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, write to the Free Software Foundation, ## Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ################################################################################ AC_PREREQ(2.69) ################################################################################ dnl -- Process this file with autoconf to produce a configure script. AC_INIT CONFIGURE_LINE="$0 $@" AC_CONFIG_SRCDIR([lib/device/dev-cache.h]) AC_CONFIG_HEADERS([include/configure.h]) ################################################################################ dnl -- Setup the directory where autoconf has auxilary files AC_CONFIG_AUX_DIR(autoconf) ################################################################################ dnl -- Get system type AC_CANONICAL_TARGET([]) AS_IF([test -z "$CFLAGS"], [COPTIMISE_FLAG="-O2"]) case "$host_os" in linux*) CLDFLAGS="$CLDFLAGS -Wl,--version-script,.export.sym" # equivalent to -rdynamic ELDFLAGS="-Wl,--export-dynamic" # FIXME Generate list and use --dynamic-list=.dlopen.sym CLDWHOLEARCHIVE="-Wl,-whole-archive" CLDNOWHOLEARCHIVE="-Wl,-no-whole-archive" LDDEPS="$LDDEPS .export.sym" LIB_SUFFIX=so DEVMAPPER=yes BUILD_LVMETAD=no BUILD_LVMPOLLD=no LOCKDSANLOCK=no LOCKDDLM=no ODIRECT=yes DM_IOCTLS=yes SELINUX=yes CLUSTER=internal FSADM=yes BLKDEACTIVATE=yes ;; darwin*) CFLAGS="$CFLAGS -no-cpp-precomp -fno-common" CLDFLAGS="$CLDFLAGS" ELDFLAGS= CLDWHOLEARCHIVE="-all_load" CLDNOWHOLEARCHIVE= LIB_SUFFIX=dylib DEVMAPPER=yes ODIRECT=no DM_IOCTLS=no SELINUX=no CLUSTER=none FSADM=no BLKDEACTIVATE=no ;; esac ################################################################################ dnl -- Checks for programs. AC_PROG_SED AC_PROG_AWK save_CFLAGS=$CFLAGS save_CXXFLAGS=$CXXFLAGS AC_PROG_CC AC_PROG_CXX CFLAGS=$save_CFLAGS CXXFLAGS=$save_CXXFLAGS dnl probably no longer needed in 2008, but... 
AC_PROG_GCC_TRADITIONAL AC_PROG_INSTALL AC_PROG_LN_S AC_PROG_MAKE_SET AC_PROG_MKDIR_P AC_PROG_RANLIB AC_CHECK_TOOL(AR, ar) AC_PATH_TOOL(CFLOW_CMD, cflow) AC_PATH_TOOL(CSCOPE_CMD, cscope) AC_PATH_TOOL(CHMOD, chmod) AC_PATH_TOOL(WC, wc) AC_PATH_TOOL(SORT, sort) ################################################################################ dnl -- Check for header files. AC_HEADER_DIRENT AC_HEADER_MAJOR AC_HEADER_STDBOOL AC_HEADER_STDC AC_HEADER_SYS_WAIT AC_HEADER_TIME AC_CHECK_HEADERS([assert.h ctype.h dirent.h errno.h fcntl.h float.h \ getopt.h inttypes.h langinfo.h libgen.h limits.h locale.h paths.h \ signal.h stdarg.h stddef.h stdio.h stdlib.h string.h sys/file.h \ sys/ioctl.h syslog.h sys/mman.h sys/param.h sys/resource.h sys/stat.h \ sys/time.h sys/types.h sys/utsname.h sys/wait.h time.h \ unistd.h], , [AC_MSG_ERROR(bailing out)]) AC_CHECK_HEADERS(termios.h sys/statvfs.h sys/timerfd.h sys/vfs.h linux/magic.h linux/fiemap.h) case "$host_os" in linux*) AC_CHECK_HEADERS(asm/byteorder.h linux/fs.h malloc.h,,AC_MSG_ERROR(bailing out)) ;; darwin*) AC_CHECK_HEADERS(machine/endian.h sys/disk.h,,AC_MSG_ERROR(bailing out)) ;; esac ################################################################################ dnl -- Check for typedefs, structures, and compiler characteristics. AC_C_CONST AC_C_INLINE AC_CHECK_MEMBERS([struct stat.st_rdev]) AC_CHECK_TYPES([ptrdiff_t]) AC_STRUCT_ST_BLOCKS AC_STRUCT_TM AC_TYPE_OFF_T AC_TYPE_PID_T AC_TYPE_SIGNAL AC_TYPE_SIZE_T AC_TYPE_MODE_T AC_TYPE_INT8_T AC_TYPE_INT16_T AC_TYPE_INT32_T AC_TYPE_INT64_T AC_TYPE_SSIZE_T AC_TYPE_UID_T AC_TYPE_UINT8_T AC_TYPE_UINT16_T AC_TYPE_UINT32_T AC_TYPE_UINT64_T AX_GCC_BUILTIN([__builtin_clz]) ################################################################################ dnl -- Check for functions AC_CHECK_FUNCS([ftruncate gethostname getpagesize gettimeofday localtime_r \ memchr memset mkdir mkfifo munmap nl_langinfo realpath rmdir setenv \ setlocale strcasecmp strchr strcspn strdup strerror strncasecmp strndup \ strrchr strspn strstr strtol strtoul uname], , [AC_MSG_ERROR(bailing out)]) AC_FUNC_ALLOCA AC_FUNC_CLOSEDIR_VOID AC_FUNC_CHOWN AC_FUNC_FORK AC_FUNC_LSTAT AC_FUNC_MALLOC AC_FUNC_MEMCMP AC_FUNC_MKTIME AC_FUNC_MMAP AC_FUNC_REALLOC AC_FUNC_STAT AC_FUNC_STRTOD AC_FUNC_VPRINTF ################################################################################ dnl -- Disable dependency tracking AC_MSG_CHECKING(whether to enable dependency tracking) AC_ARG_ENABLE(dependency-tracking, AC_HELP_STRING([--disable-dependency-tracking], [speeds up one-time build.]), USE_TRACKING=$enableval, USE_TRACKING=yes) AC_MSG_RESULT($USE_TRACKING) ################################################################################ dnl -- Enables statically-linked tools AC_MSG_CHECKING(whether to use static linking) AC_ARG_ENABLE(static_link, AC_HELP_STRING([--enable-static_link], [use this to link the tools to their libraries statically (default is dynamic linking]), STATIC_LINK=$enableval, STATIC_LINK=no) AC_MSG_RESULT($STATIC_LINK) ################################################################################ dnl -- Check if compiler/linker supports PIE and RELRO AC_TRY_CCFLAG([-pie], [HAVE_PIE], [], []) AC_SUBST(HAVE_PIE) AC_TRY_LDFLAGS([-Wl,-z,relro,-z,now], [HAVE_FULL_RELRO], [], []) AC_SUBST(HAVE_FULL_RELRO) ################################################################################ dnl -- Prefix is /usr by default, the exec_prefix default is setup later AC_PREFIX_DEFAULT(/usr) 
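dnl -- (Editorial aside, not part of the original configure.in:) the
dnl -- AC_TRY_CCFLAG/AC_TRY_LDFLAGS probes above roughly amount to compiling a
dnl -- trivial program with the candidate flag and recording whether that
dnl -- succeeded; a minimal standalone shell sketch of the same idea, assuming
dnl -- only a C compiler in $CC, would be:
dnl --
dnl --     echo 'int main(void) { return 0; }' > conftest.c
dnl --     if ${CC:-cc} -pie -o conftest conftest.c 2>/dev/null; then
dnl --             HAVE_PIE=yes
dnl --     else
dnl --             HAVE_PIE=no
dnl --     fi
dnl --     rm -f conftest conftest.c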
################################################################################ dnl -- Clear default exec_prefix - install into /sbin rather than /usr/sbin test "$exec_prefix" = NONE -a "$prefix" = NONE && exec_prefix="" test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' ################################################################################ dnl -- Setup the ownership of the files AC_MSG_CHECKING(file owner) AC_ARG_WITH(user, AC_HELP_STRING([--with-user=USER], [set the owner of installed files [USER=]]), OWNER=$withval) AC_MSG_RESULT($OWNER) test -n "$OWNER" && INSTALL="$INSTALL -o $OWNER" ################################################################################ dnl -- Setup the group ownership of the files AC_MSG_CHECKING(group owner) AC_ARG_WITH(group, AC_HELP_STRING([--with-group=GROUP], [set the group owner of installed files [GROUP=]]), GROUP=$withval) AC_MSG_RESULT($GROUP) test -n "$GROUP" && INSTALL="$INSTALL -g $GROUP" ################################################################################ dnl -- Setup device node ownership AC_MSG_CHECKING(device node uid) AC_ARG_WITH(device-uid, AC_HELP_STRING([--with-device-uid=UID], [set the owner used for new device nodes [UID=0]]), DM_DEVICE_UID=$withval, DM_DEVICE_UID=0) AC_MSG_RESULT($DM_DEVICE_UID) AC_DEFINE_UNQUOTED([DM_DEVICE_UID], [$DM_DEVICE_UID], [Define default owner for device node]) ################################################################################ dnl -- Setup device group ownership AC_MSG_CHECKING(device node gid) AC_ARG_WITH(device-gid, AC_HELP_STRING([--with-device-gid=GID], [set the group used for new device nodes [GID=0]]), DM_DEVICE_GID=$withval, DM_DEVICE_GID=0) AC_MSG_RESULT($DM_DEVICE_GID) AC_DEFINE_UNQUOTED([DM_DEVICE_GID], [$DM_DEVICE_GID], [Define default group for device node]) ################################################################################ dnl -- Setup device mode AC_MSG_CHECKING(device node mode) AC_ARG_WITH(device-mode, AC_HELP_STRING([--with-device-mode=MODE], [set the mode used for new device nodes [MODE=0600]]), DM_DEVICE_MODE=$withval, DM_DEVICE_MODE=0600) AC_MSG_RESULT($DM_DEVICE_MODE) AC_DEFINE_UNQUOTED([DM_DEVICE_MODE], [$DM_DEVICE_MODE], [Define default mode for device node]) AC_MSG_CHECKING(when to create device nodes) AC_ARG_WITH(device-nodes-on, AC_HELP_STRING([--with-device-nodes-on=ON], [create nodes on resume or create [ON=resume]]), ADD_NODE=$withval, ADD_NODE=resume) case "$ADD_NODE" in resume) add_on=DM_ADD_NODE_ON_RESUME;; create) add_on=DM_ADD_NODE_ON_CREATE;; *) AC_MSG_ERROR([--with-device-nodes-on parameter invalid]);; esac AC_MSG_RESULT(on $ADD_NODE) AC_DEFINE_UNQUOTED([DEFAULT_DM_ADD_NODE], $add_on, [Define default node creation behavior with dmsetup create]) AC_MSG_CHECKING(default name mangling) AC_ARG_WITH(default-name-mangling, AC_HELP_STRING([--with-default-name-mangling=MANGLING], [default name mangling: auto/none/hex [auto]]), MANGLING=$withval, MANGLING=auto) case "$MANGLING" in auto) mangling=DM_STRING_MANGLING_AUTO;; none|disabled) mangling=DM_STRING_MANGLING_NONE;; hex) mangling=DM_STRING_MANGLING_HEX;; *) AC_MSG_ERROR([--with-default-name-mangling parameter invalid]);; esac AC_MSG_RESULT($MANGLING) AC_DEFINE_UNQUOTED([DEFAULT_DM_NAME_MANGLING], $mangling, [Define default name mangling behaviour]) ################################################################################ dnl -- LVM1 tool fallback option AC_MSG_CHECKING(whether to 
enable lvm1 fallback) AC_ARG_ENABLE(lvm1_fallback, AC_HELP_STRING([--enable-lvm1_fallback], [use this to fall back and use LVM1 binaries if device-mapper is missing from the kernel]), LVM1_FALLBACK=$enableval, LVM1_FALLBACK=no) AC_MSG_RESULT($LVM1_FALLBACK) if test "$LVM1_FALLBACK" = yes; then DEFAULT_FALLBACK_TO_LVM1=1 AC_DEFINE([LVM1_FALLBACK], 1, [Define to 1 if 'lvm' should fall back to using LVM1 binaries if device-mapper is missing from the kernel]) else DEFAULT_FALLBACK_TO_LVM1=0 fi AC_DEFINE_UNQUOTED(DEFAULT_FALLBACK_TO_LVM1, [$DEFAULT_FALLBACK_TO_LVM1], [Fall back to LVM1 by default if device-mapper is missing from the kernel.]) ################################################################################ dnl -- format1 inclusion type AC_MSG_CHECKING(whether to include support for lvm1 metadata) AC_ARG_WITH(lvm1, AC_HELP_STRING([--with-lvm1=TYPE], [LVM1 metadata support: internal/shared/none [internal]]), LVM1=$withval, LVM1=internal) AC_MSG_RESULT($LVM1) case "$LVM1" in none|shared) ;; internal) AC_DEFINE([LVM1_INTERNAL], 1, [Define to 1 to include built-in support for LVM1 metadata.]) ;; *) AC_MSG_ERROR([--with-lvm1 parameter invalid]) ;; esac ################################################################################ dnl -- format_pool inclusion type AC_MSG_CHECKING(whether to include support for GFS pool metadata) AC_ARG_WITH(pool, AC_HELP_STRING([--with-pool=TYPE], [GFS pool read-only support: internal/shared/none [internal]]), POOL=$withval, POOL=internal) AC_MSG_RESULT($POOL) case "$POOL" in none|shared) ;; internal) AC_DEFINE([POOL_INTERNAL], 1, [Define to 1 to include built-in support for GFS pool metadata.]) ;; *) AC_MSG_ERROR([--with-pool parameter invalid]) esac ################################################################################ dnl -- cluster_locking inclusion type AC_MSG_CHECKING(whether to include support for cluster locking) AC_ARG_WITH(cluster, AC_HELP_STRING([--with-cluster=TYPE], [cluster LVM locking support: internal/shared/none [internal]]), CLUSTER=$withval) AC_MSG_RESULT($CLUSTER) case "$CLUSTER" in none|shared) ;; internal) AC_DEFINE([CLUSTER_LOCKING_INTERNAL], 1, [Define to 1 to include built-in support for clustered LVM locking.]) ;; *) AC_MSG_ERROR([--with-cluster parameter invalid]) ;; esac ################################################################################ dnl -- snapshots inclusion type AC_MSG_CHECKING(whether to include snapshots) AC_ARG_WITH(snapshots, AC_HELP_STRING([--with-snapshots=TYPE], [snapshot support: internal/shared/none [internal]]), SNAPSHOTS=$withval, SNAPSHOTS=internal) AC_MSG_RESULT($SNAPSHOTS) case "$SNAPSHOTS" in none|shared) ;; internal) AC_DEFINE([SNAPSHOT_INTERNAL], 1, [Define to 1 to include built-in support for snapshots.]) ;; *) AC_MSG_ERROR([--with-snapshots parameter invalid]) ;; esac ################################################################################ dnl -- mirrors inclusion type AC_MSG_CHECKING(whether to include mirrors) AC_ARG_WITH(mirrors, AC_HELP_STRING([--with-mirrors=TYPE], [mirror support: internal/shared/none [internal]]), MIRRORS=$withval, MIRRORS=internal) AC_MSG_RESULT($MIRRORS) case "$MIRRORS" in none|shared) ;; internal) AC_DEFINE([MIRRORED_INTERNAL], 1, [Define to 1 to include built-in support for mirrors.]) ;; *) AC_MSG_ERROR([--with-mirrors parameter invalid]) ;; esac ################################################################################ dnl -- raid inclusion type AC_MSG_CHECKING(whether to include raid) AC_ARG_WITH(raid, 
AC_HELP_STRING([--with-raid=TYPE], [raid support: internal/shared/none [internal]]), RAID=$withval, RAID=internal) AC_MSG_RESULT($RAID) AC_ARG_WITH(default-mirror-segtype, AC_HELP_STRING([--with-default-mirror-segtype=TYPE], [default mirror segtype: raid1/mirror [raid1]]), DEFAULT_MIRROR_SEGTYPE=$withval, DEFAULT_MIRROR_SEGTYPE="raid1") AC_ARG_WITH(default-raid10-segtype, AC_HELP_STRING([--with-default-raid10-segtype=TYPE], [default mirror segtype: raid10/mirror [raid10]]), DEFAULT_RAID10_SEGTYPE=$withval, DEFAULT_RAID10_SEGTYPE="raid10") case "$RAID" in none) test "$DEFAULT_MIRROR_SEGTYPE" = "raid1" && DEFAULT_MIRROR_SEGTYPE="mirror" test "$DEFAULT_RAID10_SEGTYPE" = "raid10" && DEFAULT_RAID10_SEGTYPE="mirror" ;; shared) ;; internal) AC_DEFINE([RAID_INTERNAL], 1, [Define to 1 to include built-in support for raid.]) ;; *) AC_MSG_ERROR([--with-raid parameter invalid]) ;; esac AC_DEFINE_UNQUOTED([DEFAULT_MIRROR_SEGTYPE], ["$DEFAULT_MIRROR_SEGTYPE"], [Default segtype used for mirror volumes.]) AC_DEFINE_UNQUOTED([DEFAULT_RAID10_SEGTYPE], ["$DEFAULT_RAID10_SEGTYPE"], [Default segtype used for raid10 volumes.]) ################################################################################ AC_ARG_WITH(default-sparse-segtype, AC_HELP_STRING([--with-default-sparse-segtype=TYPE], [default sparse segtype: thin/snapshot [thin]]), DEFAULT_SPARSE_SEGTYPE=$withval, DEFAULT_SPARSE_SEGTYPE="thin") ################################################################################ dnl -- thin provisioning AC_MSG_CHECKING(whether to include thin provisioning) AC_ARG_WITH(thin, AC_HELP_STRING([--with-thin=TYPE], [thin provisioning support: internal/shared/none [internal]]), THIN=$withval, THIN=internal) AC_ARG_WITH(thin-check, AC_HELP_STRING([--with-thin-check=PATH], [thin_check tool: [autodetect]]), THIN_CHECK_CMD=$withval, THIN_CHECK_CMD="autodetect") AC_ARG_WITH(thin-dump, AC_HELP_STRING([--with-thin-dump=PATH], [thin_dump tool: [autodetect]]), THIN_DUMP_CMD=$withval, THIN_DUMP_CMD="autodetect") AC_ARG_WITH(thin-repair, AC_HELP_STRING([--with-thin-repair=PATH], [thin_repair tool: [autodetect]]), THIN_REPAIR_CMD=$withval, THIN_REPAIR_CMD="autodetect") AC_ARG_WITH(thin-restore, AC_HELP_STRING([--with-thin-restore=PATH], [thin_restore tool: [autodetect]]), THIN_RESTORE_CMD=$withval, THIN_RESTORE_CMD="autodetect") AC_MSG_RESULT($THIN) case "$THIN" in none) test "$DEFAULT_SPARSE_SEGTYPE" = "thin" && DEFAULT_SPARSE_SEGTYPE="snapshot" ;; shared) ;; internal) AC_DEFINE([THIN_INTERNAL], 1, [Define to 1 to include built-in support for thin provisioning.]) ;; *) AC_MSG_ERROR([--with-thin parameter invalid ($THIN)]) ;; esac AC_DEFINE_UNQUOTED([DEFAULT_SPARSE_SEGTYPE], ["$DEFAULT_SPARSE_SEGTYPE"], [Default segtype used for sparse volumes.]) dnl -- thin_check needs-check flag AC_ARG_ENABLE(thin_check_needs_check, AC_HELP_STRING([--disable-thin_check_needs_check], [required if thin_check version is < 0.3.0]), THIN_CHECK_NEEDS_CHECK=$enableval, THIN_CHECK_NEEDS_CHECK=yes) # Test if necessary thin tools are available # if not - use plain defaults and warn user case "$THIN" in internal|shared) # Empty means a config way to ignore thin checking if test "$THIN_CHECK_CMD" = "autodetect"; then AC_PATH_TOOL(THIN_CHECK_CMD, thin_check) if test -z "$THIN_CHECK_CMD"; then AC_MSG_WARN([thin_check not found in path $PATH]) THIN_CHECK_CMD=/usr/sbin/thin_check THIN_CONFIGURE_WARN=y fi fi if test "$THIN_CHECK_NEEDS_CHECK" = yes; then THIN_CHECK_VSN=`"$THIN_CHECK_CMD" -V 2>/dev/null` THIN_CHECK_VSN_MAJOR=`echo "$THIN_CHECK_VSN" | $AWK -F 
'.' '{print $1}'` THIN_CHECK_VSN_MINOR=`echo "$THIN_CHECK_VSN" | $AWK -F '.' '{print $2}'` if test -z "$THIN_CHECK_VSN_MAJOR" -o -z "$THIN_CHECK_VSN_MINOR"; then AC_MSG_WARN([$THIN_CHECK_CMD: Bad version "$THIN_CHECK_VSN" found]) THIN_CHECK_VERSION_WARN=y THIN_CHECK_NEEDS_CHECK=no elif test "$THIN_CHECK_VSN_MAJOR" -eq 0 -a "$THIN_CHECK_VSN_MINOR" -lt 3; then AC_MSG_WARN([$THIN_CHECK_CMD: Old version "$THIN_CHECK_VSN" found]) THIN_CHECK_VERSION_WARN=y THIN_CHECK_NEEDS_CHECK=no fi fi # Empty means a config way to ignore thin dumping if test "$THIN_DUMP_CMD" = "autodetect"; then AC_PATH_TOOL(THIN_DUMP_CMD, thin_dump) test -z "$THIN_DUMP_CMD" && { AC_MSG_WARN(thin_dump not found in path $PATH) THIN_DUMP_CMD=/usr/sbin/thin_dump THIN_CONFIGURE_WARN=y } fi # Empty means a config way to ignore thin repairing if test "$THIN_REPAIR_CMD" = "autodetect"; then AC_PATH_TOOL(THIN_REPAIR_CMD, thin_repair) test -z "$THIN_REPAIR_CMD" && { AC_MSG_WARN(thin_repair not found in path $PATH) THIN_REPAIR_CMD=/usr/sbin/thin_repair THIN_CONFIGURE_WARN=y } fi # Empty means a config way to ignore thin restoring if test "$THIN_RESTORE_CMD" = "autodetect"; then AC_PATH_TOOL(THIN_RESTORE_CMD, thin_restore) test -z "$THIN_RESTORE_CMD" && { AC_MSG_WARN(thin_restore not found in path $PATH) THIN_RESTORE_CMD=/usr/sbin/thin_restore THIN_CONFIGURE_WARN=y } fi AC_MSG_CHECKING([whether thin_check supports the needs-check flag]) AC_MSG_RESULT([$THIN_CHECK_NEEDS_CHECK]) if test "$THIN_CHECK_NEEDS_CHECK" = yes; then AC_DEFINE([THIN_CHECK_NEEDS_CHECK], 1, [Define to 1 if the external 'thin_check' tool requires the --clear-needs-check-flag option]) fi ;; esac AC_DEFINE_UNQUOTED([THIN_CHECK_CMD], ["$THIN_CHECK_CMD"], [The path to 'thin_check', if available.]) AC_DEFINE_UNQUOTED([THIN_DUMP_CMD], ["$THIN_DUMP_CMD"], [The path to 'thin_dump', if available.]) AC_DEFINE_UNQUOTED([THIN_REPAIR_CMD], ["$THIN_REPAIR_CMD"], [The path to 'thin_repair', if available.]) AC_DEFINE_UNQUOTED([THIN_RESTORE_CMD], ["$THIN_RESTORE_CMD"], [The path to 'thin_restore', if available.]) ################################################################################ dnl -- cache inclusion type AC_MSG_CHECKING(whether to include cache) AC_ARG_WITH(cache, AC_HELP_STRING([--with-cache=TYPE], [cache support: internal/shared/none [internal]]), CACHE=$withval, CACHE="internal") AC_ARG_WITH(cache-check, AC_HELP_STRING([--with-cache-check=PATH], [cache_check tool: [autodetect]]), CACHE_CHECK_CMD=$withval, CACHE_CHECK_CMD="autodetect") AC_ARG_WITH(cache-dump, AC_HELP_STRING([--with-cache-dump=PATH], [cache_dump tool: [autodetect]]), CACHE_DUMP_CMD=$withval, CACHE_DUMP_CMD="autodetect") AC_ARG_WITH(cache-repair, AC_HELP_STRING([--with-cache-repair=PATH], [cache_repair tool: [autodetect]]), CACHE_REPAIR_CMD=$withval, CACHE_REPAIR_CMD="autodetect") AC_ARG_WITH(cache-restore, AC_HELP_STRING([--with-cache-restore=PATH], [cache_restore tool: [autodetect]]), CACHE_RESTORE_CMD=$withval, CACHE_RESTORE_CMD="autodetect") AC_MSG_RESULT($CACHE) case "$CACHE" in none|shared) ;; internal) AC_DEFINE([CACHE_INTERNAL], 1, [Define to 1 to include built-in support for cache.]) ;; *) AC_MSG_ERROR([--with-cache parameter invalid]) ;; esac dnl -- cache_check needs-check flag AC_ARG_ENABLE(cache_check_needs_check, AC_HELP_STRING([--disable-cache_check_needs_check], [required if cache_check version is < 0.5]), CACHE_CHECK_NEEDS_CHECK=$enableval, CACHE_CHECK_NEEDS_CHECK=yes) # Test if necessary cache tools are available # if not - use plain defaults and warn user case "$CACHE" in 
internal|shared) # Empty means a config way to ignore cache checking if test "$CACHE_CHECK_CMD" = "autodetect"; then AC_PATH_TOOL(CACHE_CHECK_CMD, cache_check) if test -z "$CACHE_CHECK_CMD"; then AC_MSG_WARN([cache_check not found in path $PATH]) CACHE_CHECK_CMD=/usr/sbin/cache_check CACHE_CONFIGURE_WARN=y fi fi if test "$CACHE_CHECK_NEEDS_CHECK" = yes; then $CACHE_CHECK_CMD -V 2>/dev/null >conftest.tmp read -r CACHE_CHECK_VSN < conftest.tmp IFS=.- read -r CACHE_CHECK_VSN_MAJOR CACHE_CHECK_VSN_MINOR CACHE_CHECK_VSN_PATCH LEFTOVER < conftest.tmp rm -f conftest.tmp # Require version >= 0.5.4 for --clear-needs-check-flag if test -z "$CACHE_CHECK_VSN_MAJOR" \ || test -z "$CACHE_CHECK_VSN_MINOR" \ || test -z "$CACHE_CHECK_VSN_PATCH"; then AC_MSG_WARN([$CACHE_CHECK_CMD: Bad version "$CACHE_CHECK_VSN" found]) CACHE_CHECK_VERSION_WARN=y CACHE_CHECK_NEEDS_CHECK=no elif test "$CACHE_CHECK_VSN_MAJOR" -eq 0 ; then if test "$CACHE_CHECK_VSN_MINOR" -lt 5 \ || test "$CACHE_CHECK_VSN_MINOR" -eq 5 -a "$CACHE_CHECK_VSN_PATCH" -lt 4; then AC_MSG_WARN([$CACHE_CHECK_CMD: Old version "$CACHE_CHECK_VSN" found]) CACHE_CHECK_VERSION_WARN=y CACHE_CHECK_NEEDS_CHECK=no fi if test "$CACHE_CHECK_VSN_MINOR" -lt 7 ; then AC_MSG_WARN([$CACHE_CHECK_CMD: Old version "$CACHE_CHECK_VSN" does not support new cache format V2]) CACHE_CHECK_VERSION_WARN=y fi fi fi # Empty means a config way to ignore cache dumping if test "$CACHE_DUMP_CMD" = "autodetect"; then AC_PATH_TOOL(CACHE_DUMP_CMD, cache_dump) test -z "$CACHE_DUMP_CMD" && { AC_MSG_WARN(cache_dump not found in path $PATH) CACHE_DUMP_CMD=/usr/sbin/cache_dump CACHE_CONFIGURE_WARN=y } fi # Empty means a config way to ignore cache repairing if test "$CACHE_REPAIR_CMD" = "autodetect"; then AC_PATH_TOOL(CACHE_REPAIR_CMD, cache_repair) test -z "$CACHE_REPAIR_CMD" && { AC_MSG_WARN(cache_repair not found in path $PATH) CACHE_REPAIR_CMD=/usr/sbin/cache_repair CACHE_CONFIGURE_WARN=y } fi # Empty means a config way to ignore cache restoring if test "$CACHE_RESTORE_CMD" = "autodetect"; then AC_PATH_TOOL(CACHE_RESTORE_CMD, cache_restore) test -z "$CACHE_RESTORE_CMD" && { AC_MSG_WARN(cache_restore not found in path $PATH) CACHE_RESTORE_CMD=/usr/sbin/cache_restore CACHE_CONFIGURE_WARN=y } fi AC_MSG_CHECKING([whether cache_check supports the needs-check flag]) AC_MSG_RESULT([$CACHE_CHECK_NEEDS_CHECK]) if test "$CACHE_CHECK_NEEDS_CHECK" = yes; then AC_DEFINE([CACHE_CHECK_NEEDS_CHECK], 1, [Define to 1 if the external 'cache_check' tool requires the --clear-needs-check-flag option]) fi ;; esac AC_DEFINE_UNQUOTED([CACHE_CHECK_CMD], ["$CACHE_CHECK_CMD"], [The path to 'cache_check', if available.]) AC_DEFINE_UNQUOTED([CACHE_DUMP_CMD], ["$CACHE_DUMP_CMD"], [The path to 'cache_dump', if available.]) AC_DEFINE_UNQUOTED([CACHE_REPAIR_CMD], ["$CACHE_REPAIR_CMD"], [The path to 'cache_repair', if available.]) AC_DEFINE_UNQUOTED([CACHE_RESTORE_CMD], ["$CACHE_RESTORE_CMD"], [The path to 'cache_restore', if available.]) ################################################################################ dnl -- Disable readline AC_ARG_ENABLE([readline], AC_HELP_STRING([--disable-readline], [disable readline support]), READLINE=$enableval, READLINE=maybe) ################################################################################ dnl -- Disable realtime clock support AC_MSG_CHECKING(whether to enable realtime support) AC_ARG_ENABLE(realtime, AC_HELP_STRING([--disable-realtime], [disable realtime clock support]), REALTIME=$enableval, REALTIME=yes) AC_MSG_RESULT($REALTIME) 
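dnl -- (Editorial aside, not part of the original configure.in:) the thin_check
dnl -- and cache_check tests above parse the "-V" output of the tool and compare
dnl -- the version fields numerically before relying on --clear-needs-check-flag;
dnl -- a standalone sketch of the cache_check >= 0.5.4 requirement, assuming the
dnl -- tool is in PATH, would be:
dnl --
dnl --     cache_check -V 2>/dev/null > conftest.tmp    # e.g. "0.7.3"
dnl --     IFS=.- read -r major minor patch rest < conftest.tmp
dnl --     rm -f conftest.tmp
dnl --     if test -z "$patch"; then
dnl --             echo "cannot parse cache_check version" >&2
dnl --     elif test "$major" -eq 0 && \
dnl --          { test "$minor" -lt 5 || { test "$minor" -eq 5 && test "$patch" -lt 4; }; }; then
dnl --             echo "cache_check too old for --clear-needs-check-flag" >&2
dnl --     fi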
################################################################################ dnl -- disable OCF resource agents AC_MSG_CHECKING(whether to enable OCF resource agents) AC_ARG_ENABLE(ocf, AC_HELP_STRING([--enable-ocf], [enable Open Cluster Framework (OCF) compliant resource agents]), OCF=$enableval, OCF=no) AC_MSG_RESULT($OCF) AC_ARG_WITH(ocfdir, AC_HELP_STRING([--with-ocfdir=DIR], [install OCF files in [PREFIX/lib/ocf/resource.d/lvm2]]), OCFDIR=$withval, OCFDIR='${prefix}/lib/ocf/resource.d/lvm2') ################################################################################ dnl -- Init pkg-config with dummy invocation: dnl -- this is required because PKG_CHECK_MODULES macro is expanded dnl -- to initialize the pkg-config environment only at the first invocation, dnl -- which would otherwise be conditional in this configure.in. pkg_config_init() { if test "$PKGCONFIG_INIT" != 1; then PKG_CHECK_MODULES(PKGCONFIGINIT, pkgconfiginit, [], [AC_MSG_RESULT([pkg-config initialized])]) PKGCONFIG_INIT=1 fi } ################################################################################ AC_MSG_CHECKING(for default run directory) RUN_DIR="/run" test -d "/run" || RUN_DIR="/var/run" AC_MSG_RESULT($RUN_DIR) dnl -- Set up pidfile and run directory AH_TEMPLATE(DEFAULT_PID_DIR) AC_ARG_WITH(default-pid-dir, AC_HELP_STRING([--with-default-pid-dir=PID_DIR], [Default directory to keep PID files in. [autodetect]]), DEFAULT_PID_DIR="$withval", DEFAULT_PID_DIR=$RUN_DIR) AC_DEFINE_UNQUOTED(DEFAULT_PID_DIR, ["$DEFAULT_PID_DIR"], [Default directory to keep PID files in.]) AH_TEMPLATE(DEFAULT_DM_RUN_DIR, [Name of default DM run directory.]) AC_ARG_WITH(default-dm-run-dir, AC_HELP_STRING([--with-default-dm-run-dir=DM_RUN_DIR], [Default DM run directory. [autodetect]]), DEFAULT_DM_RUN_DIR="$withval", DEFAULT_DM_RUN_DIR=$RUN_DIR) AC_DEFINE_UNQUOTED(DEFAULT_DM_RUN_DIR, ["$DEFAULT_DM_RUN_DIR"], [Default DM run directory.]) AH_TEMPLATE(DEFAULT_RUN_DIR, [Name of default LVM run directory.]) AC_ARG_WITH(default-run-dir, AC_HELP_STRING([--with-default-run-dir=RUN_DIR], [Default LVM run directory. [autodetect_run_dir/lvm]]), DEFAULT_RUN_DIR="$withval", DEFAULT_RUN_DIR="$RUN_DIR/lvm") AC_DEFINE_UNQUOTED(DEFAULT_RUN_DIR, ["$DEFAULT_RUN_DIR"], [Default LVM run directory.]) ################################################################################ dnl -- Build cluster LVM daemon AC_MSG_CHECKING(whether to build cluster LVM daemon) AC_ARG_WITH(clvmd, [ --with-clvmd=TYPE build cluster LVM Daemon The following cluster manager combinations are valid: * cman (RHEL5 or equivalent) * cman,corosync,openais (or selection of them) * singlenode (localhost only) * all (autodetect) * none (disable build) [[none]]], CLVMD=$withval, CLVMD=none) test "$CLVMD" = yes && CLVMD=all AC_MSG_RESULT($CLVMD) dnl -- If clvmd enabled without cluster locking, automagically include it test "$CLVMD" != none -a "$CLUSTER" = none && CLUSTER=internal dnl -- init pkgconfig if required test "$CLVMD" != none && pkg_config_init dnl -- Express clvmd init script Required-Start / Required-Stop CLVMD_CMANAGERS="" dnl -- On RHEL4/RHEL5, qdiskd is started from a separate init script. dnl -- Enable it when building for cman.
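dnl -- (Editorial aside, not part of the original configure.in:) a hypothetical
dnl -- configure invocation selecting a specific cluster stack, using only
dnl -- options defined in this file, might look like:
dnl --
dnl --     ./configure --with-clvmd=corosync --with-cluster=internal --enable-cmirrord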
CLVMD_NEEDS_QDISKD=no dnl -- define build types if [[ `expr x"$CLVMD" : '.*gulm.*'` != 0 ]]; then AC_MSG_ERROR([Since version 2.02.87 GULM locking is no longer supported.]); fi if [[ `expr x"$CLVMD" : '.*cman.*'` != 0 ]]; then BUILDCMAN=yes CLVMD_CMANAGERS="$CLVMD_CMANAGERS cman" CLVMD_NEEDS_QDISKD=yes fi if [[ `expr x"$CLVMD" : '.*corosync.*'` != 0 ]]; then BUILDCOROSYNC=yes CLVMD_CMANAGERS="$CLVMD_CMANAGERS corosync" fi if [[ `expr x"$CLVMD" : '.*openais.*'` != 0 ]]; then BUILDOPENAIS=yes CLVMD_CMANAGERS="$CLVMD_CMANAGERS openais" fi test "$CLVMD_NEEDS_QDISKD" != no && CLVMD_CMANAGERS="$CLVMD_CMANAGERS qdiskd" dnl -- define a soft bailout if we are autodetecting soft_bailout() { NOTFOUND=1 } hard_bailout() { AC_MSG_ERROR([bailing out]) } dnl -- if clvmd=all then set soft_bailout (we do not want to error) dnl -- and set all builds to yes. We need to do this here dnl -- to skip the openais|corosync sanity check above. if test "$CLVMD" = all; then bailout=soft_bailout BUILDCMAN=yes BUILDCOROSYNC=yes BUILDOPENAIS=yes else bailout=hard_bailout fi dnl -- helper macro to check libs without adding them to LIBS check_lib_no_libs() { lib_no_libs_arg1=$1 shift lib_no_libs_arg2=$1 shift lib_no_libs_args=$@ AC_CHECK_LIB([$lib_no_libs_arg1], [$lib_no_libs_arg2],, [$bailout], [$lib_no_libs_args]) LIBS=$ac_check_lib_save_LIBS } dnl -- Look for cman libraries if required. if test "$BUILDCMAN" = yes; then PKG_CHECK_MODULES(CMAN, libcman, [HAVE_CMAN=yes], [NOTFOUND=0 AC_CHECK_HEADERS(libcman.h,,$bailout) check_lib_no_libs cman cman_init if test $NOTFOUND = 0; then AC_MSG_RESULT([no pkg for libcman, using -lcman]) CMAN_LIBS="-lcman" HAVE_CMAN=yes fi]) CHECKCONFDB=yes CHECKDLM=yes fi dnl -- Look for corosync that is required also for openais build dnl -- only enough recent version of corosync ship pkg-config files. dnl -- We can safely rely on that to detect the correct bits. if test "$BUILDCOROSYNC" = yes -o "$BUILDOPENAIS" = yes; then PKG_CHECK_MODULES(COROSYNC, corosync, [HAVE_COROSYNC=yes], $bailout) CHECKCONFDB=yes CHECKCMAP=yes fi dnl -- Look for corosync libraries if required. if test "$BUILDCOROSYNC" = yes; then PKG_CHECK_MODULES(QUORUM, libquorum, [HAVE_QUORUM=yes], $bailout) CHECKCPG=yes CHECKDLM=yes fi dnl -- Look for openais libraries if required. if test "$BUILDOPENAIS" = yes; then PKG_CHECK_MODULES(SALCK, libSaLck, [HAVE_SALCK=yes], $bailout) CHECKCPG=yes fi dnl -- Below are checks for libraries common to more than one build. dnl -- Check confdb library. dnl -- mandatory for corosync < 2.0 build. dnl -- optional for openais/cman build. if test "$CHECKCONFDB" = yes; then PKG_CHECK_MODULES(CONFDB, libconfdb, [HAVE_CONFDB=yes], [HAVE_CONFDB=no]) AC_CHECK_HEADERS([corosync/confdb.h], [HAVE_CONFDB_H=yes], [HAVE_CONFDB_H=no]) if test "$HAVE_CONFDB" != yes -a "$HAVE_CONFDB_H" = yes; then check_lib_no_libs confdb confdb_initialize AC_MSG_RESULT([no pkg for confdb, using -lconfdb]) CONFDB_LIBS="-lconfdb" HAVE_CONFDB=yes fi fi dnl -- Check cmap library dnl -- mandatory for corosync >= 2.0 build. if test "$CHECKCMAP" = yes; then PKG_CHECK_MODULES(CMAP, libcmap, [HAVE_CMAP=yes], [HAVE_CMAP=no]) AC_CHECK_HEADERS([corosync/cmap.h], [HAVE_CMAP_H=yes], [HAVE_CMAP_H=no]) if test "$HAVE_CMAP" != yes -a "$HAVE_CMAP_H" = yes; then check_lib_no_libs cmap cmap_initialize AC_MSG_RESULT([no pkg for cmap, using -lcmap]) CMAP_LIBS="-lcmap" HAVE_CMAP=yes fi fi if test "$BUILDCOROSYNC" = yes -a \ "$HAVE_CMAP" != yes -a "$HAVE_CONFDB" != yes -a "$CLVMD" != all; then AC_MSG_ERROR([bailing out... 
cmap (corosync >= 2.0) or confdb (corosync < 2.0) library is required]) fi dnl -- Check cpg library. if test "$CHECKCPG" = yes; then PKG_CHECK_MODULES(CPG, libcpg, [HAVE_CPG=yes], [$bailout]) fi dnl -- Check dlm library. if test "$CHECKDLM" = yes; then PKG_CHECK_MODULES(DLM, libdlm, [HAVE_DLM=yes], [NOTFOUND=0 AC_CHECK_HEADERS(libdlm.h,,[$bailout]) check_lib_no_libs dlm dlm_lock -lpthread if test $NOTFOUND = 0; then AC_MSG_RESULT([no pkg for libdlm, using -ldlm]) DLM_LIBS="-ldlm -lpthread" HAVE_DLM=yes fi]) fi dnl -- If we are autodetecting, we need to re-create dnl -- the depedencies checks and set a proper CLVMD, dnl -- together with init script Required-Start/Stop entries. if test "$CLVMD" = all; then CLVMD=none CLVMD_CMANAGERS="" CLVMD_NEEDS_QDISKD=no if test "$HAVE_CMAN" = yes -a \ "$HAVE_DLM" = yes; then AC_MSG_RESULT([Enabling clvmd cman cluster manager]) CLVMD="$CLVMD,cman" CLVMD_CMANAGERS="$CLVMD_CMANAGERS cman" CLVMD_NEEDS_QDISKD=yes fi if test "$HAVE_COROSYNC" = yes -a \ "$HAVE_QUORUM" = yes -a \ "$HAVE_CPG" = yes -a \ "$HAVE_DLM" = yes; then if test "$HAVE_CONFDB" = yes -o "$HAVE_CMAP" = yes; then AC_MSG_RESULT([Enabling clvmd corosync cluster manager]) CLVMD="$CLVMD,corosync" CLVMD_CMANAGERS="$CLVMD_CMANAGERS corosync" fi fi if test "$HAVE_COROSYNC" = yes -a \ "$HAVE_CPG" = yes -a \ "$HAVE_SALCK" = yes; then AC_MSG_RESULT([Enabling clvmd openais cluster manager]) CLVMD="$CLVMD,openais" CLVMD_CMANAGERS="$CLVMD_CMANAGERS openais" fi test "$CLVMD_NEEDS_QDISKD" != no && CLVMD_CMANAGERS="$CLVMD_CMANAGERS qdiskd" test "$CLVMD" = none && AC_MSG_RESULT([Disabling clvmd build. No cluster manager detected.]) fi dnl -- Fixup CLVMD_CMANAGERS with new corosync dnl -- clvmd built with corosync >= 2.0 needs dlm (either init or systemd service) dnl -- to be started. if [[ `expr x"$CLVMD" : '.*corosync.*'` != 0 ]]; then test "$HAVE_CMAP" = yes && CLVMD_CMANAGERS="$CLVMD_CMANAGERS dlm" fi ################################################################################ dnl -- clvmd pidfile if test "$CLVMD" != none; then AC_ARG_WITH(clvmd-pidfile, AC_HELP_STRING([--with-clvmd-pidfile=PATH], [clvmd pidfile [PID_DIR/clvmd.pid]]), CLVMD_PIDFILE=$withval, CLVMD_PIDFILE="$DEFAULT_PID_DIR/clvmd.pid") AC_DEFINE_UNQUOTED(CLVMD_PIDFILE, ["$CLVMD_PIDFILE"], [Path to clvmd pidfile.]) fi ################################################################################ dnl -- Build cluster mirror log daemon AC_MSG_CHECKING(whether to build cluster mirror log daemon) AC_ARG_ENABLE(cmirrord, AC_HELP_STRING([--enable-cmirrord], [enable the cluster mirror log daemon]), CMIRRORD=$enableval, CMIRRORD=no) AC_MSG_RESULT($CMIRRORD) BUILD_CMIRRORD=$CMIRRORD ################################################################################ dnl -- cmirrord pidfile if test "$BUILD_CMIRRORD" = yes; then AC_ARG_WITH(cmirrord-pidfile, AC_HELP_STRING([--with-cmirrord-pidfile=PATH], [cmirrord pidfile [PID_DIR/cmirrord.pid]]), CMIRRORD_PIDFILE=$withval, CMIRRORD_PIDFILE="$DEFAULT_PID_DIR/cmirrord.pid") AC_DEFINE_UNQUOTED(CMIRRORD_PIDFILE, ["$CMIRRORD_PIDFILE"], [Path to cmirrord pidfile.]) fi ################################################################################ dnl -- Look for corosync libraries if required. 
if [[ "$BUILD_CMIRRORD" = yes ]]; then pkg_config_init AC_DEFINE([CMIRROR_HAS_CHECKPOINT], 1, [Define to 1 to include libSaCkpt.]) PKG_CHECK_MODULES(SACKPT, libSaCkpt, [HAVE_SACKPT=yes], [AC_MSG_RESULT([no libSaCkpt, compiling without it]) AC_DEFINE([CMIRROR_HAS_CHECKPOINT], 0, [Define to 0 to exclude libSaCkpt.])]) if test "$HAVE_CPG" != yes; then PKG_CHECK_MODULES(CPG, libcpg) fi fi ################################################################################ dnl -- Enable debugging AC_MSG_CHECKING(whether to enable debugging) AC_ARG_ENABLE(debug, AC_HELP_STRING([--enable-debug], [enable debugging]), DEBUG=$enableval, DEBUG=no) AC_MSG_RESULT($DEBUG) dnl -- Normally turn off optimisation for debug builds if test "$DEBUG" = yes; then COPTIMISE_FLAG= else CSCOPE_CMD= fi dnl -- Check if compiler supports -Wjump-misses-init AC_TRY_CCFLAG([-Wjump-misses-init], [HAVE_WJUMP], [], []) AC_SUBST(HAVE_WJUMP) AC_TRY_CCFLAG([-Wclobbered], [HAVE_WCLOBBERED], [], []) AC_SUBST(HAVE_WCLOBBERED) AC_TRY_CCFLAG([-Wsync-nand], [HAVE_WSYNCNAND], [], []) AC_SUBST(HAVE_WSYNCNAND) ################################################################################ dnl -- Override optimisation AC_MSG_CHECKING(for C optimisation flag) AC_ARG_WITH(optimisation, AC_HELP_STRING([--with-optimisation=OPT], [C optimisation flag [OPT=-O2]]), COPTIMISE_FLAG=$withval) AC_MSG_RESULT($COPTIMISE_FLAG) ################################################################################ dnl -- Enable profiling AC_MSG_CHECKING(whether to gather gcov profiling data) AC_ARG_ENABLE(profiling, AC_HELP_STRING([--enable-profiling], [gather gcov profiling data]), PROFILING=$enableval, PROFILING=no) AC_MSG_RESULT($PROFILING) if test "$PROFILING" = yes; then COPTIMISE_FLAG="$COPTIMISE_FLAG -fprofile-arcs -ftest-coverage" AC_PATH_TOOL(LCOV, lcov) AC_PATH_TOOL(GENHTML, genhtml) test -z "$LCOV" -o -z "$GENHTML" && AC_MSG_ERROR([lcov and genhtml are required for profiling]) AC_PATH_TOOL(GENPNG, genpng) if test -n "$GENPNG"; then AC_MSG_CHECKING([whether $GENPNG has all required modules]) if "$GENPNG" --help > /dev/null 2>&1 ; then AC_MSG_RESULT(ok) GENHTML="$GENHTML --frames" else AC_MSG_RESULT([not supported]) AC_MSG_WARN([GD.pm perl module is not installed]) GENPNG= fi fi fi ################################################################################ dnl -- Enable testing AC_MSG_CHECKING(whether to enable unit testing) AC_ARG_ENABLE(testing, AC_HELP_STRING([--enable-testing], [enable testing targets in the makefile]), TESTING=$enableval, TESTING=no) AC_MSG_RESULT($TESTING) if test "$TESTING" = yes; then pkg_config_init PKG_CHECK_MODULES(CUNIT, cunit >= 2.0) fi ################################################################################ dnl -- Set LVM2 testsuite data TESTSUITE_DATA='${datarootdir}/lvm2-testsuite' # double eval needed ${datarootdir} -> ${prefix}/share -> real path AC_DEFINE_UNQUOTED(TESTSUITE_DATA, ["$(eval echo $(eval echo $TESTSUITE_DATA))"], [Path to testsuite data]) ################################################################################ dnl -- Enable valgrind awareness of memory pools AC_MSG_CHECKING(whether to enable valgrind awareness of pools) AC_ARG_ENABLE(valgrind_pool, AC_HELP_STRING([--enable-valgrind-pool], [enable valgrind awareness of pools]), VALGRIND_POOL=$enableval, VALGRIND_POOL=no) AC_MSG_RESULT($VALGRIND_POOL) pkg_config_init PKG_CHECK_MODULES(VALGRIND, valgrind, [HAVE_VALGRIND=yes], [if test x$VALGRIND_POOL = xyes; then AC_MSG_ERROR(bailing out); fi]) AC_SUBST(VALGRIND_CFLAGS) if test 
x$HAVE_VALGRIND = xyes; then AC_DEFINE([HAVE_VALGRIND], 1, [valgrind.h found]) fi if test x$VALGRIND_POOL = xyes; then AC_DEFINE([VALGRIND_POOL], 1, [Enable a valgrind aware build of pool]) fi ################################################################################ dnl -- Disable devmapper AC_MSG_CHECKING(whether to use device-mapper) AC_ARG_ENABLE(devmapper, AC_HELP_STRING([--disable-devmapper], [disable LVM2 device-mapper interaction]), DEVMAPPER=$enableval) AC_MSG_RESULT($DEVMAPPER) if test "$DEVMAPPER" = yes; then AC_DEFINE([DEVMAPPER_SUPPORT], 1, [Define to 1 to enable LVM2 device-mapper interaction.]) fi ################################################################################ dnl -- Build lvmetad AC_MSG_CHECKING(whether to build LVMetaD) AC_ARG_ENABLE(lvmetad, AC_HELP_STRING([--enable-lvmetad], [enable the LVM Metadata Daemon]), LVMETAD=$enableval) test -n "$LVMETAD" && BUILD_LVMETAD=$LVMETAD AC_MSG_RESULT($BUILD_LVMETAD) ################################################################################ dnl -- Build lvmpolld AC_MSG_CHECKING(whether to build lvmpolld) AC_ARG_ENABLE(lvmpolld, AC_HELP_STRING([--enable-lvmpolld], [enable the LVM Polling Daemon]), LVMPOLLD=$enableval) test -n "$LVMPOLLD" && BUILD_LVMPOLLD=$LVMPOLLD AC_MSG_RESULT($BUILD_LVMPOLLD) ################################################################################ BUILD_LVMLOCKD=no dnl -- Build lvmlockdsanlock AC_MSG_CHECKING(whether to build lvmlockdsanlock) AC_ARG_ENABLE(lvmlockd-sanlock, AC_HELP_STRING([--enable-lvmlockd-sanlock], [enable the LVM lock daemon using sanlock]), LOCKDSANLOCK=$enableval) AC_MSG_RESULT($LOCKDSANLOCK) BUILD_LOCKDSANLOCK=$LOCKDSANLOCK dnl -- Look for sanlock libraries if test "$BUILD_LOCKDSANLOCK" = yes; then PKG_CHECK_MODULES(LOCKD_SANLOCK, libsanlock_client >= 3.3.0, [HAVE_LOCKD_SANLOCK=yes], $bailout) AC_DEFINE([LOCKDSANLOCK_SUPPORT], 1, [Define to 1 to include code that uses lvmlockd sanlock option.]) BUILD_LVMLOCKD=yes fi ################################################################################ dnl -- Build lvmlockddlm AC_MSG_CHECKING(whether to build lvmlockddlm) AC_ARG_ENABLE(lvmlockd-dlm, AC_HELP_STRING([--enable-lvmlockd-dlm], [enable the LVM lock daemon using dlm]), LOCKDDLM=$enableval) AC_MSG_RESULT($LOCKDDLM) BUILD_LOCKDDLM=$LOCKDDLM dnl -- Look for dlm libraries if test "$BUILD_LOCKDDLM" = yes; then PKG_CHECK_MODULES(LOCKD_DLM, libdlm, [HAVE_LOCKD_DLM=yes], $bailout) AC_DEFINE([LOCKDDLM_SUPPORT], 1, [Define to 1 to include code that uses lvmlockd dlm option.]) BUILD_LVMLOCKD=yes fi ################################################################################ dnl -- Build lvmlockd AC_MSG_CHECKING(whether to build lvmlockd) AC_MSG_RESULT($BUILD_LVMLOCKD) if test "$BUILD_LVMLOCKD" = yes; then AS_IF([test "$LVMPOLLD" = no], [AC_MSG_ERROR([cannot build lvmlockd with --disable-lvmpolld.])]) AS_IF([test "$LVMETAD" = no], [AC_MSG_ERROR([cannot build lvmlockd with --disable-lvmetad.])]) AS_IF([test "$BUILD_LVMPOLLD" = no], [BUILD_LVMPOLLD=yes; AC_MSG_WARN([Enabling lvmpolld - required by lvmlockd.])]) AS_IF([test "$BUILD_LVMETAD" = no], [BUILD_LVMETAD=yes; AC_MSG_WARN([Enabling lvmetad - required by lvmlockd.])]) AC_MSG_CHECKING([defaults for use_lvmlockd]) AC_ARG_ENABLE(use_lvmlockd, AC_HELP_STRING([--disable-use-lvmlockd], [disable usage of LVM lock daemon]), [case ${enableval} in yes) DEFAULT_USE_LVMLOCKD=1 ;; *) DEFAULT_USE_LVMLOCKD=0 ;; esac], DEFAULT_USE_LVMLOCKD=1) AC_MSG_RESULT($DEFAULT_USE_LVMLOCKD) AC_DEFINE([LVMLOCKD_SUPPORT], 1, [Define to 1 
to include code that uses lvmlockd.]) AC_ARG_WITH(lvmlockd-pidfile, AC_HELP_STRING([--with-lvmlockd-pidfile=PATH], [lvmlockd pidfile [PID_DIR/lvmlockd.pid]]), LVMLOCKD_PIDFILE=$withval, LVMLOCKD_PIDFILE="$DEFAULT_PID_DIR/lvmlockd.pid") AC_DEFINE_UNQUOTED(LVMLOCKD_PIDFILE, ["$LVMLOCKD_PIDFILE"], [Path to lvmlockd pidfile.]) else DEFAULT_USE_LVMLOCKD=0 fi AC_DEFINE_UNQUOTED(DEFAULT_USE_LVMLOCKD, [$DEFAULT_USE_LVMLOCKD], [Use lvmlockd by default.]) ################################################################################ dnl -- Check lvmetad if test "$BUILD_LVMETAD" = yes; then AC_MSG_CHECKING([defaults for use_lvmetad]) AC_ARG_ENABLE(use_lvmetad, AC_HELP_STRING([--disable-use-lvmetad], [disable usage of LVM Metadata Daemon]), [case ${enableval} in yes) DEFAULT_USE_LVMETAD=1 ;; *) DEFAULT_USE_LVMETAD=0 ;; esac], DEFAULT_USE_LVMETAD=1) AC_MSG_RESULT($DEFAULT_USE_LVMETAD) AC_DEFINE([LVMETAD_SUPPORT], 1, [Define to 1 to include code that uses lvmetad.]) AC_ARG_WITH(lvmetad-pidfile, AC_HELP_STRING([--with-lvmetad-pidfile=PATH], [lvmetad pidfile [PID_DIR/lvmetad.pid]]), LVMETAD_PIDFILE=$withval, LVMETAD_PIDFILE="$DEFAULT_PID_DIR/lvmetad.pid") AC_DEFINE_UNQUOTED(LVMETAD_PIDFILE, ["$LVMETAD_PIDFILE"], [Path to lvmetad pidfile.]) else DEFAULT_USE_LVMETAD=0 fi AC_DEFINE_UNQUOTED(DEFAULT_USE_LVMETAD, [$DEFAULT_USE_LVMETAD], [Use lvmetad by default.]) ################################################################################ dnl -- Check lvmpolld if test "$BUILD_LVMPOLLD" = yes; then AC_MSG_CHECKING([defaults for use_lvmpolld]) AC_ARG_ENABLE(use_lvmpolld, AC_HELP_STRING([--disable-use-lvmpolld], [disable usage of LVM Poll Daemon]), [case ${enableval} in yes) DEFAULT_USE_LVMPOLLD=1 ;; *) DEFAULT_USE_LVMPOLLD=0 ;; esac], DEFAULT_USE_LVMPOLLD=1) AC_MSG_RESULT($DEFAULT_USE_LVMPOLLD) AC_DEFINE([LVMPOLLD_SUPPORT], 1, [Define to 1 to include code that uses lvmpolld.]) AC_ARG_WITH(lvmpolld-pidfile, AC_HELP_STRING([--with-lvmpolld-pidfile=PATH], [lvmpolld pidfile [PID_DIR/lvmpolld.pid]]), LVMPOLLD_PIDFILE=$withval, LVMPOLLD_PIDFILE="$DEFAULT_PID_DIR/lvmpolld.pid") AC_DEFINE_UNQUOTED(LVMPOLLD_PIDFILE, ["$LVMPOLLD_PIDFILE"], [Path to lvmpolld pidfile.]) else DEFAULT_USE_LVMPOLLD=0 fi AC_DEFINE_UNQUOTED(DEFAULT_USE_LVMPOLLD, [$DEFAULT_USE_LVMPOLLD], [Use lvmpolld by default.]) ################################################################################ dnl -- Check dmfilemapd AC_MSG_CHECKING(whether to build dmfilemapd) AC_ARG_ENABLE(dmfilemapd, AC_HELP_STRING([--enable-dmfilemapd], [enable the dmstats filemap daemon]), BUILD_DMFILEMAPD=$enableval, BUILD_DMFILEMAPD=no) AC_MSG_RESULT($BUILD_DMFILEMAPD) AC_DEFINE([DMFILEMAPD], $BUILD_DMFILEMAPD, [Define to 1 to enable the device-mapper filemap daemon.]) dnl -- dmfilemapd requires FIEMAP if test "$BUILD_DMFILEMAPD" = yes; then AC_CHECK_HEADER([linux/fiemap.h], , [AC_MSG_ERROR(--enable-dmfilemapd requires fiemap.h)]) fi ################################################################################ dnl -- Build notifydbus AC_MSG_CHECKING(whether to build notifydbus) AC_ARG_ENABLE(notify-dbus, AC_HELP_STRING([--enable-notify-dbus], [enable LVM notification using dbus]), NOTIFYDBUS_SUPPORT=$enableval, NOTIFYDBUS_SUPPORT=no) AC_MSG_RESULT($NOTIFYDBUS_SUPPORT) if test "$NOTIFYDBUS_SUPPORT" = yes; then AC_DEFINE([NOTIFYDBUS_SUPPORT], 1, [Define to 1 to include code that uses dbus notification.]) SYSTEMD_LIBS="-lsystemd" fi ################################################################################ dnl -- Look for dbus libraries if test 
"$NOTIFYDBUS_SUPPORT" = yes; then PKG_CHECK_MODULES(NOTIFY_DBUS, systemd >= 221, [HAVE_NOTIFY_DBUS=yes], $bailout) fi ################################################################################ dnl -- Enable blkid wiping functionality AC_ARG_ENABLE(blkid_wiping, AC_HELP_STRING([--disable-blkid_wiping], [disable libblkid detection of signatures when wiping and use native code instead]), BLKID_WIPING=$enableval, BLKID_WIPING=maybe) DEFAULT_USE_BLKID_WIPING=0 if test "$BLKID_WIPING" != no; then pkg_config_init PKG_CHECK_MODULES(BLKID, blkid >= 2.24, [ BLKID_WIPING=yes BLKID_PC="blkid" DEFAULT_USE_BLKID_WIPING=1 AC_DEFINE([BLKID_WIPING_SUPPORT], 1, [Define to 1 to use libblkid detection of signatures when wiping.]) ], [if test "$BLKID_WIPING" = maybe; then BLKID_WIPING=no else AC_MSG_ERROR([bailing out... blkid library >= 2.24 is required]) fi]) fi AC_MSG_CHECKING([whether to enable libblkid detection of signatures when wiping]) AC_MSG_RESULT($BLKID_WIPING) AC_DEFINE_UNQUOTED(DEFAULT_USE_BLKID_WIPING, [$DEFAULT_USE_BLKID_WIPING], [Use blkid wiping by default.]) ################################################################################ dnl -- Enable udev-systemd protocol to instantiate a service for background jobs dnl -- Requires systemd version 205 at least (including support for systemd-run) AC_ARG_ENABLE(udev-systemd-background-jobs, AC_HELP_STRING([--disable-udev-systemd-background-jobs], [disable udev-systemd protocol to instantiate a service for background job]), UDEV_SYSTEMD_BACKGROUND_JOBS=$enableval, UDEV_SYSTEMD_BACKGROUND_JOBS=maybe) if test "$UDEV_SYSTEMD_BACKGROUND_JOBS" != no; then pkg_config_init PKG_CHECK_MODULES(SYSTEMD, systemd >= 205, [UDEV_SYSTEMD_BACKGROUND_JOBS=yes], [if test "$UDEV_SYSTEMD_BACKGROUND_JOBS" = maybe; then UDEV_SYSTEMD_BACKGROUND_JOBS=no else AC_MSG_ERROR([bailing out... 
systemd >= 205 is required]) fi]) fi AC_MSG_CHECKING(whether to use udev-systemd protocol for jobs in background) AC_MSG_RESULT($UDEV_SYSTEMD_BACKGROUND_JOBS) ################################################################################ dnl -- Enable udev synchronisation AC_MSG_CHECKING(whether to enable synchronisation with udev processing) AC_ARG_ENABLE(udev_sync, AC_HELP_STRING([--enable-udev_sync], [enable synchronisation with udev processing]), UDEV_SYNC=$enableval, UDEV_SYNC=no) AC_MSG_RESULT($UDEV_SYNC) if test "$UDEV_SYNC" = yes; then pkg_config_init PKG_CHECK_MODULES(UDEV, libudev >= 143, [UDEV_PC="libudev"]) AC_DEFINE([UDEV_SYNC_SUPPORT], 1, [Define to 1 to enable synchronisation with udev processing.]) AC_CHECK_LIB(udev, udev_device_get_is_initialized, AC_DEFINE([HAVE_LIBUDEV_UDEV_DEVICE_GET_IS_INITIALIZED], 1, [Define to 1 if udev_device_get_is_initialized is available.])) LIBS=$ac_check_lib_save_LIBS fi dnl -- Enable udev rules AC_MSG_CHECKING(whether to enable installation of udev rules required for synchronisation) AC_ARG_ENABLE(udev_rules, AC_HELP_STRING([--enable-udev_rules], [install rule files needed for udev synchronisation]), UDEV_RULES=$enableval, UDEV_RULES=$UDEV_SYNC) AC_MSG_RESULT($UDEV_RULES) AC_MSG_CHECKING(whether to enable executable path detection in udev rules) AC_ARG_ENABLE(udev_rule_exec_detection, AC_HELP_STRING([--enable-udev-rule-exec-detection], [enable executable path detection in udev rules]), UDEV_RULE_EXEC_DETECTION=$enableval, UDEV_RULE_EXEC_DETECTION=no) AC_MSG_RESULT($UDEV_RULE_EXEC_DETECTION) dnl -- Check support for built-in blkid against target udev version if test "$UDEV_RULE" != no ; then AC_MSG_CHECKING(whether udev supports built-in blkid) pkg_config_init if $("$PKG_CONFIG" --atleast-version=176 libudev); then UDEV_HAS_BUILTIN_BLKID=yes else UDEV_HAS_BUILTIN_BLKID=no fi AC_MSG_RESULT($UDEV_HAS_BUILTIN_BLKID) fi ################################################################################ dnl -- Compatibility mode AC_ARG_ENABLE(compat, AC_HELP_STRING([--enable-compat], [enable support for old device-mapper versions]), DM_COMPAT=$enableval, DM_COMPAT=no) AS_IF([test "$DM_COMPAT" = yes], [AC_DEFINE([DM_COMPAT], 1, [Define to enable compat protocol]) AC_MSG_ERROR([--enable-compat is not currently supported. 
Since device-mapper version 1.02.66, only one version (4) of the device-mapper ioctl protocol is supported.])]) ################################################################################ dnl -- Compatible units suffix mode AC_ARG_ENABLE(units-compat, AC_HELP_STRING([--enable-units-compat], [enable output compatibility with old versions that that do not use KiB-style unit suffixes]), UNITS_COMPAT=$enableval, UNITS_COMPAT=no) if test "$UNITS_COMPAT" = yes; then AC_DEFINE([DEFAULT_SI_UNIT_CONSISTENCY], 0, [Define to 0 to reinstate the pre-2.02.54 handling of unit suffixes.]) fi ################################################################################ dnl -- Disable ioctl AC_ARG_ENABLE(ioctl, AC_HELP_STRING([--disable-ioctl], [disable ioctl calls to device-mapper in the kernel]), DM_IOCTLS=$enableval) AS_IF([test "$DM_IOCTLS" = yes], [AC_DEFINE([DM_IOCTLS], 1, [Define to enable ioctls calls to kernel])]) ################################################################################ dnl -- Disable O_DIRECT AC_MSG_CHECKING(whether to enable O_DIRECT) AC_ARG_ENABLE(o_direct, AC_HELP_STRING([--disable-o_direct], [disable O_DIRECT]), ODIRECT=$enableval) AC_MSG_RESULT($ODIRECT) if test "$ODIRECT" = yes; then AC_DEFINE([O_DIRECT_SUPPORT], 1, [Define to 1 to enable O_DIRECT support.]) fi ################################################################################ dnl -- Enable liblvm2app.so AC_MSG_CHECKING(whether to build liblvm2app.so application library) AC_ARG_ENABLE(applib, AC_HELP_STRING([--enable-applib], [build application library]), APPLIB=$enableval, APPLIB=no) AC_MSG_RESULT($APPLIB) AC_SUBST([LVM2APP_LIB]) test "$APPLIB" = yes \ && LVM2APP_LIB=-llvm2app \ || LVM2APP_LIB= AS_IF([test "$APPLIB"], [AC_MSG_WARN([Python bindings are deprecated. 
Use D-Bus API])]) ################################################################################ dnl -- Enable cmdlib AC_MSG_CHECKING(whether to compile liblvm2cmd.so) AC_ARG_ENABLE(cmdlib, AC_HELP_STRING([--enable-cmdlib], [build shared command library]), CMDLIB=$enableval, CMDLIB=no) AC_MSG_RESULT($CMDLIB) AC_SUBST([LVM2CMD_LIB]) test "$CMDLIB" = yes \ && LVM2CMD_LIB=-llvm2cmd \ || LVM2CMD_LIB= ################################################################################ dnl -- Enable D-Bus service AC_MSG_CHECKING(whether to include Python D-Bus support) AC_ARG_ENABLE(dbus-service, AC_HELP_STRING([--enable-dbus-service], [install D-Bus support]), BUILD_LVMDBUSD=$enableval, BUILD_LVMDBUSD=no) AC_MSG_RESULT($BUILD_LVMDBUSD) AS_IF([test "$NOTIFYDBUS_SUPPORT" != yes && test "$BUILD_LVMDBUSD" = yes], [AC_MSG_WARN([Building D-Bus support without D-Bus notifications.])]) ################################################################################ dnl -- Enable Python liblvm2app bindings AC_MSG_CHECKING(whether to build Python wrapper for liblvm2app.so) AC_ARG_ENABLE(python_bindings, AC_HELP_STRING([--enable-python_bindings], [build default Python applib bindings]), PYTHON_BINDINGS=$enableval, PYTHON_BINDINGS=no) AC_MSG_RESULT($PYTHON_BINDINGS) AC_MSG_CHECKING(whether to build Python2 wrapper for liblvm2app.so) AC_ARG_ENABLE(python2_bindings, AC_HELP_STRING([--enable-python2_bindings], [build Python2 applib bindings]), PYTHON2_BINDINGS=$enableval, PYTHON2_BINDINGS=no) AC_MSG_RESULT($PYTHON2_BINDINGS) AC_MSG_CHECKING(whether to build Python3 wrapper for liblvm2app.so) AC_ARG_ENABLE(python3_bindings, AC_HELP_STRING([--enable-python3_bindings], [build Python3 applib bindings]), PYTHON3_BINDINGS=$enableval, PYTHON3_BINDINGS=no) AC_MSG_RESULT($PYTHON3_BINDINGS) if test "$PYTHON_BINDINGS" = yes; then AC_MSG_ERROR([--enable-python-bindings is replaced by --enable-python2-bindings and --enable-python3-bindings]) fi if test "$PYTHON2_BINDINGS" = yes; then AM_PATH_PYTHON([2]) AC_PATH_TOOL(PYTHON2, python2) test -z "$PYTHON2" && AC_MSG_ERROR([python2 is required for --enable-python2_bindings but cannot be found]) AC_PATH_TOOL(PYTHON2_CONFIG, python2-config) test -z "$PYTHON2_CONFIG" && AC_PATH_TOOL(PYTHON2_CONFIG, python-config) test -z "$PYTHON2_CONFIG" && AC_MSG_ERROR([python headers are required for --enable-python2_bindings but cannot be found]) PYTHON2_INCDIRS=`"$PYTHON2_CONFIG" --includes` PYTHON2_LIBDIRS=`"$PYTHON2_CONFIG" --libs` PYTHON2DIR=$pythondir PYTHON_BINDINGS=yes fi if test "$PYTHON3_BINDINGS" = yes -o "$BUILD_LVMDBUSD" = yes; then unset PYTHON PYTHON_CONFIG unset am_cv_pathless_PYTHON ac_cv_path_PYTHON am_cv_python_platform unset am_cv_python_pythondir am_cv_python_version am_cv_python_pyexecdir unset ac_cv_path_PYTHON_CONFIG ac_cv_path_ac_pt_PYTHON_CONFIG AM_PATH_PYTHON([3]) PYTHON3=$PYTHON test -z "$PYTHON3" && AC_MSG_ERROR([python3 is required for --enable-python3_bindings or --enable-dbus-service but cannot be found]) AC_PATH_TOOL(PYTHON3_CONFIG, python3-config) test -z "$PYTHON3_CONFIG" && AC_MSG_ERROR([python3 headers are required for --enable-python3_bindings or --enable-dbus-service but cannot be found]) PYTHON3_INCDIRS=`"$PYTHON3_CONFIG" --includes` PYTHON3_LIBDIRS=`"$PYTHON3_CONFIG" --libs` PYTHON3DIR=$pythondir PYTHON_BINDINGS=yes fi if test "$BUILD_LVMDBUSD" = yes; then # To get this macro, install autoconf-archive package then run autoreconf AC_PYTHON_MODULE([pyudev], [Required], python3) AC_PYTHON_MODULE([dbus], [Required], python3) fi if test "$PYTHON_BINDINGS" = yes
-o "$PYTHON2_BINDINGS" = yes -o "$PYTHON3_BINDINGS" = yes; then AC_MSG_WARN([Python bindings are deprecated. Use D-Bus API]) test "$APPLIB" != yes && AC_MSG_ERROR([Python_bindings require --enable-applib]) fi ################################################################################ dnl -- Enable pkg-config AC_ARG_ENABLE(pkgconfig, AC_HELP_STRING([--enable-pkgconfig], [install pkgconfig support]), PKGCONFIG=$enableval, PKGCONFIG=no) ################################################################################ dnl -- Enable installation of writable files by user AC_ARG_ENABLE(write_install, AC_HELP_STRING([--enable-write_install], [install user writable files]), WRITE_INSTALL=$enableval, WRITE_INSTALL=no) ################################################################################ dnl -- Enable fsadm AC_MSG_CHECKING(whether to install fsadm) AC_ARG_ENABLE(fsadm, AC_HELP_STRING([--disable-fsadm], [disable fsadm]), FSADM=$enableval) AC_MSG_RESULT($FSADM) ################################################################################ dnl -- Enable blkdeactivate AC_MSG_CHECKING(whether to install blkdeactivate) AC_ARG_ENABLE(blkdeactivate, AC_HELP_STRING([--disable-blkdeactivate], [disable blkdeactivate]), BLKDEACTIVATE=$enableval) AC_MSG_RESULT($BLKDEACTIVATE) ################################################################################ dnl -- enable dmeventd handling AC_MSG_CHECKING(whether to use dmeventd) AC_ARG_ENABLE(dmeventd, AC_HELP_STRING([--enable-dmeventd], [enable the device-mapper event daemon]), BUILD_DMEVENTD=$enableval, BUILD_DMEVENTD=no) AC_MSG_RESULT($BUILD_DMEVENTD) dnl -- dmeventd currently requires internal mirror support if test "$BUILD_DMEVENTD" = yes; then if test "$MIRRORS" != internal; then AC_MSG_ERROR([--enable-dmeventd currently requires --with-mirrors=internal]) fi if test "$CMDLIB" = no; then AC_MSG_ERROR([--enable-dmeventd requires --enable-cmdlib to be used as well]) fi AC_DEFINE([DMEVENTD], 1, [Define to 1 to enable the device-mapper event daemon.]) fi ################################################################################ dnl -- getline included in recent libc AC_CHECK_LIB(c, getline, AC_DEFINE([HAVE_GETLINE], 1, [Define to 1 if getline is available.])) ################################################################################ dnl -- canonicalize_file_name included in recent libc AC_CHECK_LIB(c, canonicalize_file_name, AC_DEFINE([HAVE_CANONICALIZE_FILE_NAME], 1, [Define to 1 if canonicalize_file_name is available.])) ################################################################################ dnl -- Check for dlopen AC_CHECK_LIB(dl, dlopen, [AC_DEFINE([HAVE_LIBDL], 1, [Define to 1 if dynamic libraries are available.]) DL_LIBS="-ldl" HAVE_LIBDL=yes], [DL_LIBS= HAVE_LIBDL=no ]) ################################################################################ dnl -- Check for shared/static conflicts if [[ \( "$LVM1" = shared -o "$POOL" = shared -o "$CLUSTER" = shared \ -o "$SNAPSHOTS" = shared -o "$MIRRORS" = shared \ -o "$RAID" = shared -o "$CACHE" = shared \ \) -a "$STATIC_LINK" = yes ]]; then AC_MSG_ERROR([Features cannot be 'shared' when building statically]) fi ################################################################################ AC_CHECK_LIB(m, log10, [M_LIBS="-lm"], hard_bailout) ################################################################################ AC_CHECK_LIB([pthread], [pthread_mutex_lock], [PTHREAD_LIBS="-lpthread"], hard_bailout) 
################################################################################ dnl -- Disable selinux AC_MSG_CHECKING(whether to enable selinux support) AC_ARG_ENABLE(selinux, AC_HELP_STRING([--disable-selinux], [disable selinux support]), SELINUX=$enableval) AC_MSG_RESULT($SELINUX) ################################################################################ dnl -- Check for selinux if test "$SELINUX" = yes; then AC_CHECK_LIB([sepol], [sepol_check_context], [ AC_DEFINE([HAVE_SEPOL], 1, [Define to 1 if sepol_check_context is available.]) SELINUX_LIBS="-lsepol"]) AC_CHECK_LIB([selinux], [is_selinux_enabled], [ AC_CHECK_HEADERS([selinux/selinux.h],, hard_bailout) AC_CHECK_HEADERS([selinux/label.h]) AC_DEFINE([HAVE_SELINUX], 1, [Define to 1 to include support for selinux.]) SELINUX_LIBS="-lselinux $SELINUX_LIBS" SELINUX_PC="libselinux" HAVE_SELINUX=yes ], [ AC_MSG_WARN(Disabling selinux) SELINUX_LIBS= SELINUX_PC= HAVE_SELINUX=no ]) fi ################################################################################ dnl -- Check for realtime clock support RT_LIBS= HAVE_REALTIME=no if test "$REALTIME" = yes; then AC_CHECK_FUNCS([clock_gettime], HAVE_REALTIME=yes) AS_IF([test "$HAVE_REALTIME" != yes], [ # try again with -lrt AC_CHECK_LIB([rt], [clock_gettime], RT_LIBS="-lrt"; HAVE_REALTIME=yes)]) if test "$HAVE_REALTIME" = yes; then AC_DEFINE([HAVE_REALTIME], 1, [Define to 1 to include support for realtime clock.]) else AC_MSG_WARN(Disabling realtime clock) fi fi dnl Check if the system has struct stat st_ctim. AC_CACHE_CHECK([whether struct stat has st_ctim], [ac_cv_stat_st_ctim], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM( [#include <sys/stat.h> long bar(void) { struct stat s; return (long)(s.st_ctim.tv_sec + s.st_ctim.tv_nsec);}] )], [ac_cv_stat_st_ctim=yes], [ac_cv_stat_st_ctim=no])]) AC_IF_YES(ac_cv_stat_st_ctim, AC_DEFINE(HAVE_STAT_ST_CTIM, 1, [Define if struct stat has a field st_ctim with timespec for ctime])) ################################################################################ dnl -- Check for getopt AC_CHECK_HEADERS(getopt.h, AC_DEFINE([HAVE_GETOPTLONG], 1, [Define to 1 if getopt_long is available.])) ################################################################################ dnl -- Check for readline (Shamelessly copied from parted 1.4.17) if test "$READLINE" != no; then lvm_saved_libs=$LIBS AC_SEARCH_LIBS([tgetent], [tinfo ncurses curses termcap termlib], READLINE_LIBS=$ac_cv_search_tgetent, [ if test "$READLINE" = yes; then AC_MSG_ERROR( [termcap could not be found which is required for the --enable-readline option (which is enabled by default). Either disable readline support with --disable-readline or download and install termcap from: ftp.gnu.org/gnu/termcap Note: if you are using precompiled packages you will also need the development package as well (which may be called termcap-devel or something similar). Note: (n)curses also seems to work as a substitute for termcap.
This was not found either - but you could try installing that as well.]) fi]) dnl -- Old systems may need extra termcap dependency explicitly in LIBS AC_CHECK_LIB([readline], [readline], [ AC_DEFINE([READLINE_SUPPORT], 1, [Define to 1 to include the LVM readline shell.]) dnl -- Try only with -lreadline and check for different symbol READLINE=yes LIBS=$lvm_saved_libs AC_CHECK_LIB([readline], [rl_line_buffer], [ READLINE_LIBS="-lreadline" ], [ AC_MSG_RESULT([linking -lreadline with $READLINE_LIBS needed]) READLINE_LIBS="-lreadline $READLINE_LIBS" ]) ], [ READLINE_LIBS= if test "$READLINE" = yes; then AC_MSG_ERROR( [GNU Readline could not be found which is required for the --enable-readline option (which is enabled by default). Either disable readline support with --disable-readline or download and install readline from: ftp.gnu.org/gnu/readline Note: if you are using precompiled packages you will also need the development package as well (which may be called readline-devel or something similar).]) fi ]) LIBS="$READLINE_LIBS $lvm_saved_libs" AC_CHECK_FUNCS([rl_completion_matches]) LIBS=$lvm_saved_libs fi ################################################################################ dnl -- Internationalisation stuff AC_MSG_CHECKING(whether to enable internationalisation) AC_ARG_ENABLE(nls, AC_HELP_STRING([--enable-nls], [enable Native Language Support]), INTL=$enableval, INTL=no) AC_MSG_RESULT($INTL) if test "$INTL" = yes; then # FIXME - Move this - can be device-mapper too INTL_PACKAGE="lvm2" AC_PATH_TOOL(MSGFMT, msgfmt) AS_IF([test -z "$MSGFMT"], [AC_MSG_ERROR([msgfmt not found in path $PATH])]) AC_ARG_WITH(localedir, AC_HELP_STRING([--with-localedir=DIR], [locale-dependent data [DATAROOTDIR/locale]]), localedir=$withval, localedir=${localedir-'${datarootdir}/locale'}) AC_DEFINE_UNQUOTED([INTL_PACKAGE], ["$INTL_PACKAGE"], [Internalization package]) # double eval needed ${datarootdir} -> ${prefix}/share -> real path AC_DEFINE_UNQUOTED([LOCALEDIR], ["$(eval echo $(eval echo $localedir))"], [Locale-dependent data]) fi ################################################################################ dnl -- FIXME: need to switch to regular option here --sysconfdir AC_ARG_WITH(confdir, AC_HELP_STRING([--with-confdir=DIR], [configuration files in DIR [/etc]]), CONFDIR=$withval, CONFDIR='/etc') AC_DEFINE_UNQUOTED(DEFAULT_ETC_DIR, ["$CONFDIR"], [Default system configuration directory.]) AC_ARG_WITH(staticdir, AC_HELP_STRING([--with-staticdir=DIR], [static binaries in DIR [EPREFIX/sbin]]), STATICDIR=$withval, STATICDIR='${exec_prefix}/sbin') AC_ARG_WITH(usrlibdir, AC_HELP_STRING([--with-usrlibdir=DIR], [usrlib in DIR [PREFIX/lib]]), usrlibdir=$withval, usrlibdir='${prefix}/lib') AC_ARG_WITH(usrsbindir, AC_HELP_STRING([--with-usrsbindir=DIR], [usrsbin executables in DIR [PREFIX/sbin]]), usrsbindir=$withval, usrsbindir='${prefix}/sbin') ################################################################################ AC_ARG_WITH(udev_prefix, AC_HELP_STRING([--with-udev-prefix=UPREFIX], [install udev rule files in UPREFIX [EPREFIX]]), udev_prefix=$withval, udev_prefix='${exec_prefix}') AC_ARG_WITH(udevdir, AC_HELP_STRING([--with-udevdir=DIR], [udev rules in DIR [UPREFIX/lib/udev/rules.d]]), udevdir=$withval, udevdir='${udev_prefix}/lib/udev/rules.d') ################################################################################ dnl -- Get the systemd system unit dir value from pkg_config automatically if value not given explicitly. 
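dnl -- (For example, the query used below, "$PKG_CONFIG" --variable=systemdsystemunitdir systemd,
dnl -- commonly prints /usr/lib/systemd/system; the exact path is distribution-specific and
dnl -- only available when systemd's pkg-config metadata is installed.)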
dnl -- This follows the recommendation for systemd integration best practices mentioned in daemon(7) manpage. AC_ARG_WITH(systemdsystemunitdir, AC_HELP_STRING([--with-systemdsystemunitdir=DIR], [systemd service files in DIR]), systemdsystemunitdir=$withval, pkg_config_init pkg_systemdsystemunitdir=$("$PKG_CONFIG" --variable=systemdsystemunitdir systemd)) test -n "$pkg_systemdsystemunitdir" && systemdsystemunitdir=$pkg_systemdsystemunitdir test -z "$systemdsystemunitdir" && systemdsystemunitdir='${exec_prefix}/lib/systemd/system'; systemdutildir=$("$PKG_CONFIG" --variable=systemdutildir systemd) test -z "$systemdutildir" && systemdutildir='${exec_prefix}/lib/systemd'; ################################################################################ AC_ARG_WITH(tmpfilesdir, AC_HELP_STRING([--with-tmpfilesdir=DIR], [install configuration files for management of volatile files and directories in DIR [PREFIX/lib/tmpfiles.d]]), tmpfilesdir=$withval, tmpfilesdir='${prefix}/lib/tmpfiles.d') ################################################################################ dnl -- Ensure additional headers required if test "$READLINE" = yes; then AC_CHECK_HEADERS(readline/readline.h readline/history.h,,hard_bailout) fi AC_MSG_CHECKING(whether to enable readline) AC_MSG_RESULT($READLINE) if test "$BUILD_CMIRRORD" = yes; then AC_CHECK_FUNCS(atexit,,hard_bailout) fi if test "$BUILD_LVMLOCKD" = yes; then AS_IF([test "$HAVE_REALTIME" != yes], [AC_MSG_ERROR([Realtime clock support is mandatory for lvmlockd.])]) AC_CHECK_FUNCS(strtoull,,hard_bailout) fi if test "$BUILD_LVMPOLLD" = yes; then AC_CHECK_FUNCS(strpbrk,,hard_bailout) AC_FUNC_STRERROR_R fi if test "$CLVMD" != none; then AC_CHECK_HEADERS(mntent.h netdb.h netinet/in.h pthread.h search.h sys/mount.h sys/socket.h sys/uio.h sys/un.h utmpx.h,,AC_MSG_ERROR(bailing out)) AC_CHECK_FUNCS(dup2 getmntent memmove select socket,,hard_bailout) AC_FUNC_GETMNTENT AC_FUNC_SELECT_ARGTYPES fi if test "$CLUSTER" != none; then AC_CHECK_HEADERS(sys/socket.h sys/un.h,,hard_bailout) AC_CHECK_FUNCS(socket,,hard_bailout) fi if test "$BUILD_DMEVENTD" = yes; then AC_CHECK_HEADERS(arpa/inet.h,,hard_bailout) fi if test "$HAVE_LIBDL" = yes; then AC_CHECK_HEADERS(dlfcn.h,,hard_bailout) fi if test "$INTL" = yes; then AC_CHECK_HEADERS(libintl.h,,hard_bailout) fi if test "$UDEV_SYNC" = yes; then AC_CHECK_HEADERS(sys/ipc.h sys/sem.h,,hard_bailout) fi if test "$BUILD_DMFILEMAPD" = yes; then AC_CHECK_HEADERS([sys/inotify.h],,hard_bailout) fi ################################################################################ AC_PATH_TOOL(MODPROBE_CMD, modprobe) if test -n "$MODPROBE_CMD"; then AC_DEFINE_UNQUOTED([MODPROBE_CMD], ["$MODPROBE_CMD"], [The path to 'modprobe', if available.]) fi SYSCONFDIR="$(eval echo $(eval echo $sysconfdir))" SBINDIR="$(eval echo $(eval echo $sbindir))" LVM_PATH="$SBINDIR/lvm" AC_DEFINE_UNQUOTED(LVM_PATH, ["$LVM_PATH"], [Path to lvm binary.]) USRSBINDIR="$(eval echo $(eval echo $usrsbindir))" CLVMD_PATH="$USRSBINDIR/clvmd" AC_DEFINE_UNQUOTED(CLVMD_PATH, ["$CLVMD_PATH"], [Path to clvmd binary.]) FSADM_PATH="$SBINDIR/fsadm" AC_DEFINE_UNQUOTED(FSADM_PATH, ["$FSADM_PATH"], [Path to fsadm binary.]) ################################################################################ dnl -- dmeventd pidfile and executable path if test "$BUILD_DMEVENTD" = yes; then AC_ARG_WITH(dmeventd-pidfile, AC_HELP_STRING([--with-dmeventd-pidfile=PATH], [dmeventd pidfile [PID_DIR/dmeventd.pid]]), DMEVENTD_PIDFILE=$withval, DMEVENTD_PIDFILE="$DEFAULT_PID_DIR/dmeventd.pid") 
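dnl
dnl -- A small sketch of the double-eval idiom used above for SYSCONFDIR,
dnl -- SBINDIR and the *_PATH values (the resulting path assumes the stock
dnl -- autoconf defaults prefix=/usr/local and exec_prefix='${prefix}'):
dnl --
dnl --   sbindir='${exec_prefix}/sbin'
dnl --   eval echo $sbindir                 # -> ${prefix}/sbin
dnl --   eval echo $(eval echo $sbindir)    # -> /usr/local/sbin
dnl --
dnl -- Two rounds are needed because one installation directory may expand to a
dnl -- reference to another, as exec_prefix does by default.
dnl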
AC_DEFINE_UNQUOTED(DMEVENTD_PIDFILE, ["$DMEVENTD_PIDFILE"], [Path to dmeventd pidfile.]) fi if test "$BUILD_DMEVENTD" = yes; then AC_ARG_WITH(dmeventd-path, AC_HELP_STRING([--with-dmeventd-path=PATH], [dmeventd path [EPREFIX/sbin/dmeventd]]), DMEVENTD_PATH=$withval, DMEVENTD_PATH="$SBINDIR/dmeventd") AC_DEFINE_UNQUOTED(DMEVENTD_PATH, ["$DMEVENTD_PATH"], [Path to dmeventd binary.]) fi ################################################################################ dnl -- various defaults dnl -- FIXME: need to switch to regular option here --sysconfdir AC_ARG_WITH(default-system-dir, AC_HELP_STRING([--with-default-system-dir=DIR], [default LVM system directory [/etc/lvm]]), DEFAULT_SYS_DIR=$withval, DEFAULT_SYS_DIR="/etc/lvm") AC_DEFINE_UNQUOTED(DEFAULT_SYS_DIR, ["$DEFAULT_SYS_DIR"], [Path to LVM system directory.]) AC_ARG_WITH(default-profile-subdir, AC_HELP_STRING([--with-default-profile-subdir=SUBDIR], [default configuration profile subdir [profile]]), DEFAULT_PROFILE_SUBDIR=$withval, DEFAULT_PROFILE_SUBDIR=profile) AC_DEFINE_UNQUOTED(DEFAULT_PROFILE_SUBDIR, ["$DEFAULT_PROFILE_SUBDIR"], [Name of default configuration profile subdirectory.]) AC_ARG_WITH(default-archive-subdir, AC_HELP_STRING([--with-default-archive-subdir=SUBDIR], [default metadata archive subdir [archive]]), DEFAULT_ARCHIVE_SUBDIR=$withval, DEFAULT_ARCHIVE_SUBDIR=archive) AC_DEFINE_UNQUOTED(DEFAULT_ARCHIVE_SUBDIR, ["$DEFAULT_ARCHIVE_SUBDIR"], [Name of default metadata archive subdirectory.]) AC_ARG_WITH(default-backup-subdir, AC_HELP_STRING([--with-default-backup-subdir=SUBDIR], [default metadata backup subdir [backup]]), DEFAULT_BACKUP_SUBDIR=$withval, DEFAULT_BACKUP_SUBDIR=backup) AC_DEFINE_UNQUOTED(DEFAULT_BACKUP_SUBDIR, ["$DEFAULT_BACKUP_SUBDIR"], [Name of default metadata backup subdirectory.]) AC_ARG_WITH(default-cache-subdir, AC_HELP_STRING([--with-default-cache-subdir=SUBDIR], [default metadata cache subdir [cache]]), DEFAULT_CACHE_SUBDIR=$withval, DEFAULT_CACHE_SUBDIR=cache) AC_DEFINE_UNQUOTED(DEFAULT_CACHE_SUBDIR, ["$DEFAULT_CACHE_SUBDIR"], [Name of default metadata cache subdirectory.]) # Select default system locking dir, prefer /run/lock over /var/lock DEFAULT_SYS_LOCK_DIR="$RUN_DIR/lock" test -d "$DEFAULT_SYS_LOCK_DIR" || DEFAULT_SYS_LOCK_DIR="/var/lock" # Support configurable locking subdir for lvm AC_ARG_WITH(default-locking-dir, AC_HELP_STRING([--with-default-locking-dir=DIR], [default locking directory [autodetect_lock_dir/lvm]]), DEFAULT_LOCK_DIR=$withval, [AC_MSG_CHECKING(for default lock directory) DEFAULT_LOCK_DIR="$DEFAULT_SYS_LOCK_DIR/lvm" AC_MSG_RESULT($DEFAULT_LOCK_DIR)]) AC_DEFINE_UNQUOTED(DEFAULT_LOCK_DIR, ["$DEFAULT_LOCK_DIR"], [Name of default locking directory.]) ################################################################################ dnl -- Setup default data alignment AC_ARG_WITH(default-data-alignment, AC_HELP_STRING([--with-default-data-alignment=NUM], [set the default data alignment in MiB [1]]), DEFAULT_DATA_ALIGNMENT=$withval, DEFAULT_DATA_ALIGNMENT=1) AC_DEFINE_UNQUOTED(DEFAULT_DATA_ALIGNMENT, [$DEFAULT_DATA_ALIGNMENT], [Default data alignment.]) ################################################################################ dnl -- which kernel interface to use (ioctl only) AC_MSG_CHECKING(for kernel interface choice) AC_ARG_WITH(interface, AC_HELP_STRING([--with-interface=IFACE], [choose kernel interface (ioctl) [ioctl]]), interface=$withval, interface=ioctl) test "$interface" != ioctl && AC_MSG_ERROR([--with-interface=ioctl required. 
fs no longer supported.]) AC_MSG_RESULT($interface) ################################################################################ read DM_LIB_VERSION < "$srcdir"/VERSION_DM 2>/dev/null || DM_LIB_VERSION=Unknown AC_DEFINE_UNQUOTED(DM_LIB_VERSION, "$DM_LIB_VERSION", [Library version]) DM_LIB_PATCHLEVEL=`cat "$srcdir"/VERSION_DM | $AWK -F '[[-. ]]' '{printf "%s.%s.%s",$1,$2,$3}'` read VER < "$srcdir"/VERSION 2>/dev/null || VER=Unknown LVM_VERSION=\"$VER\" LVM_RELEASE_DATE="\"`echo $VER | $SED 's/.* (//;s/).*//'`\"" VER=`echo "$VER" | $AWK '{print $1}'` LVM_RELEASE="\"`echo "$VER" | $AWK -F '-' '{print $2}'`\"" VER=`echo "$VER" | $AWK -F '-' '{print $1}'` LVM_MAJOR=`echo "$VER" | $AWK -F '.' '{print $1}'` LVM_MINOR=`echo "$VER" | $AWK -F '.' '{print $2}'` LVM_PATCHLEVEL=`echo "$VER" | $AWK -F '[[(.]]' '{print $3}'` LVM_LIBAPI=`echo "$VER" | $AWK -F '[[()]]' '{print $2}'` AC_DEFINE_UNQUOTED(LVM_CONFIGURE_LINE, "$CONFIGURE_LINE", [configure command line used]) ################################################################################ AC_SUBST(APPLIB) AC_SUBST(AWK) AC_SUBST(BLKID_PC) AC_SUBST(BUILD_CMIRRORD) AC_SUBST(BUILD_DMEVENTD) AC_SUBST(BUILD_LVMDBUSD) AC_SUBST(BUILD_LVMETAD) AC_SUBST(BUILD_LVMPOLLD) AC_SUBST(BUILD_LVMLOCKD) AC_SUBST(BUILD_LOCKDSANLOCK) AC_SUBST(BUILD_LOCKDDLM) AC_SUBST(BUILD_DMFILEMAPD) AC_SUBST(CACHE) AC_SUBST(CFLAGS) AC_SUBST(CFLOW_CMD) AC_SUBST(CHMOD) AC_SUBST(CLDFLAGS) AC_SUBST(CLDNOWHOLEARCHIVE) AC_SUBST(CLDWHOLEARCHIVE) AC_SUBST(CLUSTER) AC_SUBST(CLVMD) AC_SUBST(CLVMD_CMANAGERS) AC_SUBST(CLVMD_PATH) AC_SUBST(CMAN_CFLAGS) AC_SUBST(CMAN_LIBS) AC_SUBST(CMAP_CFLAGS) AC_SUBST(CMAP_LIBS) AC_SUBST(CMDLIB) AC_SUBST(CONFDB_CFLAGS) AC_SUBST(CONFDB_LIBS) AC_SUBST(CONFDIR) AC_SUBST(COPTIMISE_FLAG) AC_SUBST(CPG_CFLAGS) AC_SUBST(CPG_LIBS) AC_SUBST(CSCOPE_CMD) AC_SUBST(DEBUG) AC_SUBST(DEFAULT_ARCHIVE_SUBDIR) AC_SUBST(DEFAULT_BACKUP_SUBDIR) AC_SUBST(DEFAULT_CACHE_SUBDIR) AC_SUBST(DEFAULT_DATA_ALIGNMENT) AC_SUBST(DEFAULT_DM_RUN_DIR) AC_SUBST(DEFAULT_LOCK_DIR) AC_SUBST(DEFAULT_FALLBACK_TO_LVM1) AC_SUBST(DEFAULT_MIRROR_SEGTYPE) AC_SUBST(DEFAULT_PID_DIR) AC_SUBST(DEFAULT_PROFILE_SUBDIR) AC_SUBST(DEFAULT_RAID10_SEGTYPE) AC_SUBST(DEFAULT_RUN_DIR) AC_SUBST(DEFAULT_SPARSE_SEGTYPE) AC_SUBST(DEFAULT_SYS_DIR) AC_SUBST(DEFAULT_SYS_LOCK_DIR) AC_SUBST(DEFAULT_USE_BLKID_WIPING) AC_SUBST(DEFAULT_USE_LVMETAD) AC_SUBST(DEFAULT_USE_LVMPOLLD) AC_SUBST(DEFAULT_USE_LVMLOCKD) AC_SUBST(DEVMAPPER) AC_SUBST(DLM_CFLAGS) AC_SUBST(DLM_LIBS) AC_SUBST(DL_LIBS) AC_SUBST(DMEVENTD_PATH) AC_SUBST(DM_LIB_PATCHLEVEL) AC_SUBST(ELDFLAGS) AC_SUBST(FSADM) AC_SUBST(FSADM_PATH) AC_SUBST(BLKDEACTIVATE) AC_SUBST(HAVE_LIBDL) AC_SUBST(HAVE_REALTIME) AC_SUBST(HAVE_VALGRIND) AC_SUBST(INTL) AC_SUBST(JOBS) AC_SUBST(LDDEPS) AC_SUBST(LIBS) AC_SUBST(LIB_SUFFIX) AC_SUBST(LVM1) AC_SUBST(LVM1_FALLBACK) AC_SUBST(LVM_VERSION) AC_SUBST(LVM_LIBAPI) AC_SUBST(LVM_MAJOR) AC_SUBST(LVM_MINOR) AC_SUBST(LVM_PATCHLEVEL) AC_SUBST(LVM_PATH) AC_SUBST(LVM_RELEASE) AC_SUBST(LVM_RELEASE_DATE) AC_SUBST(localedir) AC_SUBST(MANGLING) AC_SUBST(MIRRORS) AC_SUBST(MSGFMT) AC_SUBST(OCF) AC_SUBST(OCFDIR) AC_SUBST(ODIRECT) AC_SUBST(PKGCONFIG) AC_SUBST(POOL) AC_SUBST(M_LIBS) AC_SUBST(PTHREAD_LIBS) AC_SUBST(PYTHON2) AC_SUBST(PYTHON3) AC_SUBST(PYTHON_BINDINGS) AC_SUBST(PYTHON2_BINDINGS) AC_SUBST(PYTHON3_BINDINGS) AC_SUBST(PYTHON2_INCDIRS) AC_SUBST(PYTHON3_INCDIRS) AC_SUBST(PYTHON2_LIBDIRS) AC_SUBST(PYTHON3_LIBDIRS) AC_SUBST(PYTHON2DIR) AC_SUBST(PYTHON3DIR) AC_SUBST(QUORUM_CFLAGS) AC_SUBST(QUORUM_LIBS) AC_SUBST(RAID) AC_SUBST(RT_LIBS) AC_SUBST(READLINE_LIBS) 
AC_SUBST(REPLICATORS) AC_SUBST(SACKPT_CFLAGS) AC_SUBST(SACKPT_LIBS) AC_SUBST(SALCK_CFLAGS) AC_SUBST(SALCK_LIBS) AC_SUBST(SBINDIR) AC_SUBST(SELINUX_LIBS) AC_SUBST(SELINUX_PC) AC_SUBST(SYSCONFDIR) AC_SUBST(SYSTEMD_LIBS) AC_SUBST(SNAPSHOTS) AC_SUBST(STATICDIR) AC_SUBST(STATIC_LINK) AC_SUBST(TESTING) AC_SUBST(TESTSUITE_DATA) AC_SUBST(THIN) AC_SUBST(THIN_CHECK_CMD) AC_SUBST(THIN_DUMP_CMD) AC_SUBST(THIN_REPAIR_CMD) AC_SUBST(THIN_RESTORE_CMD) AC_SUBST(CACHE_CHECK_CMD) AC_SUBST(CACHE_DUMP_CMD) AC_SUBST(CACHE_REPAIR_CMD) AC_SUBST(CACHE_RESTORE_CMD) AC_SUBST(UDEV_PC) AC_SUBST(UDEV_RULES) AC_SUBST(UDEV_SYNC) AC_SUBST(UDEV_SYSTEMD_BACKGROUND_JOBS) AC_SUBST(UDEV_RULE_EXEC_DETECTION) AC_SUBST(UDEV_HAS_BUILTIN_BLKID) AC_SUBST(USE_TRACKING) AC_SUBST(USRSBINDIR) AC_SUBST(VALGRIND_POOL) AC_SUBST(WRITE_INSTALL) AC_SUBST(DMEVENTD_PIDFILE) AC_SUBST(LVMETAD_PIDFILE) AC_SUBST(LVMPOLLD_PIDFILE) AC_SUBST(LVMLOCKD_PIDFILE) AC_SUBST(CLVMD_PIDFILE) AC_SUBST(CMIRRORD_PIDFILE) AC_SUBST(interface) AC_SUBST(kerneldir) AC_SUBST(missingkernel) AC_SUBST(kernelvsn) AC_SUBST(tmpdir) AC_SUBST(udev_prefix) AC_SUBST(udevdir) AC_SUBST(systemdsystemunitdir) AC_SUBST(systemdutildir) AC_SUBST(tmpfilesdir) AC_SUBST(usrlibdir) AC_SUBST(usrsbindir) ################################################################################ dnl -- First and last lines should not contain files to generate in order to dnl -- keep utility scripts running properly AC_CONFIG_FILES([ Makefile make.tmpl daemons/Makefile daemons/clvmd/Makefile daemons/cmirrord/Makefile daemons/dmeventd/Makefile daemons/dmeventd/libdevmapper-event.pc daemons/dmeventd/plugins/Makefile daemons/dmeventd/plugins/lvm2/Makefile daemons/dmeventd/plugins/raid/Makefile daemons/dmeventd/plugins/mirror/Makefile daemons/dmeventd/plugins/snapshot/Makefile daemons/dmeventd/plugins/thin/Makefile daemons/dmfilemapd/Makefile daemons/lvmdbusd/Makefile daemons/lvmdbusd/path.py daemons/lvmetad/Makefile daemons/lvmpolld/Makefile daemons/lvmlockd/Makefile conf/Makefile conf/example.conf conf/lvmlocal.conf conf/command_profile_template.profile conf/metadata_profile_template.profile include/.symlinks include/Makefile lib/Makefile lib/format1/Makefile lib/format_pool/Makefile lib/locking/Makefile lib/mirror/Makefile include/lvm-version.h lib/raid/Makefile lib/snapshot/Makefile lib/thin/Makefile lib/cache_segtype/Makefile libdaemon/Makefile libdaemon/client/Makefile libdaemon/server/Makefile libdm/Makefile libdm/libdevmapper.pc liblvm/Makefile liblvm/liblvm2app.pc man/Makefile po/Makefile python/Makefile python/setup.py scripts/blkdeactivate.sh scripts/blk_availability_init_red_hat scripts/blk_availability_systemd_red_hat.service scripts/clvmd_init_red_hat scripts/cmirrord_init_red_hat scripts/com.redhat.lvmdbus1.service scripts/dm_event_systemd_red_hat.service scripts/dm_event_systemd_red_hat.socket scripts/lvm2_cluster_activation_red_hat.sh scripts/lvm2_cluster_activation_systemd_red_hat.service scripts/lvm2_clvmd_systemd_red_hat.service scripts/lvm2_cmirrord_systemd_red_hat.service scripts/lvm2_lvmdbusd_systemd_red_hat.service scripts/lvm2_lvmetad_init_red_hat scripts/lvm2_lvmetad_systemd_red_hat.service scripts/lvm2_lvmetad_systemd_red_hat.socket scripts/lvm2_lvmpolld_init_red_hat scripts/lvm2_lvmpolld_systemd_red_hat.service scripts/lvm2_lvmpolld_systemd_red_hat.socket scripts/lvm2_lvmlockd_systemd_red_hat.service scripts/lvm2_lvmlocking_systemd_red_hat.service scripts/lvm2_monitoring_init_red_hat scripts/lvm2_monitoring_systemd_red_hat.service scripts/lvm2_pvscan_systemd_red_hat@.service 
scripts/lvm2_tmpfiles_red_hat.conf scripts/lvmdump.sh scripts/Makefile test/Makefile test/api/Makefile test/unit/Makefile tools/Makefile udev/Makefile unit-tests/datastruct/Makefile unit-tests/regex/Makefile unit-tests/mm/Makefile ]) AC_OUTPUT AS_IF([test -n "$THIN_CONFIGURE_WARN"], [AC_MSG_WARN([Support for thin provisioning is limited since some thin provisioning tools are missing!])]) AS_IF([test -n "$THIN_CHECK_VERSION_WARN"], [AC_MSG_WARN([You should also install latest thin_check vsn 0.7.0 (or later) for lvm2 thin provisioning])]) AS_IF([test -n "$CACHE_CONFIGURE_WARN"], [AC_MSG_WARN([Support for cache is limited since some cache tools are missing!])]) AS_IF([test -n "$CACHE_CHECK_VERSION_WARN"], [AC_MSG_WARN([You should install latest cache_check vsn 0.7.0 to use lvm2 cache metadata format 2])]) AS_IF([test "$ODIRECT" != yes], [AC_MSG_WARN([O_DIRECT disabled: low-memory pvmove may lock up])]) LVM2.2.02.176/scripts/0000755000000000000120000000000013176752421013013 5ustar rootwheelLVM2.2.02.176/scripts/blk_availability_systemd_red_hat.service.in0000644000000000000120000000060713176752421023545 0ustar rootwheel[Unit] Description=Availability of block devices After=lvm2-activation.service lvm2-lvmetad.service iscsi-shutdown.service iscsi.service iscsid.service fcoe.service DefaultDependencies=no Conflicts=shutdown.target [Service] Type=oneshot ExecStart=/usr/bin/true ExecStop=@SBINDIR@/blkdeactivate -u -l wholevg -m disablequeueing -r wait RemainAfterExit=yes [Install] WantedBy=sysinit.target LVM2.2.02.176/scripts/Makefile.in0000644000000000000120000001427713176752421015073 0ustar rootwheel# # Copyright (C) 2006-2015 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA srcdir = @srcdir@ top_srcdir = @top_srcdir@ top_builddir = @top_builddir@ ifeq ("@APPLIB@", "yes") SOURCES = lvm2_activation_generator_systemd_red_hat.c TARGETS = lvm2_activation_generator_systemd_red_hat endif include $(top_builddir)/make.tmpl ifeq ("@APPLIB@", "yes") DEPLIBS += $(top_builddir)/liblvm/liblvm2app.so $(top_builddir)/libdm/libdevmapper.so LDFLAGS += -L$(top_builddir)/liblvm ifeq ("@BUILD_DMEVENTD@", "yes") LDFLAGS += -Wl,-rpath-link,$(top_builddir)/daemons/dmeventd endif LVMLIBS = @LVM2APP_LIB@ -ldevmapper endif LVM_SCRIPTS = lvmdump.sh lvmconf.sh DM_SCRIPTS = ifeq ("@FSADM@", "yes") LVM_SCRIPTS += fsadm.sh endif ifeq ("@BLKDEACTIVATE@", "yes") DM_SCRIPTS += blkdeactivate.sh endif OCF_SCRIPTS = ifeq ("@OCF@", "yes") OCF_SCRIPTS += VolumeGroup.ocf endif vpath %.sh $(srcdir) vpath %.ocf $(srcdir) %_install: %.sh $(INSTALL_PROGRAM) -D $< $(sbindir)/$(basename $(/dev/null 2>&1 } start() { ret=0 action "Starting LVM metadata daemon:" "$sbindir/$DAEMON" || ret=$? return $ret } stop() { ret=0 action "Signaling LVM metadata daemon to exit:" killproc -p "$PID_FILE" "$DAEMON" -TERM || ret=$? return $ret } rtrn=1 # See how we were called. case "$1" in start) rh_status_q && exit 0 start rtrn=$? [ "$rtrn" = 0 ] && touch "$LOCK_FILE" ;; stop|force-stop) rh_status_q || exit 0 stop rtrn=$? [ "$rtrn" = 0 ] && rm -f "$LOCK_FILE" ;; restart) if stop then start fi rtrn=$? 
;; condrestart|try-restart) rh_status_q || exit 0 if stop then start fi rtrn=$? ;; status) rh_status rtrn=$? ;; *) echo $"Usage: $0 {start|stop|force-stop|restart|condrestart|try-restart|status}" ;; esac exit $rtrn LVM2.2.02.176/scripts/VolumeGroup.ocf0000644000000000000120000001541213176752421015773 0ustar rootwheel#!/bin/sh # # VolumeGroup # # Description: Manages an LVM2 volume group as an HA resource in # an OCF-compliant cluster # # # Authors: Alan Robertson, Lars Marowsky-Bree, Florian Haas, # and others from the Linux-HA project # License: GNU General Public License (GPL) # Copyright: (C) 2002 - 2005 International Business Machines, Inc. # (C) 2010 LINBIT HA-Solutions GmbH # # This code significantly inspired by the LVM resource # in FailSafe by Lars Marowsky-Bree # ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/resource.d/heartbeat} . ${OCF_FUNCTIONS_DIR}/.ocf-shellfuncs ####################################################################### usage() { methods=`VolumeGroup_methods` methods=`echo $methods | tr ' ' '|'` cat < 1.0 Resource script for an LVM Volume Group. Controls the availability of an LVM Volume Group The name of volume group. Volume group name If set, the volume group will be activated exclusively. Exclusive activation EOF } # # methods: What methods/operations do we support? # VolumeGroup_methods() { cat <&1` || exit $OCF_ERR_GENERIC echo "$VGOUT" | grep -i 'Status[ \t]*available' >/dev/null rc=$? if [ $rc -eq 0 ]; then ocf_log debug "LVM Volume Group $OCF_RESKEY_volgrpname is available (started)" else ocf_log debug "LVM Volume Group $OCF_RESKEY_volgrpname is not available (stopped)" return $OCF_NOT_RUNNING fi if echo "$VGOUT" | grep -i 'Access.*read/write' >/dev/null; then ocf_log debug "Volume $OCF_RESKEY_volgrpname is available read/write (running)" else ocf_log debug "Volume $OCF_RESKEY_volgrpname is available read-only (running)" fi return $OCF_SUCCESS } # # Monitor the volume - does it really seem to be working? May report # $OCF_SUCCESS or $OCF_NOT_RUNNING like VolumeGroup_status, plus # $OCF_ERR_GENERIC in case vgck reports an error. # VolumeGroup_monitor() { if ! VolumeGroup_status $OCF_RESKEY_volgrpname; then ocf_log info "LVM Volume Group $OCF_RESKEY_volgrpname is offline" return $OCF_NOT_RUNNING fi ocf_run vgck $OCF_RESKEY_volgrpname || exit $OCF_ERR_GENERIC return $OCF_SUCCESS } # # Activate the volume group, either locally (if $OCF_RESKEY_exclusive # is false or unset), or exclusively (if $OCF_RESKEY_exclusive is # true). # Either returns successfully, or exits with $OCF_ERR_GENERIC. # VolumeGroup_start() { ocf_log info "Activating volume group $OCF_RESKEY_volgrpname" ocf_run vgscan local active_mode active_mode="ly" if ocf_is_true "$OCF_RESKEY_exclusive" ; then active_mode="ey" fi ocf_run vgchange -a $active_mode $OCF_RESKEY_volgrpname || exit $OCF_ERR_GENERIC if ! VolumeGroup_status $OCF_RESKEY_volgrpname; then ocf_log err "LVM: $OCF_RESKEY_volgrpname did not activate correctly" exit $OCF_ERR_GENERIC fi return $OCF_SUCCESS } # # Deactivate the volume group. # Either returns successfully, or exits with $OCF_ERR_GENERIC. # VolumeGroup_stop() { if ! 
VolumeGroup_status; then ocf_log debug "Volume Group $OCF_RESKEY_volgrpname already stopped" return $OCF_SUCCESS fi ocf_log info "Deactivating volume group $OCF_RESKEY_volgrpname" ocf_run vgchange -a ln $OCF_RESKEY_volgrpname || exit $OCF_ERR_GENERIC if VolumeGroup_status; then ocf_log err "LVM: $OCF_RESKEY_volgrpname did not stop correctly" exit $OCF_ERR_GENERIC fi return $OCF_SUCCESS } # # Check whether the OCF instance parameters are valid. # Either returns successfully, or exits with # $OCF_ERR_CONFIGURED if required parameters are missing; # $OCF_ERR_INSTALLED if required binaries are missing; # $OCF_ERR_GENERIC in case of any other error. # VolumeGroup_validate_all() { if [ -z $OCF_RESKEY_volgrpname ]; then ocf_log err 'Missing required parameter "volgrpname"!' exit $OCF_ERR_CONFIGURED fi check_binary vgchange check_binary vgck check_binary vgdisplay # Run the following tests only if we're not invoked by a probe # operation if ! ocf_is_probe; then # Off-the-shelf tests... vgck "$OCF_RESKEY_volgrpname" >/dev/null 2>&1 if [ $? -ne 0 ]; then ocf_log err "Volume group $OCF_RESKEY_volgrpname does not exist or contains error!" exit $OCF_ERR_GENERIC fi # Double-check vgdisplay -v "$OCF_RESKEY_volgrpname" >/dev/null 2>&1 if [ $? -ne 0 ]; then ocf_log err "Volume group $OCF_RESKEY_volgrpname does not exist or contains error!" exit $OCF_ERR_GENERIC fi fi return $OCF_SUCCESS } # # 'main' starts here... # if [ $# -ne 1 ]; then usage exit $OCF_ERR_ARGS fi case $1 in meta-data) meta_data exit $OCF_SUCCESS;; methods) VolumeGroup_methods exit $OCF_SUCCESS;; usage) usage exit $OCF_SUCCESS;; *) ;; esac # Everything except usage and meta-data must pass the validate test VolumeGroup_validate_all # What kind of method was invoked? case "$1" in start) VolumeGroup_start ;; stop) VolumeGroup_stop ;; status) VolumeGroup_status ;; monitor) VolumeGroup_monitor ;; validate-all) ;; notify|promote|demote|migrate_from|migrate_to) usage exit $OCF_ERR_UNIMPLEMENTED ;; *) usage exit $OCF_ERR_ARGS ;; esac exit $? LVM2.2.02.176/scripts/lvmconf_lockingtype2.sh0000644000000000000120000001522213176752421017507 0ustar rootwheel#!/bin/bash # # Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved. # # This file is part of the lvm2-cluster package. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Edit an lvm.conf file to adjust various properties # function usage { echo "usage: $0 " echo "" echo "Commands:" echo "Enable clvm: --enable-cluster [--lockinglibdir ] [--lockinglib ]" echo "Disable clvm: --disable-cluster" echo "Set locking library: --lockinglibdir [--lockinglib ]" echo "" echo "Global options:" echo "Config file location: --file " echo "" } function parse_args { while [ -n "$1" ]; do case "$1" in --enable-cluster) LOCKING_TYPE=2 shift ;; --disable-cluster) LOCKING_TYPE=1 shift ;; --lockinglibdir) if [ -n "$2" ]; then LOCKINGLIBDIR=$2 shift 2 else usage exit 1 fi ;; --lockinglib) if [ -n "$2" ]; then LOCKINGLIB=$2 shift 2 else usage exit 1 fi ;; --file) if [ -n "$2" ]; then CONFIGFILE=$2 shift 2 else usage exit 1 fi ;; *) usage exit 1 esac done } function validate_args { [ -z "$CONFIGFILE" ] && CONFIGFILE="/etc/lvm/lvm.conf" if [ ! 
-f "$CONFIGFILE" ] then echo "$CONFIGFILE does not exist" exit 10 fi if [ -z "$LOCKING_TYPE" ] && [ -z "$LOCKINGLIBDIR" ]; then usage exit 1 fi if [ -n "$LOCKINGLIBDIR" ]; then [ -z "$LOCKINGLIB" ] && LOCKINGLIB="liblvm2clusterlock.so" if [ "${LOCKINGLIBDIR:0:1}" != "/" ] then echo "Prefix must be an absolute path name (starting with a /)" exit 12 fi if [ ! -f "$LOCKINGLIBDIR/$LOCKINGLIB" ] then echo "$LOCKINGLIBDIR/$LOCKINGLIB does not exist, did you do a \"make install\" ?" exit 11 fi fi if [ "$LOCKING_TYPE" = "1" ] ; then if [ -n "$LOCKINGLIBDIR" ] || [ -n "$LOCKINGLIB" ]; then echo "Superfluous locking lib parameter, ignoring" fi fi } umask 0077 parse_args "$@" validate_args SCRIPTFILE=/etc/lvm/.lvmconf-script.tmp TMPFILE=/etc/lvm/.lvmconf-tmp.tmp # Flags so we know which parts of the file we can replace and which need # adding. These are return codes from grep, so zero means it IS present! have_type=1 have_dir=1 have_library=1 have_global=1 grep -q '^[[:blank:]]*locking_type[[:blank:]]*=' "$CONFIGFILE" have_type=$? grep -q '^[[:blank:]]*library_dir[[:blank:]]*=' "$CONFIGFILE" have_dir=$? grep -q '^[[:blank:]]*locking_library[[:blank:]]*=' "$CONFIGFILE" have_library=$? # Those options are in section "global {" so we must have one if any are present. if [ "$have_type" = "0" ] || [ "$have_dir" = "0" ] || [ "$have_library" = "0" ]; then # See if we can find it... grep -q '^[[:blank:]]*global[[:blank:]]*{' "$CONFIGFILE" have_global=$? if [ "$have_global" = "1" ] then echo "global keys but no 'global {' found, can't edit file" exit 13 fi fi if [ "$LOCKING_TYPE" = "2" ] && [ -z "$LOCKINGLIBDIR" ] && [ "$have_dir" = "1" ]; then echo "no library_dir specified in $CONFIGFILE" exit 16 fi # So if we don't have "global {" we need to create one and # populate it if [ "$have_global" = "1" ] then if [ -z "$LOCKING_TYPE" ]; then LOCKING_TYPE=1 fi if [ "$LOCKING_TYPE" = "2" ]; then cat "$CONFIGFILE" - < $TMPFILE global { # Enable locking for cluster LVM locking_type = $LOCKING_TYPE library_dir = "$LOCKINGLIBDIR" locking_library = "$LOCKINGLIB" } EOF fi # if we aren't setting cluster locking, we don't need to create a global section if [ $? != 0 ] then echo "failed to create temporary config file, $CONFIGFILE not updated" exit 14 fi else # # We have a "global {" section, so add or replace the # locking entries as appropriate # if [ -n "$LOCKING_TYPE" ]; then if [ "$have_type" = "0" ] then SEDCMD=" s/^[[:blank:]]*locking_type[[:blank:]]*=.*/\ \ \ \ locking_type = $LOCKING_TYPE/g" else SEDCMD=" /global[[:blank:]]*{/a\ \ \ \ locking_type = $LOCKING_TYPE" fi fi if [ -n "$LOCKINGLIBDIR" ]; then if [ "$have_dir" = "0" ] then SEDCMD="${SEDCMD}\ns'^[[:blank:]]*library_dir[[:blank:]]*=.*'\ \ \ \ library_dir = \"$LOCKINGLIBDIR\"'g" else SEDCMD="${SEDCMD}\n/global[[:blank:]]*{/a\ \ \ \ library_dir = \"$LOCKINGLIBDIR\"" fi if [ "$have_library" = "0" ] then SEDCMD="${SEDCMD}\ns/^[[:blank:]]*locking_library[[:blank:]]*=.*/\ \ \ \ locking_library = \"$LOCKINGLIB\"/g" else SEDCMD="${SEDCMD}\n/global[[:blank:]]*{/a\ \ \ \ locking_library = \"$LOCKINGLIB\"" fi fi if [ "$LOCKING_TYPE" = "1" ]; then # if we're not using cluster locking, remove the library dir and locking library name if [ "$have_dir" = "0" ] then SEDCMD="${SEDCMD}\n/^[[:blank:]]*library_dir[[:blank:]]*=.*/d" fi if [ "$have_library" = "0" ] then SEDCMD="${SEDCMD}\n/^[[:blank:]]*locking_library[[:blank:]]*=.*/d" fi fi echo -e "$SEDCMD" > "$SCRIPTFILE" sed <"$CONFIGFILE" >"$TMPFILE" -f "$SCRIPTFILE" if [ $? 
!= 0 ] then echo "sed failed, $CONFIGFILE not updated" exit 15 fi fi # Now we have a suitably editted config file in a temp place, # backup the original and copy our new one into place. cp "$CONFIGFILE" "$CONFIGFILE.lvmconfold" if [ $? != 0 ] then echo "failed to backup old config file, $CONFIGFILE not updated" exit 2 fi cp "$TMPFILE" "$CONFIGFILE" if [ $? != 0 ] then echo "failed to copy new config file into place, check $CONFIGFILE is still OK" exit 3 fi rm -f "$SCRIPTFILE" "$TMPFILE" LVM2.2.02.176/scripts/lvm2_lvmetad_systemd_red_hat.socket.in0000644000000000000120000000034013176752421022461 0ustar rootwheel[Unit] Description=LVM2 metadata daemon socket Documentation=man:lvmetad(8) DefaultDependencies=no [Socket] ListenStream=@DEFAULT_RUN_DIR@/lvmetad.socket SocketMode=0600 RemoveOnStop=true [Install] WantedBy=sysinit.target LVM2.2.02.176/scripts/lvm2_monitoring_init_red_hat.in0000644000000000000120000000577613176752421021220 0ustar rootwheel#!/bin/bash # # Copyright (C) 2007-2009 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # This file is part of LVM2. # It is required for the proper handling of failures of LVM2 mirror # devices that were created using the -m option of lvcreate. # # # chkconfig: 12345 02 99 # description: Starts and stops dmeventd monitoring for lvm2 # # For Red-Hat-based distributions such as Fedora, RHEL, CentOS. # ### BEGIN INIT INFO # Provides: lvm2-monitor # Required-Start: $local_fs # Required-Stop: $local_fs # Default-Start: 1 2 3 4 5 # Default-Stop: 0 6 # Short-Description: Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling ### END INIT INFO . /etc/init.d/functions DAEMON=lvm2-monitor DMEVENTD_DAEMON=dmeventd sbindir=@SBINDIR@ VGCHANGE="$sbindir/vgchange" VGS="$sbindir/vgs" LVS="$sbindir/lvs" LOCK_FILE="@DEFAULT_SYS_LOCK_DIR@/subsys/$DAEMON" PID_FILE="@DMEVENTD_PIDFILE@" WARN=1 export LVM_SUPPRESS_LOCKING_FAILURE_MESSAGES=1 rh_status() { status -p "$PID_FILE" "$DMEVENTD_DAEMON" } rh_status_q() { rh_status >/dev/null 2>&1 } start() { ret=0 # TODO do we want to separate out already active groups only? VGSLIST=`$VGS --noheadings -o name --ignoreskippedcluster --config 'log{command_names=0 prefix=" "}' 2> /dev/null` for vg in $VGSLIST do action "Starting monitoring for VG $vg:" "$VGCHANGE" --monitor y --poll y --ignoreskippedcluster --config 'log{command_names=0 prefix=" "}' $vg || ret=$? done return $ret } stop() { ret=0 # TODO do we want to separate out already active groups only? if test "$WARN" = "1"; then echo "Not stopping monitoring, this is a dangerous operation. Please use force-stop to override." return 1 fi VGSLIST=`$VGS --noheadings -o name --ignoreskippedcluster --config 'log{command_names=0 prefix=" "}' 2> /dev/null` for vg in $VGSLIST do action "Stopping monitoring for VG $vg:" "$VGCHANGE" --monitor n --ignoreskippedcluster --config 'log{command_names=0 prefix=" "}' $vg || ret=$? done return $ret } rtrn=1 # See how we were called. case "$1" in start) rh_status_q && exit 0 start rtrn=$? [ "$rtrn" = 0 ] && touch "$LOCK_FILE" ;; force-stop) rh_status_q || exit 0 WARN=0 stop rtrn=$? 
[ "$rtrn" = 0 ] && rm -f "$LOCK_FILE" ;; stop) rh_status_q || exit 0 test "$runlevel" = "0" && WARN=0 test "$runlevel" = "6" && WARN=0 stop rtrn=$? [ "$rtrn" = 0 ] && rm -f "$LOCK_FILE" ;; restart) WARN=0 if stop then start fi rtrn=$? ;; status) rh_status rtrn=$? [ "$rtrn" = 0 ] && "$LVS" -S 'seg_monitor=monitored' -o lv_full_name,seg_monitor ;; *) echo $"Usage: $0 {start|stop|restart|status|force-stop}" ;; esac exit $rtrn LVM2.2.02.176/scripts/clvmd_fix_conf.sh0000644000000000000120000000746313176752421016341 0ustar rootwheel#!/bin/bash # # Edit an lvm.conf file to enable cluster locking. # # $1 is the directory where the locking library is installed. # $2 (optional) is the config file # $3 (optional) is the locking library name # # PREFIX=$1 LVMCONF=$2 LIB=$3 if [ -z "$PREFIX" ] then echo "usage: $0 [] []" echo "" echo "|UNDO location of the cluster locking shared library. (no default)" echo " UNDO will reset the locking back to local" echo " name of the LVM config file (default: /etc/lvm/lvm.conf)" echo " name of the shared library (default: liblvm2clusterlock.so)" echo "" exit 0 fi [ -z "$LVMCONF" ] && LVMCONF="/etc/lvm/lvm.conf" [ -z "$LIB" ] && LIB="liblvm2clusterlock.so" if [ "$PREFIX" = "UNDO" ] then locking_type="1" else locking_type="2" if [ "${PREFIX:0:1}" != "/" ] then echo "Prefix must be an absolute path name (starting with a /)" exit 12 fi if [ ! -f "$PREFIX/$LIB" ] then echo "$PREFIX/$LIB does not exist, did you do a \"make install\" ?" exit 11 fi fi if [ ! -f "$LVMCONF" ] then echo "$LVMCONF does not exist" exit 10 fi SCRIPTFILE=$(mktemp -t lvmscript.XXXXXXXXXX) TMPFILE=$(mktemp -t lvmtmp.XXXXXXXXXX) # Flags so we know which parts of the file we can replace and which need # adding. These are return codes from grep, so zero means it IS present! have_type=1 have_dir=1 have_library=1 have_global=1 grep -q '^[[:blank:]]*locking_type[[:blank:]]*=' "$LVMCONF" have_type=$? grep -q '^[[:blank:]]*library_dir[[:blank:]]*=' "$LVMCONF" have_dir=$? grep -q '^[[:blank:]]*locking_library[[:blank:]]*=' "$LVMCONF" have_library=$? # Those options are in section "global {" so we must have one if any are present. if [ "$have_type" = 0 ] || [ "$have_dir" = 0 ] || [ "$have_library" = 0 ] ; then # See if we can find it... grep -q '^[[:blank:]]*global[[:blank:]]*{' "$LVMCONF" have_global=$? if [ "$have_global" = "1" ] then echo "global keys but no 'global {' found, can't edit file" exit 12 fi fi # So if we don't have "global {" we need to create one and # populate it if [ "$have_global" = "1" ] then cat "$LVMCONF" - < "$TMPFILE" global { # Enable locking for cluster LVM locking_type = $locking_type library_dir = "$PREFIX" locking_library = "$LIB" } EOF if [ $? 
!= 0 ] then echo "failed to create temporary config file, $LVMCONF not updated" exit 1 fi else # # We have a "global {" section, so add or replace the # locking entries as appropriate # if [ "$have_type" = "0" ] then SEDCMD=" s/^[[:blank:]]*locking_type[[:blank:]]*=.*/\ \ \ \ locking_type = $locking_type/g" else SEDCMD=" /global[[:blank:]]*{/a\ \ \ \ locking_type = 2" fi if [ "$have_dir" = "0" ] then SEDCMD="${SEDCMD}\ns'^[[:blank:]]*library_dir[[:blank:]]*=.*'\ \ \ \ library_dir = \"$PREFIX\"'g" else SEDCMD="${SEDCMD}\n/global[[:blank:]]*{/a\ \ \ \ library_dir = \"$PREFIX\"" fi if [ "$have_library" = "0" ] then SEDCMD="${SEDCMD}\ns/^[[:blank:]]*locking_library[[:blank:]]*=.*/\ \ \ \ locking_library = \"$LIB\"/g" else SEDCMD="${SEDCMD}\n/global[[:blank:]]*{/a\ \ \ \ locking_library = \"$LIB\"" fi echo -e "$SEDCMD" > "$SCRIPTFILE" sed <"$LVMCONF" >"$TMPFILE" -f "$SCRIPTFILE" if [ $? != 0 ] then echo "sed failed, $LVMCONF not updated" exit 1 fi fi # Now we have a suitably editted config file in a temp place, # backup the original and copy our new one into place. cp "$LVMCONF" "$LVMCONF.nocluster" if [ $? != 0 ] then echo "failed to backup old config file, $LVMCONF not updated" exit 2 fi cp "$TMPFILE" "$LVMCONF" if [ $? != 0 ] then echo "failed to copy new config file into place, check $LVMCONF is still OK" exit 3 fi rm -f "$SCRIPTFILE" "$TMPFILE" LVM2.2.02.176/scripts/blk_availability_init_red_hat.in0000644000000000000120000000270713176752421021364 0ustar rootwheel#!/bin/bash # # Copyright (C) 2012-2017 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # This script is responsible for executing blkdeactivate at shutdown # to properly unmount filesystems and deactivate device stacks containing # device-mapper devices (including plain device-mapper devices, LVM2 and # multipath devices) and MD devices. # # chkconfig: 12345 25 75 # description: Controls availability of block devices # # For Red-Hat-based distributions such as Fedora, RHEL, CentOS. # ### BEGIN INIT INFO # Provides: blk-availability # Required-Start: # Required-Stop: # Default-Start: 1 2 3 4 5 # Default-Stop: 0 6 # Short-Description: Availability of block devices ### END INIT INFO . /etc/init.d/functions script=blkdeactivate sbindir=@SBINDIR@ options="-u -l wholevg -m disablequeueing -r wait" LOCK_FILE="@DEFAULT_LOCK_DIR@/subsys/blk-availability" case "$1" in start) touch "$LOCK_FILE" ;; stop) action "Stopping block device availability:" "$sbindir/$script" $options rm -f "$LOCK_FILE" ;; status) ;; *) echo $"Usage: $0 {start|stop|status}" ;; esac LVM2.2.02.176/scripts/vg_convert0000755000000000000120000000111713176752421015115 0ustar rootwheel#!/bin/sh -x # Original script used to convert a VG from LVM1 to LVM2 metadata format. # Superceded by 'vgconvert', but left here to show how to do it step-by-step. # Takes vgname as parameter. No error checking. Uses temp file 'lvmbackup'. 
echo "Please use the 'vgconvert' tool instead" exit 1 ./vgcfgbackup $1 || exit 1 ./vgcfgbackup --file lvmbackup $1 || exit 1 CMDS=`./pvscan -u | sed -ne "s/.*PV \(.*\) with UUID \(.*\) VG $1 .*/.\/pvcreate -ff -y -M lvm2 --restorefile lvmbackup -u \2 \1 ; /p"` sh -x -c "$CMDS" || exit 1 ./vgcfgrestore --file lvmbackup -M lvm2 $1 || exit 1 LVM2.2.02.176/scripts/lvm2_activation_generator_systemd_red_hat.c0000644000000000000120000001346013176752421023570 0ustar rootwheel/* * Copyright (C) 2012 Red Hat, Inc. All rights reserved. * * This file is part of the device-mapper userspace tools. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License v.2. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include /* For PATH_MAX for musl libc */ #include "lvm2app.h" #include "configure.h" /* for LVM_PATH */ #define KMSG_DEV_PATH "/dev/kmsg" #define LVM_CONF_USE_LVMETAD "global/use_lvmetad" #define LVM_CONF_USE_LVMPOLLD "global/use_lvmpolld" #define UNIT_TARGET_LOCAL_FS "local-fs-pre.target" #define UNIT_TARGET_REMOTE_FS "remote-fs-pre.target" static char unit_path[PATH_MAX]; static char target_path[PATH_MAX]; static char message[PATH_MAX + 3]; /* +3 for '' where n is the log level */ static int kmsg_fd = -1; enum { UNIT_EARLY, UNIT_MAIN, UNIT_NET }; static const char *unit_names[] = { [UNIT_EARLY] = "lvm2-activation-early.service", [UNIT_MAIN] = "lvm2-activation.service", [UNIT_NET] = "lvm2-activation-net.service" }; __attribute__ ((format(printf, 2, 3))) static void kmsg(int log_level, const char *format, ...) { va_list ap; int n; snprintf(message, 4, "<%d>", log_level); va_start(ap, format); n = vsnprintf(message + 3, PATH_MAX, format, ap); va_end(ap); if (kmsg_fd < 0 || (n < 0 || ((unsigned) n + 1 > PATH_MAX))) return; /* The n+4: +3 for "" prefix and +1 for '\0' suffix */ if (write(kmsg_fd, message, n + 4)) { /* Ignore result code */; } } static void lvm_get_use_lvmetad_and_lvmpolld(int *use_lvmetad, int *use_lvmpolld) { *use_lvmetad = *use_lvmpolld = 0; *use_lvmetad = lvm_config_find_bool(NULL, LVM_CONF_USE_LVMETAD, 0); *use_lvmpolld = lvm_config_find_bool(NULL, LVM_CONF_USE_LVMPOLLD, 0); } static int register_unit_with_target(const char *dir, const char *unit, const char *target) { int r = 1; if (dm_snprintf(target_path, PATH_MAX, "%s/%s.wants", dir, target) < 0) { r = 0; goto out; } (void) dm_prepare_selinux_context(target_path, S_IFDIR); if (mkdir(target_path, 0755) < 0 && errno != EEXIST) { kmsg(LOG_ERR, "LVM: Failed to create target directory %s: %m.\n", target_path); r = 0; goto out; } if (dm_snprintf(target_path, PATH_MAX, "%s/%s.wants/%s", dir, target, unit) < 0) { r = 0; goto out; } (void) dm_prepare_selinux_context(target_path, S_IFLNK); if (symlink(unit_path, target_path) < 0) { kmsg(LOG_ERR, "LVM: Failed to create symlink for unit %s: %m.\n", unit); r = 0; } out: dm_prepare_selinux_context(NULL, 0); return r; } static int generate_unit(const char *dir, int unit, int sysinit_needed) { FILE *f; const char *unit_name = unit_names[unit]; const char *target_name = unit == UNIT_NET ? 
UNIT_TARGET_REMOTE_FS : UNIT_TARGET_LOCAL_FS; if (dm_snprintf(unit_path, PATH_MAX, "%s/%s", dir, unit_name) < 0) return 0; if (!(f = fopen(unit_path, "wxe"))) { kmsg(LOG_ERR, "LVM: Failed to create unit file %s: %m.\n", unit_name); return 0; } fputs("# Automatically generated by lvm2-activation-generator.\n" "#\n" "# This unit is responsible for direct activation of LVM2 logical volumes\n" "# if lvmetad daemon is not used (global/use_lvmetad=0 lvm.conf setting),\n" "# hence volume autoactivation is not applicable.\n" "# Direct LVM2 activation requires udev to be settled!\n\n" "[Unit]\n" "Description=Activation of LVM2 logical volumes\n" "Documentation=man:lvm2-activation-generator(8)\n" "SourcePath=/etc/lvm/lvm.conf\n" "DefaultDependencies=no\n", f); if (unit == UNIT_NET) { fprintf(f, "After=%s iscsi.service fcoe.service\n" "Before=remote-fs-pre.target shutdown.target\n\n" "[Service]\n" "ExecStartPre=/usr/bin/udevadm settle\n", unit_names[UNIT_MAIN]); } else { if (unit == UNIT_EARLY) { fputs("After=systemd-udev-settle.service\n" "Before=cryptsetup.target\n", f); } else fprintf(f, "After=%s cryptsetup.target\n", unit_names[UNIT_EARLY]); fputs("Before=local-fs-pre.target shutdown.target\n" "Wants=systemd-udev-settle.service\n\n" "[Service]\n", f); } fputs("ExecStart=" LVM_PATH " vgchange -aay --ignoreskippedcluster", f); if (sysinit_needed) fputs (" --sysinit", f); fputs("\nType=oneshot\n", f); if (fclose(f) < 0) { kmsg(LOG_ERR, "LVM: Failed to write unit file %s: %m.\n", unit_name); return 0; } if (!register_unit_with_target(dir, unit_name, target_name)) { kmsg(LOG_ERR, "LVM: Failed to register unit %s with target %s.\n", unit_name, target_name); return 0; } return 1; } int main(int argc, char *argv[]) { int use_lvmetad, use_lvmpolld, sysinit_needed; const char *dir; int r = EXIT_SUCCESS; mode_t old_mask; kmsg_fd = open(KMSG_DEV_PATH, O_WRONLY|O_NOCTTY); if (argc != 4) { kmsg(LOG_ERR, "LVM: Incorrect number of arguments for activation generator.\n"); r = EXIT_FAILURE; goto out; } /* If lvmetad used, rely on autoactivation instead of direct activation. */ lvm_get_use_lvmetad_and_lvmpolld(&use_lvmetad, &use_lvmpolld); if (use_lvmetad) goto out; dir = argv[1]; /* mark lvm2-activation.*.service as world-accessible */ old_mask = umask(0022); sysinit_needed = !use_lvmpolld; if (!generate_unit(dir, UNIT_EARLY, sysinit_needed) || !generate_unit(dir, UNIT_MAIN, sysinit_needed) || !generate_unit(dir, UNIT_NET, sysinit_needed)) r = EXIT_FAILURE; umask(old_mask); out: if (r) kmsg(LOG_ERR, "LVM: Activation generator failed.\n"); if (kmsg_fd != -1) (void) close(kmsg_fd); return r; } LVM2.2.02.176/scripts/lvm2_monitoring_init_rhel40000644000000000000120000000407713176752421020214 0ustar rootwheel#!/bin/bash # # Copyright (C) 2007 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # This file is part of LVM2. # It is required for the proper handling of failures of LVM2 mirror # devices that were created using the -m option of lvcreate. # # # chkconfig: 12345 02 99 # description: Starts and stops dmeventd monitoring for lvm2 # ### BEGIN INIT INFO # Provides: ### END INIT INFO . 
/etc/init.d/functions VGCHANGE="/usr/sbin/vgchange" WARN=1 start() { ret=0 # TODO do we want to separate out already active groups only? VGS=`vgs --noheadings -o name --config 'log{command_names=0 prefix=" "}' 2> /dev/null` for vg in $VGS do action "Starting monitoring for VG $vg:" $VGCHANGE --monitor y --config 'log{command_names=0 prefix=" "}' $vg || ret=$? done return $ret } stop() { ret=0 # TODO do we want to separate out already active groups only? if test "$WARN" = "1"; then echo "Not stopping monitoring, this is a dangerous operation. Please use force-stop to override." return 1 fi VGS=`vgs --noheadings -o name --config 'log{command_names=0 prefix=" "}' 2> /dev/null` for vg in $VGS do action "Stopping monitoring for VG $vg:" $VGCHANGE --monitor n --config 'log{command_names=0 prefix=" "}' $vg || ret=$? done return $ret } result=1 # See how we were called. case "$1" in start) start result=$? ;; force-stop) WARN=0 stop result=$? ;; stop) test "$runlevel" = "0" && WARN=0 test "$runlevel" = "6" && WARN=0 stop result=$? ;; restart) WARN=0 if stop then start fi result=$? ;; status) # TODO anyone with an idea how to dump monitored volumes? ;; *) echo $"Usage: $0 {start|stop|restart|status|force-stop}" ;; esac exit $result LVM2.2.02.176/scripts/lvm2_cmirrord_systemd_red_hat.service.in0000644000000000000120000000053113176752421023020 0ustar rootwheel[Unit] Description=Clustered LVM mirror log daemon Documentation=man:cmirrord(8) Requires=corosync.service After=corosync.service Before=remote-fs-pre.target DefaultDependencies=no Conflicts=shutdown.target [Service] Type=forking ExecStart=@USRSBINDIR@/cmirrord PIDFile=@CMIRRORD_PIDFILE@ Restart=on-abort [Install] WantedBy=multi-user.target LVM2.2.02.176/scripts/com.redhat.lvmdbus1.service.in0000644000000000000120000000016713176752421020566 0ustar rootwheel[D-BUS Service] Name=com.redhat.lvmdbus1 Exec=@SBINDIR@/lvmdbusd --udev User=root SystemdService=lvm2-lvmdbusd.service LVM2.2.02.176/scripts/relpath.awk0000755000000000000120000000215013176752421015157 0ustar rootwheel#!/usr/bin/awk -f # # Copyright (C) 2010 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # relpath.awk: Script is used to calculate relative path # between two real absolute paths. 
# # echo /a/b/c/d /a/b/e/f | relpath.awk # -> ../../e/f/ { length_from = split($1, from, "/"); length_to = split($2, to, "/") ; l = 1; while (l <= length_from && l <= length_to && from[l] == to[l]) l++; for (i = l; i <= length_from && length(from[i]); i++) { if (i > l) p = sprintf("%s/", p); p = sprintf("%s..", p); } for (i = l; i <= length_to && length(to[i]); i++) { if (length(p) > 0) p = sprintf("%s/", p); p = sprintf("%s%s", p, to[i]); } if (length(p)) p = sprintf("%s/", p); print p } LVM2.2.02.176/scripts/lvm2_lvmpolld_systemd_red_hat.socket.in0000644000000000000120000000033613176752421022663 0ustar rootwheel[Unit] Description=LVM2 poll daemon socket Documentation=man:lvmpolld(8) DefaultDependencies=no [Socket] ListenStream=@DEFAULT_RUN_DIR@/lvmpolld.socket SocketMode=0600 RemoveOnStop=true [Install] WantedBy=sysinit.target LVM2.2.02.176/scripts/lvm2_clvmd_systemd_red_hat.service.in0000644000000000000120000000105513176752421022306 0ustar rootwheel[Unit] Description=Clustered LVM daemon Documentation=man:clvmd(8) After=dlm.service corosync.service Before=remote-fs-pre.target Requires=network.target dlm.service corosync.service RefuseManualStart=true RefuseManualStop=true StopWhenUnneeded=true DefaultDependencies=no Conflicts=shutdown.target [Service] Type=forking Environment=CLVMD_OPTS=-T30 EnvironmentFile=-@SYSCONFDIR@/sysconfig/clvmd ExecStart=@USRSBINDIR@/clvmd $CLVMD_OPTS SuccessExitStatus=5 TimeoutStartSec=30 TimeoutStopSec=10 OOMScoreAdjust=-1000 Restart=on-abort PIDFile=@CLVMD_PIDFILE@ LVM2.2.02.176/scripts/lvm2_cluster_activation_systemd_red_hat.service.in0000644000000000000120000000071513176752421025105 0ustar rootwheel[Unit] Description=Clustered LVM volumes activation service Requires=lvm2-clvmd.service After=lvm2-clvmd.service lvm2-cmirrord.service OnFailure=lvm2-clvmd.service DefaultDependencies=no Conflicts=shutdown.target [Service] Type=simple RemainAfterExit=yes EnvironmentFile=-@SYSCONFDIR@/sysconfig/clvmd ExecStart=@systemdutildir@/lvm2-cluster-activation activate ExecStop=@systemdutildir@/lvm2-cluster-activation deactivate [Install] WantedBy=multi-user.target LVM2.2.02.176/scripts/fsadm.sh0000755000000000000120000005700413176752421014452 0ustar rootwheel#!/bin/bash # # Copyright (C) 2007-2017 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Author: Zdenek Kabelac # # Script for resizing devices (usable for LVM resize) # # Needed utilities: # mount, umount, grep, readlink, blockdev, blkid, fsck, xfs_check, cryptsetup # # ext2/ext3/ext4: resize2fs, tune2fs # reiserfs: resize_reiserfs, reiserfstune # xfs: xfs_growfs, xfs_info # # Return values: # 0 success # 1 error # 2 break detected # 3 unsupported online filesystem check for given mounted fs TOOL=fsadm _SAVEPATH=$PATH PATH="/sbin:/usr/sbin:/bin:/usr/sbin:$PATH" # utilities TUNE_EXT=tune2fs RESIZE_EXT=resize2fs TUNE_REISER=reiserfstune RESIZE_REISER=resize_reiserfs TUNE_XFS=xfs_info RESIZE_XFS=xfs_growfs MOUNT=mount UMOUNT=umount MKDIR=mkdir RMDIR=rmdir BLOCKDEV=blockdev BLKID=blkid DATE=date GREP=grep READLINK=readlink READLINK_E="-e" FSCK=fsck XFS_CHECK=xfs_check # XFS_REPAIR -n is used when XFS_CHECK is not found XFS_REPAIR=xfs_repair CRYPTSETUP=cryptsetup # user may override lvm location by setting LVM_BINARY LVM=${LVM_BINARY:-lvm} YES=${_FSADM_YES} DRY=0 VERB= FORCE= EXTOFF=${_FSADM_EXTOFF:-0} DO_LVRESIZE=0 FSTYPE=unknown VOLUME=unknown TEMPDIR="${TMPDIR:-/tmp}/${TOOL}_${RANDOM}$$/m" DM_DEV_DIR="${DM_DEV_DIR:-/dev}" BLOCKSIZE= BLOCKCOUNT= MOUNTPOINT= MOUNTED= REMOUNT= PROCDIR="/proc" PROCMOUNTS="$PROCDIR/mounts" PROCSELFMOUNTINFO="$PROCDIR/self/mountinfo" NULL="$DM_DEV_DIR/null" IFS_OLD=$IFS # without bash $'\n' NL=' ' tool_usage() { echo "${TOOL}: Utility to resize or check the filesystem on a device" echo echo " ${TOOL} [options] check " echo " - Check the filesystem on device using fsck" echo echo " ${TOOL} [options] resize [[BKMGTPE]]" echo " - Change the size of the filesystem on device to new_size" echo echo " Options:" echo " -h | --help Show this help message" echo " -v | --verbose Be verbose" echo " -e | --ext-offline unmount filesystem before ext2/ext3/ext4 resize" echo " -f | --force Bypass sanity checks" echo " -n | --dry-run Print commands without running them" echo " -l | --lvresize Resize given device (if it is LVM device)" echo " -c | --cryptresize Resize given crypt device" echo " -y | --yes Answer \"yes\" at any prompts" echo echo " new_size - Absolute number of filesystem blocks to be in the filesystem," echo " or an absolute size using a suffix (in powers of 1024)." echo " If new_size is not supplied, the whole device is used." 
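# A couple of illustrative invocations (the LV path below is only a placeholder,
# not anything shipped with this script):
#   fsadm -e -y resize /dev/vg00/lvdata 100G   # resize the fs, unmounting ext* first if needed
#   fsadm check /dev/vg00/lvdata               # run fsck when the fs is not mounted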
exit } verbose() { test -z "$VERB" || echo "$TOOL:" "$@" } # Support multi-line error messages error() { for i in "$@" ; do echo "$TOOL: $i" >&2 done cleanup 1 } dry() { if [ "$DRY" -ne 0 ]; then verbose "Dry execution" "$@" return 0 fi verbose "Executing" "$@" "$@" } cleanup() { trap '' 2 # reset MOUNTPOINT - avoid recursion test "$MOUNTPOINT" = "$TEMPDIR" && MOUNTPOINT="" temp_umount if [ -n "$REMOUNT" ]; then verbose "Remounting unmounted filesystem back" dry "$MOUNT" "$VOLUME" "$MOUNTED" fi IFS=$IFS_OLD trap 2 test "$1" -eq 2 && verbose "Break detected" if [ "$DO_LVRESIZE" -eq 2 ]; then # start LVRESIZE with the filesystem modification flag # and allow recursive call of fsadm _FSADM_YES=$YES _FSADM_EXTOFF=$EXTOFF export _FSADM_YES _FSADM_EXTOFF unset FSADM_RUNNING test -n "$LVM_BINARY" && PATH=$_SAVEPATH dry exec "$LVM" lvresize $VERB $FORCE -r -L"${NEWSIZE_ORIG}b" "$VOLUME_ORIG" fi # error exit status for break exit "${1:-1}" } # convert parameter from Exa/Peta/Tera/Giga/Mega/Kilo/Bytes and blocks # (2^(60/50/40/30/20/10/0)) decode_size() { case "$1" in *[eE]) NEWSIZE=$(( ${1%[eE]} * 1152921504606846976 )) ;; *[pP]) NEWSIZE=$(( ${1%[pP]} * 1125899906842624 )) ;; *[tT]) NEWSIZE=$(( ${1%[tT]} * 1099511627776 )) ;; *[gG]) NEWSIZE=$(( ${1%[gG]} * 1073741824 )) ;; *[mM]) NEWSIZE=$(( ${1%[mM]} * 1048576 )) ;; *[kK]) NEWSIZE=$(( ${1%[kK]} * 1024 )) ;; *[bB]) NEWSIZE=${1%[bB]} ;; *) NEWSIZE=$(( $1 * $2 )) ;; esac #NEWBLOCKCOUNT=$(round_block_size $NEWSIZE $2) NEWBLOCKCOUNT=$(( NEWSIZE / $2 )) if [ "$DO_LVRESIZE" -eq 1 ]; then # start lvresize, but first cleanup mounted dirs DO_LVRESIZE=2 cleanup 0 fi } decode_major_minor() { # 0x00000fff00 mask MAJOR # 0xfffff000ff mask MINOR #MINOR=$(( $1 / 1048576 )) #MAJOR=$(( ($1 - ${MINOR} * 1048576) / 256 )) #MINOR=$(( $1 - ${MINOR} * 1048576 - ${MAJOR} * 256 + ${MINOR} * 256)) echo "$(( ( $1 >> 8 ) & 4095 )):$(( ( ( $1 >> 12 ) & 268435200 ) | ( $1 & 255 ) ))" } # detect filesystem on the given device # dereference device name if it is symbolic link detect_fs() { test -n "$VOLUME_ORIG" || VOLUME_ORIG=$1 VOLUME=${1/#"${DM_DEV_DIR}/"/} VOLUME=$("$READLINK" $READLINK_E "$DM_DEV_DIR/$VOLUME") test -n "$VOLUME" || error "Cannot get readlink \"$1\"." RVOLUME=$VOLUME case "$RVOLUME" in # hardcoded /dev since udev does not create these entries elsewhere /dev/dm-[0-9]*) read -r <"/sys/block/${RVOLUME#/dev/}/dm/name" SYSVOLUME 2>&1 && VOLUME="$DM_DEV_DIR/mapper/$SYSVOLUME" read -r <"/sys/block/${RVOLUME#/dev/}/dev" MAJORMINOR 2>&1 || error "Cannot get major:minor for \"$VOLUME\"." MAJOR=${MAJORMINOR%%:*} MINOR=${MAJORMINOR##*:} ;; *) STAT=$(stat --format "MAJOR=\$((0x%t)) MINOR=\$((0x%T))" "$RVOLUME") test -n "$STAT" || error "Cannot get major:minor for \"$VOLUME\"." eval "$STAT" MAJORMINOR="${MAJOR}:${MINOR}" ;; esac # use null device as cache file to be sure about the result # not using option '-o value' to be compatible with older version of blkid FSTYPE=$("$BLKID" -c "$NULL" -s TYPE "$VOLUME") test -n "$FSTYPE" || error "Cannot get FSTYPE of \"$VOLUME\"." FSTYPE=${FSTYPE##*TYPE=\"} # cut quotation marks FSTYPE=${FSTYPE%%\"*} verbose "\"$FSTYPE\" filesystem found on \"$VOLUME\"." 
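# Worked example (all values assumed, for illustration only): calling
# detect_fs /dev/vg00/lvdata typically resolves the symlink to something like
# /dev/dm-3, so SYSVOLUME=vg00-lvdata, VOLUME=$DM_DEV_DIR/mapper/vg00-lvdata,
# MAJORMINOR would be e.g. 253:3 and FSTYPE is whatever blkid reports, e.g. "ext4".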
} # Check that passed mounted MAJOR:MINOR is not matching $MAJOR:MINOR of resized $VOLUME validate_mounted_major_minor() { test "$1" = "$MAJORMINOR" || { local REFNAME local CURNAME REFNAME=$(dmsetup info -c -j "${1%%:*}" -m "${1##*:}" -o name --noheadings 2>/dev/null) CURNAME=$(dmsetup info -c -j "$MAJOR" -m "$MINOR" -o name --noheadings 2>/dev/null) error "Cannot ${CHECK+CHECK}${RESIZE+RESIZE} device \"$VOLUME\" without umounting filesystem $MOUNTED first." \ "Mounted filesystem is using device $CURNAME, but referenced device is $REFNAME." \ "Filesystem utilities currently do not support renamed devices." } } # ATM fsresize & fsck tools are not able to work properly # when mounted device has changed its name. # So whenever such device no longer exists with original name # abort further command processing check_valid_mounted_device() { local MOUNTEDMAJORMINOR local VOL local CURNAME local SUGGEST="Possibly device \"$1\" has been renamed to \"$CURNAME\"?" VOL=$("$READLINK" $READLINK_E "$1") CURNAME=$(dmsetup info -c -j "$MAJOR" -m "$MINOR" -o name --noheadings) # more confused, device is not DM.... test -n "$CURNAME" || SUGGEST="Mounted volume is not a device mapper device???" test -n "$VOL" || error "Cannot access device \"$1\" referenced by mounted filesystem \"$MOUNTED\"." \ "$SUGGEST" \ "Filesystem utilities currently do not support renamed devices." case "$VOL" in # hardcoded /dev since udev does not create these entries elsewhere /dev/dm-[0-9]*) read -r <"/sys/block/${VOL#/dev/}/dev" MOUNTEDMAJORMINOR 2>&1 || error "Cannot get major:minor for \"$VOLUME\"." ;; *) STAT=$(stat --format "MOUNTEDMAJORMINOR=\$((0x%t)):\$((0x%T))" "$VOL") test -n "$STAT" || error "Cannot get major:minor for \"$VOLUME\"." eval "$STAT" ;; esac validate_mounted_major_minor "$MOUNTEDMAJORMINOR" } detect_mounted_with_proc_self_mountinfo() { # Check self mountinfo # grab major:minor mounted_device mount_point MOUNTED=$("$GREP" "^[0-9]* [0-9]* $MAJORMINOR " "$PROCSELFMOUNTINFO" 2>/dev/null | head -1) # If device is opened and not yet found as self mounted # check all other mountinfos (since it can be mounted in cgroups) # Use 'find' to not fail on to long list of args with too many pids # only 1st. line is needed test -z "$MOUNTED" && test "$(dmsetup info -c --noheading -o open -j "$MAJOR" -m "$MINOR")" -gt 0 && MOUNTED=$(find "$PROCDIR" -maxdepth 2 -name mountinfo -print0 | xargs -0 "$GREP" "^[0-9]* [0-9]* $MAJORMINOR " 2>/dev/null | head -1 2>/dev/null) # TODO: for performance compare with sed and stop with 1st. match: # sed -n "/$MAJORMINOR/ {;p;q;}" # extract 2nd field after ' - ' separator as mouted device MOUNTDEV=$(echo "${MOUNTED##* - }" | cut -d ' ' -f 2) MOUNTDEV=$(echo -n -e "$MOUNTDEV") # extract 5th field as mount point # echo -e translates \040 to spaces MOUNTED=$(echo "$MOUNTED" | cut -d ' ' -f 5) MOUNTED=$(echo -n -e "$MOUNTED") test -n "$MOUNTED" || return 1 # Not seen mounted anywhere check_valid_mounted_device "$MOUNTDEV" } # With older systems without /proc/*/mountinfo we may need to check # every mount point as cannot easily depend on the name of mounted # device (which could have been renamed). 
# We need to visit every mount point and check it's major minor detect_mounted_with_proc_mounts() { MOUNTED=$("$GREP" "^$VOLUME[ \t]" "$PROCMOUNTS") # for empty string try again with real volume name test -z "$MOUNTED" && MOUNTED=$("$GREP" "^$RVOLUME[ \t]" "$PROCMOUNTS") MOUNTDEV=$(echo -n -e "${MOUNTED%% *}") # cut device name prefix and trim everything past mountpoint # echo translates \040 to spaces MOUNTED=${MOUNTED#* } MOUNTED=$(echo -n -e "${MOUNTED%% *}") # for systems with different device names - check also mount output if test -z "$MOUNTED" ; then # will not work with spaces in paths MOUNTED=$(LC_ALL=C "$MOUNT" | "$GREP" "^$VOLUME[ \t]") test -z "$MOUNTED" && MOUNTED=$(LC_ALL=C "$MOUNT" | "$GREP" "^$RVOLUME[ \t]") MOUNTDEV=${MOUNTED%% on *} MOUNTED=${MOUNTED##* on } MOUNTED=${MOUNTED% type *} # allow type in the mount name fi if test -n "$MOUNTED" ; then check_valid_mounted_device "$MOUNTDEV" return 0 # mounted fi # If still nothing found and volume is in use # check every known mount point against MAJOR:MINOR if test "$(dmsetup info -c --noheading -o open -j "$MAJOR" -m "$MINOR")" -gt 0 ; then while IFS=$'\n' read -r i ; do MOUNTDEV=$(echo -n -e "${i%% *}") MOUNTED=${i#* } MOUNTED=$(echo -n -e "${MOUNTED%% *}") STAT=$(stat --format "%d" "$MOUNTED") validate_mounted_major_minor "$(decode_major_minor "$STAT")" done < "$PROCMOUNTS" fi return 1 # nothing is mounted } # check if the given device is already mounted and where # FIXME: resolve swap usage and device stacking detect_mounted() { if test -e "$PROCSELFMOUNTINFO"; then detect_mounted_with_proc_self_mountinfo elif test -e "$PROCMOUNTS"; then detect_mounted_with_proc_mounts else error "Cannot detect mounted device \"$VOLUME\"." fi } # get the full size of device in bytes detect_device_size() { # check if blockdev supports getsize64 "$BLOCKDEV" --help 2>&1 | "$GREP" getsize64 >"$NULL" if test $? -eq 0; then DEVSIZE=$("$BLOCKDEV" --getsize64 "$VOLUME") test -n "$DEVSIZE" || error "Cannot read size of device \"$VOLUME\"." else DEVSIZE=$("$BLOCKDEV" --getsize "$VOLUME") test -n "$DEVSIZE" || error "Cannot read size of device \"$VOLUME\"." SSSIZE=$("$BLOCKDEV" --getss "$VOLUME") test -n "$SSSIZE" || error "Cannot read sector size of device \"$VOLUME\"." DEVSIZE=$(("$DEVSIZE" * "$SSSIZE")) fi } # round up $1 / $2 # could be needed to gaurantee 'at least given size' # but it makes many troubles round_up_block_size() { echo $(( ($1 + $2 - 1) / $2 )) } temp_mount() { dry "$MKDIR" -p -m 0000 "$TEMPDIR" || error "Failed to create $TEMPDIR." dry "$MOUNT" "$VOLUME" "$TEMPDIR" || error "Failed to mount $TEMPDIR." } temp_umount() { dry "$UMOUNT" "$TEMPDIR" || error "Failed to umount \"$TEMPDIR\"." dry "$RMDIR" "${TEMPDIR}" || error "Failed to remove \"$TEMPDIR\"," dry "$RMDIR" "${TEMPDIR%%m}" || error "Failed to remove \"${TEMPDIR%%m}\"." } yes_no() { echo -n "$@" "? [Y|n] " if [ -n "$YES" ]; then echo y ; return 0 fi while read -r -s -n 1 ANS ; do case "$ANS" in "y" | "Y" ) echo y ; return 0 ;; "n" | "N") break ;; "" ) if [ -t 1 ] ; then echo y ; return 0 fi ;; esac done echo n return 1 } try_umount() { yes_no "Do you want to unmount \"$MOUNTED\"" && dry "$UMOUNT" "$MOUNTED" && return 0 error "Cannot proceed with mounted filesystem \"$MOUNTED\"." } validate_parsing() { if test -z "$BLOCKSIZE" || test -z "$BLOCKCOUNT" ; then error "Cannot parse $1 output." 
fi } #################################### # Resize ext2/ext3/ext4 filesystem # - unmounted or mounted for upsize # - unmounted for downsize #################################### resize_ext() { local IS_MOUNTED=0 detect_mounted && IS_MOUNTED=1 verbose "Parsing $TUNE_EXT -l \"$VOLUME\"" for i in $(LC_ALL=C "$TUNE_EXT" -l "$VOLUME"); do case "$i" in "Block size"*) BLOCKSIZE=${i##* } ;; "Block count"*) BLOCKCOUNT=${i##* } ;; esac done validate_parsing "$TUNE_EXT" decode_size "$1" "$BLOCKSIZE" FSFORCE=$FORCE if test "$NEWBLOCKCOUNT" -lt "$BLOCKCOUNT" || test "$EXTOFF" -eq 1 ; then test "$IS_MOUNTED" -eq 1 && verbose "$RESIZE_EXT needs unmounted filesystem" && try_umount REMOUNT=$MOUNTED if test -n "$MOUNTED" ; then # Forced fsck -f for umounted extX filesystem. case "$-" in *i*) dry "$FSCK" $YES -f "$VOLUME" ;; *) dry "$FSCK" -f -p "$VOLUME" ;; esac fi fi verbose "Resizing filesystem on device \"$VOLUME\" to $NEWSIZE bytes ($BLOCKCOUNT -> $NEWBLOCKCOUNT blocks of $BLOCKSIZE bytes)" dry "$RESIZE_EXT" $FSFORCE "$VOLUME" "$NEWBLOCKCOUNT" } ############################# # Resize reiserfs filesystem # - unmounted for upsize # - unmounted for downsize ############################# resize_reiser() { detect_mounted && verbose "ReiserFS resizes only unmounted filesystem" && try_umount REMOUNT=$MOUNTED verbose "Parsing $TUNE_REISER \"$VOLUME\"" for i in $(LC_ALL=C "$TUNE_REISER" "$VOLUME"); do case "$i" in "Blocksize"*) BLOCKSIZE=${i##*: } ;; "Count of blocks"*) BLOCKCOUNT=${i##*: } ;; esac done validate_parsing "$TUNE_REISER" decode_size "$1" "$BLOCKSIZE" verbose "Resizing \"$VOLUME\" $BLOCKCOUNT -> $NEWBLOCKCOUNT blocks ($NEWSIZE bytes, bs: $NEWBLOCKCOUNT)" if [ -n "$YES" ]; then echo y | dry "$RESIZE_REISER" -s "$NEWSIZE" "$VOLUME" else dry "$RESIZE_REISER" -s "$NEWSIZE" "$VOLUME" fi } ######################## # Resize XFS filesystem # - mounted for upsize # - cannot downsize ######################## resize_xfs() { detect_mounted MOUNTPOINT=$MOUNTED if [ -z "$MOUNTED" ]; then MOUNTPOINT=$TEMPDIR temp_mount || error "Cannot mount Xfs filesystem." fi verbose "Parsing $TUNE_XFS \"$MOUNTPOINT\"" for i in $(LC_ALL=C "$TUNE_XFS" "$MOUNTPOINT"); do case "$i" in "data"*) BLOCKSIZE=${i##*bsize=} ; BLOCKCOUNT=${i##*blocks=} ;; esac done BLOCKSIZE=${BLOCKSIZE%%[^0-9]*} BLOCKCOUNT=${BLOCKCOUNT%%[^0-9]*} validate_parsing "$TUNE_XFS" decode_size "$1" "$BLOCKSIZE" if [ "$NEWBLOCKCOUNT" -gt "$BLOCKCOUNT" ]; then verbose "Resizing Xfs mounted on \"$MOUNTPOINT\" to fill device \"$VOLUME\"" dry "$RESIZE_XFS" "$MOUNTPOINT" elif [ "$NEWBLOCKCOUNT" -eq "$BLOCKCOUNT" ]; then verbose "Xfs filesystem already has the right size" else error "Xfs filesystem shrinking is unsupported." 
fi } # Find active LUKS device on original volume # 1) look for LUKS device with well-known UUID format (CRYPT-LUKS[12]--) # 2) the dm-crypt device has to be on top of original device (dont't support detached LUKS headers) detect_luks_device() { local _LUKS_VERSION local _LUKS_UUID CRYPT_NAME="" CRYPT_DATA_OFFSET="" _LUKS_VERSION=$($CRYPTSETUP luksDump $VOLUME 2> /dev/null | $GREP "Version:") if [ -z "$_LUKS_VERSION" ]; then verbose "Failed to parse LUKS version on volume \"$VOLUME\"" return fi _LUKS_VERSION=${_LUKS_VERSION//[Version:[:space:]]/} _LUKS_UUID=$($CRYPTSETUP luksDump $VOLUME 2> /dev/null | $GREP "UUID:") if [ -z "$_LUKS_UUID" ]; then verbose "Failed to parse LUKS UUID on volume \"$VOLUME\"" return fi _LUKS_UUID="CRYPT-LUKS$_LUKS_VERSION-${_LUKS_UUID//[UID:[:space:]-]/}-" CRYPT_NAME=$(dmsetup info -c --noheadings -S "UUID=~^$_LUKS_UUID&&segments=1&&devnos_used='$MAJOR:$MINOR'" -o name) test -z "$CRYPT_NAME" || CRYPT_DATA_OFFSET=$(dmsetup table $CRYPT_NAME | cut -d ' ' -f 8) # LUKS device must be active and mapped over volume where detected if [ -z "$CRYPT_NAME" -o -z "$CRYPT_DATA_OFFSET" ]; then error "Can not find active LUKS device. Unlock \"$VOLUME\" volume first." fi } ###################################### # Resize active LUKS device # - LUKS must be active for fs resize ###################################### resize_luks() { local L_NEWSIZE local L_NEWBLOCKCOUNT local NAME local SHRINK=0 detect_luks_device NAME=$CRYPT_NAME verbose "Found active LUKS device \"$NAME\" for volume \"$VOLUME\"" decode_size "$1" 512 if [ $((NEWSIZE % 512)) -gt 0 ]; then error "New size is not sector alligned" fi if [ $((NEWBLOCKCOUNT - CRYPT_DATA_OFFSET)) -lt 1 ]; then error "New size is smaller than minimum ($(((CRYPT_DATA_OFFSET + 1) * 512)) bytes) for LUKS device $VOLUME" fi L_NEWBLOCKCOUNT=$((NEWBLOCKCOUNT - CRYPT_DATA_OFFSET)) L_NEWSIZE=$(( L_NEWBLOCKCOUNT * 512)) VOLUME="$DM_DEV_DIR/mapper/$NAME" detect_device_size test "$DEVSIZE" -le "$L_NEWSIZE" || SHRINK=1 if [ $SHRINK -eq 1 ]; then # shrink fs on LUKS device first resize "$DM_DEV_DIR/mapper/$NAME" "$L_NEWSIZE"b fi # resize LUKS device dry $CRYPTSETUP resize $NAME --size $L_NEWBLOCKCOUNT || error "Failed to resize active LUKS device" if [ $SHRINK -eq 0 ]; then # grow fs on top of LUKS device resize "$DM_DEV_DIR/mapper/$NAME" "$L_NEWSIZE"b fi } detect_crypt_device() { local CRYPT_TYPE local L_NEWSIZE local TMP which $CRYPTSETUP > /dev/null 2>&1 || error "$CRYPTSETUP utility required to resize crypt device" CRYPT_TYPE=$($CRYPTSETUP status $1 2> /dev/null | $GREP "type:") test -n "$CRYPT_TYPE" || error "$CRYPTSETUP failed to detect device type on $1." 
CRYPT_TYPE=${CRYPT_TYPE##*[[:space:]]} case "$CRYPT_TYPE" in LUKS[12]|PLAIN) verbose "\"$1\" crypt device is type $CRYPT_TYPE" ;; *) error "Unsupported crypt type \"$CRYPT_TYPE\"" esac TMP=$NEWSIZE decode_size "$2" 512 L_NEWSIZE=$NEWSIZE NEWSIZE=$TMP if [ $((L_NEWSIZE % 512)) -ne 0 ]; then error "New size is not sector alligned" fi CRYPT_RESIZE_BLOCKS=$NEWBLOCKCOUNT if [ "$DEVSIZE" -ge "$L_NEWSIZE" ]; then CRYPT_SHRINK=1 else CRYPT_GROW=1 fi } ################################# # Resize active crypt device # (on direct user request only) ################################# resize_crypt() { dry $CRYPTSETUP resize "$1" --size $CRYPT_RESIZE_BLOCKS || error "$CRYPTSETUP failed to resize device $1" } #################### # Resize filesystem #################### resize() { NEWSIZE=$2 detect_fs "$1" detect_device_size verbose "Device \"$VOLUME\" size is $DEVSIZE bytes" # if the size parameter is missing use device size #if [ -n "$NEWSIZE" -a $NEWSIZE < test -z "$NEWSIZE" && NEWSIZE=${DEVSIZE}b test -n "$NEWSIZE_ORIG" || NEWSIZE_ORIG=$NEWSIZE IFS=$NL test -z "$DO_CRYPTRESIZE" || detect_crypt_device "$VOLUME_ORIG" "$NEWSIZE_ORIG" test -z "$CRYPT_GROW" || resize_crypt "$VOLUME_ORIG" case "$FSTYPE" in "ext3"|"ext2"|"ext4") resize_ext $NEWSIZE ;; "reiserfs") resize_reiser $NEWSIZE ;; "xfs") resize_xfs $NEWSIZE ;; "crypto_LUKS") which $CRYPTSETUP > /dev/null 2>&1 || error "$CRYPTSETUP utility required to resize LUKS volume" resize_luks $NEWSIZE ;; *) error "Filesystem \"$FSTYPE\" on device \"$VOLUME\" is not supported by this tool." ;; esac || error "Resize $FSTYPE failed." test -z "$CRYPT_SHRINK" || resize_crypt "$VOLUME_ORIG" } #################################### # Calclulate diff between two dates # LC_ALL=C input is expected the # only one supported #################################### diff_dates() { echo $(( $("$DATE" -u -d"$1" +%s 2>"$NULL") - $("$DATE" -u -d"$2" +%s 2>"$NULL") )) } check_luks() { detect_luks_device check "$DM_DEV_DIR/mapper/$CRYPT_NAME" } ################### # Check filesystem ################### check() { detect_fs "$1" if detect_mounted ; then verbose "Skipping filesystem check for device \"$VOLUME\" as the filesystem is mounted on $MOUNTED"; cleanup 3 fi case "$FSTYPE" in "ext2"|"ext3"|"ext4") IFS_CHECK=$IFS IFS=$NL for i in $(LC_ALL=C "$TUNE_EXT" -l "$VOLUME"); do case "$i" in "Last mount"*) LASTMOUNT=${i##*: } ;; "Last checked"*) LASTCHECKED=${i##*: } ;; esac done case "$LASTMOUNT" in *"n/a") ;; # nothing to do - system was not mounted yet *) LASTDIFF=$(diff_dates "$LASTMOUNT" "$LASTCHECKED") if test "$LASTDIFF" -gt 0 ; then verbose "Filesystem has not been checked after the last mount, using fsck -f" FORCE="-f" fi ;; esac IFS=$IFS_CHECK esac case "$FSTYPE" in "xfs") if which "$XFS_CHECK" >"$NULL" 2>&1 ; then dry "$XFS_CHECK" "$VOLUME" else # Replacement for outdated xfs_check # FIXME: for small devices we need to force_geometry, # since we run in '-n' mode, it shouldn't be problem. # Think about better way.... dry "$XFS_REPAIR" -n -o force_geometry "$VOLUME" fi ;; "ext2"|"ext3"|"ext4"|"reiserfs") # check if executed from interactive shell environment case "$-" in *i*) dry "$FSCK" $YES $FORCE "$VOLUME" ;; *) dry "$FSCK" $FORCE -p "$VOLUME" ;; esac ;; "crypto_LUKS") which $CRYPTSETUP > /dev/null 2>&1 || error "$CRYPTSETUP utility required." check_luks ;; *) error "Filesystem \"$FSTYPE\" on device \"$VOLUME\" is not supported by this tool." 
;; esac } ############################# # start point of this script # - parsing parameters ############################# trap "cleanup 2" 2 # test if we are not invoked recursively test -n "$FSADM_RUNNING" && exit 0 # test some prerequisities for i in "$TUNE_EXT" "$RESIZE_EXT" "$TUNE_REISER" "$RESIZE_REISER" \ "$TUNE_XFS" "$RESIZE_XFS" "$MOUNT" "$UMOUNT" "$MKDIR" \ "$RMDIR" "$BLOCKDEV" "$BLKID" "$GREP" "$READLINK" \ "$DATE" "$FSCK" "$XFS_CHECK" "$XFS_REPAIR" "$LVM" ; do test -n "$i" || error "Required command definitions in the script are missing!" done "$LVM" version >"$NULL" 2>&1 || error "Could not run lvm binary \"$LVM\"." "$READLINK" -e / >"$NULL" 2>&1 || READLINK_E="-f" TEST64BIT=$(( 1000 * 1000000000000 )) test "$TEST64BIT" -eq 1000000000000000 || error "Shell does not handle 64bit arithmetic." echo Y | "$GREP" Y >"$NULL" || error "Grep does not work properly." test "$("$DATE" -u -d"Jan 01 00:00:01 1970" +%s)" -eq 1 || error "Date translation does not work." if [ "$#" -eq 0 ] ; then tool_usage fi while [ "$#" -ne 0 ] do case "$1" in "") ;; "-h"|"--help") tool_usage ;; "-v"|"--verbose") VERB="-v" ;; "-n"|"--dry-run") DRY=1 ;; "-f"|"--force") FORCE="-f" ;; "-e"|"--ext-offline") EXTOFF=1 ;; "-y"|"--yes") YES="-y" ;; "-l"|"--lvresize") DO_LVRESIZE=1 ;; "-c"|"--cryptresize") DO_CRYPTRESIZE=1 ;; "check") CHECK=$2 ; shift ;; "resize") RESIZE=$2 ; NEWSIZE=$3 ; shift 2 ;; *) error "Wrong argument \"$1\". (see: $TOOL --help)" esac shift done test "$YES" = "-y" || YES="" test "$EXTOFF" -eq 1 || EXTOFF=0 if [ -n "$CHECK" ]; then check "$CHECK" elif [ -n "$RESIZE" ]; then export FSADM_RUNNING="fsadm" resize "$RESIZE" "$NEWSIZE" cleanup 0 else error "Missing command. (see: $TOOL --help)" fi LVM2.2.02.176/scripts/lvm2_lvmlocking_systemd_red_hat.service.in0000644000000000000120000000127013176752421023345 0ustar rootwheel[Unit] Description=Availability of lockspaces in lvmlockd Documentation=man:lvmlockd(8) After=lvm2-lvmlockd.service sanlock.service dlm.service [Service] Type=oneshot RemainAfterExit=yes # start lockspaces and wait for them to finish starting ExecStart=@SBINDIR@/lvm vgchange --lock-start --lock-opt autowait # auto activate LVs in the newly started lockd VGs ExecStart=@SBINDIR@/lvm vgchange -aay -S 'locktype=sanlock || locktype=dlm' # deactivate LVs in lockd VGs ExecStop=@SBINDIR@/lvm vgchange -an -S 'locktype=sanlock || locktype=dlm' # stop lockspaces and wait for them to finish stopping ExecStop=@SBINDIR@/lvmlockctl --stop-lockspaces --wait 1 [Install] WantedBy=multi-user.target LVM2.2.02.176/scripts/dm_event_systemd_red_hat.service.in0000644000000000000120000000053213176752421022041 0ustar rootwheel[Unit] Description=Device-mapper event daemon Documentation=man:dmeventd(8) Requires=dm-event.socket After=dm-event.socket Before=local-fs-pre.target shutdown.target Conflicts=shutdown.target DefaultDependencies=no [Service] Type=simple ExecStart=@SBINDIR@/dmeventd -f Environment=SD_ACTIVATION=1 PIDFile=@DMEVENTD_PIDFILE@ OOMScoreAdjust=-1000 LVM2.2.02.176/scripts/lvm2_tmpfiles_red_hat.conf.in0000644000000000000120000000011313176752421020533 0ustar rootwheeld @DEFAULT_LOCK_DIR@ 0700 root root - d @DEFAULT_RUN_DIR@ 0700 root root - LVM2.2.02.176/scripts/blkdeactivate.sh.in0000644000000000000120000003477013176752421016571 0ustar rootwheel#!/bin/bash # # Copyright (C) 2012-2017 Red Hat, Inc. All rights reserved. # # This file is part of LVM2. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Author: Peter Rajnoha # # Script for deactivating block devices # # Requires: # bash >= 4.0 (associative array support) # util-linux { # lsblk >= 2.22 (lsblk -s support) # umount # } # dmsetup >= 1.02.68 (--retry option support) # lvm >= 2.2.89 (activation/retry_deactivation config support) # #set -x shopt -s dotglob nullglob TOOL=blkdeactivate sbindir="@SBINDIR@" DEV_DIR="/dev" SYS_BLK_DIR="/sys/block" MOUNTPOINT="/bin/mountpoint" UMOUNT="/bin/umount" DMSETUP="$sbindir/dmsetup" LVM="$sbindir/lvm" MDADM="$sbindir/mdadm" MPATHD="/sbin/multipathd" if "$UMOUNT" --help | grep -- "--all-targets" >"$DEV_DIR/null"; then UMOUNT_OPTS="--all-targets " else UMOUNT_OPTS="" FINDMNT="/bin/findmnt -r --noheadings -u -o TARGET" FINDMNT_READ="read -r mnt" fi DMSETUP_OPTS="" LVM_OPTS="" MDADM_OPTS="" MPATHD_OPTS="" LSBLK="/bin/lsblk -r --noheadings -o TYPE,KNAME,NAME,MOUNTPOINT" LSBLK_VARS="local devtype local kname local name local mnt" LSBLK_READ="read -r devtype kname name mnt" SORT_MNT="/bin/sort -r -u -k 4" # Do not show tool errors by default (only done/skipping summary # message provided by this script) and no verbose mode by default. ERRORS=0 VERBOSE=0 # Do not unmount mounted devices by default. DO_UMOUNT=0 # Deactivate each LV separately by default (not the whole VG). LVM_DO_WHOLE_VG=0 # Do not retry LV deactivation by default. LVM_CONFIG="activation{retry_deactivation=0}" # Do not wait for MD RAID device resync, recovery or reshape. MDRAID_DO_WAIT=0 # Do not disable queueing if set on multipath devices. MPATHD_DO_DISABLEQUEUEING=0 # # List of device names and/or VGs to be skipped. # Device name is the KNAME from lsblk output. # # If deactivation of any device fails, it's automatically # added to the SKIP_DEVICE_LIST (also a particular VG # added to the SKIP_VG_LIST for a device that is an LV). # # These lists provide device tree pruning to skip # particular device/VG deactivation that failed already. # (lists are associative arrays!) # declare -A SKIP_DEVICE_LIST=() declare -A SKIP_VG_LIST=() # # List of mountpoints to be skipped. Any device that is mounted on the mountpoint # listed here will be added to SKIP_DEVICE_LIST (and SKIP_VG_LIST) automatically. # (list is an associative array!) # declare -A SKIP_UMOUNT_LIST=(["/"]=1 \ ["/lib"]=1 ["/lib64"]=1 \ ["/bin"]=1 ["/sbin"]=1 \ ["/var"]=1 ["/var/log"]=1 \ ["/usr"]=1 \ ["/usr/lib"]=1 ["/usr/lib64"]=1 \ ["/usr/sbin"]=1 ["/usr/bin"]=1) # Bash can't properly handle '[' and ']' used as a subscript # within the '()'initialization - it needs to be done separately! SKIP_UMOUNT_LIST["[SWAP]"]=1 usage() { echo "${TOOL}: Utility to deactivate block devices" echo echo " ${TOOL} [options] [device...]" echo " - Deactivate block device tree." echo " If devices are specified, deactivate only supplied devices and their holders." 
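# Hypothetical examples (the device path below is a placeholder):
#   blkdeactivate -u -l retry,wholevg /dev/mapper/vg00-lvdata
#       unmount it if mounted, retry LV deactivation and take down the whole VG
#   blkdeactivate -u -m disablequeueing
#       unmount and deactivate everything, disabling multipath queueing first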
echo echo " Options:" echo " -e | --errors Show errors reported from tools" echo " -h | --help Show this help message" echo " -d | --dmoptions DM_OPTIONS Comma separated DM specific options" echo " -l | --lvmoptions LVM_OPTIONS Comma separated LVM specific options" echo " -m | --mpathoptions MPATH_OPTIONS Comma separated DM-multipath specific options" echo " -r | --mdraidoptions MDRAID_OPTIONS Comma separated MD RAID specific options" echo " -u | --umount Unmount the device if mounted" echo " -v | --verbose Verbose mode (also implies -e)" echo echo " Device specific options:" echo " DM_OPTIONS:" echo " retry retry removal several times in case of failure" echo " force force device removal" echo " LVM_OPTIONS:" echo " retry retry removal several times in case of failure" echo " wholevg deactivate the whole VG when processing an LV" echo " MDRAID_OPTIONS:" echo " wait wait for resync, recovery or reshape to complete first" echo " MPATH_OPTIONS:" echo " disablequeueing disable queueing on all DM-multipath devices first" exit } add_device_to_skip_list() { SKIP_DEVICE_LIST+=(["$kname"]=1) return 1 } add_vg_to_skip_list() { SKIP_VG_LIST+=(["$DM_VG_NAME"]=1) return 1 } is_top_level_device() { # top level devices do not have any holders, that is # the SYS_BLK_DIR//holders dir is empty files=$(echo "$SYS_BLK_DIR/$kname/holders/"*) test -z "$files" } device_umount_one() { test -z "$mnt" && return 0 if test -z "${SKIP_UMOUNT_LIST["$mnt"]}" -a "$DO_UMOUNT" -eq "1"; then echo -n " [UMOUNT]: unmounting $name ($kname) mounted on $mnt... " if eval "$UMOUNT" $UMOUNT_OPTS "$(printf "%s" "$mnt")" "$OUT" "$ERR"; then echo "done" elif "$MOUNTPOINT" -q "$mnt"; then echo "skipping" add_device_to_skip_list else echo "already unmounted" fi else echo " [SKIP]: unmount of $name ($kname) mounted on $mnt" add_device_to_skip_list fi } device_umount() { test "$devtype" != "lvm" && test "${kname:0:3}" != "dm-" \ && test "${kname:0:2}" != "md" && return 0 # FINDMNT is defined only if umount --all-targets is not available. # In that case, read the list of multiple mount points of one device # using FINDMNT and unmount it one by one manually. if test -z "$FINDMNT"; then device_umount_one else while $FINDMNT_READ; do device_umount_one || return 1 done <<< "$($FINDMNT "$DEV_DIR/$kname")" fi } deactivate_holders () { local skip=1; $LSBLK_VARS # Get holders for the device - either a mount or another device. # First line on the lsblk output is the device itself - skip it for # the deactivate call as this device is already being deactivated. while $LSBLK_READ; do test -e "$SYS_BLK_DIR/$kname" || continue # check if the device not on the skip list already test -z "${SKIP_DEVICE_LIST["$kname"]}" || return 1 # try to deactivate the holder test "$skip" -eq 1 && skip=0 && continue deactivate || return 1 done <<< "$($LSBLK "$1")" } deactivate_dm () { local xname xname=$(printf "%s" "$name") test -b "$DEV_DIR/mapper/$xname" || return 0 test -z "${SKIP_DEVICE_LIST["$kname"]}" || return 1 deactivate_holders "$DEV_DIR/mapper/$xname" || return 1 echo -n " [DM]: deactivating $devtype device $xname ($kname)... 
" if eval "$DMSETUP" $DMSETUP_OPTS remove "$xname" "$OUT" "$ERR"; then echo "done" else echo "skipping" add_device_to_skip_list fi } deactivate_lvm () { local DM_VG_NAME; local DM_LV_NAME eval "$(eval "$DMSETUP" splitname --nameprefixes --noheadings --rows "$name" LVM "$ERR")" test -b "$DEV_DIR/$DM_VG_NAME/$DM_LV_NAME" || return 0 test -z "${SKIP_VG_LIST["$DM_VG_NAME"]}" || return 1 if test "$LVM_DO_WHOLE_VG" -eq 0; then # Skip LVM device deactivation if LVM tools missing. test "$LVM_AVAILABLE" -eq 0 && { add_device_to_skip_list return 1 } # Deactivating only the LV specified deactivate_holders "$DEV_DIR/$DM_VG_NAME/$DM_LV_NAME" || { add_device_to_skip_list return 1 } echo -n " [LVM]: deactivating Logical Volume $DM_VG_NAME/$DM_LV_NAME... " if eval "$LVM" lvchange $LVM_OPTS --config \'log\{prefix=\"\"\} $LVM_CONFIG\' -aln "$DM_VG_NAME/$DM_LV_NAME" "$OUT" "$ERR"; then echo "done" else echo "skipping" add_device_to_skip_list fi else # Skip LVM VG deactivation if LVM tools missing. test "$LVM_AVAILABLE" -eq 0 && { add_vg_to_skip_list return 1 } # Deactivating the whole VG the LV is part of lv_list=$(eval "$LVM" vgs --config "$LVM_CONFIG" --noheadings --rows -o lv_name "$DM_VG_NAME" "$ERR") for lv in $lv_list; do test -b "$DEV_DIR/$DM_VG_NAME/$lv" || continue deactivate_holders "$DEV_DIR/$DM_VG_NAME/$lv" || { add_vg_to_skip_list return 1 } done echo -n " [LVM]: deactivating Volume Group $DM_VG_NAME... " if eval "$LVM" vgchange $LVM_OPTS --config \'log\{prefix=\" \"\} $LVM_CONFIG\' -aln "$DM_VG_NAME" "$OUT" "$ERR"; then echo "done" else echo "skipping" add_vg_to_skip_list fi fi } deactivate_md () { local xname xname=$(printf "%s" "$name") local sync_action test -b "$DEV_DIR/$xname" || return 0 test -z "${SKIP_DEVICE_LIST["$kname"]}" || return 1 # Skip MD device deactivation if MD tools missing. test "$MDADM_AVAILABLE" -eq 0 && { add_device_to_skip_list return 1 } deactivate_holders "$DEV_DIR/$xname" || return 1 echo -n " [MD]: deactivating $devtype device $kname... " test "$MDRAID_DO_WAIT" -eq 1 && { sync_action=$(cat "$SYS_BLK_DIR/$kname/md/sync_action") test "$sync_action" != "idle" && { echo -n "$sync_action action in progress... " if eval "$MDADM" $MDADM_OPTS -W "$DEV_DIR/$kname" "$OUT" "$ERR"; then echo -n "complete... " else test $? -ne 1 && echo -n "failed to wait for $sync_action action... " fi } } if eval "$MDADM" $MDADM_OPTS -S "$xname" "$OUT" "$ERR"; then echo "done" else echo "skipping" add_device_to_skip_list fi } deactivate () { ###################################################################### # DEACTIVATION HOOKS FOR NEW DEVICE TYPES GO HERE! # # # # Identify a new device type either by inspecting the TYPE provided # # by lsblk directly ($devtype) or by any other mean that is suitable # # e.g. the KNAME provided by lsblk ($kname). See $LSBLK_VARS for # # complete list of variables that may be used. Then call a # # device-specific deactivation function that handles the exact type. # # # # This device-specific function will certainly need to call # # deactivate_holders first to recursively deactivate any existing # # holders it might have before deactivating the device it processes. # ###################################################################### if test "$devtype" = "lvm"; then deactivate_lvm elif test "${kname:0:3}" = "dm-"; then deactivate_dm elif test "${kname:0:2}" = "md"; then deactivate_md fi } deactivate_all() { $LSBLK_VARS skip=0 echo "Deactivating block devices:" test "$MPATHD_RUNNING" -eq 1 && { echo -n " [DM]: disabling queueing on all multipath devices... 
" eval "$MPATHD" $MPATHD_OPTS disablequeueing maps "$ERR" | grep '^ok$' >"$DEV_DIR/null" && echo "done" || echo "failed" } if test $# -eq 0; then ####################### # Process all devices # ####################### # Unmount all relevant mountpoints first while $LSBLK_READ; do device_umount done <<< "$($LSBLK | $SORT_MNT)" # Do deactivate while $LSBLK_READ; do # 'disk' is at the bottom already and it's a real device test "$devtype" = "disk" && continue # if deactivation of any device fails, skip processing # any subsequent devices within its subtree as the # top-level device could not be deactivated anyway test "$skip" -eq 1 && { # reset 'skip' on top level device if is_top_level_device ; then skip=0 else continue fi } # check if the device is not on the skip list already test -z "${SKIP_DEVICE_LIST["$kname"]}" || continue # try to deactivate top-level device, set 'skip=1' # if it fails to do so - this will cause all the # device's subtree to be skipped when processing # devices further in this loop deactivate || skip=1 done <<< "$($LSBLK -s)" else ################################## # Process only specified devices # ################################## while test $# -ne 0; do # Unmount all relevant mountpoints first while $LSBLK_READ; do device_umount done <<< "$($LSBLK "$1" | $SORT_MNT)" # Do deactivate # Single dm device tree deactivation. if test -b "$1"; then $LSBLK_READ <<< "$($LSBLK --nodeps "$1")" # check if the device is not on the skip list already test -z "${SKIP_DEVICE_LIST["$kname"]}" || { shift continue } deactivate else echo "$1: device not found" return 1 fi shift done; fi } get_dmopts() { ORIG_IFS=$IFS; IFS=',' for opt in $1; do case $opt in "") ;; "retry") DMSETUP_OPTS+="--retry " ;; "force") DMSETUP_OPTS+="--force " ;; *) echo "$opt: unknown DM option" esac done IFS=$ORIG_IFS } get_lvmopts() { ORIG_IFS=$IFS; IFS=',' for opt in $1; do case "$opt" in "") ;; "retry") LVM_CONFIG="activation{retry_deactivation=1}" ;; "wholevg") LVM_DO_WHOLE_VG=1 ;; *) echo "$opt: unknown LVM option" esac done IFS=$ORIG_IFS } get_mdraidopts() { ORIG_IFS=$IFS; IFS=',' for opt in $1; do case "$opt" in "") ;; "wait") MDRAID_DO_WAIT=1 ;; *) echo "$opt: unknown MD RAID option" esac done IFS=$ORIG_IFS } get_mpathopts() { ORIG_IFS=$IFS; IFS=',' for opt in $1; do case "$opt" in "") ;; "disablequeueing") MPATHD_DO_DISABLEQUEUEING=1 ;; *) echo "$opt: unknown DM-multipath option" esac done IFS=$ORIG_IFS } set_env() { if test "$ERRORS" -eq "1"; then unset ERR else ERR="2>$DEV_DIR/null" fi if test "$VERBOSE" -eq "1"; then unset OUT UMOUNT_OPTS+="-v" DMSETUP_OPTS+="-vvvv" LVM_OPTS+="-vvvv" MDADM_OPTS+="-vv" MPATHD_OPTS+="-v 3" else OUT="1>$DEV_DIR/null" fi if test -f "$LVM"; then LVM_AVAILABLE=1 else LVM_AVAILABLE=0 fi if test -f $MDADM; then MDADM_AVAILABLE=1 else MDADM_AVAILABLE=0 fi MPATHD_RUNNING=0 test "$MPATHD_DO_DISABLEQUEUEING" -eq 1 && { if test -f "$MPATHD"; then if eval "$MPATHD" show daemon "$ERR" | grep "running" >"$DEV_DIR/null"; then MPATHD_RUNNING=1 fi fi } } while test $# -ne 0; do case "$1" in "") ;; "-e"|"--errors") ERRORS=1 ;; "-h"|"--help") usage ;; "-d"|"--dmoptions") get_dmopts "$2" ; shift ;; "-l"|"--lvmoptions") get_lvmopts "$2" ; shift ;; "-m"|"--mpathoptions") get_mpathopts "$2" ; shift ;; "-r"|"--mdraidoptions") get_mdraidopts "$2"; shift ;; "-u"|"--umount") DO_UMOUNT=1 ;; "-v"|"--verbose") VERBOSE=1 ; ERRORS=1 ;; "-vv") VERBOSE=1 ; ERRORS=1 ; set -x ;; *) break ;; esac shift done set_env deactivate_all "$@" 
LVM2.2.02.176/scripts/com.redhat.lvmdbus1.conf0000644000000000000120000000072213176752421017443 0ustar rootwheel LVM2.2.02.176/scripts/lvm2_lvmlockd_systemd_red_hat.service.in0000644000000000000120000000042213176752421023011 0ustar rootwheel[Unit] Description=LVM2 lock daemon Documentation=man:lvmlockd(8) After=lvm2-lvmetad.service [Service] Type=simple NonBlocking=true ExecStart=@SBINDIR@/lvmlockd -f Environment=SD_ACTIVATION=1 PIDFile=@LVMLOCKD_PIDFILE@ SendSIGKILL=no [Install] WantedBy=multi-user.target LVM2.2.02.176/scripts/lvmconf.sh0000644000000000000120000003105713176752421015021 0ustar rootwheel#!/bin/bash # # Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved. # # This file is part of the lvm2 package. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Edit an lvm.conf file to adjust various properties # # cluster with clvmd and/or locking lib? HANDLE_CLUSTER=0 # cluster without clvmd? HANDLE_HALVM=0 # also enable services appropriately (lvmetad, clvmd)? HANDLE_SERVICES=0 # also enable cmirrord service in addition? HANDLE_MIRROR_SERVICE=0 # also start/start services in addition to enabling/disabling them? START_STOP_SERVICES=0 function usage { echo "Usage: $0 " echo "" echo "Commands:" echo "Enable clvm: --enable-cluster [--lockinglibdir ] [--lockinglib ]" echo "Disable clvm: --disable-cluster" echo "Enable halvm: --enable-halvm" echo "Disable halvm: --disable-halvm" echo "Set locking library: --lockinglibdir [--lockinglib ]" echo "" echo "Global options:" echo "Config file location: --file " echo "Set services: --services [--mirrorservice] [--startstopservices]" echo "" echo "Use the separate command 'lvmconfig' to display configuration information" } function set_default_use_lvmetad_var { eval "$(lvm dumpconfig --type default global/use_lvmetad 2>/dev/null)" if [ "$?" != 0 ]; then USE_LVMETAD=0 else USE_LVMETAD=$use_lvmetad fi } function parse_args { while [ -n "$1" ]; do case "$1" in --enable-cluster) LOCKING_TYPE=3 USE_LVMETAD=0 HANDLE_CLUSTER=1 shift ;; --disable-cluster) LOCKING_TYPE=1 set_default_use_lvmetad_var HANDLE_CLUSTER=1 shift ;; --enable-halvm) LOCKING_TYPE=1 USE_LVMETAD=0 HANDLE_HALVM=1 shift ;; --disable-halvm) LOCKING_TYPE=1 set_default_use_lvmetad_var HANDLE_HALVM=1 shift ;; --lockinglibdir) if [ -n "$2" ]; then LOCKINGLIBDIR=$2 shift 2 else usage exit 1 fi HANDLE_CLUSTER=1 ;; --lockinglib) if [ -n "$2" ]; then LOCKINGLIB=$2 shift 2 else usage exit 1 fi HANDLE_CLUSTER=1 ;; --file) if [ -n "$2" ]; then CONFIGFILE=$2 shift 2 else usage exit 1 fi ;; --services) HANDLE_SERVICES=1 shift ;; --mirrorservice) HANDLE_MIRROR_SERVICE=1 shift ;; --startstopservices) START_STOP_SERVICES=1 shift ;; *) usage exit 1 esac done if [ -n "$LOCKINGLIBDIR" ] || [ -n "$LOCKINGLIB" ]; then LOCKING_TYPE=2 USE_LVMETAD=0 fi } function validate_args { [ -z "$CONFIGFILE" ] && CONFIGFILE="/etc/lvm/lvm.conf" if [ ! 
-f "$CONFIGFILE" ] then echo "$CONFIGFILE does not exist" exit 10 fi if [ "$HANDLE_CLUSTER" = 1 ] && [ "$HANDLE_HALVM" = 1 ]; then echo "Either HA LVM or cluster method may be used at one time" exit 18 fi if [ "$HANDLE_SERVICES" = 0 ]; then if [ "$HANDLE_MIRROR_SERVICE" = 1 ]; then echo "--mirrorservice may be used only with --services" exit 19 fi if [ "$START_STOP_SERVICES" = 1 ]; then echo "--startstopservices may be used only with --services" exit 19 fi fi if [ -z "$LOCKING_TYPE" ] && [ -z "$LOCKINGLIBDIR" ]; then usage exit 1 fi if [ -n "$LOCKINGLIBDIR" ]; then if [ "${LOCKINGLIBDIR:0:1}" != "/" ] then echo "Prefix must be an absolute path name (starting with a /)" exit 12 fi if [ -n "$LOCKINGLIB" ] && [ ! -f "$LOCKINGLIBDIR/$LOCKINGLIB" ] then echo "$LOCKINGLIBDIR/$LOCKINGLIB does not exist, did you do a \"make install\" ?" exit 11 fi fi if [ "$LOCKING_TYPE" = 1 ] ; then if [ -n "$LOCKINGLIBDIR" ] || [ -n "$LOCKINGLIB" ]; then echo "Superfluous locking lib parameter, ignoring" fi fi } umask 0077 parse_args "$@" validate_args SCRIPTFILE=/etc/lvm/.lvmconf-script.tmp TMPFILE=/etc/lvm/.lvmconf-tmp.tmp # Flags so we know which parts of the file we can replace and which need # adding. These are return codes from grep, so zero means it IS present! have_type=1 have_dir=1 have_library=1 have_use_lvmetad=1 have_global=1 grep -q '^[[:blank:]]*locking_type[[:blank:]]*=' "$CONFIGFILE" have_type=$? grep -q '^[[:blank:]]*library_dir[[:blank:]]*=' "$CONFIGFILE" have_dir=$? grep -q '^[[:blank:]]*locking_library[[:blank:]]*=' "$CONFIGFILE" have_library=$? grep -q '^[[:blank:]]*use_lvmetad[[:blank:]]*=' "$CONFIGFILE" have_use_lvmetad=$? # Those options are in section "global {" so we must have one if any are present. if [ "$have_type" = 0 ] || [ "$have_dir" = 0 ] || [ "$have_library" = 0 ] || [ "$have_use_lvmetad" = 0 ] then # See if we can find it... grep -q '^[[:blank:]]*global[[:blank:]]*{' $CONFIGFILE have_global=$? if [ "$have_global" = 1 ] then echo "global keys but no 'global {' found, can't edit file" exit 13 fi fi if [ "$LOCKING_TYPE" = 2 ] && [ -z "$LOCKINGLIBDIR" ] && [ "$have_dir" = 1 ]; then echo "no library_dir specified in $CONFIGFILE" exit 16 fi # So if we don't have "global {" we need to create one and # populate it if [ "$have_global" = 1 ] then if [ -z "$LOCKING_TYPE" ]; then LOCKING_TYPE=1 fi if [ "$LOCKING_TYPE" = 3 ] || [ "$LOCKING_TYPE" = 2 ]; then cat "$CONFIGFILE" - < "$TMPFILE" global { # Enable locking for cluster LVM locking_type = $LOCKING_TYPE library_dir = "$LOCKINGLIBDIR" # Disable lvmetad in cluster use_lvmetad = 0 EOF if [ $? != 0 ] then echo "failed to create temporary config file, $CONFIGFILE not updated" exit 14 fi if [ -n "$LOCKINGLIB" ]; then cat - <> "$TMPFILE" locking_library = "$LOCKINGLIB" EOF if [ $? != 0 ] then echo "failed to create temporary config file, $CONFIGFILE not updated" exit 16 fi fi cat - <> "$TMPFILE" } EOF fi # if we aren't setting cluster locking, we don't need to create a global section if [ $? 
!= 0 ] then echo "failed to create temporary config file, $CONFIGFILE not updated" exit 17 fi else # # We have a "global {" section, so add or replace the # locking entries as appropriate # if [ -n "$LOCKING_TYPE" ]; then if [ "$have_type" = 0 ] then SEDCMD=" s/^[[:blank:]]*locking_type[[:blank:]]*=.*/\ \ \ \ locking_type = $LOCKING_TYPE/g" else SEDCMD=" /global[[:blank:]]*{/a\ \ \ \ locking_type = $LOCKING_TYPE" fi fi if [ -n "$LOCKINGLIBDIR" ]; then if [ "$have_dir" = 0 ] then SEDCMD="${SEDCMD}\ns'^[[:blank:]]*library_dir[[:blank:]]*=.*'\ \ \ \ library_dir = \"$LOCKINGLIBDIR\"'g" else SEDCMD="${SEDCMD}\n/global[[:blank:]]*{/a\ \ \ \ library_dir = \"$LOCKINGLIBDIR\"" fi fi if [ -n "$LOCKINGLIB" ]; then if [ "$have_library" = 0 ] then SEDCMD="${SEDCMD}\ns/^[[:blank:]]*locking_library[[:blank:]]*=.*/\ \ \ \ locking_library = \"$LOCKINGLIB\"/g" else SEDCMD="${SEDCMD}\n/global[[:blank:]]*{/a\ \ \ \ locking_library = \"$LOCKINGLIB\"" fi fi if [ "$have_use_lvmetad" = 0 ] then SEDCMD="${SEDCMD}\ns'^[[:blank:]]*use_lvmetad[[:blank:]]*=.*'\ \ \ \ use_lvmetad = $USE_LVMETAD'g" else SEDCMD="${SEDCMD}\n/global[[:blank:]]*{/a\ \ \ \ use_lvmetad = $USE_LVMETAD" fi echo -e "$SEDCMD" > "$SCRIPTFILE" sed <"$CONFIGFILE" >"$TMPFILE" -f "$SCRIPTFILE" if [ $? != 0 ] then echo "sed failed, $CONFIGFILE not updated" exit 15 fi fi # Now we have a suitably editted config file in a temp place, # backup the original and copy our new one into place. cp "$CONFIGFILE" "$CONFIGFILE.lvmconfold" if [ $? != 0 ] then echo "failed to backup old config file, $CONFIGFILE not updated" exit 2 fi cp "$TMPFILE" "$CONFIGFILE" if [ $? != 0 ] then echo "failed to copy new config file into place, check $CONFIGFILE is still OK" exit 3 fi rm -f "$SCRIPTFILE" "$TMPFILE" function set_service { local type=$1 local action=$2 shift 2 if [ "$type" = "systemd" ]; then if [ "$action" = "activate" ]; then for i in "$@"; do unset LoadState eval "$($SYSTEMCTL_BIN show "$i" -p LoadState 2>/dev/null)" test "$LoadState" = "loaded" || continue $SYSTEMCTL_BIN enable "$i" if [ "$START_STOP_SERVICES" = 1 ]; then $SYSTEMCTL_BIN start "$i" fi done elif [ "$action" = "deactivate" ]; then for i in "$@"; do unset LoadState eval "$($SYSTEMCTL_BIN show "$i" -p LoadState 2>/dev/null)" test "$LoadState" = "loaded" || continue "$SYSTEMCTL_BIN" disable "$i" if [ "$START_STOP_SERVICES" = 1 ]; then "$SYSTEMCTL_BIN" stop "$i" fi done fi elif [ "$type" = "sysv" ]; then if [ "$action" = "activate" ]; then for i in "$@"; do "$CHKCONFIG_BIN" --list "$i" > /dev/null || continue "$CHKCONFIG_BIN" "$i" on if [ "$START_STOP_SERVICES" = 1 ]; then "$SERVICE_BIN" "$i" start fi done elif [ "$action" = "deactivate" ]; then for i in "$@"; do "$CHKCONFIG_BIN" --list "$i" > /dev/null || continue if [ "$START_STOP_SERVICES" = 1 ]; then "$SERVICE_BIN" "$i" stop fi "$CHKCONFIG_BIN" "$i" off done fi fi } # Start/stop and enable/disable services if needed. 
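# For instance (assuming a systemd-based host), a call such as
#   lvmconf.sh --enable-cluster --services --startstopservices
# reaches this point with LOCKING_TYPE=3 and USE_LVMETAD=0, so the lvm2-lvmetad
# service/socket get disabled below and lvm2-cluster-activation.service is
# enabled and started.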
if [ "$HANDLE_SERVICES" = 1 ]; then SYSTEMCTL_BIN=$(which systemctl 2>/dev/null) CHKCONFIG_BIN=$(which chkconfig 2>/dev/null) SERVICE_BIN=$(which service 2>/dev/null) # Systemd services if [ -n "$SYSTEMCTL_BIN" ]; then if [ "$USE_LVMETAD" = 0 ]; then set_service systemd deactivate lvm2-lvmetad.service lvm2-lvmetad.socket else set_service systemd activate lvm2-lvmetad.socket fi if [ "$LOCKING_TYPE" = 3 ]; then set_service systemd activate lvm2-cluster-activation.service if [ "$HANDLE_MIRROR_SERVICE" = 1 ]; then set_service activate lvm2-cmirrord.service fi else set_service systemd deactivate lvm2-cluster-activation.service if [ "$HANDLE_MIRROR_SERVICE" = 1 ]; then set_service systemd deactivate lvm2-cmirrord.service fi fi # System V init scripts elif [ -n "$SERVICE_BIN" ] && [ -n "$CHKCONFIG_BIN" ]; then if [ "$USE_LVMETAD" = 0 ]; then set_service sysv deactivate lvm2-lvmetad else set_service sysv activate lvm2-lvmetad fi if [ "$LOCKING_TYPE" = 3 ]; then set_service sysv activate clvmd if [ "$HANDLE_MIRROR_SERVICE" = 1 ]; then set_service sysv activate cmirrord fi else set_service sysv deactivate clvmd if [ "$HANDLE_MIRROR_SERVICE" = 1 ]; then set_service sysv deactivate cmirrord fi fi # None of the service tools found, error out else echo "Missing tools to handle services" exit 20 fi fi LVM2.2.02.176/scripts/cmirrord_init_red_hat.in0000755000000000000120000000272713176752421017710 0ustar rootwheel#!/bin/bash # # chkconfig: - 22 78 # description: Starts and stops cmirrord # pidfile: @CMIRRORD_PIDFILE@ # # For Red-Hat-based distributions such as Fedora, RHEL, CentOS. # ### BEGIN INIT INFO # Provides: cmirrord # Required-Start: $network $time $local_fs # Required-Stop: $network $time $local_fs # Short-Description: Starts and stops cmirrord # Description: Starts and stops the cluster mirror log daemon ### END INIT INFO . /etc/init.d/functions DAEMON=cmirrord usrsbindir="@USRSBINDIR@" LOCK_FILE="@DEFAULT_SYS_LOCK_DIR@/subsys/$DAEMON" start() { rtrn=0 if ! pidof "$DAEMON" > /dev/null then echo -n "Starting $DAEMON: " daemon "$usrsbindir/$DAEMON" rtrn=$? echo fi return $rtrn } stop() { echo -n "Stopping $DAEMON:" killproc "$DAEMON" -TERM rtrn=$? echo return $rtrn } wait_for_finish() { count=0 while [ "$count" -le 10 -a -n "`pidof $DAEMON`" ] do sleep 1 count=$((count + 1)) done if [ "$(pidof "$DAEMON")" ] then return 1 else return 0 fi } cmirror_status() { status "$DAEMON" } rtrn=1 # See how we were called. case "$1" in start) start rtrn=$? [ "$rtrn" = 0 ] && touch "$LOCK_FILE" ;; stop) stop rtrn=$? [ "$rtrn" = 0 ] && rm -f "$LOCK_FILE" ;; restart) if stop then wait_for_finish start fi rtrn=$? ;; status) cmirror_status rtrn=$? if [ "$rtrn" -eq 0 ]; then echo "cmirror is running." fi ;; *) echo $"Usage: $0 {start|stop|restart|status}" ;; esac exit $rtrn LVM2.2.02.176/scripts/clvmd_init_red_hat.in0000644000000000000120000001064713176752421017171 0ustar rootwheel#!/bin/bash # # clvmd - Clustered LVM Daemon init script # # chkconfig: - 24 76 # description: Cluster daemon for userland logical volume management tools. # pidfile: @CLVMD_PIDFILE@ # # For Red-Hat-based distributions such as Fedora, RHEL, CentOS. # ### BEGIN INIT INFO # Provides: clvmd # Required-Start: $local_fs@CLVMD_CMANAGERS@ # Required-Stop: $local_fs@CLVMD_CMANAGERS@ # Short-Description: This service is Clusterd LVM Daemon. # Description: Cluster daemon for userland logical volume management tools. ### END INIT INFO . 
/etc/rc.d/init.d/functions DAEMON=clvmd sbindir="@SBINDIR@" usrsbindir="@USRSBINDIR@" lvm_vgchange="$sbindir/vgchange" lvm_vgs="$sbindir/vgs" lvm_vgscan="$sbindir/vgscan" lvm_lvs="$sbindir/lvs" CLVMDOPTS="-T30" [ -f /etc/sysconfig/cluster ] && . /etc/sysconfig/cluster [ -f "/etc/sysconfig/$DAEMON" ] && . "/etc/sysconfig/$DAEMON" [ -n "$CLVMD_CLUSTER_IFACE" ] && CLVMDOPTS="$CLVMDOPTS -I $CLVMD_CLUSTER_IFACE" # allow up to $CLVMD_STOP_TIMEOUT seconds to clvmd to complete exit operations # default to 10 seconds [ -z $CLVMD_STOP_TIMEOUT ] && CLVMD_STOP_TIMEOUT=10 LOCK_FILE="/var/lock/subsys/$DAEMON" clustered_vgs() { "$lvm_vgs" --noheadings -o vg_name -S 'vg_clustered=1' 2>/dev/null } clustered_active_lvs() { "$lvm_lvs" --noheadings -o lv_name -S 'vg_clustered=1 && lv_active!=""' 2>/dev/null } rh_status() { status "$DAEMON" } rh_status_q() { rh_status >/dev/null 2>&1 } start() { if ! rh_status_q; then echo -n "Starting $DAEMON: " "$usrsbindir/$DAEMON" $CLVMDOPTS || return $? echo fi # Refresh local cache. # # It's possible that new PVs were added to this, or other VGs # while this node was down. So we run vgscan here to avoid # any potential "Missing UUID" messages with subsequent # LVM commands. # The following step would be better and more informative to the user: # 'action "Refreshing VG(s) local cache:" ${lvm_vgscan}' # but it could show warnings such as: # 'clvmd not running on node x-y-z Unable to obtain global lock.' # and the action would be shown as FAILED when in reality it didn't. # Ideally vgscan should have a startup mode that would not print # unnecessary warnings. "$lvm_vgscan" > /dev/null 2>&1 action "Activating VG(s):" "$lvm_vgchange" -aay $LVM_VGS || return $? touch "$LOCK_FILE" return 0 } wait_for_finish() { count=0 while [ "$count" -le "$CLVMD_STOP_TIMEOUT" ] && \ rh_status_q ]; do sleep 1 count=$((count+1)) done ! rh_status_q } stop() { rh_status_q || return 0 [ -z "$LVM_VGS" ] && LVM_VGS="$(clustered_vgs)" if [ -n "$LVM_VGS" ]; then action "Deactivating clustered VG(s):" "$lvm_vgchange" -anl $LVM_VGS || return $? fi action "Signaling $DAEMON to exit" kill -TERM "$(pidofproc "$DAEMON")" || return $? # wait half second before we start the waiting loop or we will show # the loop more time than really necessary usleep 500000 # clvmd could take some time to stop rh_status_q && action "Waiting for $DAEMON to exit:" wait_for_finish if rh_status_q; then echo -n "$DAEMON failed to exit" failure echo return 1 else echo -n "$DAEMON terminated" success echo fi rm -f "$LOCK_FILE" return 0 } reload() { rh_status_q || exit 7 action "Reloading $DAEMON configuration: " "$usrsbindir/$DAEMON" -R || return $? } restart() { # if stop fails, restart will return the error and not attempt # another start. Even if start is protected by rh_status_q, # that would avoid spawning another daemon, it would try to # reactivate the VGs. # Try to get clvmd to restart itself. This will preserve # exclusive LV locks action "Restarting $DAEMON: " "$usrsbindir/$DAEMON" -S # If that fails then do a normal stop & restart if [ $? != 0 ]; then stop && start return $? else touch "$LOCK_FILE" return 0 fi } [ "$EUID" != "0" ] && { echo "clvmd init script can only be executed as root user" exit 4 } # See how we were called. case "$1" in start) start rtrn=$? ;; stop) stop rtrn=$? ;; restart|force-reload) restart rtrn=$? ;; condrestart|try-restart) rh_status_q || exit 0 restart rtrn=$? ;; reload) reload rtrn=$? ;; status) rh_status rtrn=$? 
if [ "$rtrn" = 0 ]; then cvgs="$(clustered_vgs)" echo Clustered Volume Groups: ${cvgs:-"(none)"} clvs="$(clustered_active_lvs)" echo Active clustered Logical Volumes: ${clvs:-"(none)"} fi ;; *) echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" rtrn=2 ;; esac exit $rtrn LVM2.2.02.176/scripts/lvm2_monitoring_systemd_red_hat.service.in0000644000000000000120000000123213176752421023363 0ustar rootwheel[Unit] Description=Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling Documentation=man:dmeventd(8) man:lvcreate(8) man:lvchange(8) man:vgchange(8) Requires=dm-event.socket lvm2-lvmetad.socket After=dm-event.socket dm-event.service lvm2-lvmetad.socket lvm2-activation.service lvm2-lvmetad.service Before=local-fs-pre.target DefaultDependencies=no Conflicts=shutdown.target [Service] Type=oneshot Environment=LVM_SUPPRESS_LOCKING_FAILURE_MESSAGES=1 ExecStart=@SBINDIR@/lvm vgchange --monitor y --ignoreskippedcluster ExecStop=@SBINDIR@/lvm vgchange --monitor n --ignoreskippedcluster RemainAfterExit=yes [Install] WantedBy=sysinit.target LVM2.2.02.176/scripts/lvm2create_initrd/0000755000000000000120000000000013176752421016430 5ustar rootwheelLVM2.2.02.176/scripts/lvm2create_initrd/lvm2create_initrd.80000644000000000000120000003015113176752421022136 0ustar rootwheel.\" Automatically generated by Pod::Man 2.23 (Pod::Simple 3.14) .\" .\" Standard preamble: .\" ======================================================================== .de Sp \" Vertical space (when we can't use .PP) .if t .sp .5v .if n .sp .. .de Vb \" Begin verbatim text .ft CW .nf .ne \\$1 .. .de Ve \" End verbatim text .ft R .fi .. .\" Set up some character translations and predefined strings. \*(-- will .\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left .\" double quote, and \*(R" will give a right double quote. \*(C+ will .\" give a nicer C++. Capital omega is used to do unbreakable dashes and .\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff, .\" nothing in troff, for use with C<>. .tr \(*W- .ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' .ie n \{\ . ds -- \(*W- . ds PI pi . if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch . if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch . ds L" "" . ds R" "" . ds C` "" . ds C' "" 'br\} .el\{\ . ds -- \|\(em\| . ds PI \(*p . ds L" `` . ds R" '' 'br\} .\" .\" Escape single quotes in literal strings from groff's Unicode transform. .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" .\" If the F register is turned on, we'll generate index entries on stderr for .\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index .\" entries marked with X<> in POD. Of course, you'll have to process the .\" output yourself in some meaningful fashion. .ie \nF \{\ . de IX . tm Index:\\$1\t\\n%\t"\\$2" .. . nr % 0 . rr F .\} .el \{\ . de IX .. .\} .\" .\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2). .\" Fear. Run. Save yourself. No user-serviceable parts. . \" fudge factors for nroff and troff .if n \{\ . ds #H 0 . ds #V .8m . ds #F .3m . ds #[ \f1 . ds #] \fP .\} .if t \{\ . ds #H ((1u-(\\\\n(.fu%2u))*.13m) . ds #V .6m . ds #F 0 . ds #[ \& . ds #] \& .\} . \" simple accents for nroff and troff .if n \{\ . ds ' \& . ds ` \& . ds ^ \& . ds , \& . ds ~ ~ . ds / .\} .if t \{\ . ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u" . ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u' . ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u' . 
ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u' . ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u' . ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u' .\} . \" troff and (daisy-wheel) nroff accents .ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V' .ds 8 \h'\*(#H'\(*b\h'-\*(#H' .ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#] .ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H' .ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u' .ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#] .ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#] .ds ae a\h'-(\w'a'u*4/10)'e .ds Ae A\h'-(\w'A'u*4/10)'E . \" corrections for vroff .if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u' .if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u' . \" for low resolution devices (crt and lpr) .if \n(.H>23 .if \n(.V>19 \ \{\ . ds : e . ds 8 ss . ds o a . ds d- d\h'-1'\(ga . ds D- D\h'-1'\(hy . ds th \o'bp' . ds Th \o'LP' . ds ae ae . ds Ae AE .\} .rm #[ #] #H #V #F C .\" ======================================================================== .\" .IX Title "lvm2create_initrd 8" .TH lvm2create_initrd 8 "2011-11-12" "lvm2create_initrd" "create LVM2 initrd" .\" For nroff, turn off justification. Always turn off hyphenation; it makes .\" way too many mistakes in technical documents. .if n .ad l .nh .SH "NAME" lvm2create_initrd \- create initrd image for booting to root\e\-on\e\-LVM2 .SH "SYNOPSIS" .IX Header "SYNOPSIS" \&\fBlvm2create_initrd\fR [ \fB\-h|\-\-help\fR ] [ \fB\-v|\-\-verbose\fR ] [ \fB\-c|\-\-lvmconf\fR \fI/path/to/lvm.conf\fR ] [ \fB\-m|\-\-modules\fR "\fImodule1 module2 ...\fR" ] [ \fB\-e|\-\-extra\fR "\fIfile1 file2 ...\fR" ] [ \fB\-r|\-\-raid\fR "\fI/dev/md1 /dev/md2 ...\fR" ] [ \fB\-R|\-\-raidconf\fR \fI/path/to/mdadm.conf\fR ] [ \fB\-M|\-\-makedev\fR \fIstyle\fR ] .SH "DESCRIPTION" .IX Header "DESCRIPTION" lvm2create_initrd creates an initial ramdisk (initrd) image suitable for booting to system that has an \s-1LVM2\s0 volume as its root filesystem. .PP To boot to such a setup, you'll either need a bootloader that understands \s-1LVM2\s0 volumes, or you'll need a filesystem on a regular volume to act as a boot partition (typically mounted on /boot). .PP The resulting initrd image is fairly full-featured. It can harbor and load kernel modules, start \s-1MD\s0 devices, and boot to a shell to perform rescue operations. .SS "Booting to your initrd Image:" .IX Subsection "Booting to your initrd Image:" The filesystem image created is an ext2fs filesystem, hence your kernel must have ext2fs built into it statically in order to boot to the image. .PP Once you create your initrd image, you must pass the correct options to the kernel when you boot using it. Your kernel command line should look something like this: .PP \&\fBroot=/dev/ram0 lvm2root=/dev/rootvg/root [ lvm2rescue ]\fR .PP of course there may be other options. .IP "\fBroot=/dev/ram0\fR" 4 .IX Item "root=/dev/ram0" This option is required. It tells the kernel that the root filesystem should initially be set to the ramdisk (/dev/ram0). .IP "\fBlvm2root=/dev/rootvg/root\fR" 4 .IX Item "lvm2root=/dev/rootvg/root" This option is also required. It tells the initrd image which \s-1LVM2\s0 device the root filesystem is located on. .IP "\fBlvm2rescue\fR" 4 .IX Item "lvm2rescue" Causes the initrd image to run a shell prior to mounting the root filesystem. 
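.\" For illustration only, a GRUB (legacy) boot entry passing the parameters
.\" described above might look like the following; the kernel image, initrd
.\" file name and volume group/LV names are assumptions, not defaults:
.\"   title Linux (root on LVM2)
.\"     root (hd0,0)
.\"     kernel /vmlinuz root=/dev/ram0 lvm2root=/dev/rootvg/root
.\"     initrd /initrd-lvm2.gz
.\" Append lvm2rescue to the kernel line to drop to the rescue shell.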
This is helpful in disaster situations where your initrd image is accessable, but there is a problem with the root filesystem (corrupted image, incorrect device setup, etc.). This option is (of course) optional. .SH "OPTIONS" .IX Header "OPTIONS" Most of parameters that can be set via command-line options can also be set via environment variables. Options specified on the command-line always take precedence. .IP "\fB\-h|\-\-help\fR" 4 .IX Item "-h|--help" Display short help text and exit. If used, other options are ignored. .IP "\fB\-v|\-\-verbose\fR" 4 .IX Item "-v|--verbose" Turn on extra verbosity for debugging, etc. .IP "\fB\-c|\-\-lvmconf\fR \fI/path/to/lvm.conf\fR" 4 .IX Item "-c|--lvmconf /path/to/lvm.conf" Specify an lvm.conf file to include in the image. This is useful if you have special device filters or other options you wish to use during the initrd stage. If this option is not included, then a lvm.conf file is created that contains only the current device filter from an \fBlvm dumpconfig\fR. This can also be set via the \fB\f(CB$LVMCONF\fB\fR environment variable. .ie n .IP "\fB\-m|\-\-modules\fR ""\fI/path/to/module1.ko /path/to/module2.ko ...\fR""" 4 .el .IP "\fB\-m|\-\-modules\fR ``\fI/path/to/module1.ko /path/to/module2.ko ...\fR''" 4 .IX Item "-m|--modules ""/path/to/module1.ko /path/to/module2.ko ...""" Specify modules to include and plug in during the initrd phase. This option takes a quoted, space-separated list of modules. Full pathnames are required. These modules are loaded into the kernel early in the initrd phase of the boot process. The current modprobe.conf file is also copied to the initrd image as well. This can also be specified via the \fB\f(CB$MODULES\fB\fR environment variable. .ie n .IP "\fB\-e|\-\-extra\fR ""\fI/path/to/file1 /path/to/file2 ...\fR""" 4 .el .IP "\fB\-e|\-\-extra\fR ``\fI/path/to/file1 /path/to/file2 ...\fR''" 4 .IX Item "-e|--extra ""/path/to/file1 /path/to/file2 ...""" Extra files that should be included in the initrd image. These files will be copied to the same location in the initrd image that they are in the current filesystem. Again full pathnames are required. This can also be specified via the \fB\f(CB$EXTRAFILES\fB\fR environment variable. .ie n .IP "\fB\-r|\-\-raid\fR ""\fI/dev/md1 /dev/md2...\fR""" 4 .el .IP "\fB\-r|\-\-raid\fR ``\fI/dev/md1 /dev/md2...\fR''" 4 .IX Item "-r|--raid ""/dev/md1 /dev/md2...""" \&\s-1RAID\s0 devices to be started prior to scanning for \s-1LVM2\s0 volume groups. If this option is used then then \fBmdadm\fR program must be installed. This can also be specified via the \fB\f(CB$RAID\fB\fR environment variable. .ie n .IP "\fB\-R|\-\-raidconf\fR ""\fI/path/to/mdadm.conf\fR""" 4 .el .IP "\fB\-R|\-\-raidconf\fR ``\fI/path/to/mdadm.conf\fR''" 4 .IX Item "-R|--raidconf ""/path/to/mdadm.conf""" Location of a mdadm.conf file to include. If this is not specified, then no files are included, and any devices specified with the \fB\-r\fR option above must have minor numbers that match their superblock values. This can also be specified via the \fB\f(CB$RAIDCONF\fB\fR environment variable. .IP "\fB\-M|\-\-makedev\fR \fIstyle\fR" 4 .IX Item "-M|--makedev style" Set \s-1MAKEDEV\s0 invocation style. The script currently supports 3 styles of \&\s-1MAKEDEV\s0 programs \fIdebian\fR, \fIredhat\fR and \fIgentoo\fR. The default is \fIdebian\fR. Set to \fIredhat\fR if using the RedHat/Fedora binary \s-1MAKEDEV\s0 program. \fIgentoo\fR has the same binary but in /sbin instead of /dev. 
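.\" As an illustration (the paths and module names below are examples, not
.\" defaults shipped with the script), a typical invocation combining the
.\" options above might be:
.\"   lvm2create_initrd -v -c /etc/lvm/lvm.conf \
.\"     -m "/lib/modules/`uname -r`/kernel/drivers/md/dm-mod.ko" -M redhat
.\" The same settings can also be passed through the environment, e.g.
.\"   LVMCONF=/etc/lvm/lvm.conf MAKEDEV=redhat lvm2create_initrd -v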
Please send a bug report to maintainer if your distribution doesn't work with any of the current options. .SH "ENVIRONMENT VARIABLES" .IX Header "ENVIRONMENT VARIABLES" Most of the options to this script can be set via environment variables. In situations where both are set, then the command-line options take precedence. .ie n .IP "\fB\fB$LVMCONF\fB\fR" 4 .el .IP "\fB\f(CB$LVMCONF\fB\fR" 4 .IX Item "$LVMCONF" Same as \-c option. .ie n .IP "\fB\fB$MODULES\fB\fR" 4 .el .IP "\fB\f(CB$MODULES\fB\fR" 4 .IX Item "$MODULES" Same as \-m option. .ie n .IP "\fB\fB$EXTRAFILES\fB\fR" 4 .el .IP "\fB\f(CB$EXTRAFILES\fB\fR" 4 .IX Item "$EXTRAFILES" Same as \-e option. .ie n .IP "\fB\fB$RAID\fB\fR" 4 .el .IP "\fB\f(CB$RAID\fB\fR" 4 .IX Item "$RAID" Same as \-r option. .ie n .IP "\fB\fB$RAIDCONF\fB\fR" 4 .el .IP "\fB\f(CB$RAIDCONF\fB\fR" 4 .IX Item "$RAIDCONF" Same as \-R option. .ie n .IP "\fB\fB$MAKEDEV\fB\fR" 4 .el .IP "\fB\f(CB$MAKEDEV\fB\fR" 4 .IX Item "$MAKEDEV" Same as \-M option. .ie n .IP "\fB\fB$BASICDEVICES\fB\fR" 4 .el .IP "\fB\f(CB$BASICDEVICES\fB\fR" 4 .IX Item "$BASICDEVICES" Overrides the default value of \f(CW$BASICDEVICES\fR in the script (which is \*(L"std consoleonly fd\*(R"). These values are passed to the \fB\s-1MAKEDEV\s0\fR program to create device entries in the initrd image. .ie n .IP "\fB\fB$BLOCKDEVICES\fB\fR" 4 .el .IP "\fB\f(CB$BLOCKDEVICES\fB\fR" 4 .IX Item "$BLOCKDEVICES" Overrides the default value of \f(CW$BLOCKDEVICES\fR in the script (which is \*(L"md hda hdb hdc hdd sda sdb sdc sdd\*(R"). This value is passed to the \fB\s-1MAKEDEV\s0\fR program to create device entries in the initrd image. .ie n .IP "\fB\fB$BINFILES\fB\fR" 4 .el .IP "\fB\f(CB$BINFILES\fB\fR" 4 .IX Item "$BINFILES" Overrides the default value of \f(CW$BINFILES\fR (which is \*(L"/lib/lvm\-200/lvm /bin/bash /bin/busybox /sbin/pivot_root\*(R"). The difference between using this and adding a file to the \f(CW$EXTRAFILES\fR list above is that libraries that these depend upon are also included. You can still use \f(CW$EXTRAFILES\fR to achieve the same effect, but you must resolve library dependencies youself. .ie n .IP "\fB\fB$INITRDSIZE\fB\fR" 4 .el .IP "\fB\f(CB$INITRDSIZE\fB\fR" 4 .IX Item "$INITRDSIZE" Force a particular size for your initrd image. The default is to total up the size of the included files and to add 512K as a buffer. .SH "BUGS" .IX Header "BUGS" I don't like having to specify a \-M option to set the \s-1MAKEDEV\s0 style, but I know of no way to reliably detect what type of \s-1MAKEDEV\s0 is being used. We'll probably have to add other \s-1MAKEDEV\s0 styles in the future as this script is tested on other distributions. .SH "AUTHORS" .IX Header "AUTHORS" The script was originally written by Miguel Cabeca, with significant improvements by Jeffrey Layton. Comments, bug reports and patches should be sent to Jeffrey Layton at \fBjtlayton@poochiereds.net\fR. .SH "SEE ALSO" .IX Header "SEE ALSO" \&\fB\s-1MAKEDEV\s0\fR(8), \fBmdadm\fR(8), \fBbusybox\fR(8), \fBlvm.conf\fR(5) LVM2.2.02.176/scripts/lvm2create_initrd/lvm2create_initrd.pod0000644000000000000120000001456113176752421022560 0ustar rootwheel=head1 NAME lvm2create_initrd - create initrd image for booting to root\-on\-LVM2 =head1 SYNOPSIS B [ B<-h|--help> ] [ B<-v|--verbose> ] [ B<-c|--lvmconf> I ] [ B<-m|--modules> "I" ] [ B<-e|--extra> "I" ] [ B<-r|--raid> "I" ] [ B<-R|--raidconf> I ] [ B<-M|--makedev> I