pax_global_header00006660000000000000000000000064131356566700014526gustar00rootroot0000000000000052 comment=3702bedf19fe60165021a225103ad586b3cf7ebd Sia-1.3.0/000077500000000000000000000000001313565667000122435ustar00rootroot00000000000000Sia-1.3.0/.gitignore000066400000000000000000000002271313565667000142340ustar00rootroot00000000000000# vim *.swp *.swo # build artifacts cover release # pdflatex doc/whitepaper.aux doc/whitepaper.log doc/whitepaper.pdf # IntelliJ IDEA .idea sia.iml Sia-1.3.0/.travis.yml000066400000000000000000000004401313565667000143520ustar00rootroot00000000000000language: go os: - linux go: - 1.8 install: - make dependencies - test -z "$(go fmt ./...)" - glyphcheck ./... - make script: make test && make test-long && make cover && make bench sudo: false branches: only: - master - staging notifications: email: false Sia-1.3.0/CHANGELOG.md000066400000000000000000000077531313565667000140700ustar00rootroot00000000000000Version History --------------- July 2017: v1.3.0 (minor release) - Add remote file repair - Add wallet 'lookahead' - Introduce difficulty hardfork May 2017: v1.2.2 (patch release) - Faster + smaller wallet database - Gracefully handle missing storage folders - >2500 lines of new testing + bug fixes April 2017: v1.2.1 (patch release) - Faster host upgrading - Fix wallet bugs - Add siac command to cancel allowance v1.2.0 (minor release) - Host overhaul - Wallet overhaul - Tons of bug fixes and efficiency improvements March 2017: v1.1.2 (patch release) - Add async download endpoint - Fix host storage proof bug February 2017: v1.1.1 (patch release) - Renter now performs much better at scale - Myriad HostDB improvements - Add siac command to support storage leaderboard January 2017: v1.1.0 (minor release) - Greatly improved upload/download speeds - Wallet now regularly "defragments" - Better contract metrics December 2016: v1.0.4 (LTS release) October 2016: v1.0.3 (patch release) - Greatly improved renter stability - Smarter HostDB - Numerous minor bug fixes July 2016: v1.0.1 (patch release) - Restricted API address to localhost - Fixed renter/host desynchronization - Fixed host silently refusing new contracts June 2016: v1.0.0 (major release) - Finalized API routes - Add optional API authentication - Improve automatic contract management May 2016: v0.6.0 (minor release) - Switched to long-form renter contracts - Added support for multiple hosting folders - Hosts are now identified by their public key January 2016: v0.5.2 (patch release) - Faster initial blockchain download - Introduced headers-only broadcasting v0.5.1 (patch release) - Fixed bug severely impacting performance - Restored (but deprecated) some siac commands - Added modules flag, allowing modules to be disabled v0.5.0 (minor release) - Major API changes to most modules - Automatic contract renewal - Data on inactive hosts is reuploaded - Support for folder structure - Smarter host October 2015: v0.4.8 (patch release) - Restored compatibility with v0.4.6 v0.4.7 (patch release) - Dropped support for v0.3.3.x v0.4.6 (patch release) - Removed over-aggressive consistency check v0.4.5 (patch release) - Fixed last prominent bug in block database - Closed some dangling resource handles v0.4.4 (patch release) - Uploading is much more reliable - Price estimations are more accurate - Bumped filesize limit to 20 GB v0.4.3 (patch release) - Block database is now faster and more stable - Wallet no longer freezes when unlocked during IBD - Optimized block encoding/decoding September 2015: v0.4.2 (patch release) - 
HostDB is now smarter - Tweaked renter contract creation v0.4.1 (patch release) - Added support for loading v0.3.3.x wallets - Better pruning of dead nodes - Improve database consistency August 2015: v0.4.0: Second stable currency release. - Wallets are encrypted and generated from seed phrases - Files are erasure-coded and transferred in parallel - The blockchain is now fully on-disk - Added UPnP support June 2015: v0.3.3.3 (patch release) - Host announcements can be "forced" - Wallets can be merged - Unresponsive addresses are pruned from the node list v0.3.3.2 (patch release) - Siafunds can be loaded and sent - Added block explorer - Patched two critical security vulnerabilities v0.3.3.1 (hotfix) - Mining API sends headers instead of entire blocks - Slashed default hosting price v0.3.3: First stable currency release. - Set release target - Added progress bars to uploads - Rigorous testing of consensus code May 2015: v0.3.2: Fourth open beta release. - Switched encryption from block cipher to stream cipher - Updates are now signed - Added API calls to support external miners v0.3.1: Third open beta release. - Blocks are now stored on-disk in a database - Files can be shared via .sia files or ASCII-encoded data - RPCs are now multiplexed over one physical connection March 2015: v0.3.0: Second open beta release. Jan 2015: v0.2.0: First open beta release. Dec 2014: v0.1.0: Closed beta release. Sia-1.3.0/LICENSE000066400000000000000000000020701313565667000132470ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2016 Nebulous Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Sia-1.3.0/Makefile000066400000000000000000000106511313565667000137060ustar00rootroot00000000000000# all will build and install developer binaries, which have debugging enabled # and much faster mining and block constants. all: install # dependencies installs all of the dependencies that are required for building # Sia. 
dependencies: # Consensus Dependencies go get -u github.com/NebulousLabs/demotemutex go get -u github.com/NebulousLabs/fastrand go get -u github.com/NebulousLabs/merkletree go get -u github.com/NebulousLabs/bolt go get -u golang.org/x/crypto/blake2b go get -u golang.org/x/crypto/ed25519 # Module + Daemon Dependencies go get -u github.com/NebulousLabs/entropy-mnemonics go get -u github.com/NebulousLabs/errors go get -u github.com/NebulousLabs/go-upnp go get -u github.com/NebulousLabs/muxado go get -u github.com/klauspost/reedsolomon go get -u github.com/julienschmidt/httprouter go get -u github.com/inconshreveable/go-update go get -u github.com/kardianos/osext # Frontend Dependencies go get -u github.com/bgentry/speakeasy go get -u github.com/spf13/cobra/... # Developer Dependencies go install -race std go get -u github.com/golang/lint/golint go get -u github.com/NebulousLabs/glyphcheck # pkgs changes which packages the makefile calls operate on. run changes which # tests are run during testing. run = . pkgs = ./api ./build ./compatibility ./crypto ./encoding ./modules ./modules/consensus \ ./modules/explorer ./modules/gateway ./modules/host ./modules/host/contractmanager \ ./modules/renter ./modules/renter/contractor ./modules/renter/hostdb ./modules/renter/hostdb/hosttree \ ./modules/renter/proto ./modules/miner ./modules/wallet ./modules/transactionpool ./persist ./siac \ ./siad ./sync ./types # fmt calls go fmt on all packages. fmt: gofmt -s -l -w $(pkgs) # vet calls go vet on all packages. # NOTE: go vet requires packages to be built in order to obtain type info. vet: release-std go vet $(pkgs) # will always run on some packages for a while. lintpkgs = ./modules ./modules/gateway ./modules/host ./modules/renter/hostdb ./modules/renter/contractor ./persist lint: @for package in $(lintpkgs); do \ golint -min_confidence=1.0 $$package \ && test -z $$(golint -min_confidence=1.0 $$package) ; \ done # install builds and installs developer binaries. install: go install -race -tags='dev debug profile' $(pkgs) # release builds and installs release binaries. release: go install -tags='debug profile' $(pkgs) release-race: go install -race -tags='debug profile' $(pkgs) release-std: go install -ldflags='-s -w' $(pkgs) # clean removes all directories that get automatically created during # development. clean: rm -rf release doc/whitepaper.aux doc/whitepaper.log doc/whitepaper.pdf test: go test -short -tags='debug testing' -timeout=5s $(pkgs) -run=$(run) test-v: go test -race -v -short -tags='debug testing' -timeout=15s $(pkgs) -run=$(run) test-long: clean fmt vet lint go test -v -race -tags='testing debug' -timeout=500s $(pkgs) -run=$(run) test-vlong: clean fmt vet lint go test -v -race -tags='testing debug vlong' -timeout=5000s $(pkgs) -run=$(run) test-cpu: go test -v -tags='testing debug' -timeout=500s -cpuprofile cpu.prof $(pkgs) -run=$(run) test-mem: go test -v -tags='testing debug' -timeout=500s -memprofile mem.prof $(pkgs) -run=$(run) bench: clean fmt go test -tags='debug testing' -timeout=500s -run=XXX -bench=$(run) $(pkgs) cover: clean @mkdir -p cover/modules @mkdir -p cover/modules/renter @mkdir -p cover/modules/host @for package in $(pkgs); do \ go test -tags='testing debug' -timeout=500s -covermode=atomic -coverprofile=cover/$$package.out ./$$package -run=$(run) \ && go tool cover -html=cover/$$package.out -o=cover/$$package.html \ && rm cover/$$package.out ; \ done # whitepaper builds the whitepaper from whitepaper.tex. 
pdflatex has to be # called twice because references will not update correctly the first time. whitepaper: @pdflatex -output-directory=doc whitepaper.tex > /dev/null pdflatex -output-directory=doc whitepaper.tex .PHONY: all dependencies fmt install release release-std xc clean test test-v test-long cover cover-integration cover-unit whitepaper Sia-1.3.0/README.md000066400000000000000000000134031313565667000135230ustar00rootroot00000000000000# [![Sia Logo](http://sia.tech/img/svg/sia-green-logo.svg)](http://sia.tech) v1.3.0 (Capricorn) [![Build Status](https://travis-ci.org/NebulousLabs/Sia.svg?branch=master)](https://travis-ci.org/NebulousLabs/Sia) [![GoDoc](https://godoc.org/github.com/NebulousLabs/Sia?status.svg)](https://godoc.org/github.com/NebulousLabs/Sia) [![Go Report Card](https://goreportcard.com/badge/github.com/NebulousLabs/Sia)](https://goreportcard.com/report/github.com/NebulousLabs/Sia) Sia is a new decentralized cloud storage platform that radically alters the landscape of cloud storage. By leveraging smart contracts, client-side encryption, and sophisticated redundancy (via Reed-Solomon codes), Sia allows users to safely store their data with hosts that they do not know or trust. The result is a cloud storage marketplace where hosts compete to offer the best service at the lowest price. And since there is no barrier to entry for hosts, anyone with spare storage capacity can join the network and start making money. ![UI](http://i.imgur.com/iHoGqoL.png) Traditional cloud storage has a number of shortcomings. Users are limited to a few big-name offerings: Google, Microsoft, Amazon. These companies have little incentive to encrypt your data or make it easy to switch services later. Their code is closed-source, and they can lock you out of your account at any time. We believe that users should own their data. Sia achieves this by replacing the traditional monolithic cloud storage provider with a blockchain and a swarm of hosts, each of which stores an encrypted fragment of your data. Since the fragments are redundant, no single host can hold your data hostage: if they jack up their price or go offline, you can simply download from a different host. In other words, trust is removed from the equation, and switching to a different host is painless. Stripped of these unfair advantages, hosts must compete solely on the quality and price of the storage they provide. Sia can serve as a replacement for personal backups, bulk archiving, content distribution, and more. For developers, Sia is a low-cost alternative to Amazon S3. Storage on Sia is a full order of magnitude cheaper than on S3, with comparable bandwidth, latency, and durability. Sia works best for static content, especially media like videos, music, and photos. Distributing data across many hosts automatically confers several advantages. The most obvious is that, just like BitTorrent, uploads and downloads are highly parallel. Given enough hosts, Sia can saturate your bandwidth. Another advantage is that your data is spread across a wide geographic area, reducing latency and safeguarding your data against a range of attacks. It is important to note that users have full control over which hosts they use. You can tailor your host set for minimum latency, lowest price, widest geographic coverage, or even a strict whitelist of IP addresses or public keys. At the core of Sia is a blockchain that closely resembles Bitcoin. Transactions are conducted in Siacoin, a cryptocurrency. 
The blockchain is what allows Sia to enforce its smart contracts without
relying on centralized authority. To acquire siacoins, use an exchange such as
[Poloniex](https://poloniex.com), [Yunbi](https://yunbi.com), or
[Bitsquare](https://bitsquare.io).

To get started with Sia, check out the guides below:

- [How to Store Data on Sia](https://blog.sia.tech/getting-started-with-private-decentralized-cloud-storage-c9565dc8c854)
- [How to Become a Sia Host](https://blog.sia.tech/how-to-run-a-host-on-sia-2159ebc4725)
- [Using the Sia API](https://blog.sia.tech/api-quickstart-guide-f1d160c05235)

Usage
-----

Sia is ready for use with small sums of money and non-critical files, but
until the network has a more proven track record, we advise against using it
as a sole means of storing important data.

This release comes with 2 binaries, siad and siac. siad is a background
service, or "daemon," that runs the Sia protocol and exposes an HTTP API on
port 9980. siac is a command-line client that can be used to interact with
siad in a user-friendly way. There is also a graphical client,
[Sia-UI](https://github.com/NebulousLabs/Sia-UI), which is the preferred way
of using Sia for most users. For interested developers, the siad API is
documented [here](doc/API.md).

siad and siac are run via command prompt. On Windows, you can just
double-click siad.exe if you don't need to specify any command-line
arguments. Otherwise, navigate to its containing folder and click File->Open
command prompt. Then, start the siad service by entering `siad` and pressing
Enter. The command prompt may appear to freeze; this means siad is waiting for
requests. Windows users may see a warning from the Windows Firewall; be sure
to check both boxes ("Private networks" and "Public networks") and click
"Allow access." You can now run `siac` (in a separate command prompt) or
Sia-UI to interact with siad. From here, you can send money, upload and
download files, and advertise yourself as a host.

Building From Source
--------------------

To build from source, [Go 1.8 must be installed](https://golang.org/doc/install)
on the system. Then simply use `go get`:

```
go get -u github.com/NebulousLabs/Sia/...
```

This will download the Sia repo to your `$GOPATH/src` folder, and install the
`siad` and `siac` binaries in your `$GOPATH/bin` folder.

To stay up-to-date, run the previous `go get` command again. Alternatively,
you can use the Makefile provided in this repo. Run `git pull origin master`
to pull the latest changes, and `make release-std` to build the new binaries.
You can also run `make test` and `make test-long` to run the short and full
test suites, respectively. Finally, `make cover` will generate code coverage
reports for each package; they are stored in the `cover` folder and can be
viewed in your browser.

Sia-1.3.0/api/000077500000000000000000000000001313565667000130145ustar00rootroot00000000000000Sia-1.3.0/api/api.go000066400000000000000000000343561313565667000141270ustar00rootroot00000000000000package api

import (
	"encoding/json"
	"net/http"
	"strings"
	"time"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/modules"

	"github.com/julienschmidt/httprouter"
)

// Error is a type that is encoded as JSON and returned in an API response in
// the event of an error. Only the Message field is required. More fields may
// be added to this struct in the future for better error reporting.
type Error struct {
	// Message describes the error in English. Typically it is set to
	// `err.Error()`. This field is required.
	Message string `json:"message"`

	// TODO: add a Param field (with the omitempty option in the json tag)
	// to indicate that the error was caused by an invalid, missing, or
	// incorrect parameter. This is not trivial as the API does not
	// currently do parameter validation itself. For example, the
	// /gateway/connect endpoint relies on the gateway.Connect method to
	// validate the netaddress. However, this prevents the API from knowing
	// whether an error returned by gateway.Connect is because of a
	// connection error or an invalid netaddress parameter. Validating
	// parameters in the API is not sufficient, as a parameter's value may
	// be valid or invalid depending on the current state of a module.
}

// Error implements the error interface for the Error type. It returns only the
// Message field.
func (err Error) Error() string {
	return err.Message
}

// HttpGET is a utility function for making http get requests to sia with a
// whitelisted user-agent. A non-2xx response does not return an error.
func HttpGET(url string) (resp *http.Response, err error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", "Sia-Agent")
	return http.DefaultClient.Do(req)
}

// HttpGETAuthenticated is a utility function for making authenticated http get
// requests to sia with a whitelisted user-agent and the supplied password. A
// non-2xx response does not return an error.
func HttpGETAuthenticated(url string, password string) (resp *http.Response, err error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", "Sia-Agent")
	req.SetBasicAuth("", password)
	return http.DefaultClient.Do(req)
}

// HttpPOST is a utility function for making post requests to sia with a
// whitelisted user-agent. A non-2xx response does not return an error.
func HttpPOST(url string, data string) (resp *http.Response, err error) {
	req, err := http.NewRequest("POST", url, strings.NewReader(data))
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", "Sia-Agent")
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	return http.DefaultClient.Do(req)
}

// HttpPOSTAuthenticated is a utility function for making authenticated http
// post requests to sia with a whitelisted user-agent and the supplied
// password. A non-2xx response does not return an error.
func HttpPOSTAuthenticated(url string, data string, password string) (resp *http.Response, err error) {
	req, err := http.NewRequest("POST", url, strings.NewReader(data))
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", "Sia-Agent")
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	req.SetBasicAuth("", password)
	return http.DefaultClient.Do(req)
}

// RequireUserAgent is middleware that requires all requests to set a
// UserAgent that contains the specified string.
func RequireUserAgent(h http.Handler, ua string) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		if !strings.Contains(req.UserAgent(), ua) {
			WriteError(w, Error{"Browser access disabled due to security vulnerability. Use Sia-UI or siac."}, http.StatusBadRequest)
			return
		}
		h.ServeHTTP(w, req)
	})
}

// RequirePassword is middleware that requires a request to authenticate with a
// password using HTTP basic auth. Usernames are ignored. Empty passwords
// indicate no authentication is required.
func RequirePassword(h httprouter.Handle, password string) httprouter.Handle {
	// An empty password is equivalent to no password.
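	//
	// Usage sketch (this mirrors how New wires up protected routes later in
	// this file; the handler and password variables are illustrative):
	//
	//	router.POST("/wallet/unlock", RequirePassword(api.walletUnlockHandler, requiredPassword))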
	if password == "" {
		return h
	}
	return func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
		_, pass, ok := req.BasicAuth()
		if !ok || pass != password {
			w.Header().Set("WWW-Authenticate", "Basic realm=\"SiaAPI\"")
			WriteError(w, Error{"API authentication failed."}, http.StatusUnauthorized)
			return
		}
		h(w, req, ps)
	}
}

// cleanCloseHandler wraps the entire API, ensuring that underlying conns are
// not leaked if the remote end closes the connection before the underlying
// handler finishes.
func cleanCloseHandler(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Close this file handle either when the function completes or when the
		// connection is done.
		done := make(chan struct{})
		go func(w http.ResponseWriter, r *http.Request) {
			defer close(done)
			next.ServeHTTP(w, r)
		}(w, r)
		select {
		case <-done:
		case <-r.Context().Done():
		}

		// Sanity check - thread should not take more than an hour to return. This
		// must be done in a goroutine, otherwise the server will not close the
		// underlying socket for this API call.
		go func() {
			select {
			case <-done:
			case <-time.After(time.Minute * 60):
				build.Severe("api call is taking more than 60 minutes to return:", r.URL.Path)
			}
		}()
	})
}

// API encapsulates a collection of modules and implements a http.Handler
// to access their methods.
type API struct {
	cs       modules.ConsensusSet
	explorer modules.Explorer
	gateway  modules.Gateway
	host     modules.Host
	miner    modules.Miner
	renter   modules.Renter
	tpool    modules.TransactionPool
	wallet   modules.Wallet

	router http.Handler
}

// api.ServeHTTP implements the http.Handler interface.
func (api *API) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	api.router.ServeHTTP(w, r)
}

// New creates a new Sia API from the provided modules. The API will require
// authentication using HTTP basic auth for certain endpoints if the supplied
// password is not the empty string. Usernames are ignored for authentication.
func New(requiredUserAgent string, requiredPassword string, cs modules.ConsensusSet, e modules.Explorer, g modules.Gateway, h modules.Host, m modules.Miner, r modules.Renter, tp modules.TransactionPool, w modules.Wallet) *API {
	api := &API{
		cs:       cs,
		explorer: e,
		gateway:  g,
		host:     h,
		miner:    m,
		renter:   r,
		tpool:    tp,
		wallet:   w,
	}

	// Register API handlers
	router := httprouter.New()
	router.NotFound = http.HandlerFunc(UnrecognizedCallHandler)
	router.RedirectTrailingSlash = false

	// Consensus API Calls
	if api.cs != nil {
		router.GET("/consensus", api.consensusHandler)
		router.POST("/consensus/validate/transactionset", api.consensusValidateTransactionsetHandler)
	}

	// Explorer API Calls
	if api.explorer != nil {
		router.GET("/explorer", api.explorerHandler)
		router.GET("/explorer/blocks/:height", api.explorerBlocksHandler)
		router.GET("/explorer/hashes/:hash", api.explorerHashHandler)
	}

	// Gateway API Calls
	if api.gateway != nil {
		router.GET("/gateway", api.gatewayHandler)
		router.POST("/gateway/connect/:netaddress", RequirePassword(api.gatewayConnectHandler, requiredPassword))
		router.POST("/gateway/disconnect/:netaddress", RequirePassword(api.gatewayDisconnectHandler, requiredPassword))
	}

	// Host API Calls
	if api.host != nil {
		// Calls directly pertaining to the host.
		router.GET("/host", api.hostHandlerGET)                                                   // Get the host status.
		router.POST("/host", RequirePassword(api.hostHandlerPOST, requiredPassword))              // Change the settings of the host.
		router.POST("/host/announce", RequirePassword(api.hostAnnounceHandler, requiredPassword)) // Announce the host to the network.
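		// As a sketch of what an authenticated call looks like from the
		// client side: the password travels over HTTP basic auth with an
		// empty username (the port and placeholder password here are
		// illustrative defaults, not fixed values):
		//
		//	curl -A "Sia-Agent" -u ":<apipassword>" --data "" localhost:9980/host/announce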
router.GET("/host/estimatescore", api.hostEstimateScoreGET) // Calls pertaining to the storage manager that the host uses. router.GET("/host/storage", api.storageHandler) router.POST("/host/storage/folders/add", RequirePassword(api.storageFoldersAddHandler, requiredPassword)) router.POST("/host/storage/folders/remove", RequirePassword(api.storageFoldersRemoveHandler, requiredPassword)) router.POST("/host/storage/folders/resize", RequirePassword(api.storageFoldersResizeHandler, requiredPassword)) router.POST("/host/storage/sectors/delete/:merkleroot", RequirePassword(api.storageSectorsDeleteHandler, requiredPassword)) } // Miner API Calls if api.miner != nil { router.GET("/miner", api.minerHandler) router.GET("/miner/header", RequirePassword(api.minerHeaderHandlerGET, requiredPassword)) router.POST("/miner/header", RequirePassword(api.minerHeaderHandlerPOST, requiredPassword)) router.GET("/miner/start", RequirePassword(api.minerStartHandler, requiredPassword)) router.GET("/miner/stop", RequirePassword(api.minerStopHandler, requiredPassword)) } // Renter API Calls if api.renter != nil { router.GET("/renter", api.renterHandlerGET) router.POST("/renter", RequirePassword(api.renterHandlerPOST, requiredPassword)) router.GET("/renter/contracts", api.renterContractsHandler) router.GET("/renter/downloads", api.renterDownloadsHandler) router.GET("/renter/files", api.renterFilesHandler) router.GET("/renter/prices", api.renterPricesHandler) // TODO: re-enable these routes once the new .sia format has been // standardized and implemented. // router.POST("/renter/load", RequirePassword(api.renterLoadHandler, requiredPassword)) // router.POST("/renter/loadascii", RequirePassword(api.renterLoadAsciiHandler, requiredPassword)) // router.GET("/renter/share", RequirePassword(api.renterShareHandler, requiredPassword)) // router.GET("/renter/shareascii", RequirePassword(api.renterShareAsciiHandler, requiredPassword)) router.POST("/renter/delete/*siapath", RequirePassword(api.renterDeleteHandler, requiredPassword)) router.GET("/renter/download/*siapath", RequirePassword(api.renterDownloadHandler, requiredPassword)) router.GET("/renter/downloadasync/*siapath", RequirePassword(api.renterDownloadAsyncHandler, requiredPassword)) router.POST("/renter/rename/*siapath", RequirePassword(api.renterRenameHandler, requiredPassword)) router.POST("/renter/upload/*siapath", RequirePassword(api.renterUploadHandler, requiredPassword)) // HostDB endpoints. 
router.GET("/hostdb/active", api.hostdbActiveHandler) router.GET("/hostdb/all", api.hostdbAllHandler) router.GET("/hostdb/hosts/:pubkey", api.hostdbHostsHandler) } // Transaction pool API Calls if api.tpool != nil { router.GET("/tpool/fee", api.tpoolFeeHandlerGET) router.GET("/tpool/raw/:id", api.tpoolRawHandlerGET) router.POST("/tpool/raw", api.tpoolRawHandlerPOST) // TODO: re-enable this route once the transaction pool API has been finalized //router.GET("/transactionpool/transactions", api.transactionpoolTransactionsHandler) } // Wallet API Calls if api.wallet != nil { router.GET("/wallet", api.walletHandler) router.POST("/wallet/033x", RequirePassword(api.wallet033xHandler, requiredPassword)) router.GET("/wallet/address", RequirePassword(api.walletAddressHandler, requiredPassword)) router.GET("/wallet/addresses", api.walletAddressesHandler) router.GET("/wallet/backup", RequirePassword(api.walletBackupHandler, requiredPassword)) router.POST("/wallet/init", RequirePassword(api.walletInitHandler, requiredPassword)) router.POST("/wallet/init/seed", RequirePassword(api.walletInitSeedHandler, requiredPassword)) router.POST("/wallet/lock", RequirePassword(api.walletLockHandler, requiredPassword)) router.POST("/wallet/seed", RequirePassword(api.walletSeedHandler, requiredPassword)) router.GET("/wallet/seeds", RequirePassword(api.walletSeedsHandler, requiredPassword)) router.POST("/wallet/siacoins", RequirePassword(api.walletSiacoinsHandler, requiredPassword)) router.POST("/wallet/siafunds", RequirePassword(api.walletSiafundsHandler, requiredPassword)) router.POST("/wallet/siagkey", RequirePassword(api.walletSiagkeyHandler, requiredPassword)) router.POST("/wallet/sweep/seed", RequirePassword(api.walletSweepSeedHandler, requiredPassword)) router.GET("/wallet/transaction/:id", api.walletTransactionHandler) router.GET("/wallet/transactions", api.walletTransactionsHandler) router.GET("/wallet/transactions/:addr", api.walletTransactionsAddrHandler) router.GET("/wallet/verify/address/:addr", api.walletVerifyAddressHandler) router.POST("/wallet/unlock", RequirePassword(api.walletUnlockHandler, requiredPassword)) router.POST("/wallet/changepassword", RequirePassword(api.walletChangePasswordHandler, requiredPassword)) } // Apply UserAgent middleware and return the API api.router = cleanCloseHandler(RequireUserAgent(router, requiredUserAgent)) return api } // UnrecognizedCallHandler handles calls to unknown pages (404). func UnrecognizedCallHandler(w http.ResponseWriter, req *http.Request) { WriteError(w, Error{"404 - Refer to API.md"}, http.StatusNotFound) } // WriteError an error to the API caller. func WriteError(w http.ResponseWriter, err Error, code int) { w.Header().Set("Content-Type", "application/json; charset=utf-8") w.WriteHeader(code) encodingErr := json.NewEncoder(w).Encode(err) if _, isJsonErr := encodingErr.(*json.SyntaxError); isJsonErr { // Marshalling should only fail in the event of a developer error. // Specifically, only non-marshallable types should cause an error here. build.Critical("failed to encode API error response:", encodingErr) } } // WriteJSON writes the object to the ResponseWriter. If the encoding fails, an // error is written instead. The Content-Type of the response header is set // accordingly. 
func WriteJSON(w http.ResponseWriter, obj interface{}) { w.Header().Set("Content-Type", "application/json; charset=utf-8") err := json.NewEncoder(w).Encode(obj) if _, isJsonErr := err.(*json.SyntaxError); isJsonErr { // Marshalling should only fail in the event of a developer error. // Specifically, only non-marshallable types should cause an error here. build.Critical("failed to encode API response:", err) } } // WriteSuccess writes the HTTP header with status 204 No Content to the // ResponseWriter. WriteSuccess should only be used to indicate that the // requested action succeeded AND there is no data to return. func WriteSuccess(w http.ResponseWriter) { w.WriteHeader(http.StatusNoContent) } Sia-1.3.0/api/client.go000066400000000000000000000057071313565667000146320ustar00rootroot00000000000000package api import ( "encoding/json" "errors" "io" "io/ioutil" "net/http" "strings" ) // Client holds fields to make requests to a Sia API. type Client struct { address string password string } // NewClient creates a new api.Client using the provided address and password. // If password is not the empty string, HTTP basic authentication will be used // to communicate with the API. func NewClient(address string, password string) *Client { return &Client{ address: address, password: password, } } // Get requests the resource at `resource` and decodes it into `obj`, returning an // error if requesting or decoding the resource fails. A non-2xx status code // constitutes a request failure. func (c *Client) Get(resource string, obj interface{}) error { url := "http://" + c.address + resource req, err := http.NewRequest("GET", url, nil) if err != nil { return err } req.Header.Set("User-Agent", "Sia-Agent") if c.password != "" { req.SetBasicAuth("", c.password) } res, err := http.DefaultClient.Do(req) if err != nil { return err } defer func() { // res.Body should always be fully read even when discarding its content, // such that the underlying connection can be reused. io.Copy(ioutil.Discard, res.Body) res.Body.Close() }() if res.StatusCode == http.StatusNotFound { return errors.New("API call not recognized: " + resource) } // Decode the body as an Error and return this error if the status code is // not 2xx. if res.StatusCode < 200 || res.StatusCode > 299 { var apiErr Error err = json.NewDecoder(res.Body).Decode(&apiErr) if err != nil { return err } return apiErr } if res.StatusCode != http.StatusNoContent && obj != nil { return json.NewDecoder(res.Body).Decode(obj) } return nil } // Post makes a POST request to the resource at `resource`, using `data` as the // request body. The response, if provided, will be decoded into `obj`. func (c *Client) Post(resource string, data string, obj interface{}) error { url := "http://" + c.address + resource req, err := http.NewRequest("POST", url, strings.NewReader(data)) if err != nil { return err } req.Header.Set("User-Agent", "Sia-Agent") req.Header.Set("Content-Type", "application/x-www-form-urlencoded") if c.password != "" { req.SetBasicAuth("", c.password) } res, err := http.DefaultClient.Do(req) if err != nil { return err } defer func() { // res.Body should always be fully read even when discarding its content, // such that the underlying connection can be reused. 
io.Copy(ioutil.Discard, res.Body) res.Body.Close() }() if res.StatusCode == http.StatusNotFound { return errors.New("API call not recognized: " + resource) } if res.StatusCode < 200 || res.StatusCode > 299 { var apiErr Error err = json.NewDecoder(res.Body).Decode(&apiErr) if err != nil { return err } return apiErr } if res.StatusCode != http.StatusNoContent && obj != nil { return json.NewDecoder(res.Body).Decode(&obj) } return nil } Sia-1.3.0/api/client_test.go000066400000000000000000000025171313565667000156650ustar00rootroot00000000000000package api import ( "testing" ) // TestApiClient tests that the API client connects to the server tester and // can call and decode routes correctly. func TestApiClient(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() c := NewClient(st.server.listener.Addr().String(), "") var gatewayInfo GatewayGET err = c.Get("/gateway", &gatewayInfo) if err != nil { t.Fatal(err) } } // TestAuthenticatedApiClient tests that the API client connects to an // authenticated server tester and can call and decode routes correctly, using // the correct password. func TestAuthenticatedApiClient(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() testpass := "testPassword" st, err := createAuthenticatedServerTester(t.Name(), testpass) if err != nil { t.Fatal(err) } defer st.server.panicClose() c := NewClient(st.server.listener.Addr().String(), "") var walletAddress WalletAddressGET err = c.Get("/wallet/address", &walletAddress) if err == nil { t.Fatal("api.Client did not return an error when requesting an authenticated resource without a password") } c = NewClient(st.server.listener.Addr().String(), testpass) err = c.Get("/wallet/address", &walletAddress) if err != nil { t.Fatal(err) } } Sia-1.3.0/api/consensus.go000066400000000000000000000030701313565667000153630ustar00rootroot00000000000000package api import ( "encoding/json" "net/http" "github.com/NebulousLabs/Sia/types" "github.com/julienschmidt/httprouter" ) // ConsensusGET contains general information about the consensus set, with tags // to support idiomatic json encodings. type ConsensusGET struct { Synced bool `json:"synced"` Height types.BlockHeight `json:"height"` CurrentBlock types.BlockID `json:"currentblock"` Target types.Target `json:"target"` Difficulty types.Currency `json:"difficulty"` } // consensusHandler handles the API calls to /consensus. func (api *API) consensusHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { cbid := api.cs.CurrentBlock().ID() currentTarget, _ := api.cs.ChildTarget(cbid) WriteJSON(w, ConsensusGET{ Synced: api.cs.Synced(), Height: api.cs.Height(), CurrentBlock: cbid, Target: currentTarget, Difficulty: currentTarget.Difficulty(), }) } // consensusValidateTransactionsetHandler handles the API calls to // /consensus/validate/transactionset. 
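// The request body is a JSON-encoded array of types.Transaction. A sketch of
// a call from the shell (whether the set validates depends entirely on its
// contents; the file name here is illustrative):
//
//	curl -A "Sia-Agent" --data "$(cat txnset.json)" localhost:9980/consensus/validate/transactionset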
func (api *API) consensusValidateTransactionsetHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) {
	var txnset []types.Transaction
	err := json.NewDecoder(req.Body).Decode(&txnset)
	if err != nil {
		WriteError(w, Error{"could not decode transaction set: " + err.Error()}, http.StatusBadRequest)
		return
	}
	_, err = api.cs.TryTransactionSet(txnset)
	if err != nil {
		WriteError(w, Error{"transaction set validation failed: " + err.Error()}, http.StatusBadRequest)
		return
	}
	WriteSuccess(w)
}
Sia-1.3.0/api/consensus_test.go000066400000000000000000000063151313565667000164270ustar00rootroot00000000000000package api

import (
	"encoding/json"
	"errors"
	"net/url"
	"testing"
	"time"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/types"
)

// TestIntegrationConsensusGET probes the GET call to /consensus.
func TestIntegrationConsensusGET(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()

	var cg ConsensusGET
	err = st.getAPI("/consensus", &cg)
	if err != nil {
		t.Fatal(err)
	}
	if cg.Height != 4 {
		t.Error("wrong height returned in consensus GET call")
	}
	if cg.CurrentBlock != st.server.api.cs.CurrentBlock().ID() {
		t.Error("wrong block returned in consensus GET call")
	}
	expectedTarget := types.Target{128}
	if cg.Target != expectedTarget {
		t.Error("wrong target returned in consensus GET call")
	}
}

// TestConsensusValidateTransactionSet probes the POST call to
// /consensus/validate/transactionset.
func TestConsensusValidateTransactionSet(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()

	// Announce the host and start accepting contracts.
	if err := st.announceHost(); err != nil {
		t.Fatal(err)
	}
	if err = st.acceptContracts(); err != nil {
		t.Fatal(err)
	}
	if err = st.setHostStorage(); err != nil {
		t.Fatal(err)
	}

	// Set an allowance for the renter, allowing a contract to be formed.
	allowanceValues := url.Values{}
	allowanceValues.Set("funds", testFunds)
	allowanceValues.Set("period", testPeriod)
	if err = st.stdPostAPI("/renter", allowanceValues); err != nil {
		t.Fatal(err)
	}

	// Block until the allowance has finished forming contracts.
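	// (The retry below polls up to 50 times at 250ms intervals, so the
	// contract has roughly 12.5 seconds to appear before the test fails.)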
	err = build.Retry(50, time.Millisecond*250, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		if len(rc.Contracts) != 1 {
			return errors.New("no contracts")
		}
		return nil
	})
	if err != nil {
		t.Fatal("allowance setting failed")
	}
	_, err = st.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}

	// Get the contract
	var cs RenterContracts
	if err = st.getAPI("/renter/contracts", &cs); err != nil {
		t.Fatal(err)
	}
	if len(cs.Contracts) != 1 {
		t.Fatalf("expected renter to have 1 contract; got %v", len(cs.Contracts))
	}
	contract := cs.Contracts[0]

	// Validate the contract
	jsonTxns, err := json.Marshal([]types.Transaction{contract.LastTransaction})
	if err != nil {
		t.Fatal(err)
	}
	resp, err := HttpPOST("http://"+st.server.listener.Addr().String()+"/consensus/validate/transactionset", string(jsonTxns))
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	if non2xx(resp.StatusCode) {
		t.Fatal(decodeError(resp))
	}

	// Try again with an invalid contract
	contract.LastTransaction.FileContractRevisions[0].NewFileSize++
	jsonTxns, err = json.Marshal([]types.Transaction{contract.LastTransaction})
	if err != nil {
		t.Fatal(err)
	}
	resp, err = HttpPOST("http://"+st.server.listener.Addr().String()+"/consensus/validate/transactionset", string(jsonTxns))
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	if !non2xx(resp.StatusCode) {
		t.Fatal("expected validation error")
	}
}
Sia-1.3.0/api/ecosystem_helpers_test.go000066400000000000000000000207471313565667000201470ustar00rootroot00000000000000package api

// ecosystem_helpers_test.go has a bunch of helper functions to make setting up
// large ecosystem tests easier.
//
// List of helper functions:
//	addStorageToAllHosts   // adds a storage folder to every host
//	announceAllHosts       // announce all hosts to the network (and mine a block)
//	fullyConnectNodes      // connects each server tester to all the others
//	fundAllNodes           // mines blocks until all server testers have money
//	synchronizationCheck   // checks that all server testers have the same recent block
//	waitForBlock           // block until the provided block is the most recent block for all server testers

import (
	"errors"
	"fmt"
	"net/url"
	"time"

	"github.com/NebulousLabs/Sia/types"
)

// addStorageToAllHosts adds a storage folder with a bunch of storage to each
// host.
func addStorageToAllHosts(sts []*serverTester) error {
	for _, st := range sts {
		values := url.Values{}
		values.Set("path", st.dir)
		values.Set("size", "1048576")
		err := st.stdPostAPI("/host/storage/folders/add", values)
		if err != nil {
			return err
		}
	}
	return nil
}

// announceAllHosts will announce every host in the tester set to the
// blockchain.
func announceAllHosts(sts []*serverTester) error {
	// Check that all announcements will be on the same chain.
	_, err := synchronizationCheck(sts)
	if err != nil {
		return err
	}

	// Grab the initial transaction pool size to know how many total
	// transactions there should be after announcement.
	initialTpoolSize := len(sts[0].tpool.TransactionList())

	// Announce each host.
	for _, st := range sts {
		// Set the host to be accepting contracts.
		acceptingContractsValues := url.Values{}
		acceptingContractsValues.Set("acceptingcontracts", "true")
		err = st.stdPostAPI("/host", acceptingContractsValues)
		if err != nil {
			return err
		}

		// Fetch the host net address.
		var hg HostGET
		err = st.getAPI("/host", &hg)
		if err != nil {
			return err
		}

		// Make the announcement.
		announceValues := url.Values{}
		announceValues.Set("address", string(hg.ExternalSettings.NetAddress))
		err = st.stdPostAPI("/host/announce", announceValues)
		if err != nil {
			return err
		}
	}

	// Wait until all of the transactions have propagated to all of the nodes.
	//
	// TODO: Replace this direct transaction pool call with a call to the
	// /transactionpool endpoint.
	//
	// TODO: At some point the number of transactions needed to make an
	// announcement may change. Currently it's 2.
	for i := 0; i < 50; i++ {
		if len(sts[0].tpool.TransactionList()) == len(sts)*2+initialTpoolSize {
			break
		}
		time.Sleep(time.Millisecond * 100)
	}
	if len(sts[0].tpool.TransactionList()) < len(sts)*2+initialTpoolSize {
		return fmt.Errorf("Host announcements do not seem to have propagated to the leader's tpool: %v, %v, %v", len(sts), len(sts[0].tpool.TransactionList())+initialTpoolSize, initialTpoolSize)
	}

	// Mine a block and then wait for all of the nodes to synchronize to it.
	_, err = sts[0].miner.AddBlock()
	if err != nil {
		return err
	}
	_, err = synchronizationCheck(sts)
	if err != nil {
		return err
	}

	// Block until every node has completed the scan of every other node, so
	// that each node has a full hostdb.
	for _, st := range sts {
		var ah HostdbActiveGET
		for i := 0; i < 50; i++ {
			err = st.getAPI("/hostdb/active", &ah)
			if err != nil {
				return err
			}
			if len(ah.Hosts) >= len(sts) {
				break
			}
			time.Sleep(time.Millisecond * 100)
		}
		if len(ah.Hosts) < len(sts) {
			return errors.New("one of the nodes hostdbs was unable to find at least one host announcement")
		}
	}
	return nil
}

// fullyConnectNodes takes a bunch of tester nodes and connects each to the
// other, creating a fully connected graph so that everyone is on the same
// chain.
//
// After connecting the nodes, it verifies that all the nodes have
// synchronized.
func fullyConnectNodes(sts []*serverTester) error {
	for i, sta := range sts {
		var gg GatewayGET
		err := sta.getAPI("/gateway", &gg)
		if err != nil {
			return err
		}

		// Connect this node to every other node.
		for _, stb := range sts[i+1:] {
			// Try connecting to the other node until both have the other in
			// their peer list.
			err = retry(100, time.Millisecond*100, func() error {
				// NOTE: this check depends on string-matching an error in the
				// gateway. If that error changes at all, this string will need to
				// be updated.
				err := stb.stdPostAPI("/gateway/connect/"+string(gg.NetAddress), nil)
				if err != nil && err.Error() != "already connected to this peer" {
					return err
				}

				// Check that the gateways are connected.
				bToA := false
				aToB := false
				var ggb GatewayGET
				err = stb.getAPI("/gateway", &ggb)
				if err != nil {
					return err
				}
				for _, peer := range ggb.Peers {
					if peer.NetAddress == gg.NetAddress {
						bToA = true
						break
					}
				}
				err = sta.getAPI("/gateway", &gg)
				if err != nil {
					return err
				}
				for _, peer := range gg.Peers {
					if peer.NetAddress == ggb.NetAddress {
						aToB = true
						break
					}
				}
				if !aToB || !bToA {
					return fmt.Errorf("called connect between two nodes, but they are not peers: %v %v %v %v %v %v", aToB, bToA, gg.NetAddress, ggb.NetAddress, gg.Peers, ggb.Peers)
				}
				return nil
			})
			if err != nil {
				return err
			}
		}
	}

	// Perform a synchronization check.
	_, err := synchronizationCheck(sts)
	return err
}

// fundAllNodes will make sure that each node has mined a block in the longest
// chain, then will mine enough blocks that the miner payouts manifest in the
// wallets of each node.
func fundAllNodes(sts []*serverTester) error {
	// Check that all of the nodes are synchronized.
	chainTip, err := synchronizationCheck(sts)
	if err != nil {
		return err
	}

	// Mine a block for each node to fund their wallet.
	for i := range sts {
		err := waitForBlock(chainTip, sts[i])
		if err != nil {
			return err
		}

		// Mine a block. The next iteration of this loop will ensure that the
		// block propagates and does not get orphaned.
		block, err := sts[i].miner.AddBlock()
		if err != nil {
			return err
		}
		chainTip = block.ID()
	}

	// Wait until the chain tip has propagated to the first node.
	err = waitForBlock(chainTip, sts[0])
	if err != nil {
		return err
	}

	// Mine types.MaturityDelay more blocks to guarantee that the miner
	// payouts of every node have matured, such that their wallets can begin
	// spending immediately.
	for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ {
		_, err := sts[0].miner.AddBlock()
		if err != nil {
			return err
		}
	}

	// Block until every node has the full chain.
	_, err = synchronizationCheck(sts)
	return err
}

// synchronizationCheck takes a bunch of server testers as input and checks
// that they all have the same current block as the first server tester. The
// first server tester needs to have the most recent block in order for the
// check to work.
func synchronizationCheck(sts []*serverTester) (types.BlockID, error) {
	// Prefer returning an error in the event of a zero-length server tester -
	// an error should be returned if the developer accidentally uses a nil
	// slice instead of whatever value was intended, and there's no reason to
	// check for synchronization if there aren't any nodes to be synchronized.
	if len(sts) == 0 {
		return types.BlockID{}, errors.New("no server testers provided")
	}

	var cg ConsensusGET
	err := sts[0].getAPI("/consensus", &cg)
	if err != nil {
		return types.BlockID{}, err
	}
	leaderBlockID := cg.CurrentBlock
	for i := range sts {
		// Spin until the current block matches the leader block.
		success := false
		for j := 0; j < 100; j++ {
			err = sts[i].getAPI("/consensus", &cg)
			if err != nil {
				return types.BlockID{}, err
			}
			if cg.CurrentBlock == leaderBlockID {
				success = true
				break
			}
			time.Sleep(time.Millisecond * 100)
		}
		if !success {
			return types.BlockID{}, errors.New("synchronization check failed - nodes do not seem to be synchronized")
		}
	}
	return leaderBlockID, nil
}

// waitForBlock will block until the provided chain tip is the most recent
// block in the provided testing node.
func waitForBlock(chainTip types.BlockID, st *serverTester) error {
	var cg ConsensusGET
	success := false
	for j := 0; j < 100; j++ {
		err := st.getAPI("/consensus", &cg)
		if err != nil {
			return err
		}
		if cg.CurrentBlock == chainTip {
			success = true
			break
		}
		time.Sleep(time.Millisecond * 100)
	}
	if !success {
		return errors.New("node never reached the correct chain tip")
	}
	return nil
}
Sia-1.3.0/api/ecosystem_test.go000066400000000000000000000131521313565667000164170ustar00rootroot00000000000000package api

// ecosystem_test.go provides tests for whole-ecosystem testing, consisting of
// multiple full, non-state-sharing nodes connected in various arrangements and
// performing various full-ecosystem tasks.
//
// To the absolute greatest extent possible, nodes are queried and updated
// exclusively through the API.
import (
	"testing"
	"time"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/types"
)

// TestHostPoorConnectivity creates several full server testers and links them
// together in a way that might mimic a full host ecosystem with a renter, and
// then isolates one of the hosts from the network, denying the host proper
// transaction propagation. The renters perform chained contract formation and
// uploading in the same manner that might happen in the wild, and then the
// host must get a file contract to the blockchain despite not getting any of
// the dependencies into the transaction pool from the flood network.
func TestHostPoorConnectivity(t *testing.T) {
	if testing.Short() || !build.VLONG {
		t.SkipNow()
	}
	t.Parallel()

	// Create the various nodes that will be forming the simulated ecosystem of
	// this test.
	stLeader, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer stLeader.panicClose()
	stHost1, err := blankServerTester(t.Name() + " - Host 1")
	if err != nil {
		t.Fatal(err)
	}
	defer stHost1.panicClose()
	stHost2, err := blankServerTester(t.Name() + " - Host 2")
	if err != nil {
		t.Fatal(err)
	}
	defer stHost2.panicClose()
	stHost3, err := blankServerTester(t.Name() + " - Host 3")
	if err != nil {
		t.Fatal(err)
	}
	defer stHost3.panicClose()
	stHost4, err := blankServerTester(t.Name() + " - Host 4")
	if err != nil {
		t.Fatal(err)
	}
	defer stHost4.panicClose()
	stRenter1, err := blankServerTester(t.Name() + " - Renter 1")
	if err != nil {
		t.Fatal(err)
	}
	defer stRenter1.panicClose()
	stRenter2, err := blankServerTester(t.Name() + " - Renter 2")
	if err != nil {
		t.Fatal(err)
	}
	defer stRenter2.panicClose()

	// Fetch all of the addresses of the nodes that got created.
	var ggSTL, ggSTH1, ggSTH2, ggSTH3, ggSTH4, ggSTR1, ggSTR2 GatewayGET
	err = stLeader.getAPI("/gateway", &ggSTL)
	if err != nil {
		t.Fatal(err)
	}
	err = stHost1.getAPI("/gateway", &ggSTH1)
	if err != nil {
		t.Fatal(err)
	}
	err = stHost2.getAPI("/gateway", &ggSTH2)
	if err != nil {
		t.Fatal(err)
	}
	err = stHost3.getAPI("/gateway", &ggSTH3)
	if err != nil {
		t.Fatal(err)
	}
	err = stHost4.getAPI("/gateway", &ggSTH4)
	if err != nil {
		t.Fatal(err)
	}
	err = stRenter1.getAPI("/gateway", &ggSTR1)
	if err != nil {
		t.Fatal(err)
	}
	err = stRenter2.getAPI("/gateway", &ggSTR2)
	if err != nil {
		t.Fatal(err)
	}

	// Connect all of the peers in a circle, so that everyone is connected but
	// there are a lot of hops.
	err = stLeader.stdPostAPI("/gateway/connect/"+string(ggSTH1.NetAddress), nil)
	if err != nil {
		t.Fatal(err)
	}
	err = stHost1.stdPostAPI("/gateway/connect/"+string(ggSTH2.NetAddress), nil)
	if err != nil {
		t.Fatal(err)
	}
	err = stHost2.stdPostAPI("/gateway/connect/"+string(ggSTH3.NetAddress), nil)
	if err != nil {
		t.Fatal(err)
	}
	err = stHost3.stdPostAPI("/gateway/connect/"+string(ggSTH4.NetAddress), nil)
	if err != nil {
		t.Fatal(err)
	}
	err = stHost4.stdPostAPI("/gateway/connect/"+string(ggSTR1.NetAddress), nil)
	if err != nil {
		t.Fatal(err)
	}
	err = stRenter1.stdPostAPI("/gateway/connect/"+string(ggSTR2.NetAddress), nil)
	if err != nil {
		t.Fatal(err)
	}
	err = stRenter2.stdPostAPI("/gateway/connect/"+string(ggSTL.NetAddress), nil)
	if err != nil {
		t.Fatal(err)
	}

	// Connectivity check - all nodes should be synchronized to the leader's
	// chain, which should have been the longest.
	allTesters := []*serverTester{stLeader, stHost1, stHost2, stHost3, stHost4, stRenter1, stRenter2}
	chainTip, err := synchronizationCheck(allTesters)
	if err != nil {
		t.Fatal(err)
	}

	// Mine a block from each node, to give the node money in the wallet that
	// is recognized by the shared chain.
	for i := range allTesters {
		// Wait until the current tester has 'chainTip' as its current
		// block, to make sure the network is building a community chain
		// instead of creating orphans.
		var cg ConsensusGET
		success := false
		for j := 0; j < 100; j++ {
			err = allTesters[i].getAPI("/consensus", &cg)
			if err != nil {
				t.Fatal(err)
			}
			if cg.CurrentBlock == chainTip {
				success = true
				break
			}
			time.Sleep(time.Millisecond * 100)
		}
		if !success {
			t.Fatal("nodes do not seem to be synchronizing")
		}
		err := allTesters[i].cs.Flush()
		if err != nil {
			t.Fatal(err)
		}

		// Mine a block for this node. The next iteration will wait for
		// synchronization before mining the block for the next node.
		block, err := allTesters[i].miner.AddBlock()
		if err != nil {
			t.Fatal(err, i)
		}
		chainTip = block.ID()
	}

	// Wait until the leader has the most recent block.
	var cg ConsensusGET
	success := false
	for i := 0; i < 100; i++ {
		err = allTesters[0].getAPI("/consensus", &cg)
		if err != nil {
			t.Fatal(err)
		}
		if cg.CurrentBlock == chainTip {
			success = true
			break
		}
		time.Sleep(time.Millisecond * 100)
	}
	if !success {
		t.Fatal("nodes do not seem to be synchronizing")
	}
	// Make sure that everyone has the most recent block.
	_, err = synchronizationCheck(allTesters)
	if err != nil {
		t.Fatal(err)
	}

	// Mine blocks from the leader until everyone's miner payouts have matured
	// and become spendable.
	for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ {
		_, err := stLeader.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
	}
	_, err = synchronizationCheck(allTesters)
	if err != nil {
		t.Fatal(err)
	}
}
Sia-1.3.0/api/explorer.go000066400000000000000000000326571313565667000152140ustar00rootroot00000000000000package api

import (
	"fmt"
	"net/http"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/crypto"
	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/types"

	"github.com/julienschmidt/httprouter"
)

type (
	// ExplorerBlock is a block with some extra information such as the id and
	// height. This information is provided for programs that may not be
	// complex enough to compute the ID on their own.
	ExplorerBlock struct {
		MinerPayoutIDs []types.SiacoinOutputID `json:"minerpayoutids"`
		Transactions   []ExplorerTransaction   `json:"transactions"`
		RawBlock       types.Block             `json:"rawblock"`

		modules.BlockFacts
	}

	// ExplorerTransaction is a transaction with some extra information such as
	// the parent block. This information is provided for programs that may not
	// be complex enough to compute the extra information on their own.
	ExplorerTransaction struct {
		ID             types.TransactionID `json:"id"`
		Height         types.BlockHeight   `json:"height"`
		Parent         types.BlockID       `json:"parent"`
		RawTransaction types.Transaction   `json:"rawtransaction"`

		SiacoinInputOutputs                      []types.SiacoinOutput     `json:"siacoininputoutputs"` // the outputs being spent
		SiacoinOutputIDs                         []types.SiacoinOutputID   `json:"siacoinoutputids"`
		FileContractIDs                          []types.FileContractID    `json:"filecontractids"`
		FileContractValidProofOutputIDs          [][]types.SiacoinOutputID `json:"filecontractvalidproofoutputids"`          // outer array is per-contract
		FileContractMissedProofOutputIDs         [][]types.SiacoinOutputID `json:"filecontractmissedproofoutputids"`         // outer array is per-contract
		FileContractRevisionValidProofOutputIDs  [][]types.SiacoinOutputID `json:"filecontractrevisionvalidproofoutputids"`  // outer array is per-revision
		FileContractRevisionMissedProofOutputIDs [][]types.SiacoinOutputID `json:"filecontractrevisionmissedproofoutputids"` // outer array is per-revision
		StorageProofOutputIDs                    [][]types.SiacoinOutputID `json:"storageproofoutputids"` // outer array is per-payout
		StorageProofOutputs                      [][]types.SiacoinOutput   `json:"storageproofoutputs"`   // outer array is per-payout
		SiafundInputOutputs                      []types.SiafundOutput     `json:"siafundinputoutputs"` // the outputs being spent
		SiafundOutputIDs                         []types.SiafundOutputID   `json:"siafundoutputids"`
		SiafundClaimOutputIDs                    []types.SiacoinOutputID   `json:"siafundclaimoutputids"`
	}

	// ExplorerGET is the object returned as a response to a GET request to
	// /explorer.
	ExplorerGET struct {
		modules.BlockFacts
	}

	// ExplorerBlockGET is the object returned by a GET request to
	// /explorer/block.
	ExplorerBlockGET struct {
		Block ExplorerBlock `json:"block"`
	}

	// ExplorerHashGET is the object returned as a response to a GET request to
	// /explorer/hash. The HashType will indicate whether the hash corresponds
	// to a block id, a transaction id, a siacoin output id, a file contract
	// id, or a siafund output id. In the case of a block id, 'Block' will be
	// filled out and all the rest of the fields will be blank. In the case of
	// a transaction id, 'Transaction' will be filled out and all the rest of
	// the fields will be blank. For everything else, 'Transactions' and
	// 'Blocks' will/may be filled out and everything else will be blank.
	ExplorerHashGET struct {
		HashType     string                `json:"hashtype"`
		Block        ExplorerBlock         `json:"block"`
		Blocks       []ExplorerBlock       `json:"blocks"`
		Transaction  ExplorerTransaction   `json:"transaction"`
		Transactions []ExplorerTransaction `json:"transactions"`
	}
)

// buildExplorerTransaction takes a transaction and the height + id of the
// block it appears in and uses that to build an explorer transaction.
func (api *API) buildExplorerTransaction(height types.BlockHeight, parent types.BlockID, txn types.Transaction) (et ExplorerTransaction) {
	// Get the header information for the transaction.
	et.ID = txn.ID()
	et.Height = height
	et.Parent = parent
	et.RawTransaction = txn

	// Add the siacoin outputs that correspond with each siacoin input.
	for _, sci := range txn.SiacoinInputs {
		sco, exists := api.explorer.SiacoinOutput(sci.ParentID)
		if build.DEBUG && !exists {
			panic("could not find corresponding siacoin output")
		}
		et.SiacoinInputOutputs = append(et.SiacoinInputOutputs, sco)
	}
	for i := range txn.SiacoinOutputs {
		et.SiacoinOutputIDs = append(et.SiacoinOutputIDs, txn.SiacoinOutputID(uint64(i)))
	}

	// Add all of the valid and missed proof ids as extra data to the file
	// contracts.
for i, fc := range txn.FileContracts { fcid := txn.FileContractID(uint64(i)) var fcvpoids []types.SiacoinOutputID var fcmpoids []types.SiacoinOutputID for j := range fc.ValidProofOutputs { fcvpoids = append(fcvpoids, fcid.StorageProofOutputID(types.ProofValid, uint64(j))) } for j := range fc.MissedProofOutputs { fcmpoids = append(fcmpoids, fcid.StorageProofOutputID(types.ProofMissed, uint64(j))) } et.FileContractIDs = append(et.FileContractIDs, fcid) et.FileContractValidProofOutputIDs = append(et.FileContractValidProofOutputIDs, fcvpoids) et.FileContractMissedProofOutputIDs = append(et.FileContractMissedProofOutputIDs, fcmpoids) } // Add all of the valid and missed proof ids as extra data to the file // contract revisions. for _, fcr := range txn.FileContractRevisions { var fcrvpoids []types.SiacoinOutputID var fcrmpoids []types.SiacoinOutputID for j := range fcr.NewValidProofOutputs { fcrvpoids = append(fcrvpoids, fcr.ParentID.StorageProofOutputID(types.ProofValid, uint64(j))) } for j := range fcr.NewMissedProofOutputs { fcrmpoids = append(fcrmpoids, fcr.ParentID.StorageProofOutputID(types.ProofMissed, uint64(j))) } et.FileContractRevisionValidProofOutputIDs = append(et.FileContractRevisionValidProofOutputIDs, fcrvpoids) et.FileContractRevisionMissedProofOutputIDs = append(et.FileContractRevisionMissedProofOutputIDs, fcrmpoids) } // Add all of the output ids and outputs corresponding with each storage // proof. for _, sp := range txn.StorageProofs { fileContract, fileContractRevisions, fileContractExists, _ := api.explorer.FileContractHistory(sp.ParentID) if !fileContractExists && build.DEBUG { panic("could not find a file contract connected with a storage proof") } var storageProofOutputs []types.SiacoinOutput if len(fileContractRevisions) > 0 { storageProofOutputs = fileContractRevisions[len(fileContractRevisions)-1].NewValidProofOutputs } else { storageProofOutputs = fileContract.ValidProofOutputs } var storageProofOutputIDs []types.SiacoinOutputID for i := range storageProofOutputs { storageProofOutputIDs = append(storageProofOutputIDs, sp.ParentID.StorageProofOutputID(types.ProofValid, uint64(i))) } et.StorageProofOutputIDs = append(et.StorageProofOutputIDs, storageProofOutputIDs) et.StorageProofOutputs = append(et.StorageProofOutputs, storageProofOutputs) } // Add the siafund outputs that correspond to each siafund input. for _, sci := range txn.SiafundInputs { sco, exists := api.explorer.SiafundOutput(sci.ParentID) if build.DEBUG && !exists { panic("could not find corresponding siafund output") } et.SiafundInputOutputs = append(et.SiafundInputOutputs, sco) } for i := range txn.SiafundOutputs { et.SiafundOutputIDs = append(et.SiafundOutputIDs, txn.SiafundOutputID(uint64(i))) } for _, sfi := range txn.SiafundInputs { et.SiafundClaimOutputIDs = append(et.SiafundClaimOutputIDs, sfi.ParentID.SiaClaimOutputID()) } return et } // buildExplorerBlock takes a block and its height and uses it to construct an // explorer block.
func (api *API) buildExplorerBlock(height types.BlockHeight, block types.Block) ExplorerBlock { var mpoids []types.SiacoinOutputID for i := range block.MinerPayouts { mpoids = append(mpoids, block.MinerPayoutID(uint64(i))) } var etxns []ExplorerTransaction for _, txn := range block.Transactions { etxns = append(etxns, api.buildExplorerTransaction(height, block.ID(), txn)) } facts, exists := api.explorer.BlockFacts(height) if build.DEBUG && !exists { panic("incorrect request to buildExplorerBlock - block does not exist") } return ExplorerBlock{ MinerPayoutIDs: mpoids, Transactions: etxns, RawBlock: block, BlockFacts: facts, } } // explorerBlocksHandler handles API calls to /explorer/blocks/:height. func (api *API) explorerBlocksHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { // Parse the height that's being requested. var height types.BlockHeight _, err := fmt.Sscan(ps.ByName("height"), &height) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } // Fetch and return the explorer block. block, exists := api.cs.BlockAtHeight(height) if !exists { WriteError(w, Error{"no block found at input height in call to /explorer/blocks"}, http.StatusBadRequest) return } WriteJSON(w, ExplorerBlockGET{ Block: api.buildExplorerBlock(height, block), }) } // buildTransactionSet returns the blocks and transactions that are associated // with a set of transaction ids. func (api *API) buildTransactionSet(txids []types.TransactionID) (txns []ExplorerTransaction, blocks []ExplorerBlock) { for _, txid := range txids { // Get the block containing the transaction - in the case of miner // payouts, the block might be the transaction. block, height, exists := api.explorer.Transaction(txid) if !exists && build.DEBUG { panic("explorer pointing to nonexistent txn") } // Check if the block is the transaction. if types.TransactionID(block.ID()) == txid { blocks = append(blocks, api.buildExplorerBlock(height, block)) } else { // Find the transaction within the block with the correct id. for _, t := range block.Transactions { if t.ID() == txid { txns = append(txns, api.buildExplorerTransaction(height, block.ID(), t)) break } } } } return txns, blocks } // explorerHashHandler handles GET requests to /explorer/hashes/:hash. func (api *API) explorerHashHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { // Scan the hash as a hash. If that fails, try scanning the hash as an // address. hash, err := scanHash(ps.ByName("hash")) if err != nil { addr, err := scanAddress(ps.ByName("hash")) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } hash = crypto.Hash(addr) } // TODO: lookups on the zero hash are too expensive to allow. Need a // better way to handle this case. if hash == (crypto.Hash{}) { WriteError(w, Error{"can't lookup the empty unlock hash"}, http.StatusBadRequest) return } // Try the hash as a block id. block, height, exists := api.explorer.Block(types.BlockID(hash)) if exists { WriteJSON(w, ExplorerHashGET{ HashType: "blockid", Block: api.buildExplorerBlock(height, block), }) return } // Try the hash as a transaction id. block, height, exists = api.explorer.Transaction(types.TransactionID(hash)) if exists { var txn types.Transaction for _, t := range block.Transactions { if t.ID() == types.TransactionID(hash) { txn = t } } WriteJSON(w, ExplorerHashGET{ HashType: "transactionid", Transaction: api.buildExplorerTransaction(height, block.ID(), txn), }) return } // Try the hash as a siacoin output id.
txids := api.explorer.SiacoinOutputID(types.SiacoinOutputID(hash)) if len(txids) != 0 { txns, blocks := api.buildTransactionSet(txids) WriteJSON(w, ExplorerHashGET{ HashType: "siacoinoutputid", Blocks: blocks, Transactions: txns, }) return } // Try the hash as a file contract id. txids = api.explorer.FileContractID(types.FileContractID(hash)) if len(txids) != 0 { txns, blocks := api.buildTransactionSet(txids) WriteJSON(w, ExplorerHashGET{ HashType: "filecontractid", Blocks: blocks, Transactions: txns, }) return } // Try the hash as a siafund output id. txids = api.explorer.SiafundOutputID(types.SiafundOutputID(hash)) if len(txids) != 0 { txns, blocks := api.buildTransactionSet(txids) WriteJSON(w, ExplorerHashGET{ HashType: "siafundoutputid", Blocks: blocks, Transactions: txns, }) return } // Try the hash as an unlock hash. Unlock hash is checked last because // unlock hashes do not have collision-free guarantees. Someone can create // an unlock hash that collides with another object id. They will not be // able to use the unlock hash, but they can disrupt the explorer. This is // handled by checking the unlock hash last. Anyone intentionally creating // a colliding unlock hash (such a collision can only happen if done // intentionally) will be unable to find their unlock hash in the // blockchain through the explorer hash lookup. txids = api.explorer.UnlockHash(types.UnlockHash(hash)) if len(txids) != 0 { txns, blocks := api.buildTransactionSet(txids) WriteJSON(w, ExplorerHashGET{ HashType: "unlockhash", Blocks: blocks, Transactions: txns, }) return } // Hash not found, return an error. WriteError(w, Error{"unrecognized hash used as input to /explorer/hashes"}, http.StatusBadRequest) } // explorerHandler handles API calls to /explorer. func (api *API) explorerHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { facts := api.explorer.LatestBlockFacts() WriteJSON(w, ExplorerGET{ BlockFacts: facts, }) } Sia-1.3.0/api/explorer_test.go000066400000000000000000000036321313565667000162460ustar00rootroot00000000000000package api import ( "testing" "github.com/NebulousLabs/Sia/types" ) // TestIntegrationExplorerGET probes the GET call to /explorer. func TestIntegrationExplorerGET(t *testing.T) { t.Skip("Explorer has deadlock issues") if testing.Short() { t.SkipNow() } st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() var eg ExplorerGET err = st.getAPI("/explorer", &eg) if err != nil { t.Fatal(err) } if eg.Height != st.server.api.cs.Height() { t.Error("height not accurately reported by explorer") } if eg.MinerPayoutCount == 0 { t.Error("Miner payout count is incorrect") } } // TestIntegrationExplorerBlockGET probes the GET call to /explorer/blocks/:height. func TestIntegrationExplorerBlockGET(t *testing.T) { t.Skip("Explorer has deadlock issues") if testing.Short() { t.SkipNow() } st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() var ebg ExplorerBlockGET err = st.getAPI("/explorer/blocks/0", &ebg) if err != nil { t.Fatal(err) } if ebg.Block.BlockID != ebg.Block.RawBlock.ID() { t.Error("block id and block do not match up from api call") } if ebg.Block.BlockID != types.GenesisBlock.ID() { t.Error("wrong block returned by /explorer/blocks/0") } } // TestIntegrationExplorerHashGet probes the GET call to /explorer/hashes/:hash.
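// A successful lookup of a block id yields JSON shaped roughly like the
// following sketch (field values are illustrative; fields not relevant to
// the hash type are left at their zero values):
//
//	{
//		"hashtype": "blockid",
//		"block": { "rawblock": { ... }, "minerpayoutids": [ ... ], ... }
//	}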
func TestIntegrationExplorerHashGet(t *testing.T) { t.Skip("Explorer has deadlock issues") if testing.Short() { t.SkipNow() } st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() var ehg ExplorerHashGET gb := types.GenesisBlock err = st.getAPI("/explorer/hashes/"+gb.ID().String(), &ehg) if err != nil { t.Fatal(err) } if ehg.HashType != "blockid" { t.Error("wrong hash type returned when requesting block hash") } if ehg.Block.BlockID != gb.ID() { t.Error("wrong block type returned") } } Sia-1.3.0/api/gateway.go000066400000000000000000000030521313565667000150040ustar00rootroot00000000000000package api import ( "net/http" "github.com/NebulousLabs/Sia/modules" "github.com/julienschmidt/httprouter" ) // GatewayGET contains the fields returned by a GET call to "/gateway". type GatewayGET struct { NetAddress modules.NetAddress `json:"netaddress"` Peers []modules.Peer `json:"peers"` } // gatewayHandler handles the API call asking for the gateway status. func (api *API) gatewayHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { peers := api.gateway.Peers() // nil slices are marshalled as 'null' in JSON, whereas 0-length slices are // marshalled as '[]'. The latter is preferred, indicating that the value // exists but contains no elements. if peers == nil { peers = make([]modules.Peer, 0) } WriteJSON(w, GatewayGET{api.gateway.Address(), peers}) } // gatewayConnectHandler handles the API call to add a peer to the gateway. func (api *API) gatewayConnectHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { addr := modules.NetAddress(ps.ByName("netaddress")) err := api.gateway.Connect(addr) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } WriteSuccess(w) } // gatewayDisconnectHandler handles the API call to remove a peer from the gateway. func (api *API) gatewayDisconnectHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { addr := modules.NetAddress(ps.ByName("netaddress")) err := api.gateway.Disconnect(addr) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } WriteSuccess(w) } Sia-1.3.0/api/gateway_test.go000066400000000000000000000047541313565667000160520ustar00rootroot00000000000000package api import ( "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules/gateway" ) // TestGatewayStatus checks that the /gateway call is returning a correct // peerlist. func TestGatewayStatus(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() var info GatewayGET st.getAPI("/gateway", &info) if len(info.Peers) != 0 { t.Fatal("/gateway gave bad peer list:", info.Peers) } } // TestGatewayPeerConnect checks that /gateway/connect is adding a peer to the // gateway's peerlist.
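// Outside the test harness, the same call is a bare POST with the peer's
// address in the route; a hedged Go sketch (the localhost:9980 API address
// and the peer address are assumptions about a local setup):
//
//	req, _ := http.NewRequest("POST", "http://localhost:9980/gateway/connect/1.2.3.4:9981", nil)
//	req.Header.Set("User-Agent", "Sia-Agent")
//	resp, err := http.DefaultClient.Do(req)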
func TestGatewayPeerConnect(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() peer, err := gateway.New("localhost:0", false, build.TempDir("api", t.Name()+"2", "gateway")) if err != nil { t.Fatal(err) } defer func() { err := peer.Close() if err != nil { panic(err) } }() err = st.stdPostAPI("/gateway/connect/"+string(peer.Address()), nil) if err != nil { t.Fatal(err) } var info GatewayGET err = st.getAPI("/gateway", &info) if err != nil { t.Fatal(err) } if len(info.Peers) != 1 || info.Peers[0].NetAddress != peer.Address() { t.Fatal("/gateway/connect did not connect to peer", peer.Address()) } } // TestGatewayPeerDisconnect checks that /gateway/disconnect removes the // correct peer from the gateway's peerlist. func TestGatewayPeerDisconnect(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() peer, err := gateway.New("localhost:0", false, build.TempDir("api", t.Name()+"2", "gateway")) if err != nil { t.Fatal(err) } defer func() { err := peer.Close() if err != nil { panic(err) } }() err = st.stdPostAPI("/gateway/connect/"+string(peer.Address()), nil) if err != nil { t.Fatal(err) } var info GatewayGET st.getAPI("/gateway", &info) if len(info.Peers) != 1 || info.Peers[0].NetAddress != peer.Address() { t.Fatal("/gateway/connect did not connect to peer", peer.Address()) } err = st.stdPostAPI("/gateway/disconnect/"+string(peer.Address()), nil) if err != nil { t.Fatal(err) } err = st.getAPI("/gateway", &info) if err != nil { t.Fatal(err) } if len(info.Peers) != 0 { t.Fatal("/gateway/disconnect did not disconnect from peer", peer.Address()) } } Sia-1.3.0/api/host.go000066400000000000000000000271671313565667000143350ustar00rootroot00000000000000package api import ( "errors" "fmt" "net/http" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/julienschmidt/httprouter" ) var ( // errNoPath is returned when a call fails to provide a nonempty string // for the path parameter. errNoPath = Error{"path parameter is required"} // errStorageFolderNotFound is returned if a call is made looking for a // storage folder which does not appear to exist within the storage // manager. errStorageFolderNotFound = errors.New("storage folder with the provided path could not be found") ) type ( // HostGET contains the information that is returned after a GET request to // /host - a bunch of information about the status of the host. HostGET struct { ExternalSettings modules.HostExternalSettings `json:"externalsettings"` FinancialMetrics modules.HostFinancialMetrics `json:"financialmetrics"` InternalSettings modules.HostInternalSettings `json:"internalsettings"` NetworkMetrics modules.HostNetworkMetrics `json:"networkmetrics"` ConnectabilityStatus modules.HostConnectabilityStatus `json:"connectabilitystatus"` WorkingStatus modules.HostWorkingStatus `json:"workingstatus"` } // HostEstimateScoreGET contains the information that is returned from a // /host/estimatescore call. HostEstimateScoreGET struct { EstimatedScore types.Currency `json:"estimatedscore"` ConversionRate float64 `json:"conversionrate"` } // StorageGET contains the information that is returned after a GET request // to /host/storage - a bunch of information about the status of storage // management on the host. 
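// The response is a single JSON object with one field; sketched shape
// (field values are illustrative):
//
//	{"folders": [{"path": "/mnt/sia", "capacity": 1048576, ...}, ...]}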
StorageGET struct { Folders []modules.StorageFolderMetadata `json:"folders"` } ) // folderIndex determines the index of the storage folder with the provided // path. func folderIndex(folderPath string, storageFolders []modules.StorageFolderMetadata) (int, error) { for _, sf := range storageFolders { if sf.Path == folderPath { return int(sf.Index), nil } } return -1, errStorageFolderNotFound } // hostHandlerGET handles GET requests to the /host API endpoint, returning key // information about the host. func (api *API) hostHandlerGET(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { es := api.host.ExternalSettings() fm := api.host.FinancialMetrics() is := api.host.InternalSettings() nm := api.host.NetworkMetrics() cs := api.host.ConnectabilityStatus() ws := api.host.WorkingStatus() hg := HostGET{ ExternalSettings: es, FinancialMetrics: fm, InternalSettings: is, NetworkMetrics: nm, ConnectabilityStatus: cs, WorkingStatus: ws, } WriteJSON(w, hg) } // parseHostSettings parses a request's query strings and returns a // modules.HostInternalSettings configured with the request's query string // parameters. func (api *API) parseHostSettings(req *http.Request) (modules.HostInternalSettings, error) { settings := api.host.InternalSettings() if req.FormValue("acceptingcontracts") != "" { var x bool _, err := fmt.Sscan(req.FormValue("acceptingcontracts"), &x) if err != nil { return modules.HostInternalSettings{}, err } settings.AcceptingContracts = x } if req.FormValue("maxdownloadbatchsize") != "" { var x uint64 _, err := fmt.Sscan(req.FormValue("maxdownloadbatchsize"), &x) if err != nil { return modules.HostInternalSettings{}, err } settings.MaxDownloadBatchSize = x } if req.FormValue("maxduration") != "" { var x types.BlockHeight _, err := fmt.Sscan(req.FormValue("maxduration"), &x) if err != nil { return modules.HostInternalSettings{}, err } settings.MaxDuration = x } if req.FormValue("maxrevisebatchsize") != "" { var x uint64 _, err := fmt.Sscan(req.FormValue("maxrevisebatchsize"), &x) if err != nil { return modules.HostInternalSettings{}, err } settings.MaxReviseBatchSize = x } if req.FormValue("netaddress") != "" { var x modules.NetAddress _, err := fmt.Sscan(req.FormValue("netaddress"), &x) if err != nil { return modules.HostInternalSettings{}, err } settings.NetAddress = x } if req.FormValue("windowsize") != "" { var x types.BlockHeight _, err := fmt.Sscan(req.FormValue("windowsize"), &x) if err != nil { return modules.HostInternalSettings{}, err } settings.WindowSize = x } if req.FormValue("collateral") != "" { var x types.Currency _, err := fmt.Sscan(req.FormValue("collateral"), &x) if err != nil { return modules.HostInternalSettings{}, err } settings.Collateral = x } if req.FormValue("collateralbudget") != "" { var x types.Currency _, err := fmt.Sscan(req.FormValue("collateralbudget"), &x) if err != nil { return modules.HostInternalSettings{}, err } settings.CollateralBudget = x } if req.FormValue("maxcollateral") != "" { var x types.Currency _, err := fmt.Sscan(req.FormValue("maxcollateral"), &x) if err != nil { return modules.HostInternalSettings{}, err } settings.MaxCollateral = x } if req.FormValue("mincontractprice") != "" { var x types.Currency _, err := fmt.Sscan(req.FormValue("mincontractprice"), &x) if err != nil { return modules.HostInternalSettings{}, err } settings.MinContractPrice = x } if req.FormValue("mindownloadbandwidthprice") != "" { var x types.Currency _, err := fmt.Sscan(req.FormValue("mindownloadbandwidthprice"), &x) if err != nil { return
modules.HostInternalSettings{}, err } settings.MinDownloadBandwidthPrice = x } if req.FormValue("minstorageprice") != "" { var x types.Currency _, err := fmt.Sscan(req.FormValue("minstorageprice"), &x) if err != nil { return modules.HostInternalSettings{}, err } settings.MinStoragePrice = x } if req.FormValue("minuploadbandwidthprice") != "" { var x types.Currency _, err := fmt.Sscan(req.FormValue("minuploadbandwidthprice"), &x) if err != nil { return modules.HostInternalSettings{}, err } settings.MinUploadBandwidthPrice = x } return settings, nil } // hostEstimateScoreGET handles the GET request to /host/estimatescore and // computes an estimated HostDB score for the provided settings. func (api *API) hostEstimateScoreGET(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { // This call requires a renter, check that it is present. if api.renter == nil { WriteError(w, Error{"cannot call /host/estimatescore without the renter module"}, http.StatusBadRequest) return } settings, err := api.parseHostSettings(req) if err != nil { WriteError(w, Error{"error parsing host settings: " + err.Error()}, http.StatusBadRequest) return } var totalStorage, remainingStorage uint64 for _, sf := range api.host.StorageFolders() { totalStorage += sf.Capacity remainingStorage += sf.CapacityRemaining } mergedSettings := modules.HostExternalSettings{ AcceptingContracts: settings.AcceptingContracts, MaxDownloadBatchSize: settings.MaxDownloadBatchSize, MaxDuration: settings.MaxDuration, MaxReviseBatchSize: settings.MaxReviseBatchSize, RemainingStorage: remainingStorage, SectorSize: modules.SectorSize, TotalStorage: totalStorage, WindowSize: settings.WindowSize, Collateral: settings.Collateral, MaxCollateral: settings.MaxCollateral, ContractPrice: settings.MinContractPrice, DownloadBandwidthPrice: settings.MinDownloadBandwidthPrice, StoragePrice: settings.MinStoragePrice, UploadBandwidthPrice: settings.MinUploadBandwidthPrice, Version: build.Version, } entry := modules.HostDBEntry{} entry.PublicKey = api.host.PublicKey() entry.HostExternalSettings = mergedSettings estimatedScoreBreakdown := api.renter.EstimateHostScore(entry) e := HostEstimateScoreGET{ EstimatedScore: estimatedScoreBreakdown.Score, ConversionRate: estimatedScoreBreakdown.ConversionRate, } WriteJSON(w, e) } // hostHandlerPOST handles the POST request to the /host API endpoint, which sets // the internal settings of the host. func (api *API) hostHandlerPOST(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { settings, err := api.parseHostSettings(req) if err != nil { WriteError(w, Error{"error parsing host settings: " + err.Error()}, http.StatusBadRequest) return } err = api.host.SetInternalSettings(settings) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } WriteSuccess(w) } // hostAnnounceHandler handles the API call to get the host to announce itself // to the network. func (api *API) hostAnnounceHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { var err error if addr := req.FormValue("netaddress"); addr != "" { err = api.host.AnnounceAddress(modules.NetAddress(addr)) } else { err = api.host.Announce() } if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } WriteSuccess(w) } // storageHandler returns a bunch of information about storage management on // the host.
func (api *API) storageHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { WriteJSON(w, StorageGET{ Folders: api.host.StorageFolders(), }) } // storageFoldersAddHandler adds a storage folder to the storage manager. func (api *API) storageFoldersAddHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { folderPath := req.FormValue("path") var folderSize uint64 _, err := fmt.Sscan(req.FormValue("size"), &folderSize) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } err = api.host.AddStorageFolder(folderPath, folderSize) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } WriteSuccess(w) } // storageFoldersResizeHandler resizes a storage folder in the storage manager. func (api *API) storageFoldersResizeHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { folderPath := req.FormValue("path") if folderPath == "" { WriteError(w, Error{"path parameter is required"}, http.StatusBadRequest) return } storageFolders := api.host.StorageFolders() folderIndex, err := folderIndex(folderPath, storageFolders) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } var newSize uint64 _, err = fmt.Sscan(req.FormValue("newsize"), &newSize) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } err = api.host.ResizeStorageFolder(uint16(folderIndex), newSize, false) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } WriteSuccess(w) } // storageFoldersRemoveHandler removes a storage folder from the storage // manager. func (api *API) storageFoldersRemoveHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { folderPath := req.FormValue("path") if folderPath == "" { WriteError(w, Error{"path parameter is required"}, http.StatusBadRequest) return } storageFolders := api.host.StorageFolders() folderIndex, err := folderIndex(folderPath, storageFolders) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } force := req.FormValue("force") == "true" err = api.host.RemoveStorageFolder(uint16(folderIndex), force) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } WriteSuccess(w) } // storageSectorsDeleteHandler handles the call to delete a sector from the // storage manager. func (api *API) storageSectorsDeleteHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { sectorRoot, err := scanHash(ps.ByName("merkleroot")) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } err = api.host.DeleteSector(sectorRoot) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } WriteSuccess(w) } Sia-1.3.0/api/host_test.go000066400000000000000000000733611313565667000153710ustar00rootroot00000000000000package api import ( "errors" "fmt" "io" "net/url" "os" "path/filepath" "strconv" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/host/contractmanager" "github.com/NebulousLabs/Sia/types" ) var ( // Various folder sizes for testing host storage folder resizing. // Must be provided as strings to the API call. 
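// As a worked example (the concrete constants vary by build): if
// modules.SectorSize were 4 MiB and contractmanager.MinimumSectorsPerStorageFolder
// were 8, minFolderSizeString below would come out to "33554432"
// (8 * 4 MiB in bytes).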
minFolderSizeString = strconv.FormatUint(modules.SectorSize*contractmanager.MinimumSectorsPerStorageFolder, 10) maxFolderSizeString = strconv.FormatUint(modules.SectorSize*contractmanager.MaximumSectorsPerStorageFolder, 10) tooSmallFolderString = strconv.FormatUint(modules.SectorSize*(contractmanager.MinimumSectorsPerStorageFolder-1), 10) tooLargeFolderString = strconv.FormatUint(modules.SectorSize*(contractmanager.MaximumSectorsPerStorageFolder+1), 10) mediumSizeFolderString = strconv.FormatUint(modules.SectorSize*contractmanager.MinimumSectorsPerStorageFolder*3, 10) // Test cases for resizing a host's storage folder. // Running all the invalid cases before the valid ones simplifies some // logic in the tests that use resizeTests. resizeTests = []struct { sizeString string size uint64 err error }{ // invalid sizes {"", 0, io.EOF}, {"0", 0, contractmanager.ErrSmallStorageFolder}, {tooSmallFolderString, modules.SectorSize * (contractmanager.MinimumSectorsPerStorageFolder - 1), contractmanager.ErrSmallStorageFolder}, {tooLargeFolderString, modules.SectorSize * (contractmanager.MaximumSectorsPerStorageFolder + 1), contractmanager.ErrLargeStorageFolder}, // valid sizes // // TODO: Re-enable these when the host can support resizing into the // same folder. // // {minFolderSizeString, contractmanager.MinimumSectorsPerStorageFolder * modules.SectorSize, nil}, // {maxFolderSizeString, contractmanager.MaximumSectorsPerStorageFolder * modules.SectorSize, nil}, // {mediumSizeFolderString, 3 * contractmanager.MinimumSectorsPerStorageFolder * modules.SectorSize, nil}, } ) // TestEstimateWeight tests that /host/estimatescore works correctly. func TestEstimateWeight(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // announce a host, accept contracts, and add storage so a score can be // estimated. if err := st.announceHost(); err != nil { t.Fatal(err) } if err := st.acceptContracts(); err != nil { t.Fatal(err) } if err := st.setHostStorage(); err != nil { t.Fatal(err) } var eg HostEstimateScoreGET if err := st.getAPI("/host/estimatescore", &eg); err != nil { t.Fatal(err) } originalEstimate := eg.EstimatedScore // verify that the estimate is being correctly updated by setting a massively // increased min contract price and verifying that the score decreases.
is := st.host.InternalSettings() is.MinContractPrice = is.MinContractPrice.Add(types.SiacoinPrecision.Mul64(9999999999)) if err := st.host.SetInternalSettings(is); err != nil { t.Fatal(err) } if err := st.getAPI("/host/estimatescore", &eg); err != nil { t.Fatal(err) } if eg.EstimatedScore.Cmp(originalEstimate) != -1 { t.Fatal("score estimate did not decrease after incrementing mincontractprice") } // add a few hosts to the hostdb and verify that the conversion rate is // reflected correctly st2, err := blankServerTester(t.Name() + "-st2") if err != nil { t.Fatal(err) } defer st2.panicClose() st3, err := blankServerTester(t.Name() + "-st3") if err != nil { t.Fatal(err) } defer st3.panicClose() st4, err := blankServerTester(t.Name() + "-st4") if err != nil { t.Fatal(err) } defer st4.panicClose() sts := []*serverTester{st, st2, st3, st4} err = fullyConnectNodes(sts) if err != nil { t.Fatal(err) } err = fundAllNodes(sts) if err != nil { t.Fatal(err) } for i, tester := range sts { is = tester.host.InternalSettings() is.MinContractPrice = types.SiacoinPrecision.Mul64(1000 + (1000 * uint64(i))) err = tester.host.SetInternalSettings(is) if err != nil { t.Fatal(err) } } err = announceAllHosts(sts) if err != nil { t.Fatal(err) } tests := []struct { price types.Currency minConversionRate float64 }{ {types.SiacoinPrecision, 100}, {types.SiacoinPrecision.Mul64(50), 98}, {types.SiacoinPrecision.Mul64(2500), 50}, {types.SiacoinPrecision.Mul64(3000), 10}, {types.SiacoinPrecision.Mul64(30000), 0.00001}, } for i, test := range tests { err = st.getAPI(fmt.Sprintf("/host/estimatescore?mincontractprice=%v", test.price.String()), &eg) if err != nil { t.Fatal("test", i, "failed:", err) } if eg.ConversionRate < test.minConversionRate { t.Fatalf("test %v: incorrect conversion rate: got %v wanted %v\n", i, eg.ConversionRate, test.minConversionRate) } } } // TestWorkingStatus tests that the host's WorkingStatus field is set // correctly. func TestWorkingStatus(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // announce a host, create an allowance, upload some data. if err := st.announceHost(); err != nil { t.Fatal(err) } if err := st.acceptContracts(); err != nil { t.Fatal(err) } if err := st.setHostStorage(); err != nil { t.Fatal(err) } // Set an allowance for the renter, allowing a contract to be formed. allowanceValues := url.Values{} allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) if err = st.stdPostAPI("/renter", allowanceValues); err != nil { t.Fatal(err) } // Create a file. path := filepath.Join(st.dir, "test.dat") fileBytes := 1024 if err := createRandFile(path, fileBytes); err != nil { t.Fatal(err) } // Upload to host. 
uploadValues := url.Values{} uploadValues.Set("source", path) if err := st.stdPostAPI("/renter/upload/test", uploadValues); err != nil { t.Fatal(err) } // Only one piece will be uploaded (10% at current redundancy) var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(50 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 { t.Error(rf.Files[0].UploadProgress) t.Fatal("uploading has failed") } err = retry(30, time.Second, func() error { var hg HostGET st.getAPI("/host", &hg) if hg.WorkingStatus != modules.HostWorkingStatusWorking { return errors.New("expected host to be working") } return nil }) if err != nil { t.Fatal(err) } } // TestConnectabilityStatus tests that the host's ConnectabilityStatus field is // set correctly. func TestConnectabilityStatus(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // create and announce a host st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() if err := st.announceHost(); err != nil { t.Fatal(err) } err = retry(30, time.Second, func() error { var hg HostGET st.getAPI("/host", &hg) if hg.ConnectabilityStatus != modules.HostConnectabilityStatusConnectable { return errors.New("expected host to be connectable") } return nil }) if err != nil { t.Fatal(err) } } // TestStorageHandler tests that host storage is being reported correctly. func TestStorageHandler(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host and start accepting contracts. if err := st.announceHost(); err != nil { t.Fatal(err) } if err := st.acceptContracts(); err != nil { t.Fatal(err) } if err := st.setHostStorage(); err != nil { t.Fatal(err) } // Set an allowance for the renter, allowing a contract to be formed. allowanceValues := url.Values{} allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) if err = st.stdPostAPI("/renter", allowanceValues); err != nil { t.Fatal(err) } // Create a file. path := filepath.Join(st.dir, "test.dat") fileBytes := 1024 if err := createRandFile(path, fileBytes); err != nil { t.Fatal(err) } // Upload to host. uploadValues := url.Values{} uploadValues.Set("source", path) if err := st.stdPostAPI("/renter/upload/test", uploadValues); err != nil { t.Fatal(err) } // Only one piece will be uploaded (10% at current redundancy) var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(50 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 { t.Error(rf.Files[0].UploadProgress) t.Fatal("uploading has failed") } var sg StorageGET if err := st.getAPI("/host/storage", &sg); err != nil { t.Fatal(err) } // Uploading succeeded, so /host/storage should be reporting a successful // write. if sg.Folders[0].SuccessfulWrites != 1 { t.Fatalf("expected 1 successful write, got %v", sg.Folders[0].SuccessfulWrites) } if used := sg.Folders[0].Capacity - sg.Folders[0].CapacityRemaining; used != modules.SectorSize { t.Fatalf("expected used capacity to be the size of one sector (%v bytes), got %v bytes", modules.SectorSize, used) } } // TestAddFolderNoPath tests that an API call to add a storage folder fails if // no path was provided. 
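// For contrast, a well-formed add-folder call sets both form values (the
// path below is illustrative):
//
//	addValues := url.Values{}
//	addValues.Set("path", "/mnt/siafolder")
//	addValues.Set("size", mediumSizeFolderString)
//	err = st.stdPostAPI("/host/storage/folders/add", addValues)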
func TestAddFolderNoPath(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Try adding a storage folder without setting "path" in the API call. addValues := url.Values{} addValues.Set("size", mediumSizeFolderString) err = st.stdPostAPI("/host/storage/folders/add", addValues) if err == nil { t.Fatal("expected error when adding a storage folder without a path") } // Setting the path to an empty string should trigger the same error. addValues.Set("path", "") err = st.stdPostAPI("/host/storage/folders/add", addValues) if err == nil { t.Fatal("expected error when adding a storage folder with an empty path") } } // TestAddFolderNoSize tests that an API call to add a storage folder fails if // no size was provided. func TestAddFolderNoSize(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Try adding a storage folder without setting "size" in the API call. addValues := url.Values{} addValues.Set("path", st.dir) err = st.stdPostAPI("/host/storage/folders/add", addValues) if err == nil || err.Error() != io.EOF.Error() { t.Fatalf("expected error to be %v, got %v", io.EOF, err) } } // TestAddSameFolderTwice tests that an API call that attempts to add a // host storage folder that's already been added is handled gracefully. func TestAddSameFolderTwice(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Make the call to add a storage folder twice. addValues := url.Values{} addValues.Set("path", st.dir) addValues.Set("size", mediumSizeFolderString) err = st.stdPostAPI("/host/storage/folders/add", addValues) if err != nil { t.Fatal(err) } err = st.stdPostAPI("/host/storage/folders/add", addValues) if err == nil || err.Error() != contractmanager.ErrRepeatFolder.Error() { t.Fatalf("expected err to be %v, got %v", contractmanager.ErrRepeatFolder, err) } } // TestResizeEmptyStorageFolder tests that invalid and valid calls to resize // an empty storage folder are properly handled. func TestResizeEmptyStorageFolder(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host and start accepting contracts. if err := st.announceHost(); err != nil { t.Fatal(err) } if err := st.acceptContracts(); err != nil { t.Fatal(err) } if err := st.setHostStorage(); err != nil { t.Fatal(err) } // Find out how large the host's initial storage folder is. var sg StorageGET if err := st.getAPI("/host/storage", &sg); err != nil { t.Fatal(err) } defaultSize := sg.Folders[0].Capacity // Convert defaultSize (uint64) to a string for the API call. defaultSizeString := strconv.FormatUint(defaultSize, 10) resizeValues := url.Values{} resizeValues.Set("path", st.dir) resizeValues.Set("newsize", defaultSizeString) // Attempting to resize to the same size should return an error. err = st.stdPostAPI("/host/storage/folders/resize", resizeValues) if err == nil || err.Error() != contractmanager.ErrNoResize.Error() { t.Fatalf("expected error %v, got %v", contractmanager.ErrNoResize, err) } // Try resizing to a bunch of sizes (invalid ones first, valid ones second). // This ordering simplifies logic within the for loop. for i, test := range resizeTests { // Attempt to resize the host's storage folder.
resizeValues.Set("newsize", test.sizeString) err = st.stdPostAPI("/host/storage/folders/resize", resizeValues) if (err == nil && test.err != nil) || (err != nil && err.Error() != test.err.Error()) { t.Fatalf("test %v: expected error to be %v, got %v", i, test.err, err) } // Find out if the resize call worked as expected. if err := st.getAPI("/host/storage", &sg); err != nil { t.Fatal(err) } // If the test size is valid, check that the folder has been resized // properly. if test.err == nil { // Check that the folder's total capacity has been updated. if got := sg.Folders[0].Capacity; got != test.size { t.Fatalf("test %v: expected folder to be resized to %v; got %v instead", i, test.size, got) } // Check that the folder's remaining capacity has been updated. if got := sg.Folders[0].CapacityRemaining; got != test.size { t.Fatalf("folder should be empty, but capacity remaining (%v) != total capacity (%v)", got, test.size) } } else { // If the test size is invalid, the folder should not have been // resized. The invalid test cases are all run before the valid ones, // so the folder size should still be defaultSize. if got := sg.Folders[0].Capacity; got != defaultSize { t.Fatalf("folder was resized to an invalid size (%v) in a test case that should have failed: %v", got, test) } } } } // TestResizeNonemptyStorageFolder tests that invalid and valid calls to resize // a storage folder with one sector filled are properly handled. // Ideally, we would also test a very full storage folder (including the case // where the host tries to resize to a size smaller than the amount of data // in the folder), but that would be a very expensive test. func TestResizeNonemptyStorageFolder(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host and start accepting contracts. if err := st.announceHost(); err != nil { t.Fatal(err) } if err := st.acceptContracts(); err != nil { t.Fatal(err) } if err := st.setHostStorage(); err != nil { t.Fatal(err) } // Set an allowance for the renter, allowing a contract to be formed. allowanceValues := url.Values{} allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) if err = st.stdPostAPI("/renter", allowanceValues); err != nil { t.Fatal(err) } // Create a file. path := filepath.Join(st.dir, "test.dat") fileBytes := 1024 if err := createRandFile(path, fileBytes); err != nil { t.Fatal(err) } // Upload to host. uploadValues := url.Values{} uploadValues.Set("source", path) if err := st.stdPostAPI("/renter/upload/test", uploadValues); err != nil { t.Fatal(err) } // Only one piece will be uploaded (10% at current redundancy) var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(50 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 { t.Error(rf.Files[0].UploadProgress) t.Fatal("uploading has failed") } // Find out how large the host's initial storage folder is. var sg StorageGET if err := st.getAPI("/host/storage", &sg); err != nil { t.Fatal(err) } defaultSize := sg.Folders[0].Capacity // Convert defaultSize (uint64) to a string for the API call. defaultSizeString := strconv.FormatUint(defaultSize, 10) resizeValues := url.Values{} resizeValues.Set("path", st.dir) resizeValues.Set("newsize", defaultSizeString) // Attempting to resize to the same size should return an error. 
err = st.stdPostAPI("/host/storage/folders/resize", resizeValues) if err == nil || err.Error() != contractmanager.ErrNoResize.Error() { t.Fatalf("expected error %v, got %v", contractmanager.ErrNoResize, err) } // Try resizing to a bunch of sizes (invalid ones first, valid ones second). // This ordering simplifies logic within the for loop. for _, test := range resizeTests { // Attempt to resize the host's storage folder. resizeValues.Set("newsize", test.sizeString) err = st.stdPostAPI("/host/storage/folders/resize", resizeValues) if (err == nil && test.err != nil) || (err != nil && test.err == nil) || (err != nil && err.Error() != test.err.Error()) { t.Fatalf("expected error to be %v, got %v", test.err, err) } // Find out if the resize call worked as expected. if err := st.getAPI("/host/storage", &sg); err != nil { t.Fatal(err) } // If the test size is valid, check that the folder has been resized // properly. if test.err == nil { // Check that the folder's total capacity has been updated. if sg.Folders[0].Capacity != test.size { t.Fatalf("expected folder to be resized to %v; got %v instead", test.size, sg.Folders[0].Capacity) } // Since one sector has been uploaded, the available capacity // should be one sector size smaller than the total capacity. if used := test.size - sg.Folders[0].CapacityRemaining; used != modules.SectorSize { t.Fatalf("used capacity (%v) != the size of 1 sector (%v)", used, modules.SectorSize) } } else { // If the test size is invalid, the folder should not have been // resized. The invalid test cases are all run before the valid // ones, so the folder size should still be defaultSize. if got := sg.Folders[0].Capacity; got != defaultSize { t.Fatalf("folder was resized to an invalid size (%v) in a test case that should have failed: %v", got, test) } } } } // TestResizeNonexistentFolder checks that an API call to resize a nonexistent // folder triggers the appropriate error. func TestResizeNonexistentFolder(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // No folder has been created yet at st.dir, so using it as the path for // the resize call should trigger an error. resizeValues := url.Values{} resizeValues.Set("path", st.dir) resizeValues.Set("newsize", mediumSizeFolderString) err = st.stdPostAPI("/host/storage/folders/resize", resizeValues) if err == nil || err.Error() != errStorageFolderNotFound.Error() { t.Fatalf("expected error to be %v, got %v", errStorageFolderNotFound, err) } } // TestStorageFolderUnavailable simulates the situation where a storage folder // is not available to the host when the host starts, verifying that it sets // FailedWrites and FailedReads correctly and eventually finds the storage // folder when it is made available to the host again. 
func TestStorageFolderUnavailable(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.Close() // add a storage folder sfPath := build.TempDir(t.Name(), "storagefolder") err = os.MkdirAll(sfPath, 0755) if err != nil { t.Fatal(err) } sfValues := url.Values{} sfValues.Set("path", sfPath) sfValues.Set("size", "1048576") err = st.stdPostAPI("/host/storage/folders/add", sfValues) if err != nil { t.Fatal(err) } var sfs StorageGET err = st.getAPI("/host/storage", &sfs) if err != nil { t.Fatal(err) } if sfs.Folders[0].FailedReads != 0 || sfs.Folders[0].FailedWrites != 0 { t.Fatal("newly added folder has failed reads or writes") } // remove the folder on disk st.server.Close() sfPath2 := build.TempDir(t.Name(), "storagefolder-old") err = os.Rename(sfPath, sfPath2) if err != nil { t.Fatal(err) } // reload the host st, err = st.reloadedServerTester() if err != nil { t.Fatal(err) } defer st.server.Close() err = st.getAPI("/host/storage", &sfs) if err != nil { t.Fatal(err) } if sfs.Folders[0].FailedWrites < 999 { t.Fatal("storage folder should have lots of failed writes after being moved on disk") } if sfs.Folders[0].FailedReads < 999 { t.Fatal("storage folder should have lots of failed reads after being moved on disk") } // try some actions on the dead storage folder // resize sfValues.Set("size", "2097152") err = st.stdPostAPI("/host/storage/folders/resize", sfValues) if err == nil { t.Fatal("expected resize on unavailable storage folder to fail") } // remove err = st.stdPostAPI("/host/storage/folders/remove", sfValues) if err == nil { t.Fatal("expected remove on unavailable storage folder to fail") } // move the folder back err = os.Rename(sfPath2, sfPath) if err != nil { t.Fatal(err) } // wait for the contract manager to recheck the storage folder // NOTE: this is a hard-coded constant based on the contractmanager's maxFolderRecheckInterval constant. time.Sleep(time.Second * 10) // verify the storage folder is reset to normal err = st.getAPI("/host/storage", &sfs) if err != nil { t.Fatal(err) } if sfs.Folders[0].FailedWrites > 0 { t.Fatal("storage folder should have no failed writes after being moved back") } if sfs.Folders[0].FailedReads > 0 { t.Fatal("storage folder should have no failed reads after being moved back") } // reload the host and verify the storage folder is still good st.server.Close() st, err = st.reloadedServerTester() if err != nil { t.Fatal(err) } defer st.server.Close() // storage folder should still be good err = st.getAPI("/host/storage", &sfs) if err != nil { t.Fatal(err) } if sfs.Folders[0].FailedWrites > 0 { t.Fatal("storage folder should have no failed writes after being moved back") } if sfs.Folders[0].FailedReads > 0 { t.Fatal("storage folder should have no failed reads after being moved back") } } // TestResizeFolderNoPath checks that an API call to resize a storage folder fails // if no path was provided. func TestResizeFolderNoPath(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // The call to resize should fail if no path has been provided. 
resizeValues := url.Values{} resizeValues.Set("newsize", mediumSizeFolderString) err = st.stdPostAPI("/host/storage/folders/resize", resizeValues) if err == nil || err.Error() != errNoPath.Error() { t.Fatalf("expected error to be %v; got %v", errNoPath, err) } } // TestRemoveEmptyStorageFolder checks that removing an empty storage folder // succeeds -- even if the host is left with zero storage folders. func TestRemoveEmptyStorageFolder(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Set up a storage folder for the host. if err := st.setHostStorage(); err != nil { t.Fatal(err) } // Try to delete the host's empty storage folder. removeValues := url.Values{} removeValues.Set("path", st.dir) if err = st.stdPostAPI("/host/storage/folders/remove", removeValues); err != nil { t.Fatal(err) } } // TestRemoveStorageFolderError checks that invalid calls to // /host/storage/folders/remove fail with the appropriate error. func TestRemoveStorageFolderError(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Set up a storage folder for the host. if err := st.setHostStorage(); err != nil { t.Fatal(err) } // Try removing a nonexistent folder. removeValues := url.Values{} removeValues.Set("path", "/foo/bar") err = st.stdPostAPI("/host/storage/folders/remove", removeValues) if err == nil || err.Error() != errStorageFolderNotFound.Error() { t.Fatalf("expected error %v, got %v", errStorageFolderNotFound, err) } // The folder path can't be an empty string. removeValues.Set("path", "") err = st.stdPostAPI("/host/storage/folders/remove", removeValues) if err == nil || err.Error() != errNoPath.Error() { t.Fatalf("expected error to be %v; got %v", errNoPath, err) } } // TestRemoveStorageFolderForced checks that if a call to remove a storage // folder will result in data loss, that call succeeds if and only if "force" // has been set to "true". func TestRemoveStorageFolderForced(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host. if err := st.announceHost(); err != nil { t.Fatal(err) } if err := st.acceptContracts(); err != nil { t.Fatal(err) } if err := st.setHostStorage(); err != nil { t.Fatal(err) } // Set an allowance for the renter, allowing a contract to be formed. allowanceValues := url.Values{} allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) if err = st.stdPostAPI("/renter", allowanceValues); err != nil { t.Fatal(err) } // Create a file for upload. path := filepath.Join(st.dir, "test.dat") if err := createRandFile(path, 512); err != nil { t.Fatal(err) } // Upload to host. 
uploadValues := url.Values{} uploadValues.Set("source", path) if err := st.stdPostAPI("/renter/upload/test", uploadValues); err != nil { t.Fatal(err) } // Only one piece will be uploaded (10% at current redundancy) var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(50 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 { t.Error(rf.Files[0].UploadProgress) t.Fatal("uploading has failed") } // The host should not be able to remove its only folder without setting // "force" to "true", since this will result in data loss (there are no // other folders for the data to be redistributed to). removeValues := url.Values{} removeValues.Set("path", st.dir) err = st.stdPostAPI("/host/storage/folders/remove", removeValues) if err == nil || err.Error() != contractmanager.ErrPartialRelocation.Error() { t.Fatalf("expected err to be %v; got %v", contractmanager.ErrPartialRelocation, err) } // Forced removal of the folder should succeed, though. removeValues.Set("force", "true") err = st.stdPostAPI("/host/storage/folders/remove", removeValues) if err != nil { t.Fatal(err) } } // TestDeleteSector tests the call to delete a storage sector from the host. func TestDeleteSector(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Set up the host for forming contracts. if err := st.announceHost(); err != nil { t.Fatal(err) } if err := st.acceptContracts(); err != nil { t.Fatal(err) } if err := st.setHostStorage(); err != nil { t.Fatal(err) } // Set an allowance for the renter, allowing contracts to be formed. allowanceValues := url.Values{} allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) if err = st.stdPostAPI("/renter", allowanceValues); err != nil { t.Fatal(err) } // Create a file. path := filepath.Join(st.dir, "test.dat") if err := createRandFile(path, 1024); err != nil { t.Fatal(err) } // Upload to host. uploadValues := url.Values{} uploadValues.Set("source", path) if err = st.stdPostAPI("/renter/upload/test", uploadValues); err != nil { t.Fatal(err) } // Only one piece will be uploaded (10% at current redundancy) var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(50 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 { t.Error(rf.Files[0].UploadProgress) t.Fatal("uploading has failed") } // Get the Merkle root of the piece that was uploaded. contracts := st.renter.Contracts() if len(contracts) != 1 { t.Fatalf("expected exactly 1 contract to have been formed; got %v instead", len(contracts)) } sectorRoot := contracts[0].MerkleRoots[0].String() if err = st.stdPostAPI("/host/storage/sectors/delete/"+sectorRoot, url.Values{}); err != nil { t.Fatal(err) } } // TestDeleteNonexistentSector checks that attempting to delete a storage // sector that doesn't exist will fail with the appropriate error.
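// The deletion route carries the sector's Merkle root directly in the URL,
// e.g. (root value illustrative):
//
//	POST /host/storage/sectors/delete/<merkleroot>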
func TestDeleteNonexistentSector(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // These calls to delete imaginary sectors should fail for a few reasons: // - the given sector root strings are invalid // - the renter hasn't uploaded anything // - the host has no storage folders yet // Right now, the calls fail for the first reason. This test will report if that behavior changes. badHash := crypto.HashObject("fake object").String() err = st.stdPostAPI("/host/storage/sectors/delete/"+badHash, url.Values{}) if err == nil || err.Error() != contractmanager.ErrSectorNotFound.Error() { t.Fatalf("expected error to be %v; got %v", contractmanager.ErrSectorNotFound, err) } wrongSize := "wrong size string" err = st.stdPostAPI("/host/storage/sectors/delete/"+wrongSize, url.Values{}) if err == nil || err.Error() != crypto.ErrHashWrongLen.Error() { t.Fatalf("expected error to be %v; got %v", crypto.ErrHashWrongLen, err) } } Sia-1.3.0/api/hostdb.go000066400000000000000000000063331313565667000146330ustar00rootroot00000000000000package api import ( "fmt" "net/http" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/julienschmidt/httprouter" ) type ( // ExtendedHostDBEntry is an extension to modules.HostDBEntry that includes // the string representation of the public key, otherwise presented as two // fields, a string and a base64 encoded byte slice. ExtendedHostDBEntry struct { modules.HostDBEntry PublicKeyString string `json:"publickeystring"` } // HostdbActiveGET lists active hosts on the network. HostdbActiveGET struct { Hosts []ExtendedHostDBEntry `json:"hosts"` } // HostdbAllGET lists all hosts that the renter is aware of. HostdbAllGET struct { Hosts []ExtendedHostDBEntry `json:"hosts"` } // HostdbHostsGET lists detailed statistics for a particular host, selected // by pubkey. HostdbHostsGET struct { Entry ExtendedHostDBEntry `json:"entry"` ScoreBreakdown modules.HostScoreBreakdown `json:"scorebreakdown"` } ) // hostdbActiveHandler handles the API call asking for the list of active // hosts. func (api *API) hostdbActiveHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { var numHosts uint64 hosts := api.renter.ActiveHosts() if req.FormValue("numhosts") == "" { // Default value for 'numhosts' is all of them. numHosts = uint64(len(hosts)) } else { // Parse the value for 'numhosts'. _, err := fmt.Sscan(req.FormValue("numhosts"), &numHosts) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } // Catch any boundary errors. if numHosts > uint64(len(hosts)) { numHosts = uint64(len(hosts)) } } // Convert the entries into extended entries. var extendedHosts []ExtendedHostDBEntry for _, host := range hosts { extendedHosts = append(extendedHosts, ExtendedHostDBEntry{ HostDBEntry: host, PublicKeyString: host.PublicKey.String(), }) } WriteJSON(w, HostdbActiveGET{ Hosts: extendedHosts[:numHosts], }) } // hostdbAllHandler handles the API call asking for the list of all hosts. func (api *API) hostdbAllHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { // Get the set of all hosts and convert them into extended hosts. 
hosts := api.renter.AllHosts() var extendedHosts []ExtendedHostDBEntry for _, host := range hosts { extendedHosts = append(extendedHosts, ExtendedHostDBEntry{ HostDBEntry: host, PublicKeyString: host.PublicKey.String(), }) } WriteJSON(w, HostdbAllGET{ Hosts: extendedHosts, }) } // hostdbHostsHandler handles the API call asking for a specific host, // returning detailed information about that host. func (api *API) hostdbHostsHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { var pk types.SiaPublicKey pk.LoadString(ps.ByName("pubkey")) entry, exists := api.renter.Host(pk) if !exists { WriteError(w, Error{"requested host does not exist"}, http.StatusBadRequest) return } breakdown := api.renter.ScoreBreakdown(entry) // Extend the hostdb entry to have the public key string. extendedEntry := ExtendedHostDBEntry{ HostDBEntry: entry, PublicKeyString: entry.PublicKey.String(), } WriteJSON(w, HostdbHostsGET{ Entry: extendedEntry, ScoreBreakdown: breakdown, }) } Sia-1.3.0/api/hostdb_test.go000066400000000000000000001001741313565667000156700ustar00rootroot00000000000000package api import ( "bytes" "errors" "fmt" "io/ioutil" "net/url" "path/filepath" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/consensus" "github.com/NebulousLabs/Sia/modules/gateway" "github.com/NebulousLabs/Sia/modules/host" "github.com/NebulousLabs/Sia/modules/miner" "github.com/NebulousLabs/Sia/modules/renter" "github.com/NebulousLabs/Sia/modules/transactionpool" "github.com/NebulousLabs/Sia/modules/wallet" ) // TestHostDBHostsActiveHandler checks the behavior of the call to // /hostdb/active. func TestHostDBHostsActiveHandler(t *testing.T) { if testing.Short() { t.SkipNow() } st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Try the call with numhosts unset, and set to -1, 0, and 1. var ah HostdbActiveGET err = st.getAPI("/hostdb/active", &ah) if err != nil { t.Fatal(err) } if len(ah.Hosts) != 0 { t.Fatal(len(ah.Hosts)) } err = st.getAPI("/hostdb/active?numhosts=-1", &ah) if err == nil { t.Fatal("expecting an error, got:", err) } err = st.getAPI("/hostdb/active?numhosts=0", &ah) if err != nil { t.Fatal(err) } if len(ah.Hosts) != 0 { t.Fatal(len(ah.Hosts)) } err = st.getAPI("/hostdb/active?numhosts=1", &ah) if err != nil { t.Fatal(err) } if len(ah.Hosts) != 0 { t.Fatal(len(ah.Hosts)) } // announce the host and start accepting contracts err = st.announceHost() if err != nil { t.Fatal(err) } err = st.acceptContracts() if err != nil { t.Fatal(err) } err = st.setHostStorage() if err != nil { t.Fatal(err) } // Try the call with numhosts unset, and set to -1, 0, 1, and 2. err = st.getAPI("/hostdb/active", &ah) if err != nil { t.Fatal(err) } if len(ah.Hosts) != 1 { t.Fatal(len(ah.Hosts)) } err = st.getAPI("/hostdb/active?numhosts=-1", &ah) if err == nil { t.Fatal("expecting an error, got:", err) } err = st.getAPI("/hostdb/active?numhosts=0", &ah) if err != nil { t.Fatal(err) } if len(ah.Hosts) != 0 { t.Fatal(len(ah.Hosts)) } err = st.getAPI("/hostdb/active?numhosts=1", &ah) if err != nil { t.Fatal(err) } if len(ah.Hosts) != 1 { t.Fatal(len(ah.Hosts)) } err = st.getAPI("/hostdb/active?numhosts=2", &ah) if err != nil { t.Fatal(err) } if len(ah.Hosts) != 1 { t.Fatal(len(ah.Hosts)) } } // TestHostDBHostsAllHandler checks that announcing a host adds it to the list // of all hosts.
func TestHostDBHostsAllHandler(t *testing.T) { if testing.Short() { t.SkipNow() } st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Try the call before any hosts have been declared. var ah HostdbAllGET if err = st.getAPI("/hostdb/all", &ah); err != nil { t.Fatal(err) } if len(ah.Hosts) != 0 { t.Fatalf("expected 0 hosts, got %v", len(ah.Hosts)) } // Announce the host and try the call again. if err = st.announceHost(); err != nil { t.Fatal(err) } if err = st.getAPI("/hostdb/all", &ah); err != nil { t.Fatal(err) } if len(ah.Hosts) != 1 { t.Fatalf("expected 1 host, got %v", len(ah.Hosts)) } } // TestHostDBHostsHandler checks that the hosts handler is able to return // detailed information about a particular host. func TestHostDBHostsHandler(t *testing.T) { if testing.Short() { t.SkipNow() } st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host and then get the list of hosts. var ah HostdbActiveGET if err = st.announceHost(); err != nil { t.Fatal(err) } if err = st.getAPI("/hostdb/active", &ah); err != nil { t.Fatal(err) } if len(ah.Hosts) != 1 { t.Fatalf("expected 1 host, got %v", len(ah.Hosts)) } // Parse the pubkey from the returned list of hosts and use it to form a // request regarding the specific host. keyString := ah.Hosts[0].PublicKey.String() if keyString != ah.Hosts[0].PublicKeyString { t.Error("actual key string and provided string do not match") } query := fmt.Sprintf("/hostdb/hosts/%s", ah.Hosts[0].PublicKeyString) // Get the detailed info for the host. var hh HostdbHostsGET if err = st.getAPI(query, &hh); err != nil { t.Fatal(err) } // Check that none of the values equal zero. A value of zero indicates that // the field is no longer being tracked/reported, which could break // compatibility for some apps. The default needs to be '1', not zero. if hh.ScoreBreakdown.Score.IsZero() { t.Error("Zero value score in score breakdown") } if hh.ScoreBreakdown.AgeAdjustment == 0 { t.Error("Zero value in host score breakdown") } if hh.ScoreBreakdown.BurnAdjustment == 0 { t.Error("Zero value in host score breakdown") } if hh.ScoreBreakdown.CollateralAdjustment == 0 { t.Error("Zero value in host score breakdown") } if hh.ScoreBreakdown.PriceAdjustment == 0 { t.Error("Zero value in host score breakdown") } if hh.ScoreBreakdown.StorageRemainingAdjustment == 0 { t.Error("Zero value in host score breakdown") } if hh.ScoreBreakdown.UptimeAdjustment == 0 { t.Error("Zero value in host score breakdown") } if hh.ScoreBreakdown.VersionAdjustment == 0 { t.Error("Zero value in host score breakdown") } // Check that none of the supported values equals 1. A value of 1 indicates // that the hostdb is not performing any penalties or rewards for that // field, meaning that the calibration for that field is probably incorrect. if hh.ScoreBreakdown.AgeAdjustment == 1 { t.Error("One value in host score breakdown") } // Burn adjustment is not yet supported.
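// (Sketch, not authoritative: the breakdown factors are understood to combine // multiplicatively into the final score, roughly // //	score ≈ base × age × collateral × price × storageRemaining × uptime × version // // so a factor pinned at exactly 1 contributes nothing, which is why 1 is // treated as the uncalibrated sentinel here; the burn check stays commented // out for the reason above.)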
// // if hh.ScoreBreakdown.BurnAdjustment == 1 { // t.Error("One value in host score breakdown") // } if hh.ScoreBreakdown.CollateralAdjustment == 1 { t.Error("One value in host score breakdown") } if hh.ScoreBreakdown.PriceAdjustment == 1 { t.Error("One value in host score breakdown") } if hh.ScoreBreakdown.StorageRemainingAdjustment == 1 { t.Error("One value in host score breakdown") } if hh.ScoreBreakdown.UptimeAdjustment == 1 { t.Error("One value in host score breakdown") } if hh.ScoreBreakdown.VersionAdjustment == 1 { t.Error("One value in host score breakdown") } } // assembleHostPort is assembleServerTester but you can specify which // hostname the host should use. func assembleHostPort(key crypto.TwofishKey, hostHostname string, testdir string) (*serverTester, error) { // assembleServerTester should not get called during short tests, as it // takes a long time to run. if testing.Short() { panic("assembleServerTester called during short tests") } // Create the modules. g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { return nil, err } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { return nil, err } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { return nil, err } w, err := wallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { return nil, err } if !w.Encrypted() { _, err = w.Encrypt(key) if err != nil { return nil, err } } err = w.Unlock(key) if err != nil { return nil, err } m, err := miner.New(cs, tp, w, filepath.Join(testdir, modules.MinerDir)) if err != nil { return nil, err } h, err := host.New(cs, tp, w, hostHostname, filepath.Join(testdir, modules.HostDir)) if err != nil { return nil, err } r, err := renter.New(g, cs, w, tp, filepath.Join(testdir, modules.RenterDir)) if err != nil { return nil, err } srv, err := NewServer("localhost:0", "Sia-Agent", "", cs, nil, g, h, m, r, tp, w) if err != nil { return nil, err } // Assemble the serverTester. st := &serverTester{ cs: cs, gateway: g, host: h, miner: m, renter: r, tpool: tp, wallet: w, walletKey: key, server: srv, dir: testdir, } // TODO: A more reasonable way of listening for server errors. go func() { listenErr := srv.Serve() if listenErr != nil { panic(listenErr) } }() return st, nil } // TestHostDBScanOnlineOffline checks that both online and offline hosts get // scanned in the hostdb. func TestHostDBScanOnlineOffline(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.panicClose() stHost, err := blankServerTester(t.Name() + "-Host") if err != nil { t.Fatal(err) } sts := []*serverTester{st, stHost} err = fullyConnectNodes(sts) if err != nil { t.Fatal(err) } err = fundAllNodes(sts) if err != nil { t.Fatal(err) } // Announce the host. err = stHost.acceptContracts() if err != nil { t.Fatal(err) } err = stHost.setHostStorage() if err != nil { t.Fatal(err) } err = stHost.announceHost() if err != nil { t.Fatal(err) } // Verify the host is visible. var ah HostdbActiveGET for i := 0; i < 50; i++ { if err = st.getAPI("/hostdb/active", &ah); err != nil { t.Fatal(err) } if len(ah.Hosts) == 1 { break } time.Sleep(time.Millisecond * 100) } if len(ah.Hosts) != 1 { t.Fatalf("expected 1 host, got %v", len(ah.Hosts)) } hostAddr := ah.Hosts[0].NetAddress // Close the host and wait for a scan to knock the host out of the hostdb.
err = stHost.server.Close() if err != nil { t.Fatal(err) } err = retry(60, time.Second, func() error { if err := st.getAPI("/hostdb/active", &ah); err != nil { return err } if len(ah.Hosts) == 0 { return nil } return errors.New("host still in hostdb") }) if err != nil { t.Fatal(err) } // Reopen the host and wait for a scan to bring the host back into the // hostdb. stHost, err = assembleHostPort(stHost.walletKey, string(hostAddr), stHost.dir) if err != nil { t.Fatal(err) } defer stHost.panicClose() sts[1] = stHost err = fullyConnectNodes(sts) if err != nil { t.Fatal(err) } err = retry(60, time.Second, func() error { // Get the hostdb internals. if err = st.getAPI("/hostdb/active", &ah); err != nil { return err } if len(ah.Hosts) != 1 { return fmt.Errorf("expected 1 host, got %v", len(ah.Hosts)) } return nil }) if err != nil { t.Fatal(err) } } // TestHostDBAndRenterDownloadDynamicIPs checks that the hostdb and the renter are // successfully able to follow a host that has changed IP addresses and then // re-announced. func TestHostDBAndRenterDownloadDynamicIPs(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.panicClose() stHost, err := blankServerTester(t.Name() + "-Host") if err != nil { t.Fatal(err) } sts := []*serverTester{st, stHost} err = fullyConnectNodes(sts) if err != nil { t.Fatal(err) } err = fundAllNodes(sts) if err != nil { t.Fatal(err) } // Announce the host. err = stHost.acceptContracts() if err != nil { t.Fatal(err) } err = stHost.setHostStorage() if err != nil { t.Fatal(err) } err = stHost.announceHost() if err != nil { t.Fatal(err) } // Pull the host's net address and pubkey from the hostdb. var ah HostdbActiveGET for i := 0; i < 50; i++ { if err = st.getAPI("/hostdb/active", &ah); err != nil { t.Fatal(err) } if len(ah.Hosts) == 1 { break } time.Sleep(time.Millisecond * 100) } if len(ah.Hosts) != 1 { t.Fatalf("expected 1 host, got %v", len(ah.Hosts)) } addr := ah.Hosts[0].NetAddress pks := ah.Hosts[0].PublicKeyString // Upload a file to the host. allowanceValues := url.Values{} testFunds := "10000000000000000000000000000" // 10k SC testPeriod := "10" testPeriodInt := 10 allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Create a file. path := filepath.Join(st.dir, "test.dat") err = createRandFile(path, 1024) if err != nil { t.Fatal(err) } // Upload the file to the renter. uploadValues := url.Values{} uploadValues.Set("source", path) err = st.stdPostAPI("/renter/upload/test", uploadValues) if err != nil { t.Fatal(err) } // Only one piece will be uploaded (10% at current redundancy). var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(100 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 { t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0]) } // Try downloading the file. downpath := filepath.Join(st.dir, "testdown.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downpath) if err != nil { t.Fatal(err) } // Check that the download has the right contents. 
orig, err := ioutil.ReadFile(path) if err != nil { t.Fatal(err) } download, err := ioutil.ReadFile(downpath) if err != nil { t.Fatal(err) } if !bytes.Equal(orig, download) { t.Fatal("data mismatch when downloading a file") } // Mine a block before resetting the host, so that the host doesn't lose // its contracts when the transaction pool resets. _, err = st.miner.AddBlock() if err != nil { t.Fatal(err) } _, err = synchronizationCheck(sts) if err != nil { t.Fatal(err) } // Give time for the upgrade to happen. time.Sleep(time.Second * 3) // Close and re-open the host. This should reset the host's address, as the // host should now be on a new port. err = stHost.server.Close() if err != nil { t.Fatal(err) } stHost, err = assembleServerTester(stHost.walletKey, stHost.dir) if err != nil { t.Fatal(err) } defer stHost.panicClose() sts[1] = stHost err = fullyConnectNodes(sts) if err != nil { t.Fatal(err) } err = stHost.announceHost() if err != nil { t.Fatal(err) } // Pull the host's net address and pubkey from the hostdb. err = retry(100, time.Millisecond*200, func() error { // Get the hostdb internals. if err = st.getAPI("/hostdb/active", &ah); err != nil { return err } // Get the host's internals. var hg HostGET if err = stHost.getAPI("/host", &hg); err != nil { return err } if len(ah.Hosts) != 1 { return fmt.Errorf("expected 1 host, got %v", len(ah.Hosts)) } if ah.Hosts[0].NetAddress != hg.ExternalSettings.NetAddress { return fmt.Errorf("hostdb net address doesn't match host net address: %v : %v", ah.Hosts[0].NetAddress, hg.ExternalSettings.NetAddress) } return nil }) if err != nil { t.Fatal(err) } if ah.Hosts[0].PublicKeyString != pks { t.Error("public key appears to have changed for host") } if ah.Hosts[0].NetAddress == addr { t.Log("NetAddress did not change for the new host") } // Try downloading the file. err = st.stdGetAPI("/renter/download/test?destination=" + downpath) if err != nil { t.Fatal(err) } // Check that the download has the right contents. download, err = ioutil.ReadFile(downpath) if err != nil { t.Fatal(err) } if !bytes.Equal(orig, download) { t.Fatal("data mismatch when downloading a file") } // Mine enough blocks that multiple renew cycles happen. After the renewing // happens, the file should still be downloadable. This is to check that the // renewal doesn't throw things off. for i := 0; i < testPeriodInt; i++ { _, err = st.miner.AddBlock() if err != nil { t.Fatal(err) } _, err = synchronizationCheck(sts) if err != nil { t.Fatal(err) } } err = retry(100, time.Millisecond*100, func() error { // Try downloading the file. err = st.stdGetAPI("/renter/download/test?destination=" + downpath) if err != nil { return err } // Check that the download has the right contents. download, err = ioutil.ReadFile(downpath) if err != nil { return err } if !bytes.Equal(orig, download) { return errors.New("downloaded file does not equal the original") } return nil }) if err != nil { t.Fatal(err) } } // TestHostDBAndRenterUploadDynamicIPs checks that the hostdb and the renter are // successfully able to follow a host that has changed IP addresses and then // re-announced.
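// // In sketch form, the invariant these dynamic-IP tests converge on after the // host restarts is: // //	ah.Hosts[0].PublicKeyString == pks // host identity is stable //	ah.Hosts[0].NetAddress != addr    // the address may rotate with the port // // (the address comparison is only logged rather than treated as fatal, since // the new port is not guaranteed to differ).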
func TestHostDBAndRenterUploadDynamicIPs(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.panicClose() stHost, err := blankServerTester(t.Name() + "-Host") if err != nil { t.Fatal(err) } sts := []*serverTester{st, stHost} err = fullyConnectNodes(sts) if err != nil { t.Fatal(err) } err = fundAllNodes(sts) if err != nil { t.Fatal(err) } // Announce the host. err = stHost.acceptContracts() if err != nil { t.Fatal(err) } err = stHost.setHostStorage() if err != nil { t.Fatal(err) } err = stHost.announceHost() if err != nil { t.Fatal(err) } // Pull the host's net address and pubkey from the hostdb. var ah HostdbActiveGET for i := 0; i < 50; i++ { if err = st.getAPI("/hostdb/active", &ah); err != nil { t.Fatal(err) } if len(ah.Hosts) == 1 { break } time.Sleep(time.Millisecond * 100) } if len(ah.Hosts) != 1 { t.Fatalf("expected 1 host, got %v", len(ah.Hosts)) } addr := ah.Hosts[0].NetAddress pks := ah.Hosts[0].PublicKeyString // Upload a file to the host. allowanceValues := url.Values{} testFunds := "10000000000000000000000000000" // 10k SC testPeriod := "10" testPeriodInt := 10 allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Create a file. path := filepath.Join(st.dir, "test.dat") err = createRandFile(path, 1024) if err != nil { t.Fatal(err) } // Upload the file to the renter. uploadValues := url.Values{} uploadValues.Set("source", path) err = st.stdPostAPI("/renter/upload/test", uploadValues) if err != nil { t.Fatal(err) } // Only one piece will be uploaded (10% at current redundancy). var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(100 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 { t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0]) } // Mine a block before resetting the host, so that the host doesn't lose // its contracts when the transaction pool resets. _, err = st.miner.AddBlock() if err != nil { t.Fatal(err) } _, err = synchronizationCheck(sts) if err != nil { t.Fatal(err) } // Give time for the upgrade to happen. time.Sleep(time.Second * 3) // Close and re-open the host. This should reset the host's address, as the // host should now be on a new port. err = stHost.server.Close() if err != nil { t.Fatal(err) } stHost, err = assembleServerTester(stHost.walletKey, stHost.dir) if err != nil { t.Fatal(err) } defer stHost.panicClose() sts[1] = stHost err = fullyConnectNodes(sts) if err != nil { t.Fatal(err) } err = stHost.announceHost() if err != nil { t.Fatal(err) } // Pull the host's net address and pubkey from the hostdb. err = retry(50, time.Millisecond*100, func() error { // Get the hostdb internals. if err = st.getAPI("/hostdb/active", &ah); err != nil { return err } // Get the host's internals.
var hg HostGET if err = stHost.getAPI("/host", &hg); err != nil { return err } if len(ah.Hosts) != 1 { return fmt.Errorf("expected 1 host, got %v", len(ah.Hosts)) } if ah.Hosts[0].NetAddress != hg.ExternalSettings.NetAddress { return fmt.Errorf("hostdb net address doesn't match host net address: %v : %v", ah.Hosts[0].NetAddress, hg.ExternalSettings.NetAddress) } return nil }) if err != nil { t.Fatal(err) } if ah.Hosts[0].PublicKeyString != pks { t.Error("public key appears to have changed for host") } if ah.Hosts[0].NetAddress == addr { t.Log("NetAddress did not change for the new host") } // Try uploading a second file. path2 := filepath.Join(st.dir, "test2.dat") test2Size := modules.SectorSize*2 + 1 err = createRandFile(path2, int(test2Size)) if err != nil { t.Fatal(err) } uploadValues = url.Values{} uploadValues.Set("source", path2) err = st.stdPostAPI("/renter/upload/test2", uploadValues) if err != nil { t.Fatal(err) } // Only one piece will be uploaded (10% at current redundancy). for i := 0; i < 200 && (len(rf.Files) != 2 || rf.Files[0].UploadProgress < 10 || rf.Files[1].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(100 * time.Millisecond) } if len(rf.Files) != 2 || rf.Files[0].UploadProgress < 10 || rf.Files[1].UploadProgress < 10 { t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0], rf.Files[1]) } // Try downloading the second file. downpath2 := filepath.Join(st.dir, "testdown2.dat") err = st.stdGetAPI("/renter/download/test2?destination=" + downpath2) if err != nil { t.Fatal(err) } // Check that the download has the right contents. orig2, err := ioutil.ReadFile(path2) if err != nil { t.Fatal(err) } download2, err := ioutil.ReadFile(downpath2) if err != nil { t.Fatal(err) } if !bytes.Equal(orig2, download2) { t.Fatal("data mismatch when downloading a file") } // Mine enough blocks that multiple renew cycles happen. After the renewing // happens, the file should still be downloadable. This is to check that the // renewal doesn't throw things off. for i := 0; i < testPeriodInt; i++ { _, err = st.miner.AddBlock() if err != nil { t.Fatal(err) } _, err = synchronizationCheck(sts) if err != nil { t.Fatal(err) } // Give time for the upgrade to happen. time.Sleep(time.Second * 3) } // Try downloading the file. downpath := filepath.Join(st.dir, "testdown.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downpath) if err != nil { t.Fatal(err) } // Check that the download has the right contents. download, err := ioutil.ReadFile(downpath) if err != nil { t.Fatal(err) } orig, err := ioutil.ReadFile(path) if err != nil { t.Fatal(err) } if !bytes.Equal(orig, download) { t.Fatal("data mismatch when downloading a file") } // Try downloading the second file. err = st.stdGetAPI("/renter/download/test2?destination=" + downpath2) if err != nil { t.Fatal(err) } // Check that the download has the right contents. orig2, err = ioutil.ReadFile(path2) if err != nil { t.Fatal(err) } download2, err = ioutil.ReadFile(downpath2) if err != nil { t.Fatal(err) } if !bytes.Equal(orig2, download2) { t.Fatal("data mismatch when downloading a file") } } // TestHostDBAndRenterFormDynamicIPs checks that the hostdb and the renter are // successfully able to follow a host that has changed IP addresses and then // re-announced.
func TestHostDBAndRenterFormDynamicIPs(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.panicClose() stHost, err := blankServerTester(t.Name() + "-Host") if err != nil { t.Fatal(err) } sts := []*serverTester{st, stHost} err = fullyConnectNodes(sts) if err != nil { t.Fatal(err) } err = fundAllNodes(sts) if err != nil { t.Fatal(err) } // Announce the host. err = stHost.acceptContracts() if err != nil { t.Fatal(err) } err = stHost.setHostStorage() if err != nil { t.Fatal(err) } err = stHost.announceHost() if err != nil { t.Fatal(err) } // Pull the host's net address and pubkey from the hostdb. var ah HostdbActiveGET for i := 0; i < 50; i++ { if err = st.getAPI("/hostdb/active", &ah); err != nil { t.Fatal(err) } if len(ah.Hosts) == 1 { break } time.Sleep(time.Millisecond * 100) } if len(ah.Hosts) != 1 { t.Fatalf("expected 1 host, got %v", len(ah.Hosts)) } addr := ah.Hosts[0].NetAddress pks := ah.Hosts[0].PublicKeyString // Mine a block before resetting the host, so that the host doesn't lose // its contracts when the transaction pool resets. _, err = st.miner.AddBlock() if err != nil { t.Fatal(err) } _, err = synchronizationCheck(sts) if err != nil { t.Fatal(err) } // Give time for the upgrade to happen. time.Sleep(time.Second * 3) // Close and re-open the host. This should reset the host's address, as the // host should now be on a new port. err = stHost.server.Close() if err != nil { t.Fatal(err) } stHost, err = assembleServerTester(stHost.walletKey, stHost.dir) if err != nil { t.Fatal(err) } defer stHost.panicClose() sts[1] = stHost err = fullyConnectNodes(sts) if err != nil { t.Fatal(err) } err = stHost.announceHost() if err != nil { t.Fatal(err) } // Pull the host's net address and pubkey from the hostdb. err = retry(50, time.Millisecond*100, func() error { // Get the hostdb internals. if err = st.getAPI("/hostdb/active", &ah); err != nil { return err } // Get the host's internals. var hg HostGET if err = stHost.getAPI("/host", &hg); err != nil { return err } if len(ah.Hosts) != 1 { return fmt.Errorf("expected 1 host, got %v", len(ah.Hosts)) } if ah.Hosts[0].NetAddress != hg.ExternalSettings.NetAddress { return fmt.Errorf("hostdb net address doesn't match host net address: %v : %v", ah.Hosts[0].NetAddress, hg.ExternalSettings.NetAddress) } return nil }) if err != nil { t.Fatal(err) } if ah.Hosts[0].PublicKeyString != pks { t.Error("public key appears to have changed for host") } if ah.Hosts[0].NetAddress == addr { t.Log("NetAddress did not change for the new host") } // Upload a file to the host. allowanceValues := url.Values{} testFunds := "10000000000000000000000000000" // 10k SC testPeriod := "10" testPeriodInt := 10 allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Create a file. path := filepath.Join(st.dir, "test.dat") err = createRandFile(path, 1024) if err != nil { t.Fatal(err) } // Upload the file to the renter. uploadValues := url.Values{} uploadValues.Set("source", path) err = st.stdPostAPI("/renter/upload/test", uploadValues) if err != nil { t.Fatal(err) } // Only one piece will be uploaded (10% at current redundancy).
var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(100 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 { t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0]) } // Try downloading the file. downpath := filepath.Join(st.dir, "testdown.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downpath) if err != nil { t.Fatal(err) } // Check that the download has the right contents. orig, err := ioutil.ReadFile(path) if err != nil { t.Fatal(err) } download, err := ioutil.ReadFile(downpath) if err != nil { t.Fatal(err) } if !bytes.Equal(orig, download) { t.Fatal("data mismatch when downloading a file") } // Mine enough blocks that multiple renew cycles happen. After the renewing // happens, the file should still be downloadable. This is to check that the // renewal doesn't throw things off. for i := 0; i < testPeriodInt; i++ { _, err = st.miner.AddBlock() if err != nil { t.Fatal(err) } _, err = synchronizationCheck(sts) if err != nil { t.Fatal(err) } // Give time for the upgrade to happen. time.Sleep(time.Second * 3) } // Try downloading the file. err = st.stdGetAPI("/renter/download/test?destination=" + downpath) if err != nil { t.Fatal(err) } // Check that the download has the right contents. download, err = ioutil.ReadFile(downpath) if err != nil { t.Fatal(err) } if !bytes.Equal(orig, download) { t.Fatal("data mismatch when downloading a file") } } // TestHostDBAndRenterRenewDynamicIPs checks that the hostdb and the renter are // successfully able to follow a host that has changed IP addresses and then // re-announced. func TestHostDBAndRenterRenewDynamicIPs(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.panicClose() stHost, err := blankServerTester(t.Name() + "-Host") if err != nil { t.Fatal(err) } sts := []*serverTester{st, stHost} err = fullyConnectNodes(sts) if err != nil { t.Fatal(err) } err = fundAllNodes(sts) if err != nil { t.Fatal(err) } // Announce the host. err = stHost.acceptContracts() if err != nil { t.Fatal(err) } err = stHost.setHostStorage() if err != nil { t.Fatal(err) } err = stHost.announceHost() if err != nil { t.Fatal(err) } var ah HostdbActiveGET err = retry(50, 100*time.Millisecond, func() error { if err := st.getAPI("/hostdb/active", &ah); err != nil { return err } if len(ah.Hosts) != 1 { return errors.New("host not found in hostdb") } return nil }) if err != nil { t.Fatal(err) } // Upload a file to the host. allowanceValues := url.Values{} testFunds := "10000000000000000000000000000" // 10k SC testPeriod := "10" testPeriodInt := 10 allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Create a file. path := filepath.Join(st.dir, "test.dat") err = createRandFile(path, 1024) if err != nil { t.Fatal(err) } // Upload the file to the renter. uploadValues := url.Values{} uploadValues.Set("source", path) err = st.stdPostAPI("/renter/upload/test", uploadValues) if err != nil { t.Fatal(err) } // Only one piece will be uploaded (10% at current redundancy).
var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(100 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 { t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0]) } // Try downloading the file. downpath := filepath.Join(st.dir, "testdown.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downpath) if err != nil { t.Fatal(err) } // Check that the download has the right contents. orig, err := ioutil.ReadFile(path) if err != nil { t.Fatal(err) } download, err := ioutil.ReadFile(downpath) if err != nil { t.Fatal(err) } if !bytes.Equal(orig, download) { t.Fatal("data mismatch when downloading a file") } // Mine a block before resetting the host, so that the host doesn't lose // its contracts when the transaction pool resets. _, err = st.miner.AddBlock() if err != nil { t.Fatal(err) } _, err = synchronizationCheck(sts) if err != nil { t.Fatal(err) } // Give time for the upgrade to happen. time.Sleep(time.Second * 3) // Close and re-open the host. This should reset the host's address, as the // host should now be on a new port. err = stHost.server.Close() if err != nil { t.Fatal(err) } stHost, err = assembleServerTester(stHost.walletKey, stHost.dir) if err != nil { t.Fatal(err) } defer stHost.panicClose() sts[1] = stHost err = fullyConnectNodes(sts) if err != nil { t.Fatal(err) } err = stHost.announceHost() if err != nil { t.Fatal(err) } err = waitForBlock(stHost.cs.CurrentBlock().ID(), st) if err != nil { t.Fatal(err) } // Pull the host's net address and pubkey from the hostdb. err = retry(50, time.Millisecond*100, func() error { // Get the hostdb internals. if err = st.getAPI("/hostdb/active", &ah); err != nil { return err } // Get the host's internals. var hg HostGET if err = stHost.getAPI("/host", &hg); err != nil { return err } if len(ah.Hosts) != 1 { return fmt.Errorf("expected 1 host, got %v", len(ah.Hosts)) } if ah.Hosts[0].NetAddress != hg.ExternalSettings.NetAddress { return fmt.Errorf("hostdb net address doesn't match host net address: %v : %v", ah.Hosts[0].NetAddress, hg.ExternalSettings.NetAddress) } return nil }) if err != nil { t.Fatal(err) } // Mine enough blocks that multiple renew cycles happen. After the renewing // happens, the file should still be downloadable. for i := 0; i < testPeriodInt; i++ { _, err = st.miner.AddBlock() if err != nil { t.Fatal(err) } _, err = synchronizationCheck(sts) if err != nil { t.Fatal(err) } // Give time for the upgrade to happen. time.Sleep(time.Second * 3) } // Try downloading the file. err = st.stdGetAPI("/renter/download/test?destination=" + downpath) if err != nil { t.Fatal(err) } // Check that the download has the right contents. download, err = ioutil.ReadFile(downpath) if err != nil { t.Fatal(err) } if !bytes.Equal(orig, download) { t.Fatal("data mismatch when downloading a file") } } Sia-1.3.0/api/miner.go000066400000000000000000000041601313565667000144560ustar00rootroot00000000000000package api import ( "net/http" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/types" "github.com/julienschmidt/httprouter" ) type ( // MinerGET contains the information that is returned after a GET request // to /miner. MinerGET struct { BlocksMined int `json:"blocksmined"` CPUHashrate int `json:"cpuhashrate"` CPUMining bool `json:"cpumining"` StaleBlocksMined int `json:"staleblocksmined"` } ) // minerHandler handles the API call that queries the miner's status.
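// // An illustrative response body (values are made up; field names follow the // MinerGET tags above): // //	{ //		"blocksmined": 42, //		"cpuhashrate": 1400000, //		"cpumining": true, //		"staleblocksmined": 3 //	}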
func (api *API) minerHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { blocksMined, staleMined := api.miner.BlocksMined() mg := MinerGET{ BlocksMined: blocksMined, CPUHashrate: api.miner.CPUHashrate(), CPUMining: api.miner.CPUMining(), StaleBlocksMined: staleMined, } WriteJSON(w, mg) } // minerStartHandler handles the API call that starts the miner. func (api *API) minerStartHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { api.miner.StartCPUMining() WriteSuccess(w) } // minerStopHandler handles the API call to stop the miner. func (api *API) minerStopHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { api.miner.StopCPUMining() WriteSuccess(w) } // minerHeaderHandlerGET handles the API call that retrieves a block header // for work. func (api *API) minerHeaderHandlerGET(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { bhfw, target, err := api.miner.HeaderForWork() if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } w.Write(encoding.MarshalAll(target, bhfw)) } // minerHeaderHandlerPOST handles the API call to submit a block header to the // miner. func (api *API) minerHeaderHandlerPOST(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { var bh types.BlockHeader err := encoding.NewDecoder(req.Body).Decode(&bh) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } err = api.miner.SubmitHeader(bh) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } WriteSuccess(w) } Sia-1.3.0/api/miner_test.go000066400000000000000000000075551313565667000155240ustar00rootroot00000000000000package api import ( "io/ioutil" "testing" "time" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/types" ) // TestMinerGET checks the GET call to the /miner endpoint. func TestMinerGET(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Get the api returned fields of the miner. var mg MinerGET err = st.getAPI("/miner", &mg) if err != nil { t.Fatal(err) } // Verify the correctness of the results. blocksMined, staleBlocksMined := st.server.api.miner.BlocksMined() if mg.BlocksMined != blocksMined { t.Error("blocks mined is incorrect") } if mg.StaleBlocksMined != staleBlocksMined { t.Error("stale blocks mined is incorrect") } if mg.CPUHashrate != st.server.api.miner.CPUHashrate() { t.Error("mismatched cpu hashrate") } if mg.CPUMining != st.server.api.miner.CPUMining() { t.Error("mismatched cpu miner status") } } // TestMinerStartStop checks that the miner start and miner stop api endpoints // toggle the cpu miner. func TestMinerStartStop(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Start the cpu miner, give time for the first hashrate readings to // appear. err = st.stdGetAPI("/miner/start") if err != nil { t.Fatal(err) } time.Sleep(100 * time.Millisecond) if !st.server.api.miner.CPUMining() { t.Error("cpu miner is reporting that it is not on") } // Check the numbers through the status api call. var mg MinerGET err = st.getAPI("/miner", &mg) if err != nil { t.Fatal(err) } if !mg.CPUMining { t.Error("cpu is not reporting through the api that it is mining") } // Stop the cpu miner and wait for the stop call to go through.
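// The fixed sleep below is a timing heuristic; a less timing-sensitive // alternative would poll with this package's retry helper, in sketch form: // //	err = retry(20, 50*time.Millisecond, func() error { //		if st.server.api.miner.CPUMining() { //			return errors.New("miner still running") //		} //		return nil //	})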
err = st.stdGetAPI("/miner/stop") if err != nil { t.Fatal(err) } time.Sleep(100 * time.Millisecond) if st.server.api.miner.CPUMining() { t.Error("cpu miner is reporting that it is on after being stopped") } // Check the numbers through the status api call. err = st.getAPI("/miner", &mg) if err != nil { t.Fatal(err) } if mg.CPUMining { t.Error("cpu is reporting through the api that it is mining after being stopped") } } // TestMinerHeader checks that the header GET and POST calls are // useful tools for mining blocks. func TestMinerHeader(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() startingHeight := st.cs.Height() // Get a header that can be used for mining. resp, err := HttpGET("http://" + st.server.listener.Addr().String() + "/miner/header") if err != nil { t.Fatal(err) } defer resp.Body.Close() targetAndHeader, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatal(err) } // Twiddle the header bits until a block has been found. // // Note: this test treats the target as hardcoded, if the testing target is // changed, this test will also need to be changed. if types.RootTarget[0] != 128 { t.Fatal("test will fail because the testing constants have been unexpectedly changed") } var header [80]byte copy(header[:], targetAndHeader[32:]) headerHash := crypto.HashObject(header) for headerHash[0] >= types.RootTarget[0] { header[35]++ headerHash = crypto.HashObject(header) } // Submit the solved header through the api and check that the height of // the blockchain increases. resp, err = HttpPOST("http://"+st.server.listener.Addr().String()+"/miner/header", string(header[:])) if err != nil { t.Fatal(err) } defer resp.Body.Close() time.Sleep(500 * time.Millisecond) if st.cs.Height() != startingHeight+1 { t.Errorf("block height did not increase after trying to mine a block through the api, started at %v and ended at %v", startingHeight, st.cs.Height()) } } Sia-1.3.0/api/renter.go000066400000000000000000000464131313565667000146520ustar00rootroot00000000000000package api // TODO: When setting renter settings, leave empty values unchanged instead of // zeroing them out. import ( "fmt" "net/http" "path/filepath" "sort" "strings" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/renter" "github.com/NebulousLabs/Sia/types" "github.com/julienschmidt/httprouter" ) var ( // recommendedHosts is the number of hosts that the renter will form // contracts with if the value is not specified explicitly in the call to // SetSettings. recommendedHosts = build.Select(build.Var{ Standard: uint64(50), Dev: uint64(2), Testing: uint64(1), }).(uint64) // requiredHosts specifies the minimum number of hosts that must be set in // the renter settings for the renter settings to be valid. This minimum is // there to prevent users from shooting themselves in the foot. requiredHosts = build.Select(build.Var{ Standard: uint64(20), Dev: uint64(1), Testing: uint64(1), }).(uint64) // requiredParityPieces specifies the minimum number of parity pieces that // must be used when uploading a file. This minimum exists to prevent users // from shooting themselves in the foot. requiredParityPieces = build.Select(build.Var{ Standard: int(12), Dev: int(0), Testing: int(0), }).(int) // requiredRedundancy specifies the minimum redundancy that will be // accepted by the renter when uploading a file.
This minimum exists to // prevent users from shooting themselves in the foot. requiredRedundancy = build.Select(build.Var{ Standard: float64(2), Dev: float64(1), Testing: float64(1), }).(float64) // requiredRenewWindow establishes the minimum allowed renew window for the // renter settings. This minimum is here to prevent users from shooting // themselves in the foot. requiredRenewWindow = build.Select(build.Var{ Standard: types.BlockHeight(288), Dev: types.BlockHeight(1), Testing: types.BlockHeight(1), }).(types.BlockHeight) ) type ( // RenterGET contains various renter metrics. RenterGET struct { Settings modules.RenterSettings `json:"settings"` FinancialMetrics RenterFinancialMetrics `json:"financialmetrics"` CurrentPeriod types.BlockHeight `json:"currentperiod"` } // RenterFinancialMetrics contains metrics about how much the Renter has // spent on storage, uploads, and downloads. RenterFinancialMetrics struct { // Amount of money in the allowance spent on file contracts including // fees. ContractSpending types.Currency `json:"contractspending"` DownloadSpending types.Currency `json:"downloadspending"` StorageSpending types.Currency `json:"storagespending"` UploadSpending types.Currency `json:"uploadspending"` // Amount of money in the allowance that has not been spent. Unspent types.Currency `json:"unspent"` } // RenterContract represents a contract formed by the renter. RenterContract struct { // Amount of contract funds that have been spent on downloads. DownloadSpending types.Currency `json:"downloadspending"` // Block height that the file contract ends on. EndHeight types.BlockHeight `json:"endheight"` // Fees paid in order to form the file contract. Fees types.Currency `json:"fees"` // Public key of the host the contract was formed with. HostPublicKey types.SiaPublicKey `json:"hostpublickey"` // ID of the file contract. ID types.FileContractID `json:"id"` // A signed transaction containing the most recent contract revision. LastTransaction types.Transaction `json:"lasttransaction"` // Address of the host the file contract was formed with. NetAddress modules.NetAddress `json:"netaddress"` // Remaining funds left for the renter to spend on uploads & downloads. RenterFunds types.Currency `json:"renterfunds"` // Size of the file contract, which is typically equal to the number of // bytes that have been uploaded to the host. Size uint64 `json:"size"` // Block height that the file contract began on. StartHeight types.BlockHeight `json:"startheight"` // Amount of contract funds that have been spent on storage. StorageSpending types.Currency `json:"StorageSpending"` // Total cost to the wallet of forming the file contract. TotalCost types.Currency `json:"totalcost"` // Amount of contract funds that have been spent on uploads. UploadSpending types.Currency `json:"uploadspending"` } // RenterContracts contains the renter's contracts. RenterContracts struct { Contracts []RenterContract `json:"contracts"` } // RenterDownloadQueue contains the renter's download queue. RenterDownloadQueue struct { Downloads []DownloadInfo `json:"downloads"` } // RenterFiles lists the files known to the renter. RenterFiles struct { Files []modules.FileInfo `json:"files"` } // RenterLoad lists files that were loaded into the renter. RenterLoad struct { FilesAdded []string `json:"filesadded"` } // RenterPricesGET lists the data that is returned when a GET call is made // to /renter/prices. RenterPricesGET struct { modules.RenterPriceEstimation } // RenterShareASCII contains an ASCII-encoded .sia file.
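// // A hedged sketch of the round trip through the ASCII share handlers defined // later in this file (the exact route paths are an assumption, not confirmed // here): // //	GET  /renter/shareascii?siapaths=one,two -> {"asciisia": "<encoded .sia>"} //	POST /renter/loadascii  asciisia=<data>  -> {"filesadded": ["one", "two"]}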
RenterShareASCII struct { ASCIIsia string `json:"asciisia"` } // DownloadInfo contains all client-facing information of a file. DownloadInfo struct { SiaPath string `json:"siapath"` Destination string `json:"destination"` Filesize uint64 `json:"filesize"` Received uint64 `json:"received"` StartTime time.Time `json:"starttime"` Error string `json:"error"` } ) // renterHandlerGET handles the API call to /renter. func (api *API) renterHandlerGET(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { settings := api.renter.Settings() periodStart := api.renter.CurrentPeriod() // calculate financial metrics from contracts. We use the special // AllContracts method to include contracts that are offline. var fm RenterFinancialMetrics fm.Unspent = settings.Allowance.Funds contracts := api.renter.(interface { AllContracts() []modules.RenterContract }).AllContracts() for _, c := range contracts { if c.StartHeight < periodStart { continue } fm.ContractSpending = fm.ContractSpending.Add(c.TotalCost) fm.DownloadSpending = fm.DownloadSpending.Add(c.DownloadSpending) fm.UploadSpending = fm.UploadSpending.Add(c.UploadSpending) fm.StorageSpending = fm.StorageSpending.Add(c.StorageSpending) // total unspent is: // allowance - (cost to form contracts) + (money left in contracts) if fm.Unspent.Add(c.RenterFunds()).Cmp(c.TotalCost) > 0 { fm.Unspent = fm.Unspent.Add(c.RenterFunds()).Sub(c.TotalCost) } } WriteJSON(w, RenterGET{ Settings: settings, FinancialMetrics: fm, CurrentPeriod: periodStart, }) } // renterHandlerPOST handles the API call to set the Renter's settings. func (api *API) renterHandlerPOST(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { // Scan the allowance amount. funds, ok := scanAmount(req.FormValue("funds")) if !ok { WriteError(w, Error{"unable to parse funds"}, http.StatusBadRequest) return } // Scan the number of hosts to use. (optional parameter) var hosts uint64 if req.FormValue("hosts") != "" { _, err := fmt.Sscan(req.FormValue("hosts"), &hosts) if err != nil { WriteError(w, Error{"unable to parse hosts: " + err.Error()}, http.StatusBadRequest) return } if hosts != 0 && hosts < requiredHosts { WriteError(w, Error{fmt.Sprintf("insufficient number of hosts, need at least %v but have %v", requiredHosts, hosts)}, http.StatusBadRequest) return } } else { hosts = recommendedHosts } // Scan the period. var period types.BlockHeight _, err := fmt.Sscan(req.FormValue("period"), &period) if err != nil { WriteError(w, Error{"unable to parse period: " + err.Error()}, http.StatusBadRequest) return } // Scan the renew window. (optional parameter) var renewWindow types.BlockHeight if req.FormValue("renewwindow") != "" { _, err = fmt.Sscan(req.FormValue("renewwindow"), &renewWindow) if err != nil { WriteError(w, Error{"unable to parse renewwindow: " + err.Error()}, http.StatusBadRequest) return } if renewWindow != 0 && renewWindow < requiredRenewWindow { WriteError(w, Error{fmt.Sprintf("renew window is too small, must be at least %v blocks but have %v blocks", requiredRenewWindow, renewWindow)}, http.StatusBadRequest) return } } else { renewWindow = period / 2 } // Set the settings in the renter. err = api.renter.SetSettings(modules.RenterSettings{ Allowance: modules.Allowance{ Funds: funds, Hosts: hosts, Period: period, RenewWindow: renewWindow, }, }) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } WriteSuccess(w) } // renterContractsHandler handles the API call to request the Renter's contracts.
func (api *API) renterContractsHandler(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { contracts := []RenterContract{} for _, c := range api.renter.Contracts() { contracts = append(contracts, RenterContract{ DownloadSpending: c.DownloadSpending, EndHeight: c.EndHeight(), Fees: c.TxnFee.Add(c.SiafundFee).Add(c.ContractFee), HostPublicKey: c.HostPublicKey, ID: c.ID, LastTransaction: c.LastRevisionTxn, NetAddress: c.NetAddress, RenterFunds: c.RenterFunds(), Size: c.LastRevision.NewFileSize, StartHeight: c.StartHeight, StorageSpending: c.StorageSpending, TotalCost: c.TotalCost, UploadSpending: c.UploadSpending, }) } WriteJSON(w, RenterContracts{ Contracts: contracts, }) } // renterDownloadsHandler handles the API call to request the download queue. func (api *API) renterDownloadsHandler(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { var downloads []DownloadInfo for _, d := range api.renter.DownloadQueue() { downloads = append(downloads, DownloadInfo{ SiaPath: d.SiaPath, Destination: d.Destination.Destination(), Filesize: d.Filesize, StartTime: d.StartTime, Received: d.Received, Error: d.Error, }) } // sort the downloads by newest first sort.Slice(downloads, func(i, j int) bool { return downloads[i].StartTime.After(downloads[j].StartTime) }) WriteJSON(w, RenterDownloadQueue{ Downloads: downloads, }) } // renterLoadHandler handles the API call to load a '.sia' file. func (api *API) renterLoadHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { source := req.FormValue("source") if !filepath.IsAbs(source) { WriteError(w, Error{"source must be an absolute path"}, http.StatusBadRequest) return } files, err := api.renter.LoadSharedFiles(source) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } WriteJSON(w, RenterLoad{FilesAdded: files}) } // renterLoadAsciiHandler handles the API call to load a '.sia' file // in ASCII form. func (api *API) renterLoadAsciiHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { files, err := api.renter.LoadSharedFilesAscii(req.FormValue("asciisia")) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } WriteJSON(w, RenterLoad{FilesAdded: files}) } // renterRenameHandler handles the API call to rename a file entry in the // renter. func (api *API) renterRenameHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { err := api.renter.RenameFile(strings.TrimPrefix(ps.ByName("siapath"), "/"), req.FormValue("newsiapath")) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } WriteSuccess(w) } // renterFilesHandler handles the API call to list all of the files. func (api *API) renterFilesHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { WriteJSON(w, RenterFiles{ Files: api.renter.FileList(), }) } // renterPricesHandler reports the expected costs of various actions given the // renter settings and the set of available hosts. func (api *API) renterPricesHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { WriteJSON(w, RenterPricesGET{ RenterPriceEstimation: api.renter.PriceEstimation(), }) } // renterDeleteHandler handles the API call to delete a file entry from the // renter. 
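// // (Sketch of the wildcard handling used by this and the neighboring handlers: // httprouter catch-all parameters keep their leading slash, so a request to // /renter/delete/foo/bar arrives as ps.ByName("siapath") == "/foo/bar", and // the TrimPrefix below normalizes it to "foo/bar".)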
func (api *API) renterDeleteHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { err := api.renter.DeleteFile(strings.TrimPrefix(ps.ByName("siapath"), "/")) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } WriteSuccess(w) } // renterDownloadHandler handles the API call to download a file. func (api *API) renterDownloadHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { params, err := parseDownloadParameters(w, req, ps) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } if params.Async { // Create goroutine if `async` param set. // check for errors for 100ms to catch immediate validation errors (no file // with that path, invalid parameters, insufficient hosts, etc) errchan := make(chan error) go func() { errchan <- api.renter.Download(params) }() select { case err = <-errchan: if err != nil { WriteError(w, Error{"download failed: " + err.Error()}, http.StatusInternalServerError) return } case <-time.After(time.Millisecond * 100): } } else { err := api.renter.Download(params) if err != nil { WriteError(w, Error{"download failed: " + err.Error()}, http.StatusInternalServerError) return } } if params.Httpwriter == nil { // `httpresp=true` causes writes to w before this line is run, which // implicitly set a `200 OK` status on the response; calling WriteSuccess // in that case would trigger a 'multiple WriteHeader calls' error, so it // only runs when no http writer was attached. WriteSuccess(w) return } } // renterDownloadAsyncHandler handles the API call to download a file asynchronously. func (api *API) renterDownloadAsyncHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { req.ParseForm() req.Form.Set("async", "true") api.renterDownloadHandler(w, req, ps) } // parseDownloadParameters parses the download parameters passed to the // /renter/download endpoint. Validation of these parameters is done by the // renter. func parseDownloadParameters(w http.ResponseWriter, req *http.Request, ps httprouter.Params) (modules.RenterDownloadParameters, error) { destination := req.FormValue("destination") // The offset and length in bytes. offsetparam := req.FormValue("offset") lengthparam := req.FormValue("length") // Determines whether the response is written to response body. httprespparam := req.FormValue("httpresp") // Determines whether to return on completion of download or straight away. // If httprespparam is present, this parameter is ignored. asyncparam := req.FormValue("async") // Parse the offset and length parameters. var offset, length uint64 if len(offsetparam) > 0 { _, err := fmt.Sscan(offsetparam, &offset) if err != nil { return modules.RenterDownloadParameters{}, build.ExtendErr("could not decode the offset as uint64: ", err) } } if len(lengthparam) > 0 { _, err := fmt.Sscan(lengthparam, &length) if err != nil { return modules.RenterDownloadParameters{}, build.ExtendErr("could not decode the length as uint64: ", err) } } // Parse the httpresp parameter. httpresp, err := scanBool(httprespparam) if err != nil { return modules.RenterDownloadParameters{}, build.ExtendErr("httpresp parameter could not be parsed", err) } // Parse the async parameter. async, err := scanBool(asyncparam) if err != nil { return modules.RenterDownloadParameters{}, build.ExtendErr("async parameter could not be parsed", err) } siapath := strings.TrimPrefix(ps.ByName("siapath"), "/") // Sia file name.
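// (Illustrative mapping, assuming the route is mounted as // /renter/download/*siapath: // //	GET /renter/download/movies/up.mkv?destination=/tmp/up.mkv&offset=100&length=4096&async=true // // parses to Siapath "movies/up.mkv", Offset 100, Length 4096, Async true.)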
dp := modules.RenterDownloadParameters{ Destination: destination, Async: async, Length: length, Offset: offset, Siapath: siapath, } if httpresp { dp.Httpwriter = w } return dp, nil } // renterShareHandler handles the API call to create a '.sia' file that // shares a set of files. func (api *API) renterShareHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { destination := req.FormValue("destination") // Check that the destination path is absolute. if !filepath.IsAbs(destination) { WriteError(w, Error{"destination must be an absolute path"}, http.StatusBadRequest) return } err := api.renter.ShareFiles(strings.Split(req.FormValue("siapaths"), ","), destination) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } WriteSuccess(w) } // renterShareAsciiHandler handles the API call to return a '.sia' file // in ascii form. func (api *API) renterShareAsciiHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { ascii, err := api.renter.ShareFilesAscii(strings.Split(req.FormValue("siapaths"), ",")) if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } WriteJSON(w, RenterShareASCII{ ASCIIsia: ascii, }) } // renterUploadHandler handles the API call to upload a file. func (api *API) renterUploadHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { source := req.FormValue("source") if !filepath.IsAbs(source) { WriteError(w, Error{"source must be an absolute path"}, http.StatusBadRequest) return } // Check whether the erasure coding parameters have been supplied. var ec modules.ErasureCoder if req.FormValue("datapieces") != "" || req.FormValue("paritypieces") != "" { // Check that both values have been supplied. if req.FormValue("datapieces") == "" || req.FormValue("paritypieces") == "" { WriteError(w, Error{"must provide both the datapieces parameter and the paritypieces parameter if specifying erasure coding parameters"}, http.StatusBadRequest) return } // Parse the erasure coding parameters. var dataPieces, parityPieces int _, err := fmt.Sscan(req.FormValue("datapieces"), &dataPieces) if err != nil { WriteError(w, Error{"unable to read parameter 'datapieces': " + err.Error()}, http.StatusBadRequest) return } _, err = fmt.Sscan(req.FormValue("paritypieces"), &parityPieces) if err != nil { WriteError(w, Error{"unable to read parameter 'paritypieces': " + err.Error()}, http.StatusBadRequest) return } // Verify that sane values for parityPieces and redundancy are being // supplied. if parityPieces < requiredParityPieces { WriteError(w, Error{fmt.Sprintf("a minimum of %v parity pieces is required, but %v parity pieces requested", requiredParityPieces, parityPieces)}, http.StatusBadRequest) return } redundancy := float64(dataPieces+parityPieces) / float64(dataPieces) if redundancy < requiredRedundancy { WriteError(w, Error{fmt.Sprintf("a redundancy of %.2f is required, but redundancy of %.2f supplied", requiredRedundancy, redundancy)}, http.StatusBadRequest) return } // Create the erasure coder. ec, err = renter.NewRSCode(dataPieces, parityPieces) if err != nil { WriteError(w, Error{"unable to encode file using the provided parameters: " + err.Error()}, http.StatusBadRequest) return } } // Call the renter to upload the file.
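// (Worked example of the checks above: datapieces=10 with paritypieces=20 // gives redundancy (10+20)/10 = 3.00, clearing the Standard-build minimums of // 12 parity pieces and 2.00 redundancy, while datapieces=10 with // paritypieces=5 fails both checks.)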
err := api.renter.Upload(modules.FileUploadParams{ Source: source, SiaPath: strings.TrimPrefix(ps.ByName("siapath"), "/"), ErasureCode: ec, }) if err != nil { WriteError(w, Error{"upload failed: " + err.Error()}, http.StatusInternalServerError) return } WriteSuccess(w) } Sia-1.3.0/api/renter_test.go000066400000000000000000001527701313565667000157150ustar00rootroot00000000000000package api import ( "bytes" "errors" "fmt" "io" "io/ioutil" "net/url" "os" "path/filepath" "strconv" "strings" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/renter" "github.com/NebulousLabs/Sia/modules/renter/contractor" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) const ( testFunds = "10000000000000000000000000000" // 10k SC testPeriod = "5" ) // createRandFile creates a file on disk and fills it with random bytes. func createRandFile(path string, size int) error { return ioutil.WriteFile(path, fastrand.Bytes(size), 0600) } // setupTestDownload creates a server tester with an uploaded file of size // `size` and name `name`. func setupTestDownload(t *testing.T, size int, name string, waitOnAvailability bool) (*serverTester, string) { st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } // Announce the host and start accepting contracts. err = st.announceHost() if err != nil { t.Fatal(err) } err = st.acceptContracts() if err != nil { t.Fatal(err) } err = st.setHostStorage() if err != nil { t.Fatal(err) } // Set an allowance for the renter, allowing a contract to be formed. allowanceValues := url.Values{} testFunds := testFunds testPeriod := "10" allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Create a file. path := filepath.Join(build.SiaTestingDir, "api", t.Name(), name) err = createRandFile(path, size) if err != nil { t.Fatal(err) } // Upload to host. uploadValues := url.Values{} uploadValues.Set("source", path) uploadValues.Set("renew", "true") uploadValues.Set("datapieces", "1") uploadValues.Set("paritypieces", "1") err = st.stdPostAPI("/renter/upload/"+name, uploadValues) if err != nil { t.Fatal(err) } if waitOnAvailability { // wait for the file to become available err = retry(200, time.Second, func() error { var rf RenterFiles st.getAPI("/renter/files", &rf) if len(rf.Files) != 1 || !rf.Files[0].Available { return fmt.Errorf("the uploading is not succeeding for some reason: %v\n", rf.Files[0]) } return nil }) if err != nil { t.Fatal(err) } } return st, path } // runDownloadTest uploads a file and downloads it using the specified // parameters, verifying that the parameters are applied correctly and the file // is downloaded successfully. func runDownloadTest(t *testing.T, filesize, offset, length int64, useHttpResp bool, testName string) error { ulSiaPath := testName + ".dat" st, path := setupTestDownload(t, int(filesize), ulSiaPath, true) defer func() { st.server.panicClose() os.Remove(path) }() // Read the section to be downloaded from the original file. uf, err := os.Open(path) // Uploaded file. if err != nil { return err } var originalBytes bytes.Buffer _, err = uf.Seek(offset, 0) if err != nil { return err } _, err = io.CopyN(&originalBytes, uf, length) if err != nil { return err } // Download the original file from the passed offsets. 
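// (Aside on the expected bytes computed above: they are equivalent to reading // an io.SectionReader over the source file, e.g. // //	expected, _ := ioutil.ReadAll(io.NewSectionReader(uf, offset, length)) // // so what follows is a ranged-download-versus-source comparison.)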
fname := testName + "-download.dat" downpath := filepath.Join(st.dir, fname) defer os.Remove(downpath) dlURL := fmt.Sprintf("/renter/download/%s?offset=%d&length=%d", ulSiaPath, offset, length) var downbytes bytes.Buffer if useHttpResp { dlURL += "&httpresp=true" // Make request. resp, err := HttpGET("http://" + st.server.listener.Addr().String() + dlURL) if err != nil { return err } defer resp.Body.Close() _, err = io.Copy(&downbytes, resp.Body) if err != nil { return err } } else { dlURL += "&destination=" + downpath err := st.getAPI(dlURL, nil) if err != nil { return err } // wait for the download to complete err = retry(30, time.Second, func() error { var rdq RenterDownloadQueue err = st.getAPI("/renter/downloads", &rdq) if err != nil { return err } for _, download := range rdq.Downloads { if download.Received == download.Filesize && download.SiaPath == ulSiaPath { return nil } } return errors.New("file not downloaded") }) if err != nil { t.Fatal(err) } // open the downloaded file df, err := os.Open(downpath) if err != nil { return err } defer df.Close() _, err = io.Copy(&downbytes, df) if err != nil { return err } } // should have correct length if int64(downbytes.Len()) != length { return fmt.Errorf("downloaded file has incorrect size: %d, %d expected", downbytes.Len(), length) } // should be byte-for-byte equal to the original uploaded file if !bytes.Equal(originalBytes.Bytes(), downbytes.Bytes()) { return errors.New("downloaded content differs from original content") } return nil } // TestRenterDownloadError tests that the /renter/download route sets the // download's error field if it fails. func TestRenterDownloadError(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, _ := setupTestDownload(t, 1e4, "test.dat", false) defer st.server.Close() // don't wait for the upload to complete, try to download immediately to // intentionally cause a download error downpath := filepath.Join(st.dir, "down.dat") expectedErr := st.getAPI("/renter/download/test.dat?destination="+downpath, nil) if expectedErr == nil { t.Fatal("download unexpectedly succeeded") } // verify the file has the expected error var rdq RenterDownloadQueue err := st.getAPI("/renter/downloads", &rdq) if err != nil { t.Fatal(err) } for _, download := range rdq.Downloads { if download.SiaPath == "test.dat" && download.Received == download.Filesize && download.Error != expectedErr.Error() { t.Fatal("download had unexpected error: ", download.Error) } } } // TestValidDownloads tests valid and boundary parameter combinations. func TestValidDownloads(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() sectorSize := int64(modules.SectorSize) testParams := []struct { filesize, offset, length int64 useHttpResp bool testName string }{ // file-backed tests.
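// Each tuple below is (filesize, offset, length, useHttpResp, testName).
// The sizes are chosen relative to modules.SectorSize so that downloads
// start inside, span, or stop short of chunk boundaries in different ways.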
{sectorSize, 40, sectorSize - 40, false, "OffsetSingleChunk"}, {sectorSize * 2, 20, sectorSize*2 - 20, false, "OffsetTwoChunk"}, {int64(float64(sectorSize) * 2.4), 20, int64(float64(sectorSize)*2.4) - 20, false, "OffsetThreeChunk"}, {sectorSize, 0, sectorSize / 2, false, "ShortLengthSingleChunk"}, {sectorSize, sectorSize / 4, sectorSize / 2, false, "ShortLengthAndOffsetSingleChunk"}, {sectorSize * 2, 0, int64(float64(sectorSize) * 2 * 0.75), false, "ShortLengthTwoChunk"}, {int64(float64(sectorSize) * 2.7), 0, int64(2.2 * float64(sectorSize)), false, "ShortLengthThreeChunkInThirdChunk"}, {int64(float64(sectorSize) * 2.7), 0, int64(1.6 * float64(sectorSize)), false, "ShortLengthThreeChunkInSecondChunk"}, {sectorSize * 5, 0, int64(float64(sectorSize*5) * 0.75), false, "ShortLengthMultiChunk"}, {sectorSize * 2, 50, int64(float64(sectorSize*2) * 0.75), false, "ShortLengthAndOffsetTwoChunk"}, {sectorSize * 3, 50, int64(float64(sectorSize*3) * 0.5), false, "ShortLengthAndOffsetThreeChunkInSecondChunk"}, {sectorSize * 3, 50, int64(float64(sectorSize*3) * 0.75), false, "ShortLengthAndOffsetThreeChunkInThirdChunk"}, // http response tests. {sectorSize, 40, sectorSize - 40, true, "HttpRespOffsetSingleChunk"}, {sectorSize * 2, 40, sectorSize*2 - 40, true, "HttpRespOffsetTwoChunk"}, {sectorSize * 5, 40, sectorSize*5 - 40, true, "HttpRespOffsetManyChunks"}, {sectorSize, 40, 4 * sectorSize / 5, true, "RespOffsetAndLengthSingleChunk"}, {sectorSize * 2, 80, 3 * (sectorSize * 2) / 4, true, "RespOffsetAndLengthTwoChunk"}, {sectorSize * 5, 150, 3 * (sectorSize * 5) / 4, true, "HttpRespOffsetAndLengthManyChunks"}, {sectorSize * 5, 150, sectorSize * 5 / 4, true, "HttpRespOffsetAndLengthManyChunksSubsetOfChunks"}, } for i, params := range testParams { params := params t.Run(fmt.Sprintf("%v-%v", t.Name(), i), func(st *testing.T) { st.Parallel() err := runDownloadTest(st, params.filesize, params.offset, params.length, params.useHttpResp, params.testName) if err != nil { st.Fatal(err) } }) } } func runDownloadParamTest(t *testing.T, length, offset, filesize int) error { ulSiaPath := "test.dat" st, _ := setupTestDownload(t, filesize, ulSiaPath, true) defer st.server.Close() // Download the file using the given offset and length.
fname := "offsetsinglechunk.dat" downpath := filepath.Join(st.dir, fname) dlURL := fmt.Sprintf("/renter/download/%s?destination=%s", ulSiaPath, downpath) dlURL += fmt.Sprintf("&length=%d", length) dlURL += fmt.Sprintf("&offset=%d", offset) return st.getAPI(dlURL, nil) } func TestInvalidDownloadParameters(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } t.Parallel() testParams := []struct { length int offset int filesize int errorMsg string }{ {0, -10, 1e4, "/download not prompting error when passing negative offset."}, {0, 1e4, 1e4, "/download not prompting error when passing offset equal to filesize."}, {1e4 + 1, 0, 1e4, "/download not prompting error when passing length exceeding filesize."}, {1e4 + 11, 10, 1e4, "/download not prompting error when passing length exceeding filesize with non-zero offset."}, {-1, 0, 1e4, "/download not prompting error when passing negative length."}, } for _, params := range testParams { err := runDownloadParamTest(t, params.length, params.offset, params.filesize) if err == nil { t.Fatal(params.errorMsg) } } } func TestRenterDownloadAsyncAndHttpRespError(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() filesize := 1e4 ulSiaPath := "test.dat" st, _ := setupTestDownload(t, int(filesize), ulSiaPath, true) defer st.server.Close() // Download the original file from offset 40 and length 10. fname := "offsetsinglechunk.dat" dlURL := fmt.Sprintf("/renter/download/%s?destination=%s&async=true&httpresp=true", ulSiaPath, fname) err := st.getAPI(dlURL, nil) if err == nil { t.Fatalf("/download not prompting error when only passing both async and httpresp fields.") } } func TestRenterDownloadAsyncNonexistentFile(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.Close() downpath := filepath.Join(st.dir, "testfile") err = st.getAPI(fmt.Sprintf("/renter/downloadasync/doesntexist?destination=%v", downpath), nil) if err == nil || err.Error() != fmt.Sprintf("download failed: no file with that path: doesntexist") { t.Fatal("downloadasync did not return error on nonexistent file") } } func TestRenterDownloadAsyncAndNotDestinationError(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() filesize := 1e4 ulSiaPath := "test.dat" st, _ := setupTestDownload(t, int(filesize), ulSiaPath, true) defer st.server.Close() // Download the original file from offset 40 and length 10. dlURL := fmt.Sprintf("/renter/download/%s?async=true", ulSiaPath) err := st.getAPI(dlURL, nil) if err == nil { t.Fatal("/download not prompting error when async is specified but destination is empty.") } } func TestRenterDownloadHttpRespAndDestinationError(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() filesize := 1e4 ulSiaPath := "test.dat" st, _ := setupTestDownload(t, int(filesize), ulSiaPath, true) defer st.server.Close() // Download the original file from offset 40 and length 10. fname := "test.dat" dlURL := fmt.Sprintf("/renter/download/%s?destination=%shttpresp=true", ulSiaPath, fname) err := st.getAPI(dlURL, nil) if err == nil { t.Fatal("/download not prompting error when httpresp is specified and destination is non-empty.") } } // TestRenterAsyncDownloadError tests that the /renter/asyncdownload route sets // the download's error field if it fails. 
func TestRenterAsyncDownloadError(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, _ := setupTestDownload(t, 1e4, "test.dat", false) defer st.server.panicClose() // don't wait for the upload to complete, try to download immediately to // intentionally cause a download error downpath := filepath.Join(st.dir, "asyncdown.dat") st.getAPI("/renter/downloadasync/test.dat?destination="+downpath, nil) // verify the file has an error var rdq RenterDownloadQueue err := st.getAPI("/renter/downloads", &rdq) if err != nil { t.Fatal(err) } for _, download := range rdq.Downloads { if download.SiaPath == "test.dat" && download.Received == download.Filesize && download.Error == "" { t.Fatal("download had nil error") } } } func TestRenterAsyncSpecifyAsyncFalseError(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, _ := setupTestDownload(t, 1e4, "test.dat", false) defer st.server.Close() // don't wait for the upload to complete, try to download immediately to // intentionally cause a download error downpath := filepath.Join(st.dir, "asyncdown.dat") err := st.getAPI("/renter/downloadasync/test.dat?async=false&destination="+downpath, nil) if err == nil { t.Fatal("/downloadasync does not return error when passing `async=false`") } } // TestRenterAsyncDownload tests that the /renter/downloadasync route works // correctly. func TestRenterAsyncDownload(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, _ := setupTestDownload(t, 1e4, "test.dat", true) defer st.server.panicClose() // Download the file asynchronously. downpath := filepath.Join(st.dir, "asyncdown.dat") err := st.getAPI("/renter/downloadasync/test.dat?destination="+downpath, nil) if err != nil { t.Fatal(err) } // download should eventually complete var rdq RenterDownloadQueue success := false for start := time.Now(); time.Since(start) < 30*time.Second; time.Sleep(time.Millisecond * 10) { err = st.getAPI("/renter/downloads", &rdq) if err != nil { t.Fatal(err) } for _, download := range rdq.Downloads { if download.Received == download.Filesize && download.SiaPath == "test.dat" { success = true } } if success { break } } if !success { t.Fatal("/renter/downloadasync did not download our test file") } } // TestRenterPaths tests that the /renter routes handle path parameters // properly. func TestRenterPaths(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host. err = st.announceHost() if err != nil { t.Fatal(err) } // Create a file. path := filepath.Join(build.SiaTestingDir, "api", t.Name(), "test.dat") err = createRandFile(path, 1024) if err != nil { t.Fatal(err) } // Upload to host. uploadValues := url.Values{} uploadValues.Set("source", path) uploadValues.Set("renew", "true") err = st.stdPostAPI("/renter/upload/foo/bar/test", uploadValues) if err != nil { t.Fatal(err) } // File should be listed by the renter. var rf RenterFiles err = st.getAPI("/renter/files", &rf) if err != nil { t.Fatal(err) } if len(rf.Files) != 1 || rf.Files[0].SiaPath != "foo/bar/test" { t.Fatal("/renter/files did not return correct file:", rf) } } // TestRenterConflicts tests that the renter handles naming conflicts properly. func TestRenterConflicts(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host. 
err = st.announceHost() if err != nil { t.Fatal(err) } // Create a file. path := filepath.Join(build.SiaTestingDir, "api", t.Name(), "test.dat") err = createRandFile(path, 1024) if err != nil { t.Fatal(err) } // Upload to host, using a path designed to cause conflicts. The renter // should automatically create a folder called foo/bar.sia. Later, we'll // exploit this by uploading a file called foo/bar. uploadValues := url.Values{} uploadValues.Set("source", path) uploadValues.Set("renew", "true") err = st.stdPostAPI("/renter/upload/foo/bar.sia/test", uploadValues) if err != nil { t.Fatal(err) } // File should be listed by the renter. var rf RenterFiles err = st.getAPI("/renter/files", &rf) if err != nil { t.Fatal(err) } if len(rf.Files) != 1 || rf.Files[0].SiaPath != "foo/bar.sia/test" { t.Fatal("/renter/files did not return correct file:", rf) } // Upload using the same nickname. err = st.stdPostAPI("/renter/upload/foo/bar.sia/test", uploadValues) expectedErr := Error{"upload failed: " + renter.ErrPathOverload.Error()} if err != expectedErr { t.Fatalf("expected %v, got %v", expectedErr, err) } // Upload using nickname that conflicts with folder. err = st.stdPostAPI("/renter/upload/foo/bar", uploadValues) if err == nil { t.Fatal("expecting conflict error, got nil") } } // TestRenterHandlerContracts checks that contract formation between a host and // renter behaves as expected, and that contract spending is the right amount. func TestRenterHandlerContracts(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host and start accepting contracts. if err := st.announceHost(); err != nil { t.Fatal(err) } if err = st.acceptContracts(); err != nil { t.Fatal(err) } if err = st.setHostStorage(); err != nil { t.Fatal(err) } // The renter should not have any contracts yet. var contracts RenterContracts if err = st.getAPI("/renter/contracts", &contracts); err != nil { t.Fatal(err) } if len(contracts.Contracts) != 0 { t.Fatalf("expected renter to have 0 contracts; got %v", len(contracts.Contracts)) } // Set an allowance for the renter, allowing a contract to be formed. allowanceValues := url.Values{} allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) if err = st.stdPostAPI("/renter", allowanceValues); err != nil { t.Fatal(err) } // Block until the allowance has finished forming contracts. err = build.Retry(50, time.Millisecond*250, func() error { var rc RenterContracts err = st.getAPI("/renter/contracts", &rc) if err != nil { return errors.New("couldn't get renter stats") } if len(rc.Contracts) != 1 { return errors.New("no contracts") } return nil }) if err != nil { t.Fatal("allowance setting failed") } // The renter should now have 1 contract. if err = st.getAPI("/renter/contracts", &contracts); err != nil { t.Fatal(err) } if len(contracts.Contracts) != 1 { t.Fatalf("expected renter to have 1 contract; got %v", len(contracts.Contracts)) } // Check the renter's contract spending.
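// Restated, the check below asserts that, in this test's accounting,
//
//	ContractSpending == Allowance.Funds - Unspent + sum(contract.RenterFunds)
//
// i.e. spending is inferred from the allowance and from the funds still
// held in each contract, rather than read from a separate counter.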
var get RenterGET if err = st.getAPI("/renter", &get); err != nil { t.Fatal(err) } expectedContractSpending := get.Settings.Allowance.Funds.Sub(get.FinancialMetrics.Unspent) for _, contract := range contracts.Contracts { expectedContractSpending = expectedContractSpending.Add(contract.RenterFunds) } if got := get.FinancialMetrics.ContractSpending; got.Cmp(expectedContractSpending) != 0 { t.Fatalf("expected contract spending to be %v; got %v", expectedContractSpending, got) } } // TestRenterHandlerGetAndPost checks that valid /renter calls successfully set // allowance values, while /renter calls with invalid allowance values are // correctly handled. func TestRenterHandlerGetAndPost(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host and start accepting contracts. if err := st.announceHost(); err != nil { t.Fatal(err) } if err = st.acceptContracts(); err != nil { t.Fatal(err) } if err = st.setHostStorage(); err != nil { t.Fatal(err) } // Set an allowance for the renter, allowing a contract to be formed. allowanceValues := url.Values{} allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) if err = st.stdPostAPI("/renter", allowanceValues); err != nil { t.Fatal(err) } // Check that a call to /renter returns the expected values. var get RenterGET if err = st.getAPI("/renter", &get); err != nil { t.Fatal(err) } // Check the renter's funds. expectedFunds, ok := scanAmount(testFunds) if !ok { t.Fatal("scanAmount failed") } if got := get.Settings.Allowance.Funds; got.Cmp(expectedFunds) != 0 { t.Fatalf("expected funds to be %v; got %v", expectedFunds, got) } // Check the renter's period. intPeriod, err := strconv.Atoi(testPeriod) if err != nil { t.Fatal(err) } expectedPeriod := types.BlockHeight(intPeriod) if got := get.Settings.Allowance.Period; got != expectedPeriod { t.Fatalf("expected period to be %v; got %v", expectedPeriod, got) } // Check the renter's renew window. expectedRenewWindow := expectedPeriod / 2 if got := get.Settings.Allowance.RenewWindow; got != expectedRenewWindow { t.Fatalf("expected renew window to be %v; got %v", expectedRenewWindow, got) } // Try an empty funds string. allowanceValues = url.Values{} allowanceValues.Set("funds", "") allowanceValues.Set("period", testPeriod) err = st.stdPostAPI("/renter", allowanceValues) if err == nil || err.Error() != "unable to parse funds" { t.Errorf("expected error to be 'unable to parse funds'; got %v", err) } // Try an invalid funds string. Can't test a negative value since // ErrNegativeCurrency triggers a build.Critical, which calls a panic in // debug mode. allowanceValues.Set("funds", "0") err = st.stdPostAPI("/renter", allowanceValues) if err == nil || err.Error() != contractor.ErrInsufficientAllowance.Error() { t.Errorf("expected error to be %v; got %v", contractor.ErrInsufficientAllowance, err) } // Try an empty period string. allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", "") err = st.stdPostAPI("/renter", allowanceValues) if err == nil || !strings.HasPrefix(err.Error(), "unable to parse period: ") { t.Errorf("expected error to begin with 'unable to parse period: '; got %v", err) } // Try an invalid period string.
allowanceValues.Set("period", "-1") err = st.stdPostAPI("/renter", allowanceValues) if err == nil || !strings.Contains(err.Error(), "unable to parse period") { t.Errorf("expected error to begin with 'unable to parse period'; got %v", err) } // Try a period that will lead to a length-zero RenewWindow. allowanceValues.Set("period", "1") err = st.stdPostAPI("/renter", allowanceValues) if err == nil || err.Error() != contractor.ErrAllowanceZeroWindow.Error() { t.Errorf("expected error to be %v, got %v", contractor.ErrAllowanceZeroWindow, err) } } // TestRenterLoadNonexistent checks that attempting to upload or download a // nonexistent file triggers the appropriate error. func TestRenterLoadNonexistent(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Anounce the host and start accepting contracts. if err := st.announceHost(); err != nil { t.Fatal(err) } if err = st.acceptContracts(); err != nil { t.Fatal(err) } if err = st.setHostStorage(); err != nil { t.Fatal(err) } // Set an allowance for the renter, allowing a contract to be formed. allowanceValues := url.Values{} allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) if err = st.stdPostAPI("/renter", allowanceValues); err != nil { t.Fatal(err) } // Try uploading a nonexistent file. fakepath := filepath.Join(st.dir, "dne.dat") uploadValues := url.Values{} uploadValues.Set("source", fakepath) err = st.stdPostAPI("/renter/upload/dne", uploadValues) if err == nil { t.Errorf("expected error when uploading nonexistent file") } // Try downloading a nonexistent file. downpath := filepath.Join(st.dir, "dnedown.dat") err = st.stdGetAPI("/renter/download/dne?destination=" + downpath) hasPrefix := strings.HasPrefix(err.Error(), "download failed: no file with that path") if err == nil || !hasPrefix { t.Errorf("expected error to be 'download failed: no file with that path'; got %v instead", err) } // The renter's downloads queue should be empty. var queue RenterDownloadQueue if err = st.getAPI("/renter/downloads", &queue); err != nil { t.Fatal(err) } if len(queue.Downloads) != 0 { t.Fatalf("expected renter to have 0 downloads in the queue; got %v", len(queue.Downloads)) } } // TestRenterHandlerRename checks that valid /renter/rename calls are // successful, and that invalid calls fail with the appropriate error. func TestRenterHandlerRename(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Anounce the host and start accepting contracts. if err := st.announceHost(); err != nil { t.Fatal(err) } if err = st.acceptContracts(); err != nil { t.Fatal(err) } if err = st.setHostStorage(); err != nil { t.Fatal(err) } // Try renaming a nonexistent file. renameValues := url.Values{} renameValues.Set("newsiapath", "newdne") err = st.stdPostAPI("/renter/rename/dne", renameValues) if err == nil || err.Error() != renter.ErrUnknownPath.Error() { t.Errorf("expected error to be %v; got %v", renter.ErrUnknownPath, err) } // Set an allowance for the renter, allowing a contract to be formed. allowanceValues := url.Values{} allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) if err = st.stdPostAPI("/renter", allowanceValues); err != nil { t.Fatal(err) } // Create a file. 
path1 := filepath.Join(st.dir, "test1.dat") if err = createRandFile(path1, 512); err != nil { t.Fatal(err) } // Upload to host. uploadValues := url.Values{} uploadValues.Set("source", path1) if err = st.stdPostAPI("/renter/upload/test1", uploadValues); err != nil { t.Fatal(err) } // Try renaming to an empty string. renameValues.Set("newsiapath", "") err = st.stdPostAPI("/renter/rename/test1", renameValues) if err == nil || err.Error() != renter.ErrEmptyFilename.Error() { t.Fatalf("expected error to be %v; got %v", renter.ErrEmptyFilename, err) } // Rename the file. renameValues.Set("newsiapath", "newtest1") if err = st.stdPostAPI("/renter/rename/test1", renameValues); err != nil { t.Fatal(err) } // Should be able to continue uploading and downloading using the new name. var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(100 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 { t.Fatal("upload is not succeeding:", rf.Files[0]) } err = st.stdGetAPI("/renter/download/newtest1?destination=" + filepath.Join(st.dir, "testdown2.dat")) if err != nil { t.Fatal(err) } // Create and upload another file. path2 := filepath.Join(st.dir, "test2.dat") if err = createRandFile(path2, 512); err != nil { t.Fatal(err) } uploadValues.Set("source", path2) if err = st.stdPostAPI("/renter/upload/test2", uploadValues); err != nil { t.Fatal(err) } // Try renaming to a name that's already taken. renameValues.Set("newsiapath", "newtest1") err = st.stdPostAPI("/renter/rename/test2", renameValues) if err == nil || err.Error() != renter.ErrPathOverload.Error() { t.Errorf("expected error to be %v; got %v", renter.ErrPathOverload, err) } } // TestRenterHandlerDelete checks that deleting a valid file from the renter // goes as planned and that attempting to delete a nonexistent file fails with // the appropriate error. func TestRenterHandlerDelete(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host and start accepting contracts. if err := st.announceHost(); err != nil { t.Fatal(err) } if err = st.acceptContracts(); err != nil { t.Fatal(err) } if err = st.setHostStorage(); err != nil { t.Fatal(err) } // Set an allowance for the renter, allowing a contract to be formed. allowanceValues := url.Values{} allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) if err = st.stdPostAPI("/renter", allowanceValues); err != nil { t.Fatal(err) } // Create a file. path := filepath.Join(st.dir, "test.dat") if err = createRandFile(path, 1024); err != nil { t.Fatal(err) } // Upload to host. uploadValues := url.Values{} uploadValues.Set("source", path) if err = st.stdPostAPI("/renter/upload/test", uploadValues); err != nil { t.Fatal(err) } // Delete the file. if err = st.stdPostAPI("/renter/delete/test", url.Values{}); err != nil { t.Fatal(err) } // The renter's list of files should now be empty. var files RenterFiles if err = st.getAPI("/renter/files", &files); err != nil { t.Fatal(err) } if len(files.Files) != 0 { t.Fatalf("renter's list of files should be empty; got %v instead", files) } // Try deleting a nonexistent file.
err = st.stdPostAPI("/renter/delete/dne", url.Values{}) if err == nil || err.Error() != renter.ErrUnknownPath.Error() { t.Errorf("expected error to be %v, got %v", renter.ErrUnknownPath, err) } } // Tests that the /renter/upload call checks for relative paths. func TestRenterRelativePathErrorUpload(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Anounce the host and start accepting contracts. if err := st.announceHost(); err != nil { t.Fatal(err) } if err = st.acceptContracts(); err != nil { t.Fatal(err) } if err = st.setHostStorage(); err != nil { t.Fatal(err) } // Set an allowance for the renter, allowing a contract to be formed. allowanceValues := url.Values{} allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) if err = st.stdPostAPI("/renter", allowanceValues); err != nil { t.Fatal(err) } renterUploadAbsoluteError := "source must be an absolute path" // Create a file. path := filepath.Join(st.dir, "test.dat") if err = createRandFile(path, 1024); err != nil { t.Fatal(err) } // This should fail. uploadValues := url.Values{} uploadValues.Set("source", "test.dat") if err = st.stdPostAPI("/renter/upload/test", uploadValues); err.Error() != renterUploadAbsoluteError { t.Fatal(err) } // As should this. uploadValues = url.Values{} uploadValues.Set("source", "../test.dat") if err = st.stdPostAPI("/renter/upload/test", uploadValues); err.Error() != renterUploadAbsoluteError { t.Fatal(err) } // This should succeed. uploadValues = url.Values{} uploadValues.Set("source", path) if err = st.stdPostAPI("/renter/upload/test", uploadValues); err != nil { t.Fatal(err) } } // Tests that the /renter/download call checks for relative paths. func TestRenterRelativePathErrorDownload(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Anounce the host and start accepting contracts. if err := st.announceHost(); err != nil { t.Fatal(err) } if err = st.acceptContracts(); err != nil { t.Fatal(err) } if err = st.setHostStorage(); err != nil { t.Fatal(err) } // Set an allowance for the renter, allowing a contract to be formed. allowanceValues := url.Values{} allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) if err = st.stdPostAPI("/renter", allowanceValues); err != nil { t.Fatal(err) } renterDownloadAbsoluteError := "download failed: destination must be an absolute path" // Create a file, and upload it. path := filepath.Join(st.dir, "test.dat") if err = createRandFile(path, 1024); err != nil { t.Fatal(err) } uploadValues := url.Values{} uploadValues.Set("source", path) if err = st.stdPostAPI("/renter/upload/test", uploadValues); err != nil { t.Fatal(err) } var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(200 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 { t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0]) } // Use a relative destination, which should fail. downloadPath := "test1.dat" if err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath); err.Error() != renterDownloadAbsoluteError { t.Fatal(err) } // Relative destination stepping backwards should also fail. 
downloadPath = "../test1.dat" if err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath); err == nil || err.Error() != renterDownloadAbsoluteError { t.Fatal(err) } // Long relative destination should also fail (just missing leading slash). downloadPath = filepath.Join(st.dir[1:], "test1.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath) if err == nil { t.Fatal("expecting an error") } // Full destination should succeed. downloadPath = filepath.Join(st.dir, "test1.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath) if err != nil { t.Fatal(err) } } // TestRenterPricesHandler checks that the prices command returns reasonable // values given the settings of the hosts. func TestRenterPricesHandler(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host and then get the calculated prices for when there is a // single host. var rpeSingle modules.RenterPriceEstimation if err = st.announceHost(); err != nil { t.Fatal(err) } if err = st.getAPI("/renter/prices", &rpeSingle); err != nil { t.Fatal(err) } // Create several more hosts all using the default settings. stHost1, err := blankServerTester(t.Name() + " - Host 1") if err != nil { t.Fatal(err) } defer stHost1.panicClose() stHost2, err := blankServerTester(t.Name() + " - Host 2") if err != nil { t.Fatal(err) } defer stHost2.panicClose() // Connect all the nodes and announce all of the hosts. sts := []*serverTester{st, stHost1, stHost2} err = fullyConnectNodes(sts) if err != nil { t.Fatal(err) } err = fundAllNodes(sts) if err != nil { t.Fatal(err) } err = announceAllHosts(sts) if err != nil { t.Fatal(err) } // Grab the price estimates for when there are a bunch of hosts with the // same stats. var rpeMulti modules.RenterPriceEstimation if err = st.getAPI("/renter/prices", &rpeMulti); err != nil { t.Fatal(err) } // Verify that the aggregate is the same. if !rpeMulti.DownloadTerabyte.Equals(rpeSingle.DownloadTerabyte) { t.Log(rpeMulti.DownloadTerabyte) t.Log(rpeSingle.DownloadTerabyte) t.Error("price changed from single to multi") } if !rpeMulti.FormContracts.Equals(rpeSingle.FormContracts) { t.Error("price changed from single to multi") } if !rpeMulti.StorageTerabyteMonth.Equals(rpeSingle.StorageTerabyteMonth) { t.Error("price changed from single to multi") } if !rpeMulti.UploadTerabyte.Equals(rpeSingle.UploadTerabyte) { t.Error("price changed from single to multi") } } // TestRenterPricesHandlerCheap checks that the prices command returns // reasonable values given the settings of the hosts. func TestRenterPricesHandlerCheap(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host and then get the calculated prices for when there is a // single host. var rpeSingle modules.RenterPriceEstimation if err = st.announceHost(); err != nil { t.Fatal(err) } if err = st.getAPI("/renter/prices", &rpeSingle); err != nil { t.Fatal(err) } // Create several more hosts all using the default settings.
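// The price-estimation tests that follow share one pattern: record a
// single-host estimate (rpeSingle), add hosts with deliberately skewed
// prices, then compare the multi-host estimate (rpeMulti) against that
// baseline using Equals or Cmp.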
stHost1, err := blankServerTester(t.Name() + " - Host 1") if err != nil { t.Fatal(err) } defer stHost1.panicClose() stHost2, err := blankServerTester(t.Name() + " - Host 2") if err != nil { t.Fatal(err) } defer stHost2.panicClose() var hg HostGET err = st.getAPI("/host", &hg) if err != nil { t.Fatal(err) } err = stHost1.getAPI("/host", &hg) if err != nil { t.Fatal(err) } err = stHost2.getAPI("/host", &hg) if err != nil { t.Fatal(err) } // Set host 2 to be cheaper than the rest by a substantial amount. This // should result in a reduction for the price estimation. vals := url.Values{} vals.Set("mincontractprice", "1") vals.Set("mindownloadbandwidthprice", "1") vals.Set("minstorageprice", "1") vals.Set("minuploadbandwidthprice", "1") err = stHost2.stdPostAPI("/host", vals) if err != nil { t.Fatal(err) } // Connect all the nodes and announce all of the hosts. sts := []*serverTester{st, stHost1, stHost2} err = fullyConnectNodes(sts) if err != nil { t.Fatal(err) } err = fundAllNodes(sts) if err != nil { t.Fatal(err) } err = announceAllHosts(sts) if err != nil { t.Fatal(err) } // Grab the price estimates now that there are multiple hosts. var rpeMulti modules.RenterPriceEstimation if err = st.announceHost(); err != nil { t.Fatal(err) } if err = st.getAPI("/renter/prices", &rpeMulti); err != nil { t.Fatal(err) } // Verify that the estimate dropped. if !(rpeMulti.DownloadTerabyte.Cmp(rpeSingle.DownloadTerabyte) < 0) { t.Log(rpeMulti.DownloadTerabyte) t.Log(rpeSingle.DownloadTerabyte) t.Error("price did not drop from single to multi") } if !(rpeMulti.FormContracts.Cmp(rpeSingle.FormContracts) < 0) { t.Error("price did not drop from single to multi") } if !(rpeMulti.StorageTerabyteMonth.Cmp(rpeSingle.StorageTerabyteMonth) < 0) { t.Error("price did not drop from single to multi") } if !(rpeMulti.UploadTerabyte.Cmp(rpeSingle.UploadTerabyte) < 0) { t.Error("price did not drop from single to multi") } } // TestRenterPricesHandlerIgnorePricey checks that the prices command returns // reasonable values given the settings of the hosts. func TestRenterPricesHandlerIgnorePricey(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host and then get the calculated prices for when there is a // single host. var rpeSingle modules.RenterPriceEstimation if err = st.announceHost(); err != nil { t.Fatal(err) } if err = st.getAPI("/renter/prices", &rpeSingle); err != nil { t.Fatal(err) } // Create several more hosts all using the default settings. stHost1, err := blankServerTester(t.Name() + " - Host 1") if err != nil { t.Fatal(err) } defer stHost1.panicClose() stHost2, err := blankServerTester(t.Name() + " - Host 2") if err != nil { t.Fatal(err) } defer stHost2.panicClose() stHost3, err := blankServerTester(t.Name() + " - Host 3") if err != nil { t.Fatal(err) } defer stHost3.panicClose() stHost4, err := blankServerTester(t.Name() + " - Host 4") if err != nil { t.Fatal(err) } defer stHost4.panicClose() stHost5, err := blankServerTester(t.Name() + " - Host 5") if err != nil { t.Fatal(err) } defer stHost5.panicClose() // Set host 5 to be more expensive than the rest by a substantial amount. // Because only a single host is that expensive, it should be ignored and // the price estimation should not move.
vals := url.Values{} vals.Set("mindownloadbandwidthprice", "100000000000000000000") vals.Set("mincontractprice", "1000000000000000000000000000") vals.Set("minstorageprice", "100000000000000000000") vals.Set("minuploadbandwidthprice", "100000000000000000000") err = stHost5.stdPostAPI("/host", vals) if err != nil { t.Fatal(err) } // Connect all the nodes and announce all of the hosts. sts := []*serverTester{st, stHost1, stHost2, stHost3, stHost4, stHost5} err = fullyConnectNodes(sts) if err != nil { t.Fatal(err) } err = fundAllNodes(sts) if err != nil { t.Fatal(err) } err = announceAllHosts(sts) if err != nil { t.Fatal(err) } // Grab the price estimates now that there are multiple hosts. var rpeMulti modules.RenterPriceEstimation if err = st.announceHost(); err != nil { t.Fatal(err) } if err = st.getAPI("/renter/prices", &rpeMulti); err != nil { t.Fatal(err) } // Verify that the aggregate is the same - price should not have moved // because the expensive host will be ignored as there is only one. if !rpeMulti.DownloadTerabyte.Equals(rpeSingle.DownloadTerabyte) { t.Log(rpeMulti.DownloadTerabyte) t.Log(rpeSingle.DownloadTerabyte) t.Error("price changed from single to multi") } if !rpeMulti.FormContracts.Equals(rpeSingle.FormContracts) { t.Error("price changed from single to multi") } if !rpeMulti.StorageTerabyteMonth.Equals(rpeSingle.StorageTerabyteMonth) { t.Error("price changed from single to multi") } if !rpeMulti.UploadTerabyte.Equals(rpeSingle.UploadTerabyte) { t.Error("price changed from single to multi") } } // TestRenterPricesHandlerPricey checks that the prices command returns // reasonable values given the settings of the hosts. func TestRenterPricesHandlerPricey(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host and then get the calculated prices for when there is a // single host. var rpeSingle modules.RenterPriceEstimation if err = st.announceHost(); err != nil { t.Fatal(err) } if err = st.getAPI("/renter/prices", &rpeSingle); err != nil { t.Fatal(err) } // Create several more hosts all using the default settings. stHost1, err := blankServerTester(t.Name() + " - Host 1") if err != nil { t.Fatal(err) } stHost2, err := blankServerTester(t.Name() + " - Host 2") if err != nil { t.Fatal(err) } var hg HostGET err = st.getAPI("/host", &hg) if err != nil { t.Fatal(err) } err = stHost1.getAPI("/host", &hg) if err != nil { t.Fatal(err) } err = stHost2.getAPI("/host", &hg) if err != nil { t.Fatal(err) } // Set host 2 to be more expensive than the rest by a substantial amount. // This should result in an increase for the price estimation. vals := url.Values{} vals.Set("mindownloadbandwidthprice", "100000000000000000000") vals.Set("mincontractprice", "1000000000000000000000000000") vals.Set("minstorageprice", "100000000000000000000") vals.Set("minuploadbandwidthprice", "100000000000000000000") err = stHost2.stdPostAPI("/host", vals) if err != nil { t.Fatal(err) } // Connect all the nodes and announce all of the hosts. sts := []*serverTester{st, stHost1, stHost2} err = fullyConnectNodes(sts) if err != nil { t.Fatal(err) } err = fundAllNodes(sts) if err != nil { t.Fatal(err) } err = announceAllHosts(sts) if err != nil { t.Fatal(err) } // Grab the price estimates now that there are multiple hosts.
var rpeMulti modules.RenterPriceEstimation if err = st.announceHost(); err != nil { t.Fatal(err) } if err = st.getAPI("/renter/prices", &rpeMulti); err != nil { t.Fatal(err) } // Verify that the estimate increased. if !(rpeMulti.DownloadTerabyte.Cmp(rpeSingle.DownloadTerabyte) > 0) { t.Error("price did not increase from single to multi") } if !(rpeMulti.FormContracts.Cmp(rpeSingle.FormContracts) > 0) { t.Log(rpeMulti.FormContracts) t.Log(rpeSingle.FormContracts) t.Error("price did not increase from single to multi") } if !(rpeMulti.StorageTerabyteMonth.Cmp(rpeSingle.StorageTerabyteMonth) > 0) { t.Error("price did not increase from single to multi") } if !(rpeMulti.UploadTerabyte.Cmp(rpeSingle.UploadTerabyte) > 0) { t.Error("price did not increase from single to multi") } } // TestContractorHostRemoval checks that the contractor properly migrates away // from low quality hosts when there are higher quality hosts available. func TestContractorHostRemoval(t *testing.T) { // Create a renter and 2 hosts. Connect to the hosts and start uploading. if testing.Short() { t.SkipNow() } st, err := createServerTester(t.Name() + "renter") if err != nil { t.Fatal(err) } defer st.server.panicClose() stH1, err := blankServerTester(t.Name() + " - Host 1") if err != nil { t.Fatal(err) } defer stH1.server.Close() testGroup := []*serverTester{st, stH1} // Connect the testers to each other so that they are all on the same // blockchain. err = fullyConnectNodes(testGroup) if err != nil { t.Fatal(err) } // Make sure that every wallet has money in it. err = fundAllNodes(testGroup) if err != nil { t.Fatal(err) } // Add storage to every host. err = addStorageToAllHosts(testGroup) if err != nil { t.Fatal(err) } // Raise the prices significantly for the two hosts. raisedPrice := url.Values{} raisedPrice.Set("mincontractprice", "5000000000000000000000000000") // 5 KS raisedPrice.Set("period", testPeriod) err = st.stdPostAPI("/host", raisedPrice) if err != nil { t.Fatal(err) } err = stH1.stdPostAPI("/host", raisedPrice) if err != nil { t.Fatal(err) } // Announce the hosts. err = announceAllHosts(testGroup) if err != nil { t.Fatal(err) } // Set an allowance with two hosts. allowanceValues := url.Values{} allowanceValues.Set("funds", "500000000000000000000000000000") // 500k SC allowanceValues.Set("hosts", "2") allowanceValues.Set("period", "10") err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Create a file to upload.
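// The 100-byte file below fits comfortably inside a single sector, which
// the test relies on later when it asserts that every contract holds
// exactly modules.SectorSize of data.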
filesize := 100 path := filepath.Join(st.dir, "test.dat") err = createRandFile(path, filesize) if err != nil { t.Fatal(err) } origBytes, err := ioutil.ReadFile(path) if err != nil { t.Fatal(err) } // upload the file uploadValues := url.Values{} uploadValues.Set("source", path) uploadValues.Set("datapieces", "1") uploadValues.Set("paritypieces", "1") err = st.stdPostAPI("/renter/upload/test", uploadValues) if err != nil { t.Fatal(err) } // redundancy should reach 2 var rf RenterFiles err = retry(120, 250*time.Millisecond, func() error { st.getAPI("/renter/files", &rf) if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 2 { return nil } return errors.New("file not uploaded") }) if err != nil { t.Fatal(err) } // verify we can download downloadPath := filepath.Join(st.dir, "test-downloaded-verify.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath) if err != nil { t.Fatal(err) } downloadBytes, err := ioutil.ReadFile(downloadPath) if err != nil { t.Fatal(err) } if !bytes.Equal(downloadBytes, origBytes) { t.Fatal("downloaded file and uploaded file do not match") } // Get the values of the first and second contract. var rc RenterContracts err = st.getAPI("/renter/contracts", &rc) if err != nil { t.Fatal(err) } if len(rc.Contracts) != 2 { t.Fatal("wrong contract count") } rc1Host := rc.Contracts[0].HostPublicKey.String() rc2Host := rc.Contracts[1].HostPublicKey.String() // Add 3 new hosts that will be competing with the expensive hosts. stH2, err := blankServerTester(t.Name() + " - Host 2") if err != nil { t.Fatal(err) } defer stH2.server.Close() stH3, err := blankServerTester(t.Name() + " - Host 3") if err != nil { t.Fatal(err) } defer stH3.server.Close() stH4, err := blankServerTester(t.Name() + " - Host 4") if err != nil { t.Fatal(err) } defer stH4.server.Close() testGroup = []*serverTester{st, stH1, stH2, stH3, stH4} // Connect the testers to each other so that they are all on the same // blockchain. err = fullyConnectNodes(testGroup) if err != nil { t.Fatal(err) } // Make sure that every wallet has money in it. err = fundAllNodes(testGroup) if err != nil { t.Fatal(err) } // Add storage to every host. err = addStorageToAllHosts([]*serverTester{stH2, stH3, stH4}) if err != nil { t.Fatal(err) } // Announce the hosts. err = announceAllHosts(testGroup) if err != nil { t.Fatal(err) } // Block until the hostdb reaches five hosts. err = build.Retry(50, time.Millisecond*250, func() error { var ah HostdbActiveGET err = st.getAPI("/hostdb/active", &ah) if err != nil { return err } if len(ah.Hosts) < 5 { return errors.New("new hosts never appeared in hostdb") } return nil }) if err != nil { t.Fatal(err) } // Mine a block to trigger a second run of threadedContractMaintenance. _, err = st.miner.AddBlock() if err != nil { t.Fatal(err) } // Verify that st and stH1 are dropped in favor of the newer, better hosts. err = build.Retry(50, time.Millisecond*250, func() error { var newContracts int err = st.getAPI("/renter/contracts", &rc) if err != nil { return errors.New("couldn't get renter stats") } hostMap := make(map[string]struct{}) hostMap[rc1Host] = struct{}{} hostMap[rc2Host] = struct{}{} for _, contract := range rc.Contracts { _, exists := hostMap[contract.HostPublicKey.String()] if !exists { newContracts++ hostMap[contract.HostPublicKey.String()] = struct{}{} } } if newContracts != 2 { return fmt.Errorf("not the right number of new contracts: %v", newContracts) } return nil }) if err != nil { t.Fatal(err) } // Block until redundancy is restored to 2.
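// Judging from the call sites in this file, the retry helper has a shape
// along the lines of (an inference from usage, not a quoted declaration):
//
//	func retry(tries int, durationBetweenAttempts time.Duration, fn func() error) error
//
// It re-runs fn until fn returns nil or the attempts are exhausted, and it
// is used interchangeably with build.Retry throughout these tests.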
err = retry(120, 250*time.Millisecond, func() error { st.getAPI("/renter/files", &rf) if len(rf.Files) == 1 && rf.Files[0].Redundancy == 2 { return nil } return errors.New("file not uploaded to full redundancy") }) if err != nil { t.Fatal(err) } // Grab the old contracts, then mine blocks to trigger a renew, and then // wait until the renew is complete. err = st.getAPI("/renter/contracts", &rc) if err != nil { t.Fatal(err) } // Check the amount of data in each contract. for _, contract := range rc.Contracts { if contract.Size != modules.SectorSize { t.Error("Each contract should have 1 sector:", contract.Size, contract.ID) } } for i := 0; i < 5; i++ { _, err := st.miner.AddBlock() if err != nil { t.Fatal(err) } _, err = synchronizationCheck(testGroup) if err != nil { t.Fatal(err) } } // Give the renter time to renew. Two of the contracts should renew. var rc2 RenterContracts err = build.Retry(50, time.Millisecond*250, func() error { err = st.getAPI("/renter/contracts", &rc2) if err != nil { return errors.New("couldn't get renter stats") } // Check that at least 2 contracts are different between rc and rc2. tracker := make(map[types.FileContractID]struct{}) // Add all the contracts. for _, contract := range rc.Contracts { tracker[contract.ID] = struct{}{} } // Count the number of contracts that were not seen in the previous // batch of contracts, and check that the new contracts are not with the // expensive hosts. var unseen int for _, contract := range rc2.Contracts { _, exists := tracker[contract.ID] if !exists { unseen++ tracker[contract.ID] = struct{}{} if contract.HostPublicKey.String() == rc1Host || contract.HostPublicKey.String() == rc2Host { return errors.New("the wrong contracts are being renewed") } } } if unseen != 2 { return errors.New("the wrong number of contracts seems to be getting renewed") } return nil }) if err != nil { t.Fatal(err) } // The renewing process should not have resulted in additional data being // uploaded - it should be the same data in the contracts. for _, contract := range rc2.Contracts { if contract.Size != modules.SectorSize { t.Error("Contract has the wrong size:", contract.Size) } } // Try again to download the file we uploaded. It should still be // retrievable. downloadPath2 := filepath.Join(st.dir, "test-downloaded-verify-2.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath2) if err != nil { t.Fatal(err) } downloadBytes2, err := ioutil.ReadFile(downloadPath2) if err != nil { t.Fatal(err) } if !bytes.Equal(downloadBytes2, origBytes) { t.Fatal("downloaded file and uploaded file do not match") } // Mine out another set of blocks so that the bad contracts expire. for i := 0; i < 6; i++ { _, err := st.miner.AddBlock() if err != nil { t.Fatal(err) } _, err = synchronizationCheck(testGroup) if err != nil { t.Fatal(err) } } // Should be back down to 2 contracts now, with the new hosts. // Verify that st and stH1 are dropped in favor of the newer, better hosts.
err = build.Retry(50, time.Millisecond*250, func() error { err = st.getAPI("/renter/contracts", &rc) if err != nil { return errors.New("couldn't get renter stats") } if len(rc.Contracts) != 2 { return fmt.Errorf("renewing seems to have failed: %v", len(rc.Contracts)) } return nil }) if err != nil { t.Fatal(err) } if rc.Contracts[0].HostPublicKey.String() == rc1Host || rc.Contracts[0].HostPublicKey.String() == rc2Host { t.Error("renter is renewing the wrong contracts", rc.Contracts[0].HostPublicKey.String()) } if rc.Contracts[1].HostPublicKey.String() == rc1Host || rc.Contracts[1].HostPublicKey.String() == rc2Host { t.Error("renter is renewing the wrong contracts", rc.Contracts[1].HostPublicKey.String()) } // Redundancy should still be 2. err = retry(120, 250*time.Millisecond, func() error { st.getAPI("/renter/files", &rf) if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 2 { return nil } return errors.New("file not uploaded to full redundancy") }) if err != nil { t.Fatal(err, "::", rf.Files[0].Redundancy) } // Check that the amount of data in each contract has remained at the // correct amount - just one sector each. err = st.getAPI("/renter/contracts", &rc) if err != nil { t.Fatal(err) } for _, contract := range rc.Contracts { if contract.Size != modules.SectorSize { t.Error("Each contract should have 1 sector:", contract.Size, contract.ID) } } // Try again to download the file we uploaded. It should still be // retrievable. downloadPath3 := filepath.Join(st.dir, "test-downloaded-verify-3.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath3) if err != nil { t.Error("final download has failed:", err) } downloadBytes3, err := ioutil.ReadFile(downloadPath3) if err != nil { t.Error(err) } if !bytes.Equal(downloadBytes3, origBytes) { t.Error("downloaded file and uploaded file do not match") } } Sia-1.3.0/api/renterhost_test.go000066400000000000000000001510071313565667000166030ustar00rootroot00000000000000package api // renterhost_test.go sets up larger integration tests between renters and // hosts, checking that the whole storage ecosystem is functioning cohesively. import ( "bytes" "errors" "fmt" "io/ioutil" "net/url" "os" "path/filepath" "strconv" "strings" "sync" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // TestRenterLocalRepair verifies that the renter will use the local file to // repair if the file exists locally func TestRenterLocalRepair(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.Close() stH1, err := blankServerTester(t.Name() + " - Host 1") if err != nil { t.Fatal(err) } defer stH1.server.Close() testGroup := []*serverTester{st, stH1} // Connect the testers to each other so that they are all on the same // blockchain. err = fullyConnectNodes(testGroup) if err != nil { t.Fatal(err) } // Make sure that every wallet has money in it. err = fundAllNodes(testGroup) if err != nil { t.Fatal(err) } // Add storage to every host. err = addStorageToAllHosts(testGroup) if err != nil { t.Fatal(err) } err = announceAllHosts(testGroup) if err != nil { t.Fatal(err) } // Set an allowance with two hosts. allowanceValues := url.Values{} allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC allowanceValues.Set("hosts", "2") allowanceValues.Set("period", "10") err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Create a file to upload.
filesize := 1024 path := filepath.Join(st.dir, "test.dat") err = createRandFile(path, filesize) if err != nil { t.Fatal(err) } // upload the file uploadValues := url.Values{} uploadValues.Set("source", path) err = st.stdPostAPI("/renter/upload/test", uploadValues) if err != nil { t.Fatal(err) } // redundancy should reach 2 var rf RenterFiles err = retry(60, time.Second, func() error { st.getAPI("/renter/files", &rf) if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 2 { return nil } return errors.New("file not uploaded") }) if err != nil { t.Fatal(err) } // download spending should not have increased var rg RenterGET err = st.getAPI("/renter", &rg) if err != nil { t.Fatal(err) } if rg.FinancialMetrics.DownloadSpending.Cmp(types.NewCurrency64(0)) > 0 { t.Fatalf("expected no download spending, got %v instead", rg.FinancialMetrics.DownloadSpending) } // take down one of the hosts err = stH1.server.Close() if err != nil { t.Fatal(err) } // wait for the redundancy to decrement err = retry(60, time.Second, func() error { st.getAPI("/renter/files", &rf) if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 1 { return nil } return errors.New("file redundancy not decremented") }) if err != nil { t.Fatal(err) } // bring up a new host stNewHost, err := blankServerTester(t.Name() + "-newhost") if err != nil { t.Fatal(err) } defer stNewHost.server.Close() testGroup = []*serverTester{st, stNewHost} // Connect the testers to each other so that they are all on the same // blockchain. err = fullyConnectNodes(testGroup) if err != nil { t.Fatal(err) } _, err = synchronizationCheck(testGroup) if err != nil { t.Fatal(err) } // Make sure that every wallet has money in it. err = fundAllNodes(testGroup) if err != nil { t.Fatal(err) } err = stNewHost.setHostStorage() if err != nil { t.Fatal(err) } err = stNewHost.announceHost() if err != nil { t.Fatal(err) } err = waitForBlock(stNewHost.cs.CurrentBlock().ID(), st) if err != nil { t.Fatal(err) } // add a few new blocks in order to cause the renter to form contracts with the new host for i := 0; i < 10; i++ { b, err := stNewHost.miner.AddBlock() if err != nil { t.Fatal(err) } for _, tester := range testGroup { err = waitForBlock(b.ID(), tester) if err != nil { t.Fatal(err) } } } // redundancy should increment back to 2 as the renter uploads to the new // host using the download-to-upload strategy err = retry(240, time.Second, func() error { st.getAPI("/renter/files", &rf) if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 2 && rf.Files[0].Available { return nil } return errors.New("file redundancy not incremented") }) if err != nil { t.Fatal(err) } // refresh the renter stats before verifying that the repair did not spend // download money err = st.getAPI("/renter", &rg) if err != nil { t.Fatal(err) } if rg.FinancialMetrics.DownloadSpending.Cmp(types.NewCurrency64(0)) > 0 { t.Fatalf("expected no download spending, got %v instead", rg.FinancialMetrics.DownloadSpending) } } // TestRemoteFileRepair verifies that if a trackedFile is made unavailable // locally by being deleted, the repair loop will download the necessary chunks // from the living hosts and upload them to new hosts. func TestRemoteFileRepair(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.Close() stH1, err := blankServerTester(t.Name() + " - Host 1") if err != nil { t.Fatal(err) } defer stH1.server.Close() testGroup := []*serverTester{st, stH1} // Connect the testers to each other so that they are all on the same // blockchain. err = fullyConnectNodes(testGroup) if err != nil { t.Fatal(err) } // Make sure that every wallet has money in it.
err = fundAllNodes(testGroup) if err != nil { t.Fatal(err) } // Add storage to every host. err = addStorageToAllHosts(testGroup) if err != nil { t.Fatal(err) } err = announceAllHosts(testGroup) if err != nil { t.Fatal(err) } // Set an allowance with two hosts. allowanceValues := url.Values{} allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC allowanceValues.Set("hosts", "2") allowanceValues.Set("period", "10") err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Create a file to upload. filesize := int(45678) path := filepath.Join(st.dir, "test.dat") err = createRandFile(path, filesize) if err != nil { t.Fatal(err) } // upload the file uploadValues := url.Values{} uploadValues.Set("source", path) err = st.stdPostAPI("/renter/upload/test", uploadValues) if err != nil { t.Fatal(err) } // redundancy should reach 2 var rf RenterFiles err = retry(60, time.Second, func() error { st.getAPI("/renter/files", &rf) if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 2 { return nil } return errors.New("file not uploaded") }) if err != nil { t.Fatal(err) } // verify we can download downloadPath := filepath.Join(st.dir, "test-downloaded-verify.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath) if err != nil { t.Fatal(err) } // save a copy of the file contents in memory for verification later orig, err := ioutil.ReadFile(path) if err != nil { t.Fatal(err) } // remove the local copy of the file err = os.Remove(path) if err != nil { t.Fatal(err) } // take down one of the hosts err = stH1.server.Close() if err != nil { t.Fatal(err) } // wait for the redundancy to decrement err = retry(60, time.Second, func() error { st.getAPI("/renter/files", &rf) if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 1 { return nil } return errors.New("file redundancy not decremented") }) if err != nil { t.Fatal(err) } // verify we still can download downloadPath = filepath.Join(st.dir, "test-downloaded-verify2.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath) if err != nil { t.Fatal(err) } // bring up a new host stNewHost, err := blankServerTester(t.Name() + "-newhost") if err != nil { t.Fatal(err) } defer stNewHost.server.Close() testGroup = []*serverTester{st, stNewHost} // Connect the testers to each other so that they are all on the same // blockchain. err = fullyConnectNodes(testGroup) if err != nil { t.Fatal(err) } _, err = synchronizationCheck(testGroup) if err != nil { t.Fatal(err) } // Make sure that every wallet has money in it.
err = fundAllNodes(testGroup) if err != nil { t.Fatal(err) } err = stNewHost.setHostStorage() if err != nil { t.Fatal(err) } err = stNewHost.announceHost() if err != nil { t.Fatal(err) } err = waitForBlock(stNewHost.cs.CurrentBlock().ID(), st) if err != nil { t.Fatal(err) } // add a few new blocks in order to cause the renter to form contracts with the new host for i := 0; i < 10; i++ { b, err := stNewHost.miner.AddBlock() if err != nil { t.Fatal(err) } for _, tester := range testGroup { err = waitForBlock(b.ID(), tester) if err != nil { t.Fatal(err) } } } // redundancy should increment back to 2 as the renter uploads to the new // host using the download-to-upload strategy err = retry(240, time.Second, func() error { st.getAPI("/renter/files", &rf) if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 2 && rf.Files[0].Available { return nil } return errors.New("file redundancy not incremented") }) if err != nil { t.Fatal(err) } // we have to wait a bit for the download loop to update with the new // contracts. retry the download for up to 90 seconds. downloadPath = filepath.Join(st.dir, "test-downloaded.dat") err = retry(90, time.Second, func() error { return st.stdGetAPI("/renter/download/test?destination=" + downloadPath) }) if err != nil { t.Fatal(err) } // Check that the download has the right contents. downloaded, err := ioutil.ReadFile(downloadPath) if err != nil { t.Fatal(err) } if !bytes.Equal(orig, downloaded) { t.Fatal("data mismatch when downloading a file") } } // TestHostAndRentVanilla sets up an integration test where a host and renter // do basic uploads and downloads. func TestHostAndRentVanilla(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host and start accepting contracts. err = st.announceHost() if err != nil { t.Fatal(err) } err = st.setHostStorage() if err != nil { t.Fatal(err) } err = st.acceptContracts() if err != nil { t.Fatal(err) } // Set an allowance for the renter, allowing a contract to be formed. allowanceValues := url.Values{} testFunds := "10000000000000000000000000000" // 10k SC testPeriod := "20" testPeriodInt := 20 allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Block until the allowance has finished forming contracts. err = build.Retry(50, time.Millisecond*250, func() error { var rc RenterContracts err = st.getAPI("/renter/contracts", &rc) if err != nil { return errors.New("couldn't get renter stats") } if len(rc.Contracts) != 1 { return errors.New("no contracts") } return nil }) if err != nil { t.Fatal("allowance setting failed") } // Check the host, who should now be reporting file contracts. // // TODO: Switch to using an API call. obligations := st.host.StorageObligations() if len(obligations) != 1 { t.Error("Host has wrong number of obligations:", len(obligations)) } // Create a file. path := filepath.Join(st.dir, "test.dat") err = createRandFile(path, 1024) if err != nil { t.Fatal(err) } // Upload the file to the renter. uploadValues := url.Values{} uploadValues.Set("source", path) err = st.stdPostAPI("/renter/upload/test", uploadValues) if err != nil { t.Fatal(err) } // Only one piece will be uploaded (10% at current redundancy). 
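// UploadProgress is the percentage of erasure-coded pieces that have been
// stored. A single host can hold only one distinct piece per chunk, so with
// one host progress plateaus after the first piece; under the default
// encoding that works out to the 10% threshold these loops wait for
// (e.g. 1 piece stored of 10 total pieces -> 100 * 1/10 = 10%).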
var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(100 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 { t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0]) } // Upload a second, larger file that spans multiple sectors. path2 := filepath.Join(st.dir, "test2.dat") test2Size := modules.SectorSize*2 + 1 err = createRandFile(path2, int(test2Size)) if err != nil { t.Fatal(err) } uploadValues = url.Values{} uploadValues.Set("source", path2) err = st.stdPostAPI("/renter/upload/test2", uploadValues) if err != nil { t.Fatal(err) } // Only one piece will be uploaded (10% at current redundancy). for i := 0; i < 200 && (len(rf.Files) != 2 || rf.Files[0].UploadProgress < 10 || rf.Files[1].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(100 * time.Millisecond) } if len(rf.Files) != 2 || rf.Files[0].UploadProgress < 10 || rf.Files[1].UploadProgress < 10 { t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0], rf.Files[1]) } // Try downloading the first file. downpath := filepath.Join(st.dir, "testdown.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downpath) if err != nil { t.Fatal(err) } // Check that the download has the right contents. orig, err := ioutil.ReadFile(path) if err != nil { t.Fatal(err) } download, err := ioutil.ReadFile(downpath) if err != nil { t.Fatal(err) } if bytes.Compare(orig, download) != 0 { t.Fatal("data mismatch when downloading a file") } // The renter's downloads queue should have 1 entry now. var queue RenterDownloadQueue if err = st.getAPI("/renter/downloads", &queue); err != nil { t.Fatal(err) } if len(queue.Downloads) != 1 { t.Fatalf("expected renter to have 1 download in the queue; got %v", len(queue.Downloads)) } // Try downloading the second file. downpath2 := filepath.Join(st.dir, "testdown2.dat") err = st.stdGetAPI("/renter/download/test2?destination=" + downpath2) if err != nil { t.Fatal(err) } // Check that the download has the right contents. orig2, err := ioutil.ReadFile(path2) if err != nil { t.Fatal(err) } download2, err := ioutil.ReadFile(downpath2) if err != nil { t.Fatal(err) } if bytes.Compare(orig2, download2) != 0 { t.Fatal("data mismatch when downloading a file") } // The renter's downloads queue should have 2 entries now. if err = st.getAPI("/renter/downloads", &queue); err != nil { t.Fatal(err) } if len(queue.Downloads) != 2 { t.Fatalf("expected renter to have 2 downloads in the queue; got %v", len(queue.Downloads)) } // Mine two blocks, which should cause the host to submit the storage // obligation to the blockchain. for i := 0; i < 2; i++ { _, err := st.miner.AddBlock() if err != nil { t.Fatal(err) } time.Sleep(time.Millisecond * 200) } // Check that the host was able to get the file contract confirmed on the // blockchain. obligations = st.host.StorageObligations() if len(obligations) != 1 { t.Error("Host has wrong number of obligations:", len(obligations)) } if !obligations[0].OriginConfirmed { t.Error("host has not seen the file contract on the blockchain") } // Mine blocks until the host should have submitted a storage proof.
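// A host can only submit a storage proof once the contract period has
// elapsed, so the loop below mines blocks until well past the end of the
// period (i runs from 0 through testPeriodInt+5, i.e. 26 blocks for the
// 20-block period, leaving a margin for the proof window) and then scans
// the host's obligations for one with ProofConfirmed set.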
for i := 0; i <= testPeriodInt+5; i++ { _, err := st.miner.AddBlock() if err != nil { t.Fatal(err) } time.Sleep(time.Millisecond * 200) } success := false obligations = st.host.StorageObligations() for _, obligation := range obligations { if obligation.ProofConfirmed { success = true break } } if !success { t.Error("does not seem like the host has submitted a storage proof successfully to the network") } } // TestHostAndRentMultiHost sets up an integration test where three hosts and a // renter do basic (parallel) uploads and downloads. func TestHostAndRentMultiHost(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() stH1, err := blankServerTester(t.Name() + " - Host 2") if err != nil { t.Fatal(err) } defer stH1.server.panicClose() stH2, err := blankServerTester(t.Name() + " - Host 3") if err != nil { t.Fatal(err) } defer stH2.server.panicClose() testGroup := []*serverTester{st, stH1, stH2} // Connect the testers to each other so that they are all on the same // blockchain. err = fullyConnectNodes(testGroup) if err != nil { t.Fatal(err) } // Make sure that every wallet has money in it. err = fundAllNodes(testGroup) if err != nil { t.Fatal(err) } // Add storage to every host. err = addStorageToAllHosts(testGroup) if err != nil { t.Fatal(err) } // Announce every host. err = announceAllHosts(testGroup) if err != nil { t.Fatal(err) } // Set an allowance with three hosts. allowanceValues := url.Values{} allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC allowanceValues.Set("hosts", "3") allowanceValues.Set("period", "10") allowanceValues.Set("renewwindow", "2") err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Create a file to upload. filesize := int(45678) path := filepath.Join(st.dir, "test.dat") err = createRandFile(path, filesize) if err != nil { t.Fatal(err) } // Upload a file with 2-of-6 redundancy. uploadValues := url.Values{} uploadValues.Set("source", path) uploadValues.Set("datapieces", "2") uploadValues.Set("paritypieces", "4") err = st.stdPostAPI("/renter/upload/test", uploadValues) if err != nil { t.Fatal(err) } // Three pieces should get uploaded. var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 50); i++ { st.getAPI("/renter/files", &rf) time.Sleep(100 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 50 { t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0]) } // Try downloading the file. downpath := filepath.Join(st.dir, "testdown.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downpath) if err != nil { t.Fatal(err) } // Check that the download has the right contents. orig, err := ioutil.ReadFile(path) if err != nil { t.Fatal(err) } download, err := ioutil.ReadFile(downpath) if err != nil { t.Fatal(err) } if bytes.Compare(orig, download) != 0 { t.Fatal("data mismatch when downloading a file") } // The renter's downloads queue should have 1 entry now. var queue RenterDownloadQueue if err = st.getAPI("/renter/downloads", &queue); err != nil { t.Fatal(err) } if len(queue.Downloads) != 1 { t.Fatalf("expected renter to have 1 download in the queue; got %v", len(queue.Downloads)) } } // TestHostAndRentManyFiles sets up an integration test where a single renter // is uploading many files to the network.
func TestHostAndRentManyFiles(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() stH1, err := blankServerTester(t.Name() + " - Host 2") if err != nil { t.Fatal(err) } defer stH1.server.panicClose() stH2, err := blankServerTester(t.Name() + " - Host 3") if err != nil { t.Fatal(err) } defer stH2.server.panicClose() stH3, err := blankServerTester(t.Name() + " - Host 4") if err != nil { t.Fatal(err) } defer stH3.server.panicClose() testGroup := []*serverTester{st, stH1, stH2, stH3} // Connect the testers to each other so that they are all on the same // blockchain. err = fullyConnectNodes(testGroup) if err != nil { t.Fatal(err) } // Make sure that every wallet has money in it. err = fundAllNodes(testGroup) if err != nil { t.Fatal(err) } // Add storage to every host. err = addStorageToAllHosts(testGroup) if err != nil { t.Fatal(err) } // Announce every host. err = announceAllHosts(testGroup) if err != nil { t.Fatal(err) } // Set an allowance with four hosts. allowanceValues := url.Values{} allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC allowanceValues.Set("hosts", "4") allowanceValues.Set("period", "5") allowanceValues.Set("renewwindow", "2") err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Create 3 files to upload at the same time. filesize1 := int(12347) filesize2 := int(22343) filesize3 := int(32349) path1 := filepath.Join(st.dir, "test1.dat") path2 := filepath.Join(st.dir, "test2.dat") path3 := filepath.Join(st.dir, "test3.dat") err = createRandFile(path1, filesize1) if err != nil { t.Fatal(err) } err = createRandFile(path2, filesize2) if err != nil { t.Fatal(err) } err = createRandFile(path3, filesize3) if err != nil { t.Fatal(err) } // Concurrently upload a file with 1-of-4 redundancy, 2-of-4 redundancy, // and 3-of-4 redundancy. var wg sync.WaitGroup wg.Add(3) go func() { defer wg.Done() uploadValues := url.Values{} uploadValues.Set("source", path1) uploadValues.Set("datapieces", "1") uploadValues.Set("paritypieces", "3") err := st.stdPostAPI("/renter/upload/test1", uploadValues) if err != nil { t.Error(err) } }() go func() { defer wg.Done() uploadValues := url.Values{} uploadValues.Set("source", path2) uploadValues.Set("datapieces", "2") uploadValues.Set("paritypieces", "2") err := st.stdPostAPI("/renter/upload/test2", uploadValues) if err != nil { t.Error(err) } }() go func() { defer wg.Done() uploadValues := url.Values{} uploadValues.Set("source", path3) uploadValues.Set("datapieces", "3") uploadValues.Set("paritypieces", "1") err := st.stdPostAPI("/renter/upload/test3", uploadValues) if err != nil { t.Error(err) } }() // Block until the upload call is complete for all three files. wg.Wait() // Block until all files hit 100% uploaded. var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 3 || rf.Files[0].UploadProgress < 100 || rf.Files[1].UploadProgress < 100 || rf.Files[2].UploadProgress < 100); i++ { st.getAPI("/renter/files", &rf) time.Sleep(500 * time.Millisecond) } if len(rf.Files) != 3 || rf.Files[0].UploadProgress < 100 || rf.Files[1].UploadProgress < 100 || rf.Files[2].UploadProgress < 100 { t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0], rf.Files[1], rf.Files[2]) } // Download all three files in parallel.
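// The three uploads above use 1-of-4, 2-of-4, and 3-of-4 erasure coding
// (datapieces of datapieces+paritypieces total). Redundancy is total pieces
// divided by data pieces, so at full health:
//
//	test1: 4 / 1 = 4x redundancy
//	test2: 4 / 2 = 2x redundancy
//	test3: 4 / 3 ~ 1.33x redundancy
//
// Any datapieces-many pieces suffice to reconstruct a file, which is why
// the parallel downloads below can succeed as long as enough hosts remain
// online.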
wg.Add(3) go func() { defer wg.Done() downpath := filepath.Join(st.dir, "testdown1.dat") err := st.stdGetAPI("/renter/download/test1?destination=" + downpath) if err != nil { t.Error(err) } // Check that the download has the right contents. orig, err := ioutil.ReadFile(path1) if err != nil { t.Error(err) } download, err := ioutil.ReadFile(downpath) if err != nil { t.Error(err) } if bytes.Compare(orig, download) != 0 { t.Error("data mismatch when downloading a file") } }() go func() { defer wg.Done() downpath := filepath.Join(st.dir, "testdown2.dat") err := st.stdGetAPI("/renter/download/test2?destination=" + downpath) if err != nil { t.Error(err) } // Check that the download has the right contents. orig, err := ioutil.ReadFile(path2) if err != nil { t.Error(err) } download, err := ioutil.ReadFile(downpath) if err != nil { t.Error(err) } if bytes.Compare(orig, download) != 0 { t.Error("data mismatch when downloading a file") } }() go func() { defer wg.Done() downpath := filepath.Join(st.dir, "testdown3.dat") err := st.stdGetAPI("/renter/download/test3?destination=" + downpath) if err != nil { t.Error(err) } // Check that the download has the right contents. orig, err := ioutil.ReadFile(path3) if err != nil { t.Error(err) } download, err := ioutil.ReadFile(downpath) if err != nil { t.Error(err) } if bytes.Compare(orig, download) != 0 { t.Error("data mismatch when downloading a file") } }() wg.Wait() // The renter's downloads queue should have 3 entries now. var queue RenterDownloadQueue if err = st.getAPI("/renter/downloads", &queue); err != nil { t.Fatal(err) } if len(queue.Downloads) != 3 { t.Fatalf("expected renter to have 3 downloads in the queue; got %v", len(queue.Downloads)) } } // TestRenterUploadDownload tests that downloading and uploading in parallel // does not result in failures or stalling. func TestRenterUploadDownload(t *testing.T) { if testing.Short() { t.SkipNow() } st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host and start accepting contracts. err = st.announceHost() if err != nil { t.Fatal(err) } err = st.acceptContracts() if err != nil { t.Fatal(err) } err = st.setHostStorage() if err != nil { t.Fatal(err) } // Set an allowance for the renter, allowing a contract to be formed. allowanceValues := url.Values{} testFunds := "10000000000000000000000000000" // 10k SC testPeriod := "10" allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Block until the allowance has finished forming contracts. err = build.Retry(50, time.Millisecond*250, func() error { var rc RenterContracts err = st.getAPI("/renter/contracts", &rc) if err != nil { return errors.New("couldn't get renter stats") } if len(rc.Contracts) != 1 { return errors.New("no contracts") } return nil }) if err != nil { t.Fatal("allowance setting failed") } // Check financial metrics; coins should have been spent on contracts var rg RenterGET err = st.getAPI("/renter", &rg) if err != nil { t.Fatal(err) } spent := rg.Settings.Allowance.Funds.Sub(rg.FinancialMetrics.Unspent) if spent.IsZero() { t.Fatal("financial metrics do not reflect contract spending") } // Create a file. path := filepath.Join(st.dir, "test.dat") err = createRandFile(path, 1024) if err != nil { t.Fatal(err) } // Upload to host.
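// The financial-metrics check at the end of this test relies on the
// renter's accounting identity: whatever portion of the allowance is no
// longer Unspent has gone to contract formation plus upload, download, and
// storage spending. Schematically (metricsBefore and metricsAfter are
// illustrative names, not fields in the API):
//
//	spentBefore := allowance.Funds.Sub(metricsBefore.Unspent) // contracts only
//	spentAfter := allowance.Funds.Sub(metricsAfter.Unspent)
//	delta := metricsAfter.UploadSpending.
//		Add(metricsAfter.DownloadSpending).
//		Add(metricsAfter.StorageSpending)
//	// expect delta to equal spentAfter.Sub(spentBefore)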
uploadValues := url.Values{} uploadValues.Set("source", path) err = st.stdPostAPI("/renter/upload/test", uploadValues) if err != nil { t.Fatal(err) } // Only one piece will be uploaded (10% at current redundancy). var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(100 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 { t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0]) } // In parallel, upload another file and download the first file. path2 := filepath.Join(st.dir, "test2.dat") test2Size := modules.SectorSize*2 + 1 err = createRandFile(path2, int(test2Size)) if err != nil { t.Fatal(err) } uploadValues = url.Values{} uploadValues.Set("source", path2) err = st.stdPostAPI("/renter/upload/test2", uploadValues) if err != nil { t.Fatal(err) } downpath := filepath.Join(st.dir, "testdown.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downpath) if err != nil { t.Fatal(err) } // Check that the download has the right contents. orig, err := ioutil.ReadFile(path) if err != nil { t.Fatal(err) } download, err := ioutil.ReadFile(downpath) if err != nil { t.Fatal(err) } if bytes.Compare(orig, download) != 0 { t.Fatal("data mismatch when downloading a file") } // Wait for upload to complete. for i := 0; i < 200 && (len(rf.Files) != 2 || rf.Files[0].UploadProgress < 10 || rf.Files[1].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(100 * time.Millisecond) } if len(rf.Files) != 2 || rf.Files[0].UploadProgress < 10 || rf.Files[1].UploadProgress < 10 { t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0], rf.Files[1]) } // Check financial metrics; funds should have been spent on uploads/downloads err = st.getAPI("/renter", &rg) if err != nil { t.Fatal(err) } fm := rg.FinancialMetrics newSpent := rg.Settings.Allowance.Funds.Sub(fm.Unspent) // all new spending should be reflected in upload/download/storage spending diff := fm.UploadSpending.Add(fm.DownloadSpending).Add(fm.StorageSpending) if !diff.Equals(newSpent.Sub(spent)) { t.Fatal("all new spending should be reflected in metrics:", diff, newSpent.Sub(spent)) } } // TestRenterCancelAllowance tests that setting an empty allowance causes // uploads, downloads, and renewals to cease. func TestRenterCancelAllowance(t *testing.T) { if testing.Short() { t.SkipNow() } st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host and start accepting contracts. err = st.announceHost() if err != nil { t.Fatal(err) } err = st.acceptContracts() if err != nil { t.Fatal(err) } err = st.setHostStorage() if err != nil { t.Fatal(err) } // Set an allowance for the renter, allowing a contract to be formed. allowanceValues := url.Values{} testFunds := "10000000000000000000000000000" // 10k SC testPeriod := "10" allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Create a file. path := filepath.Join(st.dir, "test.dat") err = createRandFile(path, 1024) if err != nil { t.Fatal(err) } // Upload the file to the renter. uploadValues := url.Values{} uploadValues.Set("source", path) err = st.stdPostAPI("/renter/upload/test", uploadValues) if err != nil { t.Fatal(err) } // Only one piece will be uploaded (10% at current redundancy). 
var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(100 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 { t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0]) } // Cancel the allowance allowanceValues = url.Values{} allowanceValues.Set("funds", "0") allowanceValues.Set("hosts", "0") allowanceValues.Set("period", "0") allowanceValues.Set("renewwindow", "0") err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Try downloading the file; should fail downpath := filepath.Join(st.dir, "testdown.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downpath) if err == nil || !strings.Contains(err.Error(), "insufficient hosts") { t.Fatal("expected insufficient hosts error, got", err) } } // TestRenterParallelDelete tests that uploading and deleting in parallel does // not result in failures or stalling. func TestRenterParallelDelete(t *testing.T) { if testing.Short() { t.SkipNow() } st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host and start accepting contracts. err = st.announceHost() if err != nil { t.Fatal(err) } err = st.acceptContracts() if err != nil { t.Fatal(err) } err = st.setHostStorage() if err != nil { t.Fatal(err) } // Set an allowance for the renter, allowing a contract to be formed. allowanceValues := url.Values{} testFunds := "10000000000000000000000000000" // 10k SC testPeriod := "10" allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Create two files. path := filepath.Join(st.dir, "test.dat") err = createRandFile(path, 1024) if err != nil { t.Fatal(err) } path2 := filepath.Join(st.dir, "test2.dat") err = createRandFile(path2, 1024) if err != nil { t.Fatal(err) } // Upload the first file to host. uploadValues := url.Values{} uploadValues.Set("source", path) err = st.stdPostAPI("/renter/upload/test", uploadValues) if err != nil { t.Fatal(err) } // Wait for the first file to be registered in the renter. var rf RenterFiles for i := 0; i < 200 && len(rf.Files) != 1; i++ { st.getAPI("/renter/files", &rf) time.Sleep(100 * time.Millisecond) } if len(rf.Files) != 1 { t.Fatal("file is not being registered:", rf.Files) } // In parallel, start uploading the other file, and delete the first file. uploadValues = url.Values{} uploadValues.Set("source", path2) err = st.stdPostAPI("/renter/upload/test2", uploadValues) if err != nil { t.Fatal(err) } err = st.stdPostAPI("/renter/delete/test", url.Values{}) if err != nil { t.Fatal(err) } // Only the second file should be present st.getAPI("/renter/files", &rf) if len(rf.Files) != 1 || rf.Files[0].SiaPath != "test2" { t.Fatal("file was not deleted properly:", rf.Files) } // Wait for the second upload to complete. for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(100 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 { t.Fatal("the uploading is not succeeding for some reason:", rf.Files) } // In parallel, download and delete the second file.
go st.stdPostAPI("/renter/delete/test2", url.Values{}) time.Sleep(100 * time.Millisecond) downpath := filepath.Join(st.dir, "testdown.dat") err = st.stdGetAPI("/renter/download/test2?destination=" + downpath) if err == nil { t.Fatal("download should fail after delete") } // No files should be present st.getAPI("/renter/files", &rf) if len(rf.Files) != 0 { t.Fatal("file was not deleted properly:", rf.Files) } } // TestRenterRenew sets up an integration test where a renter renews a // contract with a host. func TestRenterRenew(t *testing.T) { if testing.Short() { t.SkipNow() } st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host and start accepting contracts. err = st.announceHost() if err != nil { t.Fatal(err) } err = st.acceptContracts() if err != nil { t.Fatal(err) } err = st.setHostStorage() if err != nil { t.Fatal(err) } var ah HostdbActiveGET for i := 0; i < 50; i++ { if err = st.getAPI("/hostdb/active", &ah); err != nil { t.Fatal(err) } if len(ah.Hosts) == 1 { break } time.Sleep(time.Millisecond * 100) } if len(ah.Hosts) != 1 { t.Fatalf("expected 1 host, got %v", len(ah.Hosts)) } // Set an allowance for the renter, allowing a contract to be formed. allowanceValues := url.Values{} testFunds := "10000000000000000000000000000" // 10k SC testPeriod := 10 allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", strconv.Itoa(testPeriod)) err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Create a file. path := filepath.Join(st.dir, "test.dat") err = createRandFile(path, 1024) if err != nil { t.Fatal(err) } // Upload the file to the renter. uploadValues := url.Values{} uploadValues.Set("source", path) err = st.stdPostAPI("/renter/upload/test", uploadValues) if err != nil { t.Fatal(err) } // Only one piece will be uploaded (10% at current redundancy). var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(100 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 { t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0]) } // Get current contract ID. var rc RenterContracts err = st.getAPI("/renter/contracts", &rc) if err != nil { t.Fatal(err) } contractID := rc.Contracts[0].ID // Mine enough blocks to enter the renewal window. testWindow := testPeriod / 2 for i := 0; i < testWindow+1; i++ { st.miner.AddBlock() } // Wait for the contract to be renewed. for i := 0; i < 200 && (len(rc.Contracts) != 1 || rc.Contracts[0].ID == contractID); i++ { st.getAPI("/renter/contracts", &rc) time.Sleep(100 * time.Millisecond) } if rc.Contracts[0].ID == contractID { t.Fatal("contract was not renewed:", rc.Contracts[0]) } // Try downloading the file. downpath := filepath.Join(st.dir, "testdown.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downpath) if err != nil { t.Fatal(err) } // Check that the download has the right contents. orig, err := ioutil.ReadFile(path) if err != nil { t.Fatal(err) } download, err := ioutil.ReadFile(downpath) if err != nil { t.Fatal(err) } if bytes.Compare(orig, download) != 0 { t.Fatal("data mismatch when downloading a file") } } // TestRenterAllowance sets up an integration test where a renter attempts to // download a file after changing the allowance. 
func TestRenterAllowance(t *testing.T) { t.Skip("bypassing NDF") if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host and start accepting contracts. err = st.announceHost() if err != nil { t.Fatal(err) } err = st.acceptContracts() if err != nil { t.Fatal(err) } err = st.setHostStorage() if err != nil { t.Fatal(err) } // Set an allowance for the renter, allowing a contract to be formed. allowanceValues := url.Values{} testFunds := types.SiacoinPrecision.Mul64(10000) // 10k SC testPeriod := 20 allowanceValues.Set("funds", testFunds.String()) allowanceValues.Set("period", strconv.Itoa(testPeriod)) err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Create a file. path := filepath.Join(st.dir, "test.dat") err = createRandFile(path, 1024) if err != nil { t.Fatal(err) } // Upload the file to the renter. uploadValues := url.Values{} uploadValues.Set("source", path) err = st.stdPostAPI("/renter/upload/test", uploadValues) if err != nil { t.Fatal(err) } // Only one piece will be uploaded (10% at current redundancy). var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(100 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 { t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0]) } t.Skip("ndf - re-enable after contractor overhaul") // Try downloading the file after modifying the allowance in various ways. allowances := []struct { funds types.Currency period int }{ {testFunds.Mul64(10), testPeriod / 2}, {testFunds, testPeriod / 2}, {testFunds.Div64(10), testPeriod / 2}, {testFunds.Mul64(10), testPeriod}, {testFunds, testPeriod}, {testFunds.Div64(10), testPeriod}, {testFunds.Mul64(10), testPeriod * 2}, {testFunds, testPeriod * 2}, {testFunds.Div64(10), testPeriod * 2}, } for _, a := range allowances { allowanceValues.Set("funds", a.funds.String()) allowanceValues.Set("period", strconv.Itoa(a.period)) err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } time.Sleep(100 * time.Millisecond) // Try downloading the file. downpath := filepath.Join(st.dir, "testdown.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downpath) if err != nil { t.Fatal(err) } // Check that the download has the right contents. orig, err := ioutil.ReadFile(path) if err != nil { t.Fatal(err) } download, err := ioutil.ReadFile(downpath) if err != nil { t.Fatal(err) } if bytes.Compare(orig, download) != 0 { t.Fatal("data mismatch when downloading a file") } } } // TestHostAndRentReload sets up an integration test where a host and renter // do basic uploads and downloads, with an intervening shutdown+startup. func TestHostAndRentReload(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } // Announce the host and start accepting contracts. err = st.announceHost() if err != nil { t.Fatal(err) } err = st.acceptContracts() if err != nil { t.Fatal(err) } err = st.setHostStorage() if err != nil { t.Fatal(err) } // Mine a block so that the wallet reclaims refund outputs _, err = st.miner.AddBlock() if err != nil { t.Fatal(err) } // Set an allowance for the renter, allowing a contract to be formed. 
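// Allowance funds are denominated in hastings. types.SiacoinPrecision is
// 10^24 hastings per siacoin, so the "10000000000000000000000000000" (10^28)
// used throughout these tests is 10^28 / 10^24 = 10,000 SC, matching the
// "10k SC" comments. The same value can be built programmatically, as
// TestRenterAllowance above does:
//
//	testFunds := types.SiacoinPrecision.Mul64(10000) // 10k SC in hastings
//	allowanceValues.Set("funds", testFunds.String())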
allowanceValues := url.Values{} testFunds := "10000000000000000000000000000" // 10k SC testPeriod := "10" allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Create a file. path := filepath.Join(st.dir, "test.dat") err = createRandFile(path, 1024) if err != nil { t.Fatal(err) } // Upload the file to the renter. uploadValues := url.Values{} uploadValues.Set("source", path) err = st.stdPostAPI("/renter/upload/test", uploadValues) if err != nil { t.Fatal(err) } // Only one piece will be uploaded (10% at current redundancy). var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(100 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 { t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0]) } // Try downloading the file. downpath := filepath.Join(st.dir, "testdown.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downpath) if err != nil { t.Fatal(err) } // Check that the download has the right contents. orig, err := ioutil.ReadFile(path) if err != nil { t.Fatal(err) } download, err := ioutil.ReadFile(downpath) if err != nil { t.Fatal(err) } if bytes.Compare(orig, download) != 0 { t.Fatal("data mismatch when downloading a file") } // The renter's downloads queue should have 1 entry now. var queue RenterDownloadQueue if err = st.getAPI("/renter/downloads", &queue); err != nil { t.Fatal(err) } if len(queue.Downloads) != 1 { t.Fatalf("expected renter to have 1 download in the queue; got %v", len(queue.Downloads)) } // close and reopen the server err = st.server.Close() if err != nil { t.Fatal(err) } st, err = assembleServerTester(st.walletKey, st.dir) if err != nil { t.Fatal(err) } defer st.server.panicClose() err = st.announceHost() if err != nil { t.Fatal(err) } // Try downloading the file. err = st.stdGetAPI("/renter/download/test?destination=" + downpath) if err != nil { t.Fatal(err) } // Check that the download has the right contents. orig, err = ioutil.ReadFile(path) if err != nil { t.Fatal(err) } download, err = ioutil.ReadFile(downpath) if err != nil { t.Fatal(err) } if bytes.Compare(orig, download) != 0 { t.Fatal("data mismatch when downloading a file") } } // TestHostAndRenterRenewInterrupt sets up an integration test where a renter // renews a contract with a host. func TestHostAndRenterRenewInterrupt(t *testing.T) { t.Skip("re-enable test following contractor overhaul") if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } stHost, err := blankServerTester(t.Name() + "-Host") if err != nil { t.Fatal(err) } sts := []*serverTester{st, stHost} err = fullyConnectNodes(sts) if err != nil { t.Fatal(err) } err = fundAllNodes(sts) if err != nil { t.Fatal(err) } // Announce the host. err = stHost.acceptContracts() if err != nil { t.Fatal(err) } err = stHost.setHostStorage() if err != nil { t.Fatal(err) } err = stHost.announceHost() if err != nil { t.Fatal(err) } // Wait for host to be seen in renter's hostdb var ah HostdbActiveGET for i := 0; i < 50; i++ { if err = st.getAPI("/hostdb/active", &ah); err != nil { t.Fatal(err) } if len(ah.Hosts) == 1 { break } time.Sleep(time.Millisecond * 100) } if len(ah.Hosts) != 1 { t.Fatalf("expected 1 host, got %v", len(ah.Hosts)) } // Upload a file to the host.
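// Renewal in the renew tests is triggered by entering the renew window: the
// contract period is 10 blocks and the window here is half the period, so
// mining period/2 + 1 blocks puts the chain inside the window, after which
// the contractor replaces the contract with one that has a new ID. The
// tests then poll /renter/contracts until the ID changes:
//
//	testWindow := testPeriodInt / 2
//	for i := 0; i < testWindow+1; i++ {
//		st.miner.AddBlock()
//	}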
allowanceValues := url.Values{} testFunds := "10000000000000000000000000000" // 10k SC testPeriod := "10" testPeriodInt := 10 allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Create a file. path := filepath.Join(st.dir, "test.dat") err = createRandFile(path, 10e3) if err != nil { t.Fatal(err) } // Upload the file to the renter. uploadValues := url.Values{} uploadValues.Set("source", path) err = st.stdPostAPI("/renter/upload/test", uploadValues) if err != nil { t.Fatal(err) } // Get current contract ID. var rc RenterContracts err = st.getAPI("/renter/contracts", &rc) if err != nil { t.Fatal(err) } contractID := rc.Contracts[0].ID // Mine enough blocks to enter the renewal window. testWindow := testPeriodInt / 2 for i := 0; i < testWindow+1; i++ { st.miner.AddBlock() } // Wait for the contract to be renewed. for i := 0; i < 200 && (len(rc.Contracts) != 1 || rc.Contracts[0].ID == contractID); i++ { st.getAPI("/renter/contracts", &rc) time.Sleep(100 * time.Millisecond) } if rc.Contracts[0].ID == contractID { t.Fatal("contract was not renewed:", rc.Contracts[0]) } // Only one piece will be uploaded (10% at current redundancy). var rf RenterFiles for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ { st.getAPI("/renter/files", &rf) time.Sleep(1000 * time.Millisecond) } if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 { t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0]) } // Try downloading the file. downpath := filepath.Join(st.dir, "testdown.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downpath) if err != nil { t.Fatal(err) } // Check that the download has the right contents. orig, err := ioutil.ReadFile(path) if err != nil { t.Fatal(err) } download, err := ioutil.ReadFile(downpath) if err != nil { t.Fatal(err) } if bytes.Compare(orig, download) != 0 { t.Fatal("data mismatch when downloading a file") } } // TestRedundancyReporting verifies that redundancy reporting is accurate when // hosts go offline. func TestRedundancyReporting(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.Close() stH1, err := blankServerTester(t.Name() + " - Host 2") if err != nil { t.Fatal(err) } testGroup := []*serverTester{st, stH1} // Connect the testers to each other so that they are all on the same // blockchain. err = fullyConnectNodes(testGroup) if err != nil { t.Fatal(err) } // Make sure that every wallet has money in it. err = fundAllNodes(testGroup) if err != nil { t.Fatal(err) } // Add storage to every host. err = addStorageToAllHosts(testGroup) if err != nil { t.Fatal(err) } // Announce every host. err = announceAllHosts(testGroup) if err != nil { t.Fatal(err) } // Set an allowance with two hosts. allowanceValues := url.Values{} allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC allowanceValues.Set("hosts", "2") allowanceValues.Set("period", "10") err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Create a file to upload.
filesize := int(1024) path := filepath.Join(st.dir, "test.dat") err = createRandFile(path, filesize) if err != nil { t.Fatal(err) } // upload the file uploadValues := url.Values{} uploadValues.Set("source", path) err = st.stdPostAPI("/renter/upload/test", uploadValues) if err != nil { t.Fatal(err) } // redundancy should reach 2 var rf RenterFiles err = retry(60, time.Second, func() error { st.getAPI("/renter/files", &rf) if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 2 { return nil } return errors.New("file not uploaded") }) if err != nil { t.Fatal(err) } // take down one of the hosts stH1.server.Close() // wait for the redundancy to decrement err = retry(60, time.Second, func() error { st.getAPI("/renter/files", &rf) if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 1 { return nil } return errors.New("file redundancy not decremented") }) if err != nil { t.Fatal(err) } // bring back the host stH1, err = assembleServerTester(stH1.walletKey, stH1.dir) if err != nil { t.Fatal(err) } defer stH1.server.Close() testGroup = []*serverTester{st, stH1} err = fullyConnectNodes(testGroup) if err != nil { t.Fatal(err) } // Add a block to clear the transaction pool and give the host an output to // make an announcement, and then make the announcement. _, err = st.miner.AddBlock() if err != nil { t.Fatal(err) } _, err = synchronizationCheck(testGroup) if err != nil { t.Fatal(err) } err = announceAllHosts(testGroup) if err != nil { t.Fatal(err) } // Wait until the host shows back up in the hostdb. var ah HostdbActiveGET err = retry(200, 100*time.Millisecond, func() error { err := st.getAPI("/hostdb/active", &ah) if err != nil { return err } if len(ah.Hosts) != 2 { return errors.New("not enough hosts") } return nil }) if err != nil { t.Fatal(err) } // Mine another block so that the contract checker updates the IsGood status // of the contracts. _, err = st.miner.AddBlock() if err != nil { t.Fatal(err) } _, err = synchronizationCheck(testGroup) if err != nil { t.Fatal(err) } // Redundancy should re-report at 2. err = retry(250, 100*time.Millisecond, func() error { st.getAPI("/renter/files", &rf) if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 2 { return nil } return errors.New("file redundancy not incremented") }) if err != nil { t.Fatal(err) } } // TestRenterMissingHosts verifies that if hosts are taken offline, downloads // fail. func TestRenterMissingHosts(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.Close() stH1, err := blankServerTester(t.Name() + " - Host 1") if err != nil { t.Fatal(err) } defer stH1.server.Close() stH2, err := blankServerTester(t.Name() + " - Host 2") if err != nil { t.Fatal(err) } defer stH2.server.Close() stH3, err := blankServerTester(t.Name() + " - Host 3") if err != nil { t.Fatal(err) } defer stH3.server.Close() testGroup := []*serverTester{st, stH1, stH2, stH3} // Connect the testers to each other so that they are all on the same // blockchain. err = fullyConnectNodes(testGroup) if err != nil { t.Fatal(err) } // Make sure that every wallet has money in it. err = fundAllNodes(testGroup) if err != nil { t.Fatal(err) } // Add storage to every host. err = addStorageToAllHosts(testGroup) if err != nil { t.Fatal(err) } err = announceAllHosts(testGroup) if err != nil { t.Fatal(err) } // Set an allowance with three hosts.
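// The file in this test is uploaded with 2 data pieces and 1 parity piece,
// so full health is 3 pieces / 2 data pieces = 1.5x redundancy. Each host
// that goes offline takes one piece with it, stepping redundancy from 1.5
// to 1.0 to 0.5 to 0; once fewer than 2 (datapieces) pieces remain, i.e.
// redundancy drops below 1, downloads are expected to fail.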
allowanceValues := url.Values{} allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC allowanceValues.Set("hosts", "3") allowanceValues.Set("period", "10") err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Create a file to upload. filesize := int(100) path := filepath.Join(st.dir, "test.dat") err = createRandFile(path, filesize) if err != nil { t.Fatal(err) } // upload the file uploadValues := url.Values{} uploadValues.Set("source", path) uploadValues.Set("datapieces", "2") uploadValues.Set("paritypieces", "1") err = st.stdPostAPI("/renter/upload/test", uploadValues) if err != nil { t.Fatal(err) } // redundancy should reach 1.5 var rf RenterFiles err = retry(20, time.Second, func() error { st.getAPI("/renter/files", &rf) if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 1.5 { return nil } return errors.New("file not uploaded") }) if err != nil { t.Fatal(err) } // verify we can download downloadPath := filepath.Join(st.dir, "test-downloaded-verify.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath) if err != nil { t.Fatal(err) } // take down one of the hosts err = stH1.server.Close() if err != nil { t.Fatal(err) } // wait for the redundancy to decrement err = retry(60, time.Second, func() error { st.getAPI("/renter/files", &rf) if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 1 { return nil } return errors.New("file redundancy not decremented: " + fmt.Sprint(rf.Files[0].Redundancy)) }) if err != nil { t.Log(err) } // verify we still can download downloadPath = filepath.Join(st.dir, "test-downloaded-verify2.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath) if err != nil { t.Fatal(err) } // take down another host err = stH2.server.Close() if err != nil { t.Fatal(err) } // wait for the redundancy to decrement err = retry(60, time.Second, func() error { st.getAPI("/renter/files", &rf) if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 0.5 { return nil } return errors.New("file redundancy not decremented: " + fmt.Sprint(rf.Files[0].Redundancy)) }) if err != nil { t.Log(err) } // verify that the download fails downloadPath = filepath.Join(st.dir, "test-downloaded-verify3.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath) if err == nil { t.Fatal("expected download to fail with redundancy <1") } // take down another host err = stH3.server.Close() if err != nil { t.Fatal(err) } // wait for the redundancy to decrement err = retry(60, time.Second, func() error { st.getAPI("/renter/files", &rf) if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 0 { return nil } return errors.New("file redundancy not decremented: " + fmt.Sprint(rf.Files[0].Redundancy)) }) if err != nil { t.Log(err) } // verify that the download fails downloadPath = filepath.Join(st.dir, "test-downloaded-verify4.dat") err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath) if err == nil { t.Fatal("expected download to fail with redundancy <1") } } Sia-1.3.0/api/scan.go000066400000000000000000000023701313565667000142710ustar00rootroot00000000000000package api import ( "math/big" "errors" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/types" ) // scanAmount scans a types.Currency from a string. 
func scanAmount(amount string) (types.Currency, bool) { // use SetString manually to ensure that amount does not contain // multiple values, which would confuse fmt.Scan i, ok := new(big.Int).SetString(amount, 10) if !ok { return types.Currency{}, ok } return types.NewCurrency(i), true } // scanAddress scans a types.UnlockHash from a string. func scanAddress(addrStr string) (addr types.UnlockHash, err error) { err = addr.LoadString(addrStr) if err != nil { return types.UnlockHash{}, err } return addr, nil } // scanHash scans a crypto.Hash from a string. func scanHash(s string) (h crypto.Hash, err error) { err = h.LoadString(s) if err != nil { return crypto.Hash{}, err } return h, nil } // scanBool converts "true" and "false" strings to their respective boolean // value; an empty string is treated as false, and any other value returns an // error. func scanBool(param string) (bool, error) { if param == "true" { return true, nil } else if param == "false" || len(param) == 0 { return false, nil } return false, errors.New("could not decode boolean: value was not true or false") } Sia-1.3.0/api/server_helpers_test.go000066400000000000000000000440701313565667000174370ustar00rootroot00000000000000package api import ( "encoding/json" "errors" "fmt" "io" "net" "net/http" "net/url" "path/filepath" "runtime/debug" "strings" "sync" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/consensus" "github.com/NebulousLabs/Sia/modules/explorer" "github.com/NebulousLabs/Sia/modules/gateway" "github.com/NebulousLabs/Sia/modules/host" "github.com/NebulousLabs/Sia/modules/miner" "github.com/NebulousLabs/Sia/modules/renter" "github.com/NebulousLabs/Sia/modules/transactionpool" "github.com/NebulousLabs/Sia/modules/wallet" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" ) // A Server is essentially a collection of modules and an API server to talk // to them all. type Server struct { api *API apiServer *http.Server listener net.Listener requiredUserAgent string // wg is used to block Close() from returning until Serve() has finished. A // WaitGroup is used instead of a chan struct{} so that Close() can be called // without necessarily calling Serve() first. wg sync.WaitGroup } // panicClose will close a Server, panicking if there is an error upon close. func (srv *Server) panicClose() { err := srv.Close() if err != nil { // Print the stack. debug.PrintStack() panic(err) } } // Close closes the Server's listener, causing the HTTP server to shut down. func (srv *Server) Close() error { var errs []error // Close the listener, which will cause Server.Serve() to return. if err := srv.listener.Close(); err != nil { errs = append(errs, fmt.Errorf("listener.Close failed: %v", err)) } // Wait for Server.Serve() to exit. We wait so that it's guaranteed that the // server has completely closed after Close() returns. This is particularly // useful during testing so that we don't exit a test before Serve() finishes. srv.wg.Wait() // Safely close each module.
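// The modules are closed in roughly reverse dependency order (renter, host,
// and miner before the wallet, then the transaction pool, consensus, and
// finally the gateway) so that no module is torn down while a module that
// depends on it may still call into it.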
mods := []struct { name string c io.Closer }{ {"explorer", srv.api.explorer}, {"host", srv.api.host}, {"renter", srv.api.renter}, {"miner", srv.api.miner}, {"wallet", srv.api.wallet}, {"tpool", srv.api.tpool}, {"consensus", srv.api.cs}, {"gateway", srv.api.gateway}, } for _, mod := range mods { if mod.c != nil { if err := mod.c.Close(); err != nil { errs = append(errs, fmt.Errorf("%v.Close failed: %v", mod.name, err)) } } } return build.JoinErrors(errs, "\n") } // Serve listens for and handles API calls. It is a blocking function. func (srv *Server) Serve() error { // Block the Close() method until Serve() has finished. srv.wg.Add(1) defer srv.wg.Done() // The server will run until an error is encountered or the listener is // closed via the Close method. Closing the listener will result in the // benign error handled below. err := srv.apiServer.Serve(srv.listener) if err != nil && !strings.HasSuffix(err.Error(), "use of closed network connection") { return err } return nil } // NewServer creates a new API server from the provided modules. The API will // require authentication using HTTP basic auth if the supplied password is not // the empty string. Usernames are ignored for authentication. This type of // authentication sends passwords in plaintext and should therefore only be // used if the APIaddr is localhost. func NewServer(APIaddr string, requiredUserAgent string, requiredPassword string, cs modules.ConsensusSet, e modules.Explorer, g modules.Gateway, h modules.Host, m modules.Miner, r modules.Renter, tp modules.TransactionPool, w modules.Wallet) (*Server, error) { l, err := net.Listen("tcp", APIaddr) if err != nil { return nil, err } a := New(requiredUserAgent, requiredPassword, cs, e, g, h, m, r, tp, w) srv := &Server{ api: a, listener: l, requiredUserAgent: requiredUserAgent, apiServer: &http.Server{ Handler: a, }, } return srv, nil } // serverTester contains a server and a set of channels for keeping all of the // modules synchronized during testing. type serverTester struct { cs modules.ConsensusSet explorer modules.Explorer gateway modules.Gateway host modules.Host miner modules.TestMiner renter modules.Renter tpool modules.TransactionPool wallet modules.Wallet walletKey crypto.TwofishKey server *Server dir string } // assembleServerTester creates a bunch of modules and assembles them into a // server tester, without creating any directories or mining any blocks. func assembleServerTester(key crypto.TwofishKey, testdir string) (*serverTester, error) { // assembleServerTester should not get called during short tests, as it // takes a long time to run. if testing.Short() { panic("assembleServerTester called during short tests") } // Create the modules.
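// Construction follows the module dependency graph: the gateway stands
// alone, consensus needs the gateway, the transaction pool needs consensus
// and the gateway, the wallet needs consensus and the transaction pool, and
// the miner, host, and renter are built on top of those. Schematically:
//
//	gateway -> consensus -> tpool -> wallet -> {miner, host, renter}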
g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { return nil, err } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { return nil, err } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { return nil, err } w, err := wallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { return nil, err } if !w.Encrypted() { _, err = w.Encrypt(key) if err != nil { return nil, err } } err = w.Unlock(key) if err != nil { return nil, err } m, err := miner.New(cs, tp, w, filepath.Join(testdir, modules.MinerDir)) if err != nil { return nil, err } h, err := host.New(cs, tp, w, "localhost:0", filepath.Join(testdir, modules.HostDir)) if err != nil { return nil, err } r, err := renter.New(g, cs, w, tp, filepath.Join(testdir, modules.RenterDir)) if err != nil { return nil, err } srv, err := NewServer("localhost:0", "Sia-Agent", "", cs, nil, g, h, m, r, tp, w) if err != nil { return nil, err } // Assemble the serverTester. st := &serverTester{ cs: cs, gateway: g, host: h, miner: m, renter: r, tpool: tp, wallet: w, walletKey: key, server: srv, dir: testdir, } // TODO: A more reasonable way of listening for server errors. go func() { listenErr := srv.Serve() if listenErr != nil { panic(listenErr) } }() return st, nil } // assembleAuthenticatedServerTester creates a bunch of modules and assembles // them into a server tester that requires authentication with the given // requiredPassword. No directories are created and no blocks are mined. func assembleAuthenticatedServerTester(requiredPassword string, key crypto.TwofishKey, testdir string) (*serverTester, error) { // assembleAuthenticatedServerTester should not get called during short // tests, as it takes a long time to run. if testing.Short() { panic("assembleAuthenticatedServerTester called during short tests") } // Create the modules. g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { return nil, err } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { return nil, err } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { return nil, err } w, err := wallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { return nil, err } if !w.Encrypted() { _, err = w.Encrypt(key) if err != nil { return nil, err } } err = w.Unlock(key) if err != nil { return nil, err } m, err := miner.New(cs, tp, w, filepath.Join(testdir, modules.MinerDir)) if err != nil { return nil, err } h, err := host.New(cs, tp, w, "localhost:0", filepath.Join(testdir, modules.HostDir)) if err != nil { return nil, err } r, err := renter.New(g, cs, w, tp, filepath.Join(testdir, modules.RenterDir)) if err != nil { return nil, err } srv, err := NewServer("localhost:0", "Sia-Agent", requiredPassword, cs, nil, g, h, m, r, tp, w) if err != nil { return nil, err } // Assemble the serverTester. st := &serverTester{ cs: cs, gateway: g, host: h, miner: m, renter: r, tpool: tp, wallet: w, walletKey: key, server: srv, dir: testdir, } // TODO: A more reasonable way of listening for server errors. go func() { listenErr := srv.Serve() if listenErr != nil { panic(listenErr) } }() return st, nil } // assembleExplorerServerTester creates all the explorer dependencies and // explorer module without creating any directories. The user agent requirement // is disabled.
func assembleExplorerServerTester(testdir string) (*serverTester, error) { // assembleExplorerServerTester should not get called during short tests, // as it takes a long time to run. if testing.Short() { panic("assembleExplorerServerTester called during short tests") } // Create the modules. g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { return nil, err } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { return nil, err } e, err := explorer.New(cs, filepath.Join(testdir, modules.ExplorerDir)) if err != nil { return nil, err } srv, err := NewServer("localhost:0", "", "", cs, e, g, nil, nil, nil, nil, nil) if err != nil { return nil, err } // Assemble the serverTester. st := &serverTester{ cs: cs, explorer: e, gateway: g, server: srv, dir: testdir, } // TODO: A more reasonable way of listening for server errors. go func() { listenErr := srv.Serve() if listenErr != nil { panic(listenErr) } }() return st, nil } // blankServerTester creates a server tester object that is ready for testing, // without mining any blocks. func blankServerTester(name string) (*serverTester, error) { // blankServerTester is expensive, and therefore should not be called // during short tests. if testing.Short() { panic("blankServerTester called during short tests") } // Create the server tester with key. testdir := build.TempDir("api", name) key := crypto.GenerateTwofishKey() st, err := assembleServerTester(key, testdir) if err != nil { return nil, err } return st, nil } // createServerTester creates a server tester object that is ready for testing, // including money in the wallet and all modules initialized. func createServerTester(name string) (*serverTester, error) { // createServerTester is expensive, and therefore should not be called // during short tests. if testing.Short() { panic("createServerTester called during short tests") } // Create the testing directory. testdir := build.TempDir("api", name) key := crypto.GenerateTwofishKey() st, err := assembleServerTester(key, testdir) if err != nil { return nil, err } // Mine blocks until the wallet has confirmed money. for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ { _, err := st.miner.AddBlock() if err != nil { return nil, err } } return st, nil } // createAuthenticatedServerTester creates an authenticated server tester // object that is ready for testing, including money in the wallet and all // modules initialized. func createAuthenticatedServerTester(name string, password string) (*serverTester, error) { // createAuthenticatedServerTester should not get called during short // tests, as it takes a long time to run. if testing.Short() { panic("createAuthenticatedServerTester called during short tests") } // Create the testing directory. testdir := build.TempDir("authenticated-api", name) key := crypto.GenerateTwofishKey() st, err := assembleAuthenticatedServerTester(password, key, testdir) if err != nil { return nil, err } // Mine blocks until the wallet has confirmed money. for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ { _, err := st.miner.AddBlock() if err != nil { return nil, err } } return st, nil } // createExplorerServerTester creates a server tester object containing only // the explorer and some presets that match standard explorer setups.
func createExplorerServerTester(name string) (*serverTester, error) { testdir := build.TempDir("api", name) st, err := assembleExplorerServerTester(testdir) if err != nil { return nil, err } return st, nil } // decodeError returns the api.Error from an API response. This method should // only be called if the response's status code is non-2xx. The error returned // may not be of type api.Error in the event of an error unmarshalling the // JSON. func decodeError(resp *http.Response) error { var apiErr Error err := json.NewDecoder(resp.Body).Decode(&apiErr) if err != nil { return err } return apiErr } // non2xx returns true for non-success HTTP status codes. func non2xx(code int) bool { return code < 200 || code > 299 } // panicClose attempts to close a serverTester. If it fails, panic is called // with the error. func (st *serverTester) panicClose() { st.server.panicClose() } // retry will retry a function multiple times until it returns 'nil'. It will // sleep the specified duration between tries. If success is not achieved in the // specified number of attempts, the final error is returned. func retry(tries int, durationBetweenAttempts time.Duration, fn func() error) (err error) { for i := 0; i < tries-1; i++ { err = fn() if err == nil { return nil } time.Sleep(durationBetweenAttempts) } return fn() } // reloadedServerTester creates a server tester where all of the persistent // data has been copied to a new folder and all of the modules re-initialized // on the new folder. This gives an opportunity to see how modules will behave // when they are relying on their persistent structures. func (st *serverTester) reloadedServerTester() (*serverTester, error) { // Copy the testing directory. copiedDir := st.dir + " - " + persist.RandomSuffix() err := build.CopyDir(st.dir, copiedDir) if err != nil { return nil, err } copyST, err := assembleServerTester(st.walletKey, copiedDir) if err != nil { return nil, err } return copyST, nil } // netAddress returns the NetAddress of the caller. func (st *serverTester) netAddress() modules.NetAddress { return st.server.api.gateway.Address() } // coinAddress returns a coin address that the caller is able to spend from. func (st *serverTester) coinAddress() string { var addr struct { Address string } st.getAPI("/wallet/address", &addr) return addr.Address } // acceptContracts instructs the host to begin accepting contracts. func (st *serverTester) acceptContracts() error { settingsValues := url.Values{} settingsValues.Set("acceptingcontracts", "true") return st.stdPostAPI("/host", settingsValues) } // setHostStorage adds a storage folder to the host. func (st *serverTester) setHostStorage() error { values := url.Values{} values.Set("path", st.dir) values.Set("size", "1048576") return st.stdPostAPI("/host/storage/folders/add", values) } // announceHost announces the host, mines a block, and waits for the // announcement to register. func (st *serverTester) announceHost() error { // Set the host to be accepting contracts.
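// announceHost walks through the whole announcement flow: it enables
// acceptingcontracts on the host, POSTs /host/announce with the host's
// external address, mines a block so that the announcement transaction is
// confirmed, and then polls /hostdb/active (50 tries at 100ms, roughly five
// seconds) until the host appears.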
acceptingContractsValues := url.Values{} acceptingContractsValues.Set("acceptingcontracts", "true") err := st.stdPostAPI("/host", acceptingContractsValues) if err != nil { return build.ExtendErr("couldn't make an api call to the host:", err) } announceValues := url.Values{} announceValues.Set("address", string(st.host.ExternalSettings().NetAddress)) err = st.stdPostAPI("/host/announce", announceValues) if err != nil { return err } // Mine a block. _, err = st.miner.AddBlock() if err != nil { return err } // Wait for the announcement to register. var hosts HostdbActiveGET err = st.getAPI("/hostdb/active", &hosts) if err != nil { return err } for i := 0; i < 50 && len(hosts.Hosts) == 0; i++ { time.Sleep(100 * time.Millisecond) err = st.getAPI("/hostdb/active", &hosts) if err != nil { return err } } if len(hosts.Hosts) == 0 { return errors.New("host announcement not seen") } return nil } // getAPI makes an API call and decodes the response. func (st *serverTester) getAPI(call string, obj interface{}) error { resp, err := HttpGET("http://" + st.server.listener.Addr().String() + call) if err != nil { return err } defer resp.Body.Close() if non2xx(resp.StatusCode) { return decodeError(resp) } // Return early because there is no content to decode. if resp.StatusCode == http.StatusNoContent { return nil } // Decode the response into 'obj'. err = json.NewDecoder(resp.Body).Decode(obj) if err != nil { return err } return nil } // postAPI makes an API call and decodes the response. func (st *serverTester) postAPI(call string, values url.Values, obj interface{}) error { resp, err := HttpPOST("http://"+st.server.listener.Addr().String()+call, values.Encode()) if err != nil { return err } defer resp.Body.Close() if non2xx(resp.StatusCode) { return decodeError(resp) } // Return early because there is no content to decode. if resp.StatusCode == http.StatusNoContent { return nil } // Decode the response into 'obj'. err = json.NewDecoder(resp.Body).Decode(obj) if err != nil { return err } return nil } // stdGetAPI makes an API call and discards the response. func (st *serverTester) stdGetAPI(call string) error { resp, err := HttpGET("http://" + st.server.listener.Addr().String() + call) if err != nil { return err } defer resp.Body.Close() if non2xx(resp.StatusCode) { return decodeError(resp) } return nil } // stdGetAPIUA makes an API call with a custom user agent. func (st *serverTester) stdGetAPIUA(call string, userAgent string) error { req, err := http.NewRequest("GET", "http://"+st.server.listener.Addr().String()+call, nil) if err != nil { return err } req.Header.Set("User-Agent", userAgent) resp, err := http.DefaultClient.Do(req) if err != nil { return err } defer resp.Body.Close() if non2xx(resp.StatusCode) { return decodeError(resp) } return nil } // stdPostAPI makes an API call and discards the response. func (st *serverTester) stdPostAPI(call string, values url.Values) error { resp, err := HttpPOST("http://"+st.server.listener.Addr().String()+call, values.Encode()) if err != nil { return err } defer resp.Body.Close() if non2xx(resp.StatusCode) { return decodeError(resp) } return nil } Sia-1.3.0/api/server_test.go000066400000000000000000000063711313565667000157170ustar00rootroot00000000000000package api import ( "net/http" "testing" ) // TestExplorerPreset checks that the default configuration for the explorer is // working correctly. 
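// Calls without a User-Agent header are expected to succeed here because
// assembleExplorerServerTester above passes "" to NewServer for the required
// user agent, whereas the standard testers require "Sia-Agent".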
func TestExplorerPreset(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createExplorerServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Try calling a legal endpoint without a user agent. err = st.stdGetAPIUA("/explorer", "") if err != nil { t.Fatal(err) } } // TestReloading reloads a server and does smoke testing to see that modules // are still working after reload. func TestReloading(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } height := st.server.api.cs.Height() err = st.server.Close() if err != nil { t.Fatal(err) } rst, err := st.reloadedServerTester() if err != nil { t.Fatal(err) } defer rst.server.panicClose() if height != rst.server.api.cs.Height() { t.Error("server heights do not match") } // Mine some blocks on the reloaded server and see if any errors or panics // are triggered. for i := 0; i < 3; i++ { _, err := rst.miner.AddBlock() if err != nil { t.Fatal(err) } } } // TestAuthentication tests creating a server that requires authenticated API // calls, and then makes (un)authenticated API calls to test the // authentication. func TestAuthentication(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createAuthenticatedServerTester(t.Name(), "password") if err != nil { t.Fatal(err) } defer st.server.panicClose() testGETURL := "http://" + st.server.listener.Addr().String() + "/wallet/seeds" testPOSTURL := "http://" + st.server.listener.Addr().String() + "/host/announce" // Test that unauthenticated API calls fail. // GET resp, err := HttpGET(testGETURL) if err != nil { t.Fatal(err) } if resp.StatusCode != http.StatusUnauthorized { t.Fatal("unauthenticated API call succeeded on a server that requires authentication") } // POST resp, err = HttpPOST(testPOSTURL, "") if err != nil { t.Fatal(err) } if resp.StatusCode != http.StatusUnauthorized { t.Fatal("unauthenticated API call succeeded on a server that requires authentication") } // Test that authenticated API calls with the wrong password fail. // GET resp, err = HttpGETAuthenticated(testGETURL, "wrong password") if err != nil { t.Fatal(err) } if resp.StatusCode != http.StatusUnauthorized { t.Fatal("authenticated API call succeeded with an incorrect password") } // POST resp, err = HttpPOSTAuthenticated(testPOSTURL, "", "wrong password") if err != nil { t.Fatal(err) } if resp.StatusCode != http.StatusUnauthorized { t.Fatal("authenticated API call succeeded with an incorrect password") } // Test that authenticated API calls with the correct password succeed. 
// GET resp, err = HttpGETAuthenticated(testGETURL, "password") if err != nil { t.Fatal(err) } if non2xx(resp.StatusCode) { t.Fatal("authenticated API call failed with the correct password") } // POST resp, err = HttpPOSTAuthenticated(testPOSTURL, "", "password") if err != nil { t.Fatal(err) } if non2xx(resp.StatusCode) { t.Fatal("authenticated API call failed with the correct password") } } Sia-1.3.0/api/transactionpool.go000066400000000000000000000070001313565667000165610ustar00rootroot00000000000000package api import ( "encoding/base64" "net/http" "github.com/julienschmidt/httprouter" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) type ( // TpoolFeeGET contains the current minimum and maximum estimated fees. TpoolFeeGET struct { Minimum types.Currency `json:"minimum"` Maximum types.Currency `json:"maximum"` } // TpoolRawGET contains the requested transaction encoded to the raw // format, along with the id of that transaction. TpoolRawGET struct { ID types.TransactionID `json:"id"` Parents []byte `json:"parents"` Transaction []byte `json:"transaction"` } ) // decodeTransactionID will decode a transaction id from a string. func decodeTransactionID(txidStr string) (types.TransactionID, error) { txid := new(crypto.Hash) err := txid.LoadString(txidStr) if err != nil { return types.TransactionID{}, err } return types.TransactionID(*txid), nil } // tpoolFeeHandlerGET returns the current estimated fee. Transactions with // fees lower than the estimated fee may take longer to confirm. func (api *API) tpoolFeeHandlerGET(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { min, max := api.tpool.FeeEstimation() WriteJSON(w, TpoolFeeGET{ Minimum: min, Maximum: max, }) } // tpoolRawHandlerGET will provide the raw byte representation of a // transaction that matches the input id. func (api *API) tpoolRawHandlerGET(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { txid, err := decodeTransactionID(ps.ByName("id")) if err != nil { WriteError(w, Error{"error decoding transaction id: " + err.Error()}, http.StatusBadRequest) return } txn, parents, exists := api.tpool.Transaction(txid) if !exists { WriteError(w, Error{"transaction not found in transaction pool"}, http.StatusBadRequest) return } WriteJSON(w, TpoolRawGET{ ID: txid, Parents: encoding.Marshal(parents), Transaction: encoding.Marshal(txn), }) } // tpoolRawHandlerPOST takes a raw encoded transaction set and posts // it to the transaction pool, relaying it to the transaction pool's peers // regardless of whether the set is accepted. func (api *API) tpoolRawHandlerPOST(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { // Try accepting the transactions both as base64 and as clean values. rawParents, err := base64.StdEncoding.DecodeString(req.FormValue("parents")) if err != nil { rawParents = []byte(req.FormValue("parents")) } rawTransaction, err := base64.StdEncoding.DecodeString(req.FormValue("transaction")) if err != nil { rawTransaction = []byte(req.FormValue("transaction")) } // Decode the transaction and parents into a transaction set that can be // given to the transaction pool. 
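// (Illustrative sketch, not taken from this file: a client could submit the
// base64 form of both fields with curl, e.g.
//
//	curl -A "Sia-Agent" --data-urlencode "parents=<base64>" \
//		--data-urlencode "transaction=<base64>" "localhost:9980/tpool/raw"
//
// The port 9980 and the Sia-Agent user agent are assumed daemon defaults,
// used here for illustration only.)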
var parents []types.Transaction var txn types.Transaction err = encoding.Unmarshal(rawParents, &parents) if err != nil { WriteError(w, Error{"error decoding parents: " + err.Error()}, http.StatusBadRequest) return } err = encoding.Unmarshal(rawTransaction, &txn) if err != nil { WriteError(w, Error{"error decoding transaction: " + err.Error()}, http.StatusBadRequest) return } txnSet := append(parents, txn) // Re-broadcast the transactions, so that they are passed to any peers that // may have rejected them earlier. api.tpool.Broadcast(txnSet) err = api.tpool.AcceptTransactionSet(txnSet) if err != nil && err != modules.ErrDuplicateTransactionSet { WriteError(w, Error{"error accepting transaction set: " + err.Error()}, http.StatusBadRequest) return } WriteSuccess(w) } Sia-1.3.0/api/transactionpool_test.go000066400000000000000000000123571313565667000176310ustar00rootroot00000000000000package api import ( "bytes" "encoding/base64" "net/url" "testing" "time" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/types" ) // TestTransactionPoolRawHandlerPOST verifies that the transaction pool's raw // transaction post endpoint works correctly. func TestTransactionPoolRawHandlerPOST(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } // Spin up a second and fourth server tester, and get them all on the same // block. The fourth server tester will be used later, after a third is // created and used. st2, err := blankServerTester(t.Name() + "-st2") if err != nil { t.Fatal(err) } st4, err := blankServerTester(t.Name() + "-st4") if err != nil { t.Fatal(err) } err = fullyConnectNodes([]*serverTester{st, st2, st4}) if err != nil { t.Fatal(err) } // Reset the peers, giving them different IP addresses, preventing them // from connecting to each other. err = st.server.Close() if err != nil { t.Fatal(err) } err = st2.server.Close() if err != nil { t.Fatal(err) } err = st4.server.Close() if err != nil { t.Fatal(err) } st, err = assembleServerTester(st.walletKey, st.dir) if err != nil { t.Fatal(err) } defer st.panicClose() st2, err = assembleServerTester(st2.walletKey, st2.dir) if err != nil { t.Fatal(err) } defer st2.panicClose() st4, err = assembleServerTester(st4.walletKey, st4.dir) if err != nil { t.Fatal(err) } defer st4.panicClose() // Create a transaction on one node and fetch it. sentValue := types.SiacoinPrecision.Mul64(1000) txns, err := st.wallet.SendSiacoins(sentValue, types.UnlockHash{}) if err != nil { t.Fatal(err) } lastTxn := txns[len(txns)-1] var trg TpoolRawGET err = st.getAPI("/tpool/raw/"+lastTxn.ID().String(), &trg) if err != nil { t.Fatal(err) } // Verify the correctness of the transaction. var decodedTxn types.Transaction err = encoding.Unmarshal(trg.Transaction, &decodedTxn) if err != nil { t.Fatal(err) } if decodedTxn.ID() != lastTxn.ID() { t.Fatal("tpool raw get returned the wrong transaction") } // Verify the correctness of the parents. var decodedParents []types.Transaction err = encoding.Unmarshal(trg.Parents, &decodedParents) if err != nil { t.Fatal(err) } if len(decodedParents) != len(txns)-1 { t.Fatal("returned the incorrect number of parents") } // Transaction should not be visible on node 2. var trg2 TpoolRawGET err = st2.getAPI("/tpool/raw/"+lastTxn.ID().String(), &trg2) if err.Error() != "transaction not found in transaction pool" { t.Fatal("transaction should be missing initially from the second tpool") } // Try posting the transaction to node 2. 
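// (Note: the values posted below are the raw encoded bytes, which exercises
// the handler's non-base64 fallback path; the base64 path is exercised with
// st4 at the end of this test.)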
postValues := url.Values{} postValues.Set("parents", string(trg.Parents)) postValues.Set("transaction", string(trg.Transaction)) err = st2.stdPostAPI("/tpool/raw", postValues) if err != nil { t.Fatal(err) } // Verify that the two transactions returned from each server are // identical. err = st2.getAPI("/tpool/raw/"+lastTxn.ID().String(), &trg2) if err != nil { t.Fatal(err) } if !bytes.Equal(trg2.Parents, trg.Parents) { t.Error("transaction parents mismatch") } if !bytes.Equal(trg2.Transaction, trg.Transaction) { t.Error("transaction mismatch") } // Create a third server tester, connect it to the second one. st3, err := blankServerTester(t.Name() + "-st3") if err != nil { t.Fatal(err) } defer st3.server.panicClose() err = fullyConnectNodes([]*serverTester{st2, st3}) if err != nil { t.Fatal(err) } // Posting the raw transaction to the second server again should cause it // to be broadcast to the third server. err = st2.stdPostAPI("/tpool/raw", postValues) if err != nil { t.Fatal(err) } err = retry(100, time.Millisecond*100, func() error { return st3.getAPI("/tpool/raw/"+lastTxn.ID().String(), &trg) }) if err != nil { t.Fatal(err) } // Mine a block on the first server, which should clear its transaction // pool. _, err = st.miner.AddBlock() if err != nil { t.Fatal(err) } err = st.getAPI("/tpool/raw/"+lastTxn.ID().String(), &trg) if err.Error() != "transaction not found in transaction pool" { t.Fatal("transaction should be gone from the pool after mining a block") } // Convert the returned transactions to base64, which is how they will be // presented to someone using curl. Submit those to the POST endpoint. The // POST endpoint should gracefully handle that submission as base64. // // The first 3 st's all have the transactions already, so now we'll use st4. b64Parents := base64.StdEncoding.EncodeToString(trg.Parents) b64Transaction := base64.StdEncoding.EncodeToString(trg.Transaction) postValues = url.Values{} postValues.Set("parents", b64Parents) postValues.Set("transaction", b64Transaction) err = st4.stdPostAPI("/tpool/raw", postValues) if err != nil { t.Fatal(err) } } // TestTransactionPoolFee tests the /tpool/fee endpoint. func TestTransactionPoolFee(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } var fees TpoolFeeGET err = st.getAPI("/tpool/fee", &fees) if err != nil { t.Fatal(err) } min, max := st.tpool.FeeEstimation() if !min.Equals(fees.Minimum) || !max.Equals(fees.Maximum) { t.Fatal("fee mismatch") } } Sia-1.3.0/api/wallet.go000066400000000000000000000477221313565667000146450ustar00rootroot00000000000000package api import ( "encoding/json" "net/http" "path/filepath" "strconv" "strings" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/entropy-mnemonics" "github.com/julienschmidt/httprouter" ) type ( // WalletGET contains general information about the wallet. WalletGET struct { Encrypted bool `json:"encrypted"` Unlocked bool `json:"unlocked"` Rescanning bool `json:"rescanning"` ConfirmedSiacoinBalance types.Currency `json:"confirmedsiacoinbalance"` UnconfirmedOutgoingSiacoins types.Currency `json:"unconfirmedoutgoingsiacoins"` UnconfirmedIncomingSiacoins types.Currency `json:"unconfirmedincomingsiacoins"` SiafundBalance types.Currency `json:"siafundbalance"` SiacoinClaimBalance types.Currency `json:"siacoinclaimbalance"` } // WalletAddressGET contains an address returned by a GET call to // /wallet/address. 
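// A response follows the json tag below, e.g. {"address":"..."} (the value
// is elided; shown only to illustrate the shape).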
WalletAddressGET struct { Address types.UnlockHash `json:"address"` } // WalletAddressesGET contains the list of wallet addresses returned by a // GET call to /wallet/addresses. WalletAddressesGET struct { Addresses []types.UnlockHash `json:"addresses"` } // WalletInitPOST contains the primary seed that gets generated during a // POST call to /wallet/init. WalletInitPOST struct { PrimarySeed string `json:"primaryseed"` } // WalletSiacoinsPOST contains the transaction sent in the POST call to // /wallet/siacoins. WalletSiacoinsPOST struct { TransactionIDs []types.TransactionID `json:"transactionids"` } // WalletSiafundsPOST contains the transaction sent in the POST call to // /wallet/siafunds. WalletSiafundsPOST struct { TransactionIDs []types.TransactionID `json:"transactionids"` } // WalletSeedsGET contains the seeds used by the wallet. WalletSeedsGET struct { PrimarySeed string `json:"primaryseed"` AddressesRemaining int `json:"addressesremaining"` AllSeeds []string `json:"allseeds"` } // WalletSweepPOST contains the coins and funds returned by a call to // /wallet/sweep. WalletSweepPOST struct { Coins types.Currency `json:"coins"` Funds types.Currency `json:"funds"` } // WalletTransactionGETid contains the transaction returned by a call to // /wallet/transaction/:id WalletTransactionGETid struct { Transaction modules.ProcessedTransaction `json:"transaction"` } // WalletTransactionsGET contains the specified set of confirmed and // unconfirmed transactions. WalletTransactionsGET struct { ConfirmedTransactions []modules.ProcessedTransaction `json:"confirmedtransactions"` UnconfirmedTransactions []modules.ProcessedTransaction `json:"unconfirmedtransactions"` } // WalletTransactionsGETaddr contains the set of wallet transactions // relevant to the input address provided in the call to // /wallet/transaction/:addr WalletTransactionsGETaddr struct { ConfirmedTransactions []modules.ProcessedTransaction `json:"confirmedtransactions"` UnconfirmedTransactions []modules.ProcessedTransaction `json:"unconfirmedtransactions"` } // WalletVerifyAddressGET contains a bool indicating if the address passed to // /wallet/verify/address/:addr is a valid address. WalletVerifyAddressGET struct { Valid bool } ) // encryptionKeys enumerates the possible encryption keys that can be derived // from an input string. func encryptionKeys(seedStr string) (validKeys []crypto.TwofishKey) { dicts := []mnemonics.DictionaryID{"english", "german", "japanese"} for _, dict := range dicts { seed, err := modules.StringToSeed(seedStr, dict) if err != nil { continue } validKeys = append(validKeys, crypto.TwofishKey(crypto.HashObject(seed))) } validKeys = append(validKeys, crypto.TwofishKey(crypto.HashObject(seedStr))) return validKeys } // walletHandler handles API calls to /wallet. func (api *API) walletHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { siacoinBal, siafundBal, siaclaimBal := api.wallet.ConfirmedBalance() siacoinsOut, siacoinsIn := api.wallet.UnconfirmedBalance() WriteJSON(w, WalletGET{ Encrypted: api.wallet.Encrypted(), Unlocked: api.wallet.Unlocked(), Rescanning: api.wallet.Rescanning(), ConfirmedSiacoinBalance: siacoinBal, UnconfirmedOutgoingSiacoins: siacoinsOut, UnconfirmedIncomingSiacoins: siacoinsIn, SiafundBalance: siafundBal, SiacoinClaimBalance: siaclaimBal, }) } // wallet033xHandler handles API calls to /wallet/033x. 
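// A hypothetical invocation sketch (form fields per the handler body below;
// path, port, and password are illustrative assumptions):
//
//	curl -A "Sia-Agent" --data "source=/absolute/path/to/wallet.dat" \
//		--data "encryptionpassword=<password>" "localhost:9980/wallet/033x"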
func (api *API) wallet033xHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { source := req.FormValue("source") // Check that source is an absolute path. if !filepath.IsAbs(source) { WriteError(w, Error{"error when calling /wallet/033x: source must be an absolute path"}, http.StatusBadRequest) return } potentialKeys := encryptionKeys(req.FormValue("encryptionpassword")) for _, key := range potentialKeys { err := api.wallet.Load033xWallet(key, source) if err == nil { WriteSuccess(w) return } if err != nil && err != modules.ErrBadEncryptionKey { WriteError(w, Error{"error when calling /wallet/033x: " + err.Error()}, http.StatusBadRequest) return } } WriteError(w, Error{modules.ErrBadEncryptionKey.Error()}, http.StatusBadRequest) } // walletAddressHandler handles API calls to /wallet/address. func (api *API) walletAddressHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { unlockConditions, err := api.wallet.NextAddress() if err != nil { WriteError(w, Error{"error when calling /wallet/address: " + err.Error()}, http.StatusBadRequest) return } WriteJSON(w, WalletAddressGET{ Address: unlockConditions.UnlockHash(), }) } // walletAddressesHandler handles API calls to /wallet/addresses. func (api *API) walletAddressesHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { WriteJSON(w, WalletAddressesGET{ Addresses: api.wallet.AllAddresses(), }) } // walletBackupHandler handles API calls to /wallet/backup. func (api *API) walletBackupHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { destination := req.FormValue("destination") // Check that the destination is absolute. if !filepath.IsAbs(destination) { WriteError(w, Error{"error when calling /wallet/backup: destination must be an absolute path"}, http.StatusBadRequest) return } err := api.wallet.CreateBackup(destination) if err != nil { WriteError(w, Error{"error when calling /wallet/backup: " + err.Error()}, http.StatusBadRequest) return } WriteSuccess(w) } // walletInitHandler handles API calls to /wallet/init. func (api *API) walletInitHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { var encryptionKey crypto.TwofishKey if req.FormValue("encryptionpassword") != "" { encryptionKey = crypto.TwofishKey(crypto.HashObject(req.FormValue("encryptionpassword"))) } if req.FormValue("force") == "true" { err := api.wallet.Reset() if err != nil { WriteError(w, Error{"error when calling /wallet/init: " + err.Error()}, http.StatusBadRequest) return } } seed, err := api.wallet.Encrypt(encryptionKey) if err != nil { WriteError(w, Error{"error when calling /wallet/init: " + err.Error()}, http.StatusBadRequest) return } dictID := mnemonics.DictionaryID(req.FormValue("dictionary")) if dictID == "" { dictID = "english" } seedStr, err := modules.SeedToString(seed, dictID) if err != nil { WriteError(w, Error{"error when calling /wallet/init: " + err.Error()}, http.StatusBadRequest) return } WriteJSON(w, WalletInitPOST{ PrimarySeed: seedStr, }) } // walletInitSeedHandler handles API calls to /wallet/init/seed. 
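// The handler below reads the form fields "seed" (mnemonic phrase),
// "dictionary" (defaults to english), "encryptionpassword" (when empty, the
// seed itself becomes the encryption key, as the tests later in this package
// exercise), and "force" (reset an existing wallet when "true").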
func (api *API) walletInitSeedHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { var encryptionKey crypto.TwofishKey if req.FormValue("encryptionpassword") != "" { encryptionKey = crypto.TwofishKey(crypto.HashObject(req.FormValue("encryptionpassword"))) } dictID := mnemonics.DictionaryID(req.FormValue("dictionary")) if dictID == "" { dictID = "english" } seed, err := modules.StringToSeed(req.FormValue("seed"), dictID) if err != nil { WriteError(w, Error{"error when calling /wallet/init/seed: " + err.Error()}, http.StatusBadRequest) return } if req.FormValue("force") == "true" { err = api.wallet.Reset() if err != nil { WriteError(w, Error{"error when calling /wallet/init/seed: " + err.Error()}, http.StatusBadRequest) return } } err = api.wallet.InitFromSeed(encryptionKey, seed) if err != nil { WriteError(w, Error{"error when calling /wallet/init/seed: " + err.Error()}, http.StatusBadRequest) return } WriteSuccess(w) } // walletSeedHandler handles API calls to /wallet/seed. func (api *API) walletSeedHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { // Get the seed using the dictionary + phrase. dictID := mnemonics.DictionaryID(req.FormValue("dictionary")) if dictID == "" { dictID = "english" } seed, err := modules.StringToSeed(req.FormValue("seed"), dictID) if err != nil { WriteError(w, Error{"error when calling /wallet/seed: " + err.Error()}, http.StatusBadRequest) return } potentialKeys := encryptionKeys(req.FormValue("encryptionpassword")) for _, key := range potentialKeys { err := api.wallet.LoadSeed(key, seed) if err == nil { WriteSuccess(w) return } if err != nil && err != modules.ErrBadEncryptionKey { WriteError(w, Error{"error when calling /wallet/seed: " + err.Error()}, http.StatusBadRequest) return } } WriteError(w, Error{"error when calling /wallet/seed: " + modules.ErrBadEncryptionKey.Error()}, http.StatusBadRequest) } // walletSiagkeyHandler handles API calls to /wallet/siagkey. func (api *API) walletSiagkeyHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { // Fetch the list of keyfiles from the post body. keyfiles := strings.Split(req.FormValue("keyfiles"), ",") potentialKeys := encryptionKeys(req.FormValue("encryptionpassword")) for _, keypath := range keyfiles { // Check that all key paths are absolute paths. if !filepath.IsAbs(keypath) { WriteError(w, Error{"error when calling /wallet/siagkey: keyfiles contains a non-absolute path"}, http.StatusBadRequest) return } } for _, key := range potentialKeys { err := api.wallet.LoadSiagKeys(key, keyfiles) if err == nil { WriteSuccess(w) return } if err != nil && err != modules.ErrBadEncryptionKey { WriteError(w, Error{"error when calling /wallet/siagkey: " + err.Error()}, http.StatusBadRequest) return } } WriteError(w, Error{"error when calling /wallet/siagkey: " + modules.ErrBadEncryptionKey.Error()}, http.StatusBadRequest) } // walletLockHandler handles API calls to /wallet/lock. func (api *API) walletLockHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { err := api.wallet.Lock() if err != nil { WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } WriteSuccess(w) } // walletSeedsHandler handles API calls to /wallet/seeds. func (api *API) walletSeedsHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { dictionary := mnemonics.DictionaryID(req.FormValue("dictionary")) if dictionary == "" { dictionary = mnemonics.English } // Get the primary seed information. 
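// (The second return value below reports how many more addresses the wallet
// will derive from the primary seed; it is surfaced to clients as
// addressesremaining.)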
primarySeed, addrsRemaining, err := api.wallet.PrimarySeed() if err != nil { WriteError(w, Error{"error when calling /wallet/seeds: " + err.Error()}, http.StatusBadRequest) return } primarySeedStr, err := modules.SeedToString(primarySeed, dictionary) if err != nil { WriteError(w, Error{"error when calling /wallet/seeds: " + err.Error()}, http.StatusBadRequest) return } // Get the list of seeds known to the wallet. allSeeds, err := api.wallet.AllSeeds() if err != nil { WriteError(w, Error{"error when calling /wallet/seeds: " + err.Error()}, http.StatusBadRequest) return } var allSeedsStrs []string for _, seed := range allSeeds { str, err := modules.SeedToString(seed, dictionary) if err != nil { WriteError(w, Error{"error when calling /wallet/seeds: " + err.Error()}, http.StatusBadRequest) return } allSeedsStrs = append(allSeedsStrs, str) } WriteJSON(w, WalletSeedsGET{ PrimarySeed: primarySeedStr, AddressesRemaining: int(addrsRemaining), AllSeeds: allSeedsStrs, }) } // walletSiacoinsHandler handles API calls to /wallet/siacoins. func (api *API) walletSiacoinsHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { var txns []types.Transaction if req.FormValue("outputs") != "" { // multiple amounts + destinations if req.FormValue("amount") != "" || req.FormValue("destination") != "" { WriteError(w, Error{"cannot supply both 'outputs' and single amount+destination pair"}, http.StatusInternalServerError) return } var outputs []types.SiacoinOutput err := json.Unmarshal([]byte(req.FormValue("outputs")), &outputs) if err != nil { WriteError(w, Error{"could not decode outputs: " + err.Error()}, http.StatusInternalServerError) return } txns, err = api.wallet.SendSiacoinsMulti(outputs) if err != nil { WriteError(w, Error{"error when calling /wallet/siacoins: " + err.Error()}, http.StatusInternalServerError) return } } else { // single amount + destination amount, ok := scanAmount(req.FormValue("amount")) if !ok { WriteError(w, Error{"could not read amount from POST call to /wallet/siacoins"}, http.StatusBadRequest) return } dest, err := scanAddress(req.FormValue("destination")) if err != nil { WriteError(w, Error{"could not read address from POST call to /wallet/siacoins"}, http.StatusBadRequest) return } txns, err = api.wallet.SendSiacoins(amount, dest) if err != nil { WriteError(w, Error{"error when calling /wallet/siacoins: " + err.Error()}, http.StatusInternalServerError) return } } var txids []types.TransactionID for _, txn := range txns { txids = append(txids, txn.ID()) } WriteJSON(w, WalletSiacoinsPOST{ TransactionIDs: txids, }) } // walletSiafundsHandler handles API calls to /wallet/siafunds. func (api *API) walletSiafundsHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { amount, ok := scanAmount(req.FormValue("amount")) if !ok { WriteError(w, Error{"could not read 'amount' from POST call to /wallet/siafunds"}, http.StatusBadRequest) return } dest, err := scanAddress(req.FormValue("destination")) if err != nil { WriteError(w, Error{"error when calling /wallet/siafunds: " + err.Error()}, http.StatusBadRequest) return } txns, err := api.wallet.SendSiafunds(amount, dest) if err != nil { WriteError(w, Error{"error when calling /wallet/siafunds: " + err.Error()}, http.StatusInternalServerError) return } var txids []types.TransactionID for _, txn := range txns { txids = append(txids, txn.ID()) } WriteJSON(w, WalletSiafundsPOST{ TransactionIDs: txids, }) } // walletSweepSeedHandler handles API calls to /wallet/sweep/seed. 
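// A hypothetical usage sketch: POST the form fields "seed" (mnemonic) and an
// optional "dictionary"; a success response reports the swept amounts, e.g.
// {"coins":"...","funds":"..."}. (Relatedly, the "outputs" field accepted by
// walletSiacoinsHandler above is a JSON array unmarshalled into
// []types.SiacoinOutput, so its field names follow that type's json tags.)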
func (api *API) walletSweepSeedHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { // Get the seed using the dictionary + phrase. dictID := mnemonics.DictionaryID(req.FormValue("dictionary")) if dictID == "" { dictID = "english" } seed, err := modules.StringToSeed(req.FormValue("seed"), dictID) if err != nil { WriteError(w, Error{"error when calling /wallet/sweep/seed: " + err.Error()}, http.StatusBadRequest) return } coins, funds, err := api.wallet.SweepSeed(seed) if err != nil { WriteError(w, Error{"error when calling /wallet/sweep/seed: " + err.Error()}, http.StatusBadRequest) return } WriteJSON(w, WalletSweepPOST{ Coins: coins, Funds: funds, }) } // walletTransactionHandler handles API calls to /wallet/transaction/:id. func (api *API) walletTransactionHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { // Parse the id from the url. var id types.TransactionID jsonID := "\"" + ps.ByName("id") + "\"" err := id.UnmarshalJSON([]byte(jsonID)) if err != nil { WriteError(w, Error{"error when calling /wallet/transaction/:id: " + err.Error()}, http.StatusBadRequest) return } txn, ok := api.wallet.Transaction(id) if !ok { WriteError(w, Error{"error when calling /wallet/transaction/:id : transaction not found"}, http.StatusBadRequest) return } WriteJSON(w, WalletTransactionGETid{ Transaction: txn, }) } // walletTransactionsHandler handles API calls to /wallet/transactions. func (api *API) walletTransactionsHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { startheightStr, endheightStr := req.FormValue("startheight"), req.FormValue("endheight") if startheightStr == "" || endheightStr == "" { WriteError(w, Error{"startheight and endheight must be provided to a /wallet/transactions call."}, http.StatusBadRequest) return } // Get the start and end blocks. start, err := strconv.Atoi(startheightStr) if err != nil { WriteError(w, Error{"parsing integer value for parameter `startheight` failed: " + err.Error()}, http.StatusBadRequest) return } end, err := strconv.Atoi(endheightStr) if err != nil { WriteError(w, Error{"parsing integer value for parameter `endheight` failed: " + err.Error()}, http.StatusBadRequest) return } confirmedTxns, err := api.wallet.Transactions(types.BlockHeight(start), types.BlockHeight(end)) if err != nil { WriteError(w, Error{"error when calling /wallet/transactions: " + err.Error()}, http.StatusBadRequest) return } unconfirmedTxns := api.wallet.UnconfirmedTransactions() WriteJSON(w, WalletTransactionsGET{ ConfirmedTransactions: confirmedTxns, UnconfirmedTransactions: unconfirmedTxns, }) } // walletTransactionsAddrHandler handles API calls to // /wallet/transactions/:addr. func (api *API) walletTransactionsAddrHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { // Parse the address being input. jsonAddr := "\"" + ps.ByName("addr") + "\"" var addr types.UnlockHash err := addr.UnmarshalJSON([]byte(jsonAddr)) if err != nil { WriteError(w, Error{"error when calling /wallet/transactions/:addr: " + err.Error()}, http.StatusBadRequest) return } confirmedATs := api.wallet.AddressTransactions(addr) unconfirmedATs := api.wallet.AddressUnconfirmedTransactions(addr) WriteJSON(w, WalletTransactionsGETaddr{ ConfirmedTransactions: confirmedATs, UnconfirmedTransactions: unconfirmedATs, }) } // walletUnlockHandler handles API calls to /wallet/unlock. 
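// Note that encryptionKeys (defined earlier in this file) derives candidate
// keys from both the raw password and any seed phrase it can parse, so the
// "encryptionpassword" field may be either the wallet password or the
// primary seed mnemonic.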
func (api *API) walletUnlockHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { potentialKeys := encryptionKeys(req.FormValue("encryptionpassword")) for _, key := range potentialKeys { err := api.wallet.Unlock(key) if err == nil { WriteSuccess(w) return } if err != nil && err != modules.ErrBadEncryptionKey { WriteError(w, Error{"error when calling /wallet/unlock: " + err.Error()}, http.StatusBadRequest) return } } WriteError(w, Error{"error when calling /wallet/unlock: " + modules.ErrBadEncryptionKey.Error()}, http.StatusBadRequest) } // walletChangePasswordHandler handles API calls to /wallet/changepassword func (api *API) walletChangePasswordHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { var newKey crypto.TwofishKey newPassword := req.FormValue("newpassword") if newPassword == "" { WriteError(w, Error{"a password must be provided to newpassword"}, http.StatusBadRequest) return } newKey = crypto.TwofishKey(crypto.HashObject(newPassword)) originalKeys := encryptionKeys(req.FormValue("encryptionpassword")) for _, key := range originalKeys { err := api.wallet.ChangeKey(key, newKey) if err == nil { WriteSuccess(w) return } if err != nil && err != modules.ErrBadEncryptionKey { WriteError(w, Error{"error when calling /wallet/changepassword: " + err.Error()}, http.StatusBadRequest) return } } WriteError(w, Error{"error when calling /wallet/changepassword: " + modules.ErrBadEncryptionKey.Error()}, http.StatusBadRequest) } // walletVerifyAddressHandler handles API calls to /wallet/verify/address/:addr. func (api *API) walletVerifyAddressHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { addrString := ps.ByName("addr") err := new(types.UnlockHash).LoadString(addrString) WriteJSON(w, WalletVerifyAddressGET{Valid: err == nil}) } Sia-1.3.0/api/wallet_test.go000066400000000000000000001133631313565667000157010ustar00rootroot00000000000000package api import ( "encoding/json" "errors" "fmt" "net/url" "os" "path/filepath" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/consensus" "github.com/NebulousLabs/Sia/modules/gateway" "github.com/NebulousLabs/Sia/modules/miner" "github.com/NebulousLabs/Sia/modules/transactionpool" "github.com/NebulousLabs/Sia/modules/wallet" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) // TestWalletGETEncrypted probes the GET call to /wallet when the // wallet has never been encrypted. func TestWalletGETEncrypted(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // Check a wallet that has never been encrypted. testdir := build.TempDir("api", t.Name()) g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { t.Fatal("Failed to create gateway:", err) } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { t.Fatal("Failed to create consensus set:", err) } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { t.Fatal("Failed to create tpool:", err) } w, err := wallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { t.Fatal("Failed to create wallet:", err) } srv, err := NewServer("localhost:0", "Sia-Agent", "", cs, nil, g, nil, nil, nil, tp, w) if err != nil { t.Fatal(err) } // Assemble the serverTester and start listening for api requests. 
st := &serverTester{ cs: cs, gateway: g, tpool: tp, wallet: w, server: srv, } errChan := make(chan error) go func() { listenErr := srv.Serve() errChan <- listenErr }() defer func() { err := <-errChan if err != nil { t.Fatalf("API server quit: %v", err) } }() defer st.server.panicClose() var wg WalletGET err = st.getAPI("/wallet", &wg) if err != nil { t.Fatal(err) } if wg.Encrypted { t.Error("Wallet has never been encrypted") } if wg.Unlocked { t.Error("Wallet has never been unlocked") } } // TestWalletEncrypt tries to encrypt and unlock the wallet through the api // using a provided encryption key. func TestWalletEncrypt(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() testdir := build.TempDir("api", t.Name()) walletPassword := "testpass" key := crypto.TwofishKey(crypto.HashObject(walletPassword)) st, err := assembleServerTester(key, testdir) if err != nil { t.Fatal(err) } // lock the wallet err = st.stdPostAPI("/wallet/lock", nil) if err != nil { t.Fatal(err) } // Use the password to call /wallet/unlock. unlockValues := url.Values{} unlockValues.Set("encryptionpassword", walletPassword) err = st.stdPostAPI("/wallet/unlock", unlockValues) if err != nil { t.Fatal(err) } // Check that the wallet actually unlocked. if !st.wallet.Unlocked() { t.Error("wallet is not unlocked") } // reload the server and verify unlocking still works err = st.server.Close() if err != nil { t.Fatal(err) } st2, err := assembleServerTester(st.walletKey, st.dir) if err != nil { t.Fatal(err) } defer st2.server.panicClose() // lock the wallet err = st2.stdPostAPI("/wallet/lock", nil) if err != nil { t.Fatal(err) } // Use the password to call /wallet/unlock. err = st2.stdPostAPI("/wallet/unlock", unlockValues) if err != nil { t.Fatal(err) } // Check that the wallet actually unlocked. if !st2.wallet.Unlocked() { t.Error("wallet is not unlocked") } } // TestWalletBlankEncrypt tries to encrypt and unlock the wallet // through the api using a blank encryption key - meaning that the wallet seed // returned by the encryption call can be used as the encryption key. func TestWalletBlankEncrypt(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // Create a server object without encrypting or unlocking the wallet. testdir := build.TempDir("api", t.Name()) g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { t.Fatal(err) } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { t.Fatal(err) } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { t.Fatal(err) } w, err := wallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { t.Fatal(err) } srv, err := NewServer("localhost:0", "Sia-Agent", "", cs, nil, g, nil, nil, nil, tp, w) if err != nil { t.Fatal(err) } // Assemble the serverTester. st := &serverTester{ cs: cs, gateway: g, tpool: tp, wallet: w, server: srv, } go func() { listenErr := srv.Serve() if listenErr != nil { panic(listenErr) } }() defer st.server.panicClose() // Make a call to /wallet/init and get the seed. Provide no encryption // key so that the encryption key is the seed that gets returned. var wip WalletInitPOST err = st.postAPI("/wallet/init", url.Values{}, &wip) if err != nil { t.Fatal(err) } // Use the seed to call /wallet/unlock. 
unlockValues := url.Values{} unlockValues.Set("encryptionpassword", wip.PrimarySeed) err = st.stdPostAPI("/wallet/unlock", unlockValues) if err != nil { t.Fatal(err) } // Check that the wallet actually unlocked. if !w.Unlocked() { t.Error("wallet is not unlocked") } } // TestIntegrationWalletInitSeed tries to encrypt and unlock the wallet // through the api using a supplied seed. func TestIntegrationWalletInitSeed(t *testing.T) { if testing.Short() { t.SkipNow() } // Create a server object without encrypting or unlocking the wallet. testdir := build.TempDir("api", "TestIntegrationWalletInitSeed") g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { t.Fatal(err) } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { t.Fatal(err) } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { t.Fatal(err) } w, err := wallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { t.Fatal(err) } srv, err := NewServer("localhost:0", "Sia-Agent", "", cs, nil, g, nil, nil, nil, tp, w) if err != nil { t.Fatal(err) } // Assemble the serverTester. st := &serverTester{ cs: cs, gateway: g, tpool: tp, wallet: w, server: srv, } go func() { listenErr := srv.Serve() if listenErr != nil { panic(listenErr) } }() defer st.server.panicClose() // Make a call to /wallet/init/seed using an invalid seed qs := url.Values{} qs.Set("seed", "foo") err = st.stdPostAPI("/wallet/init/seed", qs) if err == nil { t.Fatal("expected error, got nil") } // Make a call to /wallet/init/seed. Provide no encryption key so that the // encryption key is the seed. var seed modules.Seed fastrand.Read(seed[:]) seedStr, _ := modules.SeedToString(seed, "english") qs.Set("seed", seedStr) err = st.stdPostAPI("/wallet/init/seed", qs) if err != nil { t.Fatal(err) } // Try to re-init the wallet using a different encryption key qs.Set("encryptionpassword", "foo") err = st.stdPostAPI("/wallet/init/seed", qs) if err == nil { t.Fatal("expected error, got nil") } // Use the seed to call /wallet/unlock. unlockValues := url.Values{} unlockValues.Set("encryptionpassword", seedStr) err = st.stdPostAPI("/wallet/unlock", unlockValues) if err != nil { t.Fatal(err) } // Check that the wallet actually unlocked. if !w.Unlocked() { t.Error("wallet is not unlocked") } } // TestWalletGETSiacoins probes the GET call to /wallet when the // siacoin balance is being manipulated. func TestWalletGETSiacoins(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Check the initial wallet is encrypted, unlocked, and has the siacoins // that got mined. var wg WalletGET err = st.getAPI("/wallet", &wg) if err != nil { t.Fatal(err) } if !wg.Encrypted { t.Error("Wallet should be encrypted") } if !wg.Unlocked { t.Error("Wallet should be unlocked") } if wg.ConfirmedSiacoinBalance.Cmp(types.CalculateCoinbase(1)) != 0 { t.Error("reported wallet balance does not reflect the single block that has been mined") } if wg.UnconfirmedOutgoingSiacoins.Cmp64(0) != 0 { t.Error("there should not be unconfirmed outgoing siacoins") } if wg.UnconfirmedIncomingSiacoins.Cmp64(0) != 0 { t.Error("there should not be unconfirmed incoming siacoins") } // Send coins to a wallet address through the api. 
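// (The destination below is one of this wallet's own addresses, so the coins
// come straight back as unconfirmed incoming siacoins; the net difference
// between unconfirmed outgoing and incoming that is checked further down is
// just the miner fee.)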
var wag WalletAddressGET err = st.getAPI("/wallet/address", &wag) if err != nil { t.Fatal(err) } sendSiacoinsValues := url.Values{} sendSiacoinsValues.Set("amount", "1234") sendSiacoinsValues.Set("destination", wag.Address.String()) err = st.stdPostAPI("/wallet/siacoins", sendSiacoinsValues) if err != nil { t.Fatal(err) } // Check that the wallet is reporting unconfirmed siacoins. err = st.getAPI("/wallet", &wg) if err != nil { t.Fatal(err) } if !wg.Encrypted { t.Error("Wallet should be encrypted") } if !wg.Unlocked { t.Error("Wallet should be unlocked") } if wg.ConfirmedSiacoinBalance.Cmp(types.CalculateCoinbase(1)) != 0 { t.Error("reported wallet balance does not reflect the single block that has been mined") } if wg.UnconfirmedOutgoingSiacoins.Cmp64(0) <= 0 { t.Error("there should be unconfirmed outgoing siacoins") } if wg.UnconfirmedIncomingSiacoins.Cmp64(0) <= 0 { t.Error("there should be unconfirmed incoming siacoins") } if wg.UnconfirmedOutgoingSiacoins.Cmp(wg.UnconfirmedIncomingSiacoins) <= 0 { t.Error("net movement of siacoins should be outgoing (miner fees)") } // Mine a block and see that the unconfirmed balances reduce back to // nothing. _, err = st.miner.AddBlock() if err != nil { t.Fatal(err) } err = st.getAPI("/wallet", &wg) if err != nil { t.Fatal(err) } if wg.ConfirmedSiacoinBalance.Cmp(types.CalculateCoinbase(1).Add(types.CalculateCoinbase(2))) >= 0 { t.Error("reported wallet balance does not reflect mining two blocks and eating a miner fee") } if wg.UnconfirmedOutgoingSiacoins.Cmp64(0) != 0 { t.Error("there should not be unconfirmed outgoing siacoins") } if wg.UnconfirmedIncomingSiacoins.Cmp64(0) != 0 { t.Error("there should not be unconfirmed incoming siacoins") } } // TestIntegrationWalletSweepSeedPOST probes the POST call to // /wallet/sweep/seed. func TestIntegrationWalletSweepSeedPOST(t *testing.T) { if testing.Short() { t.SkipNow() } st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // send coins to a new wallet, then sweep them back key := crypto.GenerateTwofishKey() w, err := wallet.New(st.cs, st.tpool, filepath.Join(st.dir, "wallet2")) if err != nil { t.Fatal(err) } _, err = w.Encrypt(key) if err != nil { t.Fatal(err) } err = w.Unlock(key) if err != nil { t.Fatal(err) } addr, _ := w.NextAddress() st.wallet.SendSiacoins(types.SiacoinPrecision.Mul64(100), addr.UnlockHash()) st.miner.AddBlock() seed, _, _ := w.PrimarySeed() seedStr, _ := modules.SeedToString(seed, "english") // Sweep the coins we sent var wsp WalletSweepPOST qs := url.Values{} qs.Set("seed", seedStr) err = st.postAPI("/wallet/sweep/seed", qs, &wsp) if err != nil { t.Fatal(err) } // Should have swept more than 80 SC if wsp.Coins.Cmp(types.SiacoinPrecision.Mul64(80)) <= 0 { t.Fatalf("swept fewer coins (%v SC) than expected %v+", wsp.Coins.Div(types.SiacoinPrecision), 80) } // Add a block so that the sweep transaction is processed st.miner.AddBlock() // Sweep again; should find no coins. An error will be returned because // the found coins cannot cover the transaction fee. err = st.postAPI("/wallet/sweep/seed", qs, &wsp) if err == nil { t.Fatal("expected error, got nil") } // Call /wallet/sweep/seed with an invalid seed qs.Set("seed", "foo") err = st.postAPI("/wallet/sweep/seed", qs, &wsp) if err == nil { t.Fatal("expected error, got nil") } } // TestIntegrationWalletLoadSeedPOST probes the POST call to // /wallet/seed. func TestIntegrationWalletLoadSeedPOST(t *testing.T) { if testing.Short() { t.SkipNow() } // Create a wallet. 
key := crypto.TwofishKey(crypto.HashObject("password")) st, err := assembleServerTester(key, build.TempDir("api", t.Name())) if err != nil { t.Fatal(err) } defer st.panicClose() // Mine blocks until the wallet has confirmed money. for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ { st.miner.AddBlock() } // Create a wallet to load coins from. key2 := crypto.GenerateTwofishKey() w2, err := wallet.New(st.cs, st.tpool, filepath.Join(st.dir, "wallet2")) if err != nil { t.Fatal(err) } _, err = w2.Encrypt(key2) if err != nil { t.Fatal(err) } err = w2.Unlock(key2) if err != nil { t.Fatal(err) } // Mine coins into the second wallet. m, err := miner.New(st.cs, st.tpool, w2, filepath.Join(st.dir, "miner2")) if err != nil { t.Fatal(err) } for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ { m.AddBlock() } // Record starting balances. oldBal, _, _ := st.wallet.ConfirmedBalance() w2bal, _, _ := w2.ConfirmedBalance() if w2bal.IsZero() { t.Fatal("second wallet's balance should not be zero") } // Load the second wallet's seed into the first wallet seed, _, _ := w2.PrimarySeed() seedStr, _ := modules.SeedToString(seed, "english") qs := url.Values{} qs.Set("seed", seedStr) qs.Set("encryptionpassword", "password") err = st.stdPostAPI("/wallet/seed", qs) if err != nil { t.Fatal(err) } // First wallet should now have balance of both wallets bal, _, _ := st.wallet.ConfirmedBalance() if exp := oldBal.Add(w2bal); !bal.Equals(exp) { t.Fatalf("wallet did not load seed correctly: expected %v coins, got %v", exp, bal) } } // TestWalletTransactionGETid queries the /wallet/transaction/:id // api call. func TestWalletTransactionGETid(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Mining blocks should have created transactions for the wallet containing // miner payouts. Get the list of transactions. var wtg WalletTransactionsGET err = st.getAPI("/wallet/transactions?startheight=0&endheight=10", &wtg) if err != nil { t.Fatal(err) } if len(wtg.ConfirmedTransactions) == 0 { t.Error("expecting a few wallet transactions, corresponding to miner payouts.") } if len(wtg.UnconfirmedTransactions) != 0 { t.Error("expecting 0 unconfirmed transactions") } // A call to /wallet/transactions without startheight and endheight parameters // should return a descriptive error message. err = st.getAPI("/wallet/transactions", &wtg) if err == nil || err.Error() != "startheight and endheight must be provided to a /wallet/transactions call." { t.Error("expecting /wallet/transactions call with empty parameters to error") } // Query the details of the first transaction using // /wallet/transaction/:id var wtgid WalletTransactionGETid wtgidQuery := fmt.Sprintf("/wallet/transaction/%s", wtg.ConfirmedTransactions[0].TransactionID) err = st.getAPI(wtgidQuery, &wtgid) if err != nil { t.Fatal(err) } if len(wtgid.Transaction.Inputs) != 0 { t.Error("miner payout should appear as an output, not an input") } if len(wtgid.Transaction.Outputs) != 1 { t.Fatal("a single miner payout output should have been created") } if wtgid.Transaction.Outputs[0].FundType != types.SpecifierMinerPayout { t.Error("fund type should be a miner payout") } if wtgid.Transaction.Outputs[0].Value.IsZero() { t.Error("output should have a nonzero value") } // Query the details of a transaction where siacoins were sent. // // NOTE: We call the SendSiacoins method directly to get convenient access // to the txid. 
sentValue := types.SiacoinPrecision.Mul64(3) txns, err := st.wallet.SendSiacoins(sentValue, types.UnlockHash{}) if err != nil { t.Fatal(err) } st.miner.AddBlock() var wtgid2 WalletTransactionGETid err = st.getAPI(fmt.Sprintf("/wallet/transaction/%s", txns[1].ID()), &wtgid2) if err != nil { t.Fatal(err) } txn := wtgid2.Transaction if txn.TransactionID != txns[1].ID() { t.Error("wrong transaction was fetched") } else if len(txn.Inputs) != 1 || len(txn.Outputs) != 2 { t.Error("expected 1 input and 2 outputs, got", len(txn.Inputs), len(txn.Outputs)) } else if !txn.Outputs[0].Value.Equals(sentValue) { t.Errorf("expected first output to equal %v, got %v", sentValue, txn.Outputs[0].Value) } else if exp := txn.Inputs[0].Value.Sub(sentValue); !txn.Outputs[1].Value.Equals(exp) { t.Errorf("expected second output to equal %v, got %v", exp, txn.Outputs[1].Value) } // Create a second wallet and send money to that wallet. st2, err := blankServerTester(t.Name() + "w2") if err != nil { t.Fatal(err) } err = fullyConnectNodes([]*serverTester{st, st2}) if err != nil { t.Fatal(err) } // Send a transaction from the one wallet to the other. var wag WalletAddressGET err = st2.getAPI("/wallet/address", &wag) if err != nil { t.Fatal(err) } sendSiacoinsValues := url.Values{} sendSiacoinsValues.Set("amount", sentValue.String()) sendSiacoinsValues.Set("destination", wag.Address.String()) err = st.stdPostAPI("/wallet/siacoins", sendSiacoinsValues) if err != nil { t.Fatal(err) } // Check the unconfirmed transactions in the sending wallet to see the id of // the output being spent. err = st.getAPI("/wallet/transactions?startheight=0&endheight=10000", &wtg) if err != nil { t.Fatal(err) } if len(wtg.UnconfirmedTransactions) != 2 { t.Fatal("expecting two unconfirmed transactions in sender wallet") } // Get the id of the non-change output sent to the receiving wallet. expectedOutputID := wtg.UnconfirmedTransactions[1].Outputs[0].ID // Check the unconfirmed transactions struct to make sure all fields are // filled out correctly in the receiving wallet. err = st2.getAPI("/wallet/transactions?startheight=0&endheight=10000", &wtg) if err != nil { t.Fatal(err) } // There should be at least one unconfirmed transaction: err = retry(50, time.Millisecond*100, func() error { if err := st2.getAPI("/wallet/transactions?startheight=0&endheight=10000", &wtg); err != nil { return err } if len(wtg.UnconfirmedTransactions) < 1 { return errors.New("unconfirmed transaction not found") } return nil }) if err != nil { t.Fatal(err) } // The unconfirmed transaction should have inputs and outputs, and both of // those should have value. for _, txn := range wtg.UnconfirmedTransactions { if len(txn.Inputs) < 1 { t.Fatal("transaction should have an input") } if len(txn.Outputs) < 1 { t.Fatal("transaction should have outputs") } for _, input := range txn.Inputs { if input.Value.IsZero() { t.Error("input should not have zero value") } } for _, output := range txn.Outputs { if output.Value.IsZero() { t.Error("output should not have zero value") } } if txn.Outputs[0].ID != expectedOutputID { t.Error("transactions should have matching output ids for the same transaction") } } // Restart st2. err = st2.server.Close() if err != nil { t.Fatal(err) } st2, err = assembleServerTester(st2.walletKey, st2.dir) if err != nil { t.Fatal(err) } err = st2.getAPI("/wallet/transactions?startheight=0&endheight=10000", &wtg) if err != nil { t.Fatal(err) } // Reconnect st2 and st. err = fullyConnectNodes([]*serverTester{st, st2}) if err != nil { t.Fatal(err) } // Mine a block on st to get the transactions into the blockchain. 
_, err = st.miner.AddBlock() if err != nil { t.Fatal(err) } _, err = synchronizationCheck([]*serverTester{st, st2}) if err != nil { t.Fatal(err) } err = st2.getAPI("/wallet/transactions?startheight=0&endheight=10000", &wtg) if err != nil { t.Fatal(err) } // There should be at least one confirmed transaction: if len(wtg.ConfirmedTransactions) < 1 { t.Fatal("confirmed transaction not found") } for _, txn := range wtg.ConfirmedTransactions { if len(txn.Inputs) < 1 { t.Fatal("transaction should have an input") } if len(txn.Outputs) < 1 { t.Fatal("transaction should have outputs") } for _, input := range txn.Inputs { if input.Value.IsZero() { t.Error("input should not have zero value") } } for _, output := range txn.Outputs { if output.Value.IsZero() { t.Error("output should not have zero value") } } } // Reset the wallet and see that the confirmed transactions are still there. err = st2.server.Close() if err != nil { t.Fatal(err) } st2, err = assembleServerTester(st2.walletKey, st2.dir) if err != nil { t.Fatal(err) } defer st2.server.Close() err = st2.getAPI("/wallet/transactions?startheight=0&endheight=10000", &wtg) if err != nil { t.Fatal(err) } // There should be at least one confirmed transaction: if len(wtg.ConfirmedTransactions) < 1 { t.Fatal("confirmed transaction not found") } // Check whether the confirmed transactions remain. for _, txn := range wtg.ConfirmedTransactions { if len(txn.Inputs) < 1 { t.Fatal("transaction should have an input") } if len(txn.Outputs) < 1 { t.Fatal("transaction should have outputs") } for _, input := range txn.Inputs { if input.Value.IsZero() { t.Error("input should not have zero value") } } for _, output := range txn.Outputs { if output.Value.IsZero() { t.Error("output should not have zero value") } } } } // Tests that the /wallet/backup call checks for relative paths. func TestWalletRelativePathErrorBackup(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host. if err := st.announceHost(); err != nil { t.Fatal(err) } // Create tmp directory for uploads/downloads. walletTestDir := build.TempDir("wallet_relative_path_backup") err = os.MkdirAll(walletTestDir, 0700) if err != nil { t.Fatal(err) } // Wallet backup should error if its destination is a relative path backupAbsoluteError := "error when calling /wallet/backup: destination must be an absolute path" // This should error. err = st.stdGetAPI("/wallet/backup?destination=test_wallet.backup") if err == nil || err.Error() != backupAbsoluteError { t.Fatal(err) } // This as well. err = st.stdGetAPI("/wallet/backup?destination=../test_wallet.backup") if err == nil || err.Error() != backupAbsoluteError { t.Fatal(err) } // This should succeed. err = st.stdGetAPI("/wallet/backup?destination=" + filepath.Join(walletTestDir, "test_wallet.backup")) if err != nil { t.Fatal(err) } // Make sure the backup was actually created. _, errStat := os.Stat(filepath.Join(walletTestDir, "test_wallet.backup")) if errStat != nil { t.Error(errStat) } } // Tests that the /wallet/033x call checks for relative paths. func TestWalletRelativePathError033x(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host. if err := st.announceHost(); err != nil { t.Fatal(err) } // Create tmp directory for uploads/downloads. 
walletTestDir := build.TempDir("wallet_relative_path_033x") err = os.MkdirAll(walletTestDir, 0700) if err != nil { t.Fatal(err) } // Wallet loading from 033x should error if its source is a relative path load033xAbsoluteError := "error when calling /wallet/033x: source must be an absolute path" // This should fail. load033xValues := url.Values{} load033xValues.Set("source", "test.dat") err = st.stdPostAPI("/wallet/033x", load033xValues) if err == nil || err.Error() != load033xAbsoluteError { t.Fatal(err) } // As should this. load033xValues = url.Values{} load033xValues.Set("source", "../test.dat") err = st.stdPostAPI("/wallet/033x", load033xValues) if err == nil || err.Error() != load033xAbsoluteError { t.Fatal(err) } // This should succeed (though the wallet method will still return an error) load033xValues = url.Values{} if err = createRandFile(filepath.Join(walletTestDir, "test.dat"), 0); err != nil { t.Fatal(err) } load033xValues.Set("source", filepath.Join(walletTestDir, "test.dat")) err = st.stdPostAPI("/wallet/033x", load033xValues) if err == nil || err.Error() == load033xAbsoluteError { t.Fatal(err) } } // Tests that the /wallet/siagkey call checks for relative paths. func TestWalletRelativePathErrorSiag(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() // Announce the host. if err := st.announceHost(); err != nil { t.Fatal(err) } // Create tmp directory for uploads/downloads. walletTestDir := build.TempDir("wallet_relative_path_sig") err = os.MkdirAll(walletTestDir, 0700) if err != nil { t.Fatal(err) } // Wallet loading from siag should error if its source is a relative path loadSiagAbsoluteError := "error when calling /wallet/siagkey: keyfiles contains a non-absolute path" // This should fail. loadSiagValues := url.Values{} loadSiagValues.Set("keyfiles", "test.dat") err = st.stdPostAPI("/wallet/siagkey", loadSiagValues) if err == nil || err.Error() != loadSiagAbsoluteError { t.Fatal(err) } // As should this. loadSiagValues = url.Values{} loadSiagValues.Set("keyfiles", "../test.dat") err = st.stdPostAPI("/wallet/siagkey", loadSiagValues) if err == nil || err.Error() != loadSiagAbsoluteError { t.Fatal(err) } // This should fail. loadSiagValues = url.Values{} loadSiagValues.Set("keyfiles", "/test.dat,test.dat,../test.dat") err = st.stdPostAPI("/wallet/siagkey", loadSiagValues) if err == nil || err.Error() != loadSiagAbsoluteError { t.Fatal(err) } // As should this. loadSiagValues = url.Values{} loadSiagValues.Set("keyfiles", "../test.dat,/test.dat") err = st.stdPostAPI("/wallet/siagkey", loadSiagValues) if err == nil || err.Error() != loadSiagAbsoluteError { t.Fatal(err) } // This should succeed. loadSiagValues = url.Values{} if err = createRandFile(filepath.Join(walletTestDir, "test.dat"), 0); err != nil { t.Fatal(err) } loadSiagValues.Set("keyfiles", filepath.Join(walletTestDir, "test.dat")) err = st.stdPostAPI("/wallet/siagkey", loadSiagValues) if err == nil || err.Error() == loadSiagAbsoluteError { t.Fatal(err) } // As should this. 
loadSiagValues = url.Values{} if err = createRandFile(filepath.Join(walletTestDir, "test1.dat"), 0); err != nil { t.Fatal(err) } loadSiagValues.Set("keyfiles", filepath.Join(walletTestDir, "test.dat")+","+filepath.Join(walletTestDir, "test1.dat")) err = st.stdPostAPI("/wallet/siagkey", loadSiagValues) if err == nil || err.Error() == loadSiagAbsoluteError { t.Fatal(err) } } func TestWalletReset(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() testdir := build.TempDir("api", t.Name()) walletPassword := "testpass" key := crypto.TwofishKey(crypto.HashObject(walletPassword)) st, err := assembleServerTester(key, testdir) if err != nil { t.Fatal(err) } // lock the wallet err = st.stdPostAPI("/wallet/lock", nil) if err != nil { t.Fatal(err) } // reencrypt the wallet newPassword := "testpass2" newKey := crypto.TwofishKey(crypto.HashObject(newPassword)) initValues := url.Values{} initValues.Set("force", "true") initValues.Set("encryptionpassword", newPassword) err = st.stdPostAPI("/wallet/init", initValues) if err != nil { t.Fatal(err) } // Use the password to call /wallet/unlock. unlockValues := url.Values{} unlockValues.Set("encryptionpassword", newPassword) err = st.stdPostAPI("/wallet/unlock", unlockValues) if err != nil { t.Fatal(err) } // Check that the wallet actually unlocked. if !st.wallet.Unlocked() { t.Error("wallet is not unlocked") } // reload the server and verify unlocking still works err = st.server.Close() if err != nil { t.Fatal(err) } st2, err := assembleServerTester(newKey, st.dir) if err != nil { t.Fatal(err) } defer st2.server.panicClose() // lock the wallet err = st2.stdPostAPI("/wallet/lock", nil) if err != nil { t.Fatal(err) } // Use the password to call /wallet/unlock. err = st2.stdPostAPI("/wallet/unlock", unlockValues) if err != nil { t.Fatal(err) } // Check that the wallet actually unlocked. 
if !st2.wallet.Unlocked() { t.Error("wallet is not unlocked") } } func TestWalletSiafunds(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() walletPassword := "testpass" key := crypto.TwofishKey(crypto.HashObject(walletPassword)) testdir := build.TempDir("api", t.Name()) st, err := assembleServerTester(key, testdir) if err != nil { t.Fatal(err) } defer st.server.panicClose() // mine some money for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ { _, err := st.miner.AddBlock() if err != nil { t.Fatal(err) } } // record transactions var wtg WalletTransactionsGET err = st.getAPI("/wallet/transactions?startheight=0&endheight=100", &wtg) if err != nil { t.Fatal(err) } numTxns := len(wtg.ConfirmedTransactions) // load siafunds into the wallet siagPath, _ := filepath.Abs("../types/siag0of1of1.siakey") loadSiagValues := url.Values{} loadSiagValues.Set("keyfiles", siagPath) loadSiagValues.Set("encryptionpassword", walletPassword) err = st.stdPostAPI("/wallet/siagkey", loadSiagValues) if err != nil { t.Fatal(err) } err = st.getAPI("/wallet/transactions?startheight=0&endheight=100", &wtg) if err != nil { t.Fatal(err) } if len(wtg.ConfirmedTransactions) != numTxns+1 { t.Errorf("expected %v transactions, got %v", numTxns+1, len(wtg.ConfirmedTransactions)) } // check balance var wg WalletGET err = st.getAPI("/wallet", &wg) if err != nil { t.Fatal(err) } if wg.SiafundBalance.Cmp64(2000) != 0 { t.Fatalf("bad siafund balance: expected %v, got %v", 2000, wg.SiafundBalance) } // spend the siafunds into the wallet seed var wag WalletAddressGET err = st.getAPI("/wallet/address", &wag) if err != nil { t.Fatal(err) } sendSiafundsValues := url.Values{} sendSiafundsValues.Set("amount", "2000") sendSiafundsValues.Set("destination", wag.Address.String()) err = st.stdPostAPI("/wallet/siafunds", sendSiafundsValues) if err != nil { t.Fatal(err) } // Announce the host and form an allowance with it. This will result in a // siafund claim. err = st.announceHost() if err != nil { t.Fatal(err) } err = st.setHostStorage() if err != nil { t.Fatal(err) } err = st.acceptContracts() if err != nil { t.Fatal(err) } // mine a block so that the announcement makes it into the blockchain _, err = st.miner.AddBlock() if err != nil { t.Fatal(err) } // form allowance allowanceValues := url.Values{} testFunds := "10000000000000000000000000000" // 10k SC testPeriod := "20" allowanceValues.Set("funds", testFunds) allowanceValues.Set("period", testPeriod) err = st.stdPostAPI("/renter", allowanceValues) if err != nil { t.Fatal(err) } // Block until allowance has finished forming. err = build.Retry(50, time.Millisecond*250, func() error { var rc RenterContracts err = st.getAPI("/renter/contracts", &rc) if err != nil { return errors.New("couldn't get renter stats") } if len(rc.Contracts) != 1 { return errors.New("no contracts") } return nil }) if err != nil { t.Fatal("allowance setting failed") } // mine a block so that the file contract makes it into the blockchain _, err = st.miner.AddBlock() if err != nil { t.Fatal(err) } // wallet should now have a claim balance err = st.getAPI("/wallet", &wg) if err != nil { t.Fatal(err) } if wg.SiacoinClaimBalance.IsZero() { t.Fatal("expected non-zero claim balance") } } // TestWalletVerifyAddress tests that the /wallet/verify/address/:addr endpoint // validates wallet addresses correctly. 
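// --- Illustrative sketch (added example, not part of the original test file) ---
// TestWalletSiafunds above polls /renter/contracts with build.Retry before
// checking the claim balance. A hedged helper like this one (the name is
// hypothetical) shows the same polling pattern applied to the claim balance
// itself, reusing serverTester, WalletGET, and build.Retry from this file.
func waitForClaimBalance(st *serverTester) error {
	return build.Retry(50, time.Millisecond*250, func() error {
		// Re-fetch /wallet on every attempt so each check sees fresh data.
		var wg WalletGET
		if err := st.getAPI("/wallet", &wg); err != nil {
			return err
		}
		if wg.SiacoinClaimBalance.IsZero() {
			return errors.New("no siafund claim balance yet")
		}
		return nil
	})
}
// --- end sketch ---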
func TestWalletVerifyAddress(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() var res WalletVerifyAddressGET fakeaddr := "thisisaninvalidwalletaddress" if err = st.getAPI("/wallet/verify/address/"+fakeaddr, &res); err != nil { t.Fatal(err) } if res.Valid == true { t.Fatal("expected /wallet/verify to fail an invalid address") } var wag WalletAddressGET err = st.getAPI("/wallet/address", &wag) if err != nil { t.Fatal(err) } if err = st.getAPI("/wallet/verify/address/"+wag.Address.String(), &res); err != nil { t.Fatal(err) } if res.Valid == false { t.Fatal("expected /wallet/verify to pass a valid address") } } // TestWalletChangePassword verifies that the /wallet/changepassword endpoint // works correctly and changes a wallet password. func TestWalletChangePassword(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() testdir := build.TempDir("api", t.Name()) originalPassword := "testpass" newPassword := "newpass" originalKey := crypto.TwofishKey(crypto.HashObject(originalPassword)) newKey := crypto.TwofishKey(crypto.HashObject(newPassword)) st, err := assembleServerTester(originalKey, testdir) if err != nil { t.Fatal(err) } // lock the wallet err = st.stdPostAPI("/wallet/lock", nil) if err != nil { t.Fatal(err) } // Use the password to call /wallet/unlock. unlockValues := url.Values{} unlockValues.Set("encryptionpassword", originalPassword) err = st.stdPostAPI("/wallet/unlock", unlockValues) if err != nil { t.Fatal(err) } // Check that the wallet actually unlocked. if !st.wallet.Unlocked() { t.Error("wallet is not unlocked") } // change the wallet key changeKeyValues := url.Values{} changeKeyValues.Set("encryptionpassword", originalPassword) changeKeyValues.Set("newpassword", newPassword) err = st.stdPostAPI("/wallet/changepassword", changeKeyValues) if err != nil { t.Fatal(err) } // wallet should still be unlocked if !st.wallet.Unlocked() { t.Fatal("changepassword locked the wallet") } // lock the wallet and verify unlocking works with the new password err = st.stdPostAPI("/wallet/lock", nil) if err != nil { t.Fatal(err) } unlockValues.Set("encryptionpassword", newPassword) err = st.stdPostAPI("/wallet/unlock", unlockValues) if err != nil { t.Fatal(err) } // Check that the wallet actually unlocked. if !st.wallet.Unlocked() { t.Error("wallet is not unlocked") } // reload the server and verify unlocking still works err = st.server.Close() if err != nil { t.Fatal(err) } st2, err := assembleServerTester(newKey, st.dir) if err != nil { t.Fatal(err) } defer st2.server.panicClose() // lock the wallet err = st2.stdPostAPI("/wallet/lock", nil) if err != nil { t.Fatal(err) } // Use the password to call /wallet/unlock. err = st2.stdPostAPI("/wallet/unlock", unlockValues) if err != nil { t.Fatal(err) } // Check that the wallet actually unlocked. if !st2.wallet.Unlocked() { t.Error("wallet is not unlocked") } } // TestWalletSiacoins tests the /wallet/siacoins endpoint, including sending // to multiple addresses. 
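// --- Illustrative sketch (added example, not part of the original test file) ---
// TestWalletChangePassword above repeats a lock-then-unlock cycle several
// times. A hedged helper capturing that pattern might look like this; the
// helper name is hypothetical, while the endpoints and form values are the
// ones used in the test above.
func lockAndUnlock(st *serverTester, password string) error {
	// Lock the wallet, then unlock it with the supplied password.
	if err := st.stdPostAPI("/wallet/lock", nil); err != nil {
		return err
	}
	unlockValues := url.Values{}
	unlockValues.Set("encryptionpassword", password)
	if err := st.stdPostAPI("/wallet/unlock", unlockValues); err != nil {
		return err
	}
	if !st.wallet.Unlocked() {
		return errors.New("wallet did not unlock")
	}
	return nil
}
// --- end sketch ---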
func TestWalletSiacoins(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } t.Parallel() st, err := createServerTester(t.Name()) if err != nil { t.Fatal(err) } defer st.server.panicClose() st2, err := blankServerTester(t.Name() + "-wallet2") if err != nil { t.Fatal(err) } defer st2.server.Close() st3, err := blankServerTester(t.Name() + "-wallet3") if err != nil { t.Fatal(err) } defer st3.server.Close() st4, err := blankServerTester(t.Name() + "-wallet4") if err != nil { t.Fatal(err) } defer st4.server.Close() st5, err := blankServerTester(t.Name() + "-wallet5") if err != nil { t.Fatal(err) } defer st5.server.Close() st6, err := blankServerTester(t.Name() + "-wallet6") if err != nil { t.Fatal(err) } defer st6.server.Close() // Mine two more blocks with 'st' to get extra outputs to spend. for i := 0; i < 2; i++ { _, err := st.miner.AddBlock() if err != nil { t.Fatal(err) } } // Connect all the wallets together. wallets := []*serverTester{st, st2, st3, st4, st5, st6} err = fullyConnectNodes(wallets) if err != nil { t.Fatal(err) } // Send 10KS in a single-send to st2. sendAmount := types.SiacoinPrecision.Mul64(10000) var wag WalletAddressGET err = st2.getAPI("/wallet/address", &wag) if err != nil { t.Fatal(err) } sendSiacoinsValues := url.Values{} outputsJSON, _ := json.Marshal([]types.SiacoinOutput{{ UnlockHash: wag.Address, Value: sendAmount, }}) sendSiacoinsValues.Set("outputs", string(outputsJSON)) if err = st.stdPostAPI("/wallet/siacoins", sendSiacoinsValues); err != nil { t.Fatal(err) } // Send 10KS to 3, 4, 5 in a single send. var outputs []types.SiacoinOutput for _, w := range wallets[2:5] { var wag WalletAddressGET err = w.getAPI("/wallet/address", &wag) if err != nil { t.Fatal(err) } outputs = append(outputs, types.SiacoinOutput{ UnlockHash: wag.Address, Value: sendAmount, }) } outputsJSON, _ = json.Marshal(outputs) sendSiacoinsValues = url.Values{} sendSiacoinsValues.Set("outputs", string(outputsJSON)) if err = st.stdPostAPI("/wallet/siacoins", sendSiacoinsValues); err != nil { t.Fatal(err) } // Send 10KS to 6 through a joined 250 sends. outputs = nil smallSend := sendAmount.Div64(250) for i := 0; i < 250; i++ { var wag WalletAddressGET err = st6.getAPI("/wallet/address", &wag) if err != nil { t.Fatal(err) } outputs = append(outputs, types.SiacoinOutput{ UnlockHash: wag.Address, Value: smallSend, }) } outputsJSON, _ = json.Marshal(outputs) sendSiacoinsValues = url.Values{} sendSiacoinsValues.Set("outputs", string(outputsJSON)) if err = st.stdPostAPI("/wallet/siacoins", sendSiacoinsValues); err != nil { t.Fatal(err) } // Mine a block to confirm the send. _, err = st.miner.AddBlock() if err != nil { t.Fatal(err) } // Wait for the block to propagate. _, err = synchronizationCheck(wallets) if err != nil { t.Fatal(err) } // Check that the wallets all have 10KS. for i, w := range wallets[1:] { var wg WalletGET err = w.getAPI("/wallet", &wg) if err != nil { t.Fatal(err) } if !wg.ConfirmedSiacoinBalance.Equals(sendAmount) { t.Errorf("wallet %d should have %v coins, has %v", i+2, sendAmount, wg.ConfirmedSiacoinBalance) } } } Sia-1.3.0/appveyor.yml000066400000000000000000000006631313565667000146400ustar00rootroot00000000000000version: "{build}" platform: x64 clone_folder: c:\GOPATH\src\github.com\NebulousLabs\Sia shallow_clone: true environment: GOPATH: c:\GOPATH branches: only: - master - staging install: - go version - go env - go get -d ./... build_script: - go test -tags="testing debug" -short ./... - go test -tags="testing debug" -v -timeout=1200s -race ./... 
- go test -tags="testing debug" -v -run=XXX -bench=. ./... Sia-1.3.0/build/000077500000000000000000000000001313565667000133425ustar00rootroot00000000000000Sia-1.3.0/build/critical.go000066400000000000000000000021541313565667000154650ustar00rootroot00000000000000package build import ( "fmt" "os" "runtime/debug" ) // Critical should be called if a sanity check has failed, indicating developer // error. Critical is called with an extended message guiding the user to the // issue tracker on Github. If the program does not panic, the call stack for // the running goroutine is printed to help determine the error. func Critical(v ...interface{}) { s := "Critical error: " + fmt.Sprintln(v...) + "Please submit a bug report here: https://github.com/NebulousLabs/Sia/issues\n" if Release != "testing" { debug.PrintStack() os.Stderr.WriteString(s) } if DEBUG { panic(s) } } // Severe will print a message to os.Stderr. If DEBUG has been set panic will // be called as well. Severe should be called in situations which indicate // significant problems for the user (such as disk failure or random number // generation failure), but where crashing is not strictly required to preserve // integrity. func Severe(v ...interface{}) { s := "Severe error: " + fmt.Sprintln(v...) if Release != "testing" { debug.PrintStack() os.Stderr.WriteString(s) } if DEBUG { panic(s) } } Sia-1.3.0/build/critical_test.go000066400000000000000000000030711313565667000165230ustar00rootroot00000000000000package build import ( "testing" ) // TestCritical checks that a panic is called in debug mode. func TestCritical(t *testing.T) { k0 := "critical test killstring" killstring := "Critical error: critical test killstring\nPlease submit a bug report here: https://github.com/NebulousLabs/Sia/issues\n" defer func() { r := recover() if r != killstring { t.Error("panic did not work:", r, killstring) } }() Critical(k0) } // TestCriticalVariadic checks that a panic is called in debug mode. func TestCriticalVariadic(t *testing.T) { k0 := "variadic" k1 := "critical" k2 := "test" k3 := "killstring" killstring := "Critical error: variadic critical test killstring\nPlease submit a bug report here: https://github.com/NebulousLabs/Sia/issues\n" defer func() { r := recover() if r != killstring { t.Error("panic did not work:", r, killstring) } }() Critical(k0, k1, k2, k3) } // TestSevere checks that a panic is called in debug mode. func TestSevere(t *testing.T) { k0 := "severe test killstring" killstring := "Severe error: severe test killstring\n" defer func() { r := recover() if r != killstring { t.Error("panic did not work:", r, killstring) } }() Severe(k0) } // TestSevereVariadic checks that a panic is called in debug mode. 
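// --- Illustrative sketch (added example, not part of the original test file) ---
// A hedged example of how callers are expected to use Critical, per the doc
// comment in critical.go above: guard a developer invariant and call Critical
// when it fails, so testing builds panic while release builds print the stack
// and a bug-report URL. The function and its argument are hypothetical.
func exampleSanityCheck(balance int) {
	if balance < 0 {
		// Sanity check failure: a negative balance indicates developer
		// error rather than user error.
		Critical("negative balance:", balance)
	}
}
// --- end sketch ---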
func TestSevereVariadic(t *testing.T) { k0 := "variadic" k1 := "severe" k2 := "test" k3 := "killstring" killstring := "Severe error: variadic severe test killstring\n" defer func() { r := recover() if r != killstring { t.Error("panic did not work:", r, killstring) } }() Severe(k0, k1, k2, k3) } Sia-1.3.0/build/debug_off.go000066400000000000000000000000651313565667000156120ustar00rootroot00000000000000// +build !debug package build const DEBUG = false Sia-1.3.0/build/debug_on.go000066400000000000000000000000631313565667000154520ustar00rootroot00000000000000// +build debug package build const DEBUG = true Sia-1.3.0/build/errors.go000066400000000000000000000030241313565667000152040ustar00rootroot00000000000000package build import ( "errors" "strings" ) // ComposeErrors will take multiple errors and compose them into a single // error with a longer message. Any nil errors used as inputs will be stripped // out, and if there are zero non-nil inputs then 'nil' will be returned. // // The original types of the errors are not preserved at all. func ComposeErrors(errs ...error) error { // Strip out any nil errors. var errStrings []string for _, err := range errs { if err != nil { errStrings = append(errStrings, err.Error()) } } // Return nil if there are no non-nil errors in the input. if len(errStrings) <= 0 { return nil } // Combine all of the non-nil errors into one larger return value. return errors.New(strings.Join(errStrings, "; ")) } // ExtendErr will return a new error which extends the input error with a // string. If the input error is nil, then 'nil' will be returned, discarding // the input string. func ExtendErr(s string, err error) error { if err == nil { return nil } return errors.New(s + ": " + err.Error()) } // JoinErrors concatenates the elements of errs to create a single error. The // separator string sep is placed between elements in the resulting error. Nil // errors are skipped. If errs is empty or only contains nil elements, // JoinErrors returns nil. func JoinErrors(errs []error, sep string) error { var strs []string for _, err := range errs { if err != nil { strs = append(strs, err.Error()) } } if len(strs) > 0 { return errors.New(strings.Join(strs, sep)) } return nil } Sia-1.3.0/build/errors_test.go000066400000000000000000000033371313565667000162500ustar00rootroot00000000000000package build import ( "errors" "testing" ) // TestJoinErrors tests that JoinErrors only returns non-nil when there are // non-nil elements in errs. And tests that the returned error's string is the // concatenation of all the strings of the elements in errs, in order and // separated by sep. func TestJoinErrors(t *testing.T) { tests := []struct { errs []error sep string wantNil bool errStrWant string }{ // Test that JoinErrors returns nil when errs is nil. { wantNil: true, }, // Test that JoinErrors returns nil when errs is an empty slice. { errs: []error{}, wantNil: true, }, // Test that JoinErrors returns nil when errs has only nil elements. { errs: []error{nil}, wantNil: true, }, { errs: []error{nil, nil, nil}, wantNil: true, }, // Test that JoinErrors returns non-nil with the expected string when errs has only one non-nil element. { errs: []error{errors.New("foo")}, sep: ";", errStrWant: "foo", }, // Test that JoinErrors returns non-nil with the expected string when errs has multiple non-nil elements. { errs: []error{errors.New("foo"), errors.New("bar"), errors.New("baz")}, sep: ";", errStrWant: "foo;bar;baz", }, // Test that nil errors are ignored.
{ errs: []error{nil, errors.New("foo"), nil, nil, nil, errors.New("bar"), errors.New("baz"), nil, nil, nil}, sep: ";", errStrWant: "foo;bar;baz", }, } for _, tt := range tests { err := JoinErrors(tt.errs, tt.sep) if tt.wantNil && err != nil { t.Errorf("expected nil error, got '%v'", err) } else if err != nil && err.Error() != tt.errStrWant { t.Errorf("expected '%v', got '%v'", tt.errStrWant, err) } } } Sia-1.3.0/build/release_dev.go000066400000000000000000000000641313565667000161470ustar00rootroot00000000000000// +build dev package build const Release = "dev" Sia-1.3.0/build/release_standard.go000066400000000000000000000001031313565667000171630ustar00rootroot00000000000000// +build !testing,!dev package build const Release = "standard" Sia-1.3.0/build/release_testing.go000066400000000000000000000000741313565667000170470ustar00rootroot00000000000000// +build testing package build const Release = "testing" Sia-1.3.0/build/testdata/000077500000000000000000000000001313565667000151535ustar00rootroot00000000000000Sia-1.3.0/build/testdata/test.tar.gz000066400000000000000000000002311313565667000172550ustar00rootroot00000000000000vX10 aϜ"G49O*RJ.@p*oJKE-lf.s$ntnSrw/u7`;aǪxϿVocc'v!K܇(Sia-1.3.0/build/testing.go000066400000000000000000000061351313565667000153530ustar00rootroot00000000000000package build import ( "archive/tar" "compress/gzip" "errors" "io" "io/ioutil" "os" "path/filepath" "time" ) var ( // SiaTestingDir is the directory that contains all of the files and // folders created during testing. SiaTestingDir = filepath.Join(os.TempDir(), "SiaTesting") ) // TempDir joins the provided directories and prefixes them with the Sia // testing directory. func TempDir(dirs ...string) string { path := filepath.Join(SiaTestingDir, filepath.Join(dirs...)) os.RemoveAll(path) // remove old test data return path } // CopyFile copies a file from a source to a destination. func CopyFile(source, dest string) error { sf, err := os.Open(source) if err != nil { return err } defer sf.Close() df, err := os.Create(dest) if err != nil { return err } defer df.Close() _, err = io.Copy(df, sf) if err != nil { return err } return nil } // CopyDir copies a directory and all of its contents to the destination // directory. func CopyDir(source, dest string) error { stat, err := os.Stat(source) if err != nil { return err } if !stat.IsDir() { return errors.New("source is not a directory") } err = os.MkdirAll(dest, stat.Mode()) if err != nil { return err } files, err := ioutil.ReadDir(source) for _, file := range files { newSource := filepath.Join(source, file.Name()) newDest := filepath.Join(dest, file.Name()) if file.IsDir() { err = CopyDir(newSource, newDest) if err != nil { return err } } else { err = CopyFile(newSource, newDest) if err != nil { return err } } } return nil } // ExtractTarGz extracts the specified .tar.gz file to dir, overwriting // existing files in the event of a name conflict. func ExtractTarGz(filename, dir string) error { // Open the zipped archive. file, err := os.Open(filename) if err != nil { return err } defer file.Close() z, err := gzip.NewReader(file) if err != nil { return err } defer z.Close() t := tar.NewReader(z) // Create the output directory if it does not exist. if err := os.MkdirAll(dir, 0700); err != nil { return err } // Read the file entries, writing each to dir. for { // Read header. hdr, err := t.Next() if err == io.EOF { return nil } else if err != nil { return err } path := filepath.Join(dir, hdr.Name) info := hdr.FileInfo() if info.IsDir() { // Create directory. 
if err := os.MkdirAll(path, info.Mode()); err != nil { return err } } else { // Create file. tf, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, info.Mode()) if err != nil { return err } _, err = io.Copy(tf, t) tf.Close() if err != nil { return err } } } } // Retry will call 'fn' 'tries' times, waiting 'durationBetweenAttempts' // between each attempt, returning 'nil' the first time that 'fn' returns nil. // If 'nil' is never returned, then the final error returned by 'fn' is // returned. func Retry(tries int, durationBetweenAttempts time.Duration, fn func() error) (err error) { for i := 1; i < tries; i++ { err = fn() if err == nil { return nil } time.Sleep(durationBetweenAttempts) } return fn() } Sia-1.3.0/build/testing_test.go000066400000000000000000000035331313565667000164110ustar00rootroot00000000000000package build import ( "bytes" "io/ioutil" "os" "path/filepath" "reflect" "sort" "testing" "github.com/NebulousLabs/fastrand" ) // TestCopyDir checks that CopyDir copies directories as expected. func TestCopyDir(t *testing.T) { // Create some nested folders to copy. os.MkdirAll(TempDir("build"), 0700) root := TempDir("build", t.Name()) os.MkdirAll(root, 0700) data := make([][]byte, 2) for i := range data { data[i] = fastrand.Bytes(4e3) } // Create a file and a directory. err := ioutil.WriteFile(filepath.Join(root, "f1"), data[0], 0700) if err != nil { t.Fatal(err) } err = os.MkdirAll(filepath.Join(root, "d1"), 0700) if err != nil { t.Fatal(err) } err = ioutil.WriteFile(filepath.Join(root, "d1", "d1f1"), data[1], 0700) if err != nil { t.Fatal(err) } // Copy the root directory. rootCopy := root + "-copied" err = CopyDir(root, rootCopy) if err != nil { t.Fatal(err) } // Verify that the top-level file and the file inside the directory were // both copied correctly. f1, err := ioutil.ReadFile(filepath.Join(rootCopy, "f1")) if err != nil { t.Fatal(err) } if !bytes.Equal(f1, data[0]) { t.Error("f1 did not match") } d1f1, err := ioutil.ReadFile(filepath.Join(rootCopy, "d1", "d1f1")) if err != nil { t.Fatal(err) } if !bytes.Equal(d1f1, data[1]) { t.Error("d1f1 did not match") } } // TestExtractTarGz tests that ExtractTarGz can extract a valid .tar.gz file. func TestExtractTarGz(t *testing.T) { dir := TempDir("build", t.Name()) os.MkdirAll(dir, 0700) if err := ExtractTarGz("testdata/test.tar.gz", dir); err != nil { t.Fatal(err) } folder, err := os.Open(dir) if err != nil { t.Fatal(err) } files, err := folder.Readdirnames(-1) if err != nil { t.Fatal(err) } sort.Strings(files) exp := []string{"1", "2", "3"} if !reflect.DeepEqual(files, exp) { t.Fatal("filenames do not match:", files, exp) } } Sia-1.3.0/build/var.go000066400000000000000000000027451313565667000144670ustar00rootroot00000000000000package build import "reflect" // A Var represents a variable whose value depends on which Release is being // compiled. None of the fields may be nil, and all fields must have the same // type. type Var struct { Standard interface{} Dev interface{} Testing interface{} // prevent unkeyed literals _ struct{} } // Select returns the field of v that corresponds to the current Release. // // Since the caller typically makes a type assertion on the result, it is // important to point out that type assertions are stricter than conversions. // Specifically, you cannot write: // // type myint int // Select(Var{0, 0, 0}).(myint) // // Because 0 will be interpreted as an int, which is not assignable to myint.
// Instead, you must explicitly cast each field in the Var, or cast the return // value of Select after the type assertion. The former is preferred. func Select(v Var) interface{} { if v.Standard == nil || v.Dev == nil || v.Testing == nil { panic("nil value in build variable") } st, dt, tt := reflect.TypeOf(v.Standard), reflect.TypeOf(v.Dev), reflect.TypeOf(v.Testing) if !dt.AssignableTo(st) || !tt.AssignableTo(st) { // NOTE: we use AssignableTo instead of the more lenient ConvertibleTo // because type assertions require the former. panic("build variables must have a single type") } switch Release { case "standard": return v.Standard case "dev": return v.Dev case "testing": return v.Testing default: panic("unrecognized Release: " + Release) } } Sia-1.3.0/build/var_test.go000066400000000000000000000033051313565667000155210ustar00rootroot00000000000000package build import "testing" // didPanic returns true if fn panicked. func didPanic(fn func()) (p bool) { defer func() { p = (recover() != nil) }() fn() return } // TestSelect tests the Select function. Since we can't change the Release // constant during testing, we can only test the "testing" branches. func TestSelect(t *testing.T) { var v Var if !didPanic(func() { Select(v) }) { t.Error("Select should panic with all nil fields") } v.Standard = 0 if !didPanic(func() { Select(v) }) { t.Error("Select should panic with some nil fields") } v = Var{ Standard: 0, Dev: 0, Testing: 0, } if didPanic(func() { Select(v) }) { t.Error("Select should not panic with valid fields") } if !didPanic(func() { _ = Select(v).(string) }) { t.Error("improper type assertion should panic") } // should fail even if types are convertible type myint int if !didPanic(func() { _ = Select(v).(myint) }) { t.Error("improper type assertion should panic") } v.Standard = "foo" if !didPanic(func() { Select(v) }) { t.Error("Select should panic if field types do not match") } // Even though myint is convertible to int, it is not *assignable*. That // means that this code will panic, as checked in a previous test: // // _ = Select(v).(myint) // // This is important because users of Select may assume that type // assertions only require convertibility. To guard against this, we // enforce that all Var fields must be assignable to each other; otherwise // a type assertion may succeed for certain Release constants and fail for // others. v.Standard = myint(0) if !didPanic(func() { Select(v) }) { t.Error("Select should panic if field types are not mutually assignable") } } Sia-1.3.0/build/version.go000066400000000000000000000031721313565667000153610ustar00rootroot00000000000000package build import ( "strconv" "strings" ) const ( // Version is the current version of siad. Version = "1.3.0" // MaxEncodedVersionLength is the maximum length of a version string encoded // with the encode package. 100 is much larger than any version number we send // now, but it allows us to send additional information in the version string // later if we choose. For example appending the version string with the HEAD // commit hash. MaxEncodedVersionLength = 100 ) // IsVersion returns whether str is a valid version number. func IsVersion(str string) bool { for _, n := range strings.Split(str, ".") { if _, err := strconv.Atoi(n); err != nil { return false } } return true } // min returns the smaller of two integers. func min(a, b int) int { if a < b { return a } return b } // VersionCmp returns an int indicating the difference between a and b. 
It // follows the convention of bytes.Compare and big.Cmp: // // -1 if a < b // 0 if a == b // +1 if a > b // // One important quirk is that "1.1.0" is considered newer than "1.1", despite // being numerically equal. func VersionCmp(a, b string) int { aNums := strings.Split(a, ".") bNums := strings.Split(b, ".") for i := 0; i < min(len(aNums), len(bNums)); i++ { // assume that both version strings are valid aInt, _ := strconv.Atoi(aNums[i]) bInt, _ := strconv.Atoi(bNums[i]) if aInt < bInt { return -1 } else if aInt > bInt { return 1 } } // all shared digits are equal, but lengths may not be equal if len(aNums) < len(bNums) { return -1 } else if len(aNums) > len(bNums) { return 1 } // strings are identical return 0 } Sia-1.3.0/build/version_test.go000066400000000000000000000020151313565667000164130ustar00rootroot00000000000000package build import ( "testing" ) // TestVersionCmp checks that in all cases, VersionCmp returns the correct // result. func TestVersionCmp(t *testing.T) { versionTests := []struct { a, b string exp int }{ {"0.1", "0.0.9", 1}, {"0.1", "0.1", 0}, {"0.1", "0.1.1", -1}, {"0.1", "0.1.0", -1}, {"0.1", "1.1", -1}, {"0.1.1.0", "0.1.1", 1}, } for _, test := range versionTests { if actual := VersionCmp(test.a, test.b); actual != test.exp { t.Errorf("Comparing %v to %v should return %v (got %v)", test.a, test.b, test.exp, actual) } } } // TestIsVersion tests the IsVersion function. func TestIsVersion(t *testing.T) { versionTests := []struct { str string exp bool }{ {"1.0", true}, {"1", true}, {"0.1.2.3.4.5", true}, {"foo", false}, {".1", false}, {"1.", false}, {"a.b", false}, {"1.o", false}, {".", false}, {"", false}, } for _, test := range versionTests { if IsVersion(test.str) != test.exp { t.Errorf("IsVersion(%v) should return %v", test.str, test.exp) } } } Sia-1.3.0/build/vlong_off.go000066400000000000000000000000651313565667000156510ustar00rootroot00000000000000// +build !vlong package build const VLONG = false Sia-1.3.0/build/vlong_on.go000066400000000000000000000000631313565667000155110ustar00rootroot00000000000000// +build vlong package build const VLONG = true Sia-1.3.0/compatibility/000077500000000000000000000000001313565667000151145ustar00rootroot00000000000000Sia-1.3.0/compatibility/compatibility.go000066400000000000000000000002271313565667000203150ustar00rootroot00000000000000// package compatibility contains a bunch of tests to make sure that // compatibility has been preserved with previous versions. 
package compatibility Sia-1.3.0/compatibility/siafile_v0.4.8.sia000066400000000000000000000002241313565667000201410ustar00rootroot00000000000000Sia Shared File0.4 na⒴̜T]C c'f&XߧMm\_ٷ#/ǘcfbt#nugwNvpd9+af@XQ2# ˆSia-1.3.0/compatibility/siag_1.0_1of1_Key0.siakey000066400000000000000000000002571313565667000213460ustar00rootroot00000000000000siag1.0 $>1Q_ثˇ ȳ~f½o%aWvz` r_~s-VWSHHӅ;h}MNޮ/6Y Ged25519 Ӆ;h}MNޮ/6Y Ged25519 R<]?a8+0`d=߄(gπSdSia-1.3.0/compatibility/siag_1.0_1of2_Key1.siakey000066400000000000000000000003471313565667000213500ustar00rootroot00000000000000siag1.0vutG;X-'iGp$:R<]?a8+0`d=߄(gπSded25519 Ӆ;h}MNޮ/6Y Ged25519 R<]?a8+0`d=߄(gπSdSia-1.3.0/compatibility/siag_1.0_2of3_Key0.siakey000066400000000000000000000004371313565667000213510ustar00rootroot00000000000000siag1.0P?7Cֆ+]nГjOWc!j i le o_tqF jued25519  le o_tqF jued25519 6e%/ȴB=OgVOz%6ed25519 Z 6q)ϴ'N2(G2Sia-1.3.0/compatibility/siag_1.0_2of3_Key1.siakey000066400000000000000000000004371313565667000213520ustar00rootroot00000000000000siag1.0e!G e ]yhbK( E6e%/ȴB=OgVOz%6ed25519  le o_tqF jued25519 6e%/ȴB=OgVOz%6ed25519 Z 6q)ϴ'N2(G2Sia-1.3.0/compatibility/siag_1.0_2of3_Key2.siakey000066400000000000000000000004371313565667000213530ustar00rootroot00000000000000siag1.0 1QpYMPO PIqperZPZ 6q)ϴ'N2(G2ed25519  le o_tqF jued25519 6e%/ȴB=OgVOz%6ed25519 Z 6q)ϴ'N2(G2Sia-1.3.0/compatibility/siag_1.0_3of3_Key0.siakey000066400000000000000000000004371313565667000213520ustar00rootroot00000000000000siag1.0ց{ʖaݤR2ijV^iY>a/=x8Tp㤮/ed25519 iY>a/=x8Tp㤮/ed25519 h 13NlDaεQQ ed25519 x{7,!J4u=> =Sia-1.3.0/compatibility/siag_1.0_3of3_Key1.siakey000066400000000000000000000004371313565667000213530ustar00rootroot00000000000000siag1.0v*oˤiopiܲlRw I1eh 13NlDaεQQ ed25519 iY>a/=x8Tp㤮/ed25519 h 13NlDaεQQ ed25519 x{7,!J4u=> =Sia-1.3.0/compatibility/siag_1.0_3of3_Key2.siakey000066400000000000000000000004371313565667000213540ustar00rootroot00000000000000siag1.0´{Dܻ"(ԋqჃ画S6ƪx{7,!J4u=> =ed25519 iY>a/=x8Tp㤮/ed25519 h 13NlDaεQQ ed25519 x{7,!J4u=> =Sia-1.3.0/compatibility/siag_1.0_4of9_Key0.siakey000066400000000000000000000011571313565667000213610ustar00rootroot00000000000000siag1.0Xd1§fu#Br fڬa 8 8ZPEBVq, ed25519 r fڬa 8 8ZPEBVq,ed25519 HHphE8Dg)DFOUed25519 z0$k5$y$j TW9wkG_ed25519 ϼ_o+Nx>"Red25519 SWxWp{ϼE2qXzj*ed25519 d7Exk_. f`3 a^fed25519 :c"P{c׏Z1d[5Hnڱt-ed25519 *~wL8a"U?-&}Ted25519 s/y_y5ǘ֍Cr(,lSia-1.3.0/compatibility/siag_1.0_4of9_Key1.siakey000066400000000000000000000011571313565667000213620ustar00rootroot00000000000000siag1.0:Z-ąLDll`r>>EdHHphE8Dg)DFOU ed25519 r fڬa 8 8ZPEBVq,ed25519 HHphE8Dg)DFOUed25519 z0$k5$y$j TW9wkG_ed25519 ϼ_o+Nx>"Red25519 SWxWp{ϼE2qXzj*ed25519 d7Exk_. f`3 a^fed25519 :c"P{c׏Z1d[5Hnڱt-ed25519 *~wL8a"U?-&}Ted25519 s/y_y5ǘ֍Cr(,lSia-1.3.0/compatibility/siag_1.0_4of9_Key2.siakey000066400000000000000000000011571313565667000213630ustar00rootroot00000000000000siag1.0 1;`<P K^8mSUZvz0$k5$y$j TW9wkG_ ed25519 r fڬa 8 8ZPEBVq,ed25519 HHphE8Dg)DFOUed25519 z0$k5$y$j TW9wkG_ed25519 ϼ_o+Nx>"Red25519 SWxWp{ϼE2qXzj*ed25519 d7Exk_. f`3 a^fed25519 :c"P{c׏Z1d[5Hnڱt-ed25519 *~wL8a"U?-&}Ted25519 s/y_y5ǘ֍Cr(,lSia-1.3.0/compatibility/siag_1.0_4of9_Key3.siakey000066400000000000000000000011571313565667000213640ustar00rootroot00000000000000siag1.0VV%RF8JE-Ν^9ϼ_o+Nx>"R ed25519 r fڬa 8 8ZPEBVq,ed25519 HHphE8Dg)DFOUed25519 z0$k5$y$j TW9wkG_ed25519 ϼ_o+Nx>"Red25519 SWxWp{ϼE2qXzj*ed25519 d7Exk_. 
f`3 a^fed25519 :c"P{c׏Z1d[5Hnڱt-ed25519 *~wL8a"U?-&}Ted25519 s/y_y5ǘ֍Cr(,lSia-1.3.0/compatibility/siag_1.0_4of9_Key4.siakey000066400000000000000000000011571313565667000213650ustar00rootroot00000000000000siag1.0(Z *l9"I8#SWxWp{ϼE2qXzj* ed25519 r fڬa 8 8ZPEBVq,ed25519 HHphE8Dg)DFOUed25519 z0$k5$y$j TW9wkG_ed25519 ϼ_o+Nx>"Red25519 SWxWp{ϼE2qXzj*ed25519 d7Exk_. f`3 a^fed25519 :c"P{c׏Z1d[5Hnڱt-ed25519 *~wL8a"U?-&}Ted25519 s/y_y5ǘ֍Cr(,lSia-1.3.0/compatibility/siag_1.0_4of9_Key5.siakey000066400000000000000000000011571313565667000213660ustar00rootroot00000000000000siag1.0,a^qRǝHK,IDd7Exk_. f`3 a^f ed25519 r fڬa 8 8ZPEBVq,ed25519 HHphE8Dg)DFOUed25519 z0$k5$y$j TW9wkG_ed25519 ϼ_o+Nx>"Red25519 SWxWp{ϼE2qXzj*ed25519 d7Exk_. f`3 a^fed25519 :c"P{c׏Z1d[5Hnڱt-ed25519 *~wL8a"U?-&}Ted25519 s/y_y5ǘ֍Cr(,lSia-1.3.0/compatibility/siag_1.0_4of9_Key6.siakey000066400000000000000000000011571313565667000213670ustar00rootroot00000000000000siag1.0ǸT\"6c1<9臇M~:c"P{c׏Z1d[5Hnڱt- ed25519 r fڬa 8 8ZPEBVq,ed25519 HHphE8Dg)DFOUed25519 z0$k5$y$j TW9wkG_ed25519 ϼ_o+Nx>"Red25519 SWxWp{ϼE2qXzj*ed25519 d7Exk_. f`3 a^fed25519 :c"P{c׏Z1d[5Hnڱt-ed25519 *~wL8a"U?-&}Ted25519 s/y_y5ǘ֍Cr(,lSia-1.3.0/compatibility/siag_1.0_4of9_Key7.siakey000066400000000000000000000011571313565667000213700ustar00rootroot00000000000000siag1.0>c9ƶhIy(r;{Yy*~wL8a"U?-&}T ed25519 r fڬa 8 8ZPEBVq,ed25519 HHphE8Dg)DFOUed25519 z0$k5$y$j TW9wkG_ed25519 ϼ_o+Nx>"Red25519 SWxWp{ϼE2qXzj*ed25519 d7Exk_. f`3 a^fed25519 :c"P{c׏Z1d[5Hnڱt-ed25519 *~wL8a"U?-&}Ted25519 s/y_y5ǘ֍Cr(,lSia-1.3.0/compatibility/siag_1.0_4of9_Key8.siakey000066400000000000000000000011571313565667000213710ustar00rootroot00000000000000siag1.0} aڄܢؐ"$mLjr"s/y_y5ǘ֍Cr(,l ed25519 r fڬa 8 8ZPEBVq,ed25519 HHphE8Dg)DFOUed25519 z0$k5$y$j TW9wkG_ed25519 ϼ_o+Nx>"Red25519 SWxWp{ϼE2qXzj*ed25519 d7Exk_. f`3 a^fed25519 :c"P{c׏Z1d[5Hnڱt-ed25519 *~wL8a"U?-&}Ted25519 s/y_y5ǘ֍Cr(,lSia-1.3.0/compatibility/siag_1.0_test.go000066400000000000000000000062121313565667000200040ustar00rootroot00000000000000package compatibility // siag.go checks that any changes made to the code retain compatibility with // old versions of siag. import ( "errors" "path/filepath" "strconv" "testing" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/types" ) // KeyPairSiag_1_0 matches the KeyPair struct of the siag 1.0 code. type KeyPairSiag_1_0 struct { Header string Version string Index int SecretKey crypto.SecretKey UnlockConditions types.UnlockConditions } // verifyKeysSiag_1_0 is a copy-pasted version of the verifyKeys method // from siag 1.0. 
func verifyKeysSiag_1_0(uc types.UnlockConditions, folder string, keyname string) error { keysRequired := uc.SignaturesRequired totalKeys := uint64(len(uc.PublicKeys)) loadedKeys := make([]KeyPairSiag_1_0, totalKeys) for i := 0; i < len(loadedKeys); i++ { err := encoding.ReadFile(filepath.Join(folder, keyname+"_Key"+strconv.Itoa(i)+".siakey"), &loadedKeys[i]) if err != nil { return err } } for _, loadedKey := range loadedKeys { if loadedKey.UnlockConditions.UnlockHash() != uc.UnlockHash() { return errors.New("ErrCorruptedKey") } } txn := types.Transaction{ SiafundInputs: []types.SiafundInput{ { UnlockConditions: loadedKeys[0].UnlockConditions, }, }, } var i uint64 for i != totalKeys { if i+keysRequired > totalKeys { i = totalKeys - keysRequired } var j uint64 for j < keysRequired { txn.TransactionSignatures = append(txn.TransactionSignatures, types.TransactionSignature{ PublicKeyIndex: i, CoveredFields: types.CoveredFields{WholeTransaction: true}, }) sigHash := txn.SigHash(int(j)) sig := crypto.SignHash(sigHash, loadedKeys[i].SecretKey) txn.TransactionSignatures[j].Signature = sig[:] i++ j++ } err := txn.StandaloneValid(0) if err != nil { return err } txn.TransactionSignatures = nil } return nil } // TestVerifyKeysSiag_1_0 loads some keys generated by siag1.0. // Verification must still work. func TestVerifyKeysSiag_1_0(t *testing.T) { if testing.Short() { t.SkipNow() } var kp KeyPairSiag_1_0 // 1 of 1 err := encoding.ReadFile("siag_1.0_1of1_Key0.siakey", &kp) if err != nil { t.Fatal(err) } err = verifyKeysSiag_1_0(kp.UnlockConditions, "", "siag_1.0_1of1") if err != nil { t.Fatal(err) } // 1 of 2 err = encoding.ReadFile("siag_1.0_1of2_Key0.siakey", &kp) if err != nil { t.Fatal(err) } err = verifyKeysSiag_1_0(kp.UnlockConditions, "", "siag_1.0_1of2") if err != nil { t.Fatal(err) } // 2 of 3 err = encoding.ReadFile("siag_1.0_2of3_Key0.siakey", &kp) if err != nil { t.Fatal(err) } err = verifyKeysSiag_1_0(kp.UnlockConditions, "", "siag_1.0_2of3") if err != nil { t.Fatal(err) } // 3 of 3 err = encoding.ReadFile("siag_1.0_3of3_Key0.siakey", &kp) if err != nil { t.Fatal(err) } err = verifyKeysSiag_1_0(kp.UnlockConditions, "", "siag_1.0_3of3") if err != nil { t.Fatal(err) } // 4 of 9 err = encoding.ReadFile("siag_1.0_4of9_Key0.siakey", &kp) if err != nil { t.Fatal(err) } err = verifyKeysSiag_1_0(kp.UnlockConditions, "", "siag_1.0_4of9") if err != nil { t.Fatal(err) } } Sia-1.3.0/crypto/000077500000000000000000000000001313565667000135635ustar00rootroot00000000000000Sia-1.3.0/crypto/discard.go000066400000000000000000000005311313565667000155220ustar00rootroot00000000000000package crypto // SecureWipe destroys the data contained within a byte slice. There are no // strong guarantees that all copies of the memory have been eliminated. If the // OS was doing context switching or using swap space the keys may still be // elsewhere in memory. func SecureWipe(data []byte) { for i := range data { data[i] = 0 } } Sia-1.3.0/crypto/discard_test.go000066400000000000000000000007671313565667000165740ustar00rootroot00000000000000package crypto import ( "bytes" "testing" ) // TestUnitSecureWipe tests that the SecureWipe function sets all the elements // in a byte slice to 0. func TestUnitSecureWipe(t *testing.T) { s := []byte{1, 2, 3, 4} SecureWipe(s) if !bytes.Equal(s, make([]byte, len(s))) { t.Error("some bytes not set to 0") } } // TestUnitSecureWipeEdgeCases tests that SecureWipe doesn't panic on nil or // empty slices. 
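// --- Illustrative sketch (added example, not part of the original test file) ---
// A hedged example of the intended SecureWipe usage: zero key material as
// soon as it is no longer needed. As the discard.go comment above notes, this
// is best-effort only; swap space or context switches may leave copies
// behind. The surrounding function is hypothetical.
func exampleWipeAfterUse(secret []byte) {
	// Wipe the secret on every exit path from this function.
	defer SecureWipe(secret)
	// ... use secret for signing or decryption here ...
}
// --- end sketch ---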
func TestUnitSecureWipeEdgeCases(t *testing.T) { SecureWipe(nil) SecureWipe([]byte{}) } Sia-1.3.0/crypto/encrypt.go000066400000000000000000000055351313565667000156060ustar00rootroot00000000000000package crypto // encrypt.go contains functions for encrypting and decrypting data byte slices // and readers. import ( "crypto/cipher" "encoding/json" "errors" "io" "github.com/NebulousLabs/fastrand" "golang.org/x/crypto/twofish" ) const ( TwofishOverhead = 28 // number of bytes added by EncryptBytes ) var ( ErrInsufficientLen = errors.New("supplied ciphertext is not long enough to contain a nonce") ) type ( Ciphertext []byte TwofishKey [EntropySize]byte ) // GenerateTwofishKey produces a key that can be used for encrypting and // decrypting files. func GenerateTwofishKey() (key TwofishKey) { fastrand.Read(key[:]) return } // NewCipher creates a new Twofish cipher from the key. func (key TwofishKey) NewCipher() cipher.Block { // NOTE: NewCipher only returns an error if len(key) != 16, 24, or 32. cipher, _ := twofish.NewCipher(key[:]) return cipher } // EncryptBytes encrypts a []byte using the key. EncryptBytes uses GCM and // prepends the nonce (12 bytes) to the ciphertext. func (key TwofishKey) EncryptBytes(plaintext []byte) Ciphertext { // Create the cipher. // NOTE: NewGCM only returns an error if twofishCipher.BlockSize != 16. aead, _ := cipher.NewGCM(key.NewCipher()) // Create the nonce. nonce := fastrand.Bytes(aead.NonceSize()) // Encrypt the data. No authenticated data is provided, as EncryptBytes is // meant for file encryption. return aead.Seal(nonce, nonce, plaintext, nil) } // DecryptBytes decrypts the ciphertext created by EncryptBytes. The nonce is // expected to be the first 12 bytes of the ciphertext. func (key TwofishKey) DecryptBytes(ct Ciphertext) ([]byte, error) { // Create the cipher. // NOTE: NewGCM only returns an error if twofishCipher.BlockSize != 16. aead, _ := cipher.NewGCM(key.NewCipher()) // Check for a nonce. if len(ct) < aead.NonceSize() { return nil, ErrInsufficientLen } // Decrypt the data. return aead.Open(nil, ct[:aead.NonceSize()], ct[aead.NonceSize():], nil) } // NewWriter returns a writer that encrypts or decrypts its input stream. func (key TwofishKey) NewWriter(w io.Writer) io.Writer { // OK to use a zero IV if the key is unique for each ciphertext. iv := make([]byte, twofish.BlockSize) stream := cipher.NewOFB(key.NewCipher(), iv) return &cipher.StreamWriter{S: stream, W: w} } // NewReader returns a reader that encrypts or decrypts its input stream. func (key TwofishKey) NewReader(r io.Reader) io.Reader { // OK to use a zero IV if the key is unique for each ciphertext. iv := make([]byte, twofish.BlockSize) stream := cipher.NewOFB(key.NewCipher(), iv) return &cipher.StreamReader{S: stream, R: r} } func (c Ciphertext) MarshalJSON() ([]byte, error) { return json.Marshal([]byte(c)) } func (c *Ciphertext) UnmarshalJSON(b []byte) error { var umarB []byte err := json.Unmarshal(b, &umarB) if err != nil { return err } *c = Ciphertext(umarB) return nil } Sia-1.3.0/crypto/encrypt_test.go000066400000000000000000000146271313565667000166430ustar00rootroot00000000000000package crypto import ( "bytes" "compress/gzip" "testing" "github.com/NebulousLabs/fastrand" ) // TestTwofishEncryption checks that encryption and decryption work correctly. func TestTwofishEncryption(t *testing.T) { // Get a key for encryption. key := GenerateTwofishKey() // Encrypt and decrypt a zero plaintext, and compare the decrypted to the // original.
plaintext := make([]byte, 600) ciphertext := key.EncryptBytes(plaintext) decryptedPlaintext, err := key.DecryptBytes(ciphertext) if err != nil { t.Fatal(err) } if !bytes.Equal(plaintext, decryptedPlaintext) { t.Fatal("Encrypted and decrypted zero plaintext do not match") } // Try again with a nonzero plaintext. plaintext = fastrand.Bytes(600) ciphertext = key.EncryptBytes(plaintext) decryptedPlaintext, err = key.DecryptBytes(ciphertext) if err != nil { t.Fatal(err) } if !bytes.Equal(plaintext, decryptedPlaintext) { t.Fatal("Encrypted and decrypted nonzero plaintext do not match") } // Try to decrypt using a different key key2 := GenerateTwofishKey() _, err = key2.DecryptBytes(ciphertext) if err == nil { t.Fatal("Expecting failed authentication err", err) } // Try to decrypt using bad ciphertexts. ciphertext[0]++ _, err = key.DecryptBytes(ciphertext) if err == nil { t.Fatal("Expecting failed authentication err", err) } _, err = key.DecryptBytes(ciphertext[:10]) if err != ErrInsufficientLen { t.Error("Expecting ErrInsufficientLen:", err) } // Try to trigger a panic or error with nil values. key.EncryptBytes(nil) _, err = key.DecryptBytes(nil) if err != ErrInsufficientLen { t.Error("Expecting ErrInsufficientLen:", err) } } // TestReaderWriter probes the NewReader and NewWriter methods of the key type. func TestReaderWriter(t *testing.T) { // Get a key for encryption. key := GenerateTwofishKey() // Generate plaintext. const plaintextSize = 600 plaintext := fastrand.Bytes(plaintextSize) // Create writer and encrypt plaintext. buf := new(bytes.Buffer) key.NewWriter(buf).Write(plaintext) // There should be no overhead present. if buf.Len() != plaintextSize { t.Fatalf("encryption introduced %v bytes of overhead", buf.Len()-plaintextSize) } // Create reader and decrypt ciphertext. var decrypted = make([]byte, plaintextSize) key.NewReader(buf).Read(decrypted) if !bytes.Equal(plaintext, decrypted) { t.Error("couldn't decrypt encrypted stream") } } // TestTwofishEntropy encrypts a zero plaintext, checking // that the ciphertext is high entropy. func TestTwofishEntropy(t *testing.T) { if testing.Short() { t.SkipNow() } // Encrypt a larger zero plaintext and make sure that the outcome is high // entropy. Entropy is measured by compressing the ciphertext with gzip. // 10 * 1000 bytes was chosen to minimize the impact of gzip overhead. const cipherSize = 10e3 key := GenerateTwofishKey() plaintext := make([]byte, cipherSize) ciphertext := key.EncryptBytes(plaintext) // Gzip the ciphertext var b bytes.Buffer zip := gzip.NewWriter(&b) _, err := zip.Write(ciphertext) if err != nil { t.Fatal(err) } zip.Close() if b.Len() < cipherSize { t.Error("supposedly high entropy ciphertext has been compressed!") } } // TestUnitCiphertextUnmarshalInvalidJSON tests that Ciphertext.UnmarshalJSON // correctly fails on invalid JSON marshalled Ciphertext. func TestUnitCiphertextUnmarshalInvalidJSON(t *testing.T) { // Test unmarshalling invalid JSON. invalidJSONBytes := [][]byte{ nil, {}, []byte("\""), } for _, jsonBytes := range invalidJSONBytes { var ct Ciphertext err := ct.UnmarshalJSON(jsonBytes) if err == nil { t.Errorf("expected unmarshall to fail on the invalid JSON: %q\n", jsonBytes) } } } // TestCiphertextMarshalling tests that marshalling Ciphertexts to JSON results // in the expected JSON. Also tests that unmarshalling that JSON back to // Ciphertext results in the original Ciphertext.
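// --- Illustrative sketch (added example, not part of the original test file) ---
// A hedged round-trip example for the EncryptBytes/DecryptBytes pair tested
// above: the ciphertext carries a 12-byte GCM nonce prefix plus a 16-byte
// authentication tag, which is where the 28-byte TwofishOverhead constant
// comes from. The function name is hypothetical.
func exampleRoundTrip(plaintext []byte) ([]byte, error) {
	key := GenerateTwofishKey()
	ct := key.EncryptBytes(plaintext)
	// len(ct) == len(plaintext) + TwofishOverhead
	return key.DecryptBytes(ct)
}
// --- end sketch ---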
func TestCiphertextMarshalling(t *testing.T) { // Ciphertexts and corresponding JSONs to test marshalling and // unmarshalling. ciphertextMarshallingTests := []struct { ct Ciphertext jsonBytes []byte }{ {ct: Ciphertext(nil), jsonBytes: []byte("null")}, {ct: Ciphertext(""), jsonBytes: []byte(`""`)}, {ct: Ciphertext("a ciphertext"), jsonBytes: []byte(`"YSBjaXBoZXJ0ZXh0"`) /* base64 encoding of the Ciphertext */}, } for _, test := range ciphertextMarshallingTests { expectedCt := test.ct expectedJSONBytes := test.jsonBytes // Create a copy of expectedCt so Unmarshalling does not modify it, as // we need it later for comparison. var ct Ciphertext if expectedCt == nil { ct = nil } else { ct = make(Ciphertext, len(expectedCt)) copy(ct, expectedCt) } // Marshal Ciphertext to JSON. jsonBytes, err := ct.MarshalJSON() if err != nil { t.Fatal(err) } if !bytes.Equal(jsonBytes, expectedJSONBytes) { // Use %#v instead of %v because %v prints Ciphertexts constructed // with nil and []byte{} identically. t.Fatalf("Ciphertext %#v marshalled incorrectly: expected %q, got %q\n", ct, expectedJSONBytes, jsonBytes) } // Unmarshal back to Ciphertext. err = ct.UnmarshalJSON(jsonBytes) if err != nil { t.Fatal(err) } // Compare resulting Ciphertext with expected Ciphertext. if expectedCt == nil && ct != nil || expectedCt != nil && ct == nil || !bytes.Equal(expectedCt, ct) { // Use %#v instead of %v because %v prints Ciphertexts constructed // with nil and []byte{} identically. t.Errorf("Ciphertext %#v unmarshalled incorrectly: got %#v\n", expectedCt, ct) } } } // TestTwofishNewCipherAssumption tests that the length of a TwofishKey is 16, // 24, or 32 as these are the only cases where twofish.NewCipher(key[:]) // doesn't return an error. func TestTwofishNewCipherAssumption(t *testing.T) { // Generate key. key := GenerateTwofishKey() // Test key length. keyLen := len(key) if keyLen != 16 && keyLen != 24 && keyLen != 32 { t.Errorf("TwofishKey must have length 16, 24, or 32, but generated key has length %d\n", keyLen) } } // TestCipherNewGCMAssumption tests that the BlockSize of a cipher block is 16, // as this is the only case where cipher.NewGCM(block) doesn't return an error. func TestCipherNewGCMAssumption(t *testing.T) { // Generate a key and then cipher block from key. key := GenerateTwofishKey() // Test block size. block := key.NewCipher() if block.BlockSize() != 16 { t.Errorf("cipher must have BlockSize 16, but generated cipher has BlockSize %d\n", block.BlockSize()) } } Sia-1.3.0/crypto/hash.go000066400000000000000000000057031313565667000150420ustar00rootroot00000000000000package crypto // hash.go supplies a few general hashing functions, using the hashing // algorithm blake2b. Because changing the hashing algorithm for Sia has much // stronger implications than changing any of the other algorithms, blake2b is // the only supported algorithm. Sia is not really flexible enough to support // multiple. import ( "bytes" "encoding/hex" "encoding/json" "errors" "hash" "github.com/NebulousLabs/Sia/encoding" "golang.org/x/crypto/blake2b" ) const ( HashSize = 32 ) type ( Hash [HashSize]byte // HashSlice is used for sorting HashSlice []Hash ) var ( ErrHashWrongLen = errors.New("encoded value has the wrong length to be a hash") ) // NewHash returns a blake2b 256-bit hasher. func NewHash() hash.Hash { h, _ := blake2b.New256(nil) // cannot fail with nil argument return h } // HashAll takes a set of objects as input, encodes them all using the encoding // package, and then hashes the result.
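// --- Illustrative sketch (added example, not part of the original file) ---
// A hedged illustration of the three helpers defined around this point:
// HashObject hashes one encoded object, HashAll hashes several encoded
// objects written into a single hasher, and HashBytes hashes raw bytes with
// no encoding step, so the three generally produce different digests for
// "the same" input. The function name is hypothetical.
func exampleHashHelpers() (Hash, Hash, Hash) {
	h1 := HashObject("foo")        // blake2b of the encoding of one object
	h2 := HashAll("foo", "bar")    // blake2b over two encoded objects
	h3 := HashBytes([]byte("foo")) // blake2b of the raw bytes, no encoding
	return h1, h2, h3
}
// --- end sketch ---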
func HashAll(objs ...interface{}) (hash Hash) { h := NewHash() enc := encoding.NewEncoder(h) for _, obj := range objs { enc.Encode(obj) } h.Sum(hash[:0]) return } // HashBytes takes a byte slice and returns the hash of the data. func HashBytes(data []byte) Hash { return Hash(blake2b.Sum256(data)) } // HashObject takes an object as input, encodes it using the encoding package, // and then hashes the result. func HashObject(obj interface{}) (hash Hash) { h := NewHash() encoding.NewEncoder(h).Encode(obj) h.Sum(hash[:0]) return } // These functions implement sort.Interface, allowing hashes to be sorted. func (hs HashSlice) Len() int { return len(hs) } func (hs HashSlice) Less(i, j int) bool { return bytes.Compare(hs[i][:], hs[j][:]) < 0 } func (hs HashSlice) Swap(i, j int) { hs[i], hs[j] = hs[j], hs[i] } // LoadString takes a string, parses the hash value of the string, and sets the // value of the hash equal to the hash value of the string. func (h *Hash) LoadString(s string) error { // *2 because there are 2 hex characters per byte. if len(s) != HashSize*2 { return ErrHashWrongLen } hBytes, err := hex.DecodeString(s) if err != nil { return errors.New("could not unmarshal hash: " + err.Error()) } copy(h[:], hBytes) return nil } // MarshalJSON marshals a hash as a hex string. func (h Hash) MarshalJSON() ([]byte, error) { return json.Marshal(h.String()) } // String prints the hash in hex. func (h Hash) String() string { return hex.EncodeToString(h[:]) } // UnmarshalJSON decodes the json hex string of the hash. func (h *Hash) UnmarshalJSON(b []byte) error { // *2 because there are 2 hex characters per byte. // +2 because the encoded JSON string has a `"` added at the beginning and end. if len(b) != HashSize*2+2 { return ErrHashWrongLen } // b[1 : len(b)-1] cuts off the leading and trailing `"` in the JSON string. hBytes, err := hex.DecodeString(string(b[1 : len(b)-1])) if err != nil { return errors.New("could not unmarshal hash: " + err.Error()) } copy(h[:], hBytes) return nil } Sia-1.3.0/crypto/hash_test.go000066400000000000000000000103251313565667000160750ustar00rootroot00000000000000package crypto import ( "bytes" "encoding/json" "sort" "strings" "testing" "github.com/NebulousLabs/fastrand" ) type ( // TestObject is a struct that's used for testing HashAll and HashObject. The // fields have to be exported so the encoder can read them. TestObject struct { A int B byte C bool D string } ) // TestHashing uses each of the functions in hash.go and verifies that the // results are as expected. func TestHashing(t *testing.T) { // Create a test object. to := TestObject{ A: 12345, B: 5, C: true, D: "testing", } // Call HashObject on the object. var emptyHash Hash h0 := HashObject(to) if h0 == emptyHash { t.Error("HashObject returned the zero hash!") } // Call HashAll on the test object and some other fields. h1 := HashAll( int(122), byte(115), string("test"), to, ) if h1 == emptyHash { t.Error("HashAll returned the zero hash!") } // Call HashBytes on a random byte slice. h2 := HashBytes(fastrand.Bytes(435)) if h2 == emptyHash { t.Error("HashBytes returned the zero hash!") } } // TestHashSorting takes a set of hashes and checks that they can be sorted. func TestHashSorting(t *testing.T) { // Create an unsorted list of hashes. hashes := make([]Hash, 5) hashes[0][0] = 12 hashes[1][0] = 7 hashes[2][0] = 13 hashes[3][0] = 14 hashes[4][0] = 1 // Sort the hashes.
sort.Sort(HashSlice(hashes)) if hashes[0][0] != 1 { t.Error("bad sort") } if hashes[1][0] != 7 { t.Error("bad sort") } if hashes[2][0] != 12 { t.Error("bad sort") } if hashes[3][0] != 13 { t.Error("bad sort") } if hashes[4][0] != 14 { t.Error("bad sort") } } // TestUnitHashMarshalJSON tests that Hashes are correctly marshalled to JSON. func TestUnitHashMarshalJSON(t *testing.T) { h := HashObject("an object") jsonBytes, err := h.MarshalJSON() if err != nil { t.Fatal(err) } if !bytes.Equal(jsonBytes, []byte(`"`+h.String()+`"`)) { t.Errorf("hash %s encoded incorrectly: got %s\n", h, jsonBytes) } } // TestUnitHashUnmarshalJSON tests that unmarshalling invalid JSON will result // in an error. func TestUnitHashUnmarshalJSON(t *testing.T) { // Test unmarshalling invalid data. invalidJSONBytes := [][]byte{ // Invalid JSON. nil, {}, []byte("\""), // JSON of wrong length. []byte(""), []byte(`"` + strings.Repeat("a", HashSize*2-1) + `"`), []byte(`"` + strings.Repeat("a", HashSize*2+1) + `"`), // JSON of right length but invalid Hashes. []byte(`"` + strings.Repeat("z", HashSize*2) + `"`), []byte(`"` + strings.Repeat(".", HashSize*2) + `"`), []byte(`"` + strings.Repeat("\n", HashSize*2) + `"`), } for _, jsonBytes := range invalidJSONBytes { var h Hash err := h.UnmarshalJSON(jsonBytes) if err == nil { t.Errorf("expected unmarshalling to fail on the invalid JSON: %q\n", jsonBytes) } } // Test unmarshalling valid data. expectedH := HashObject("an object") jsonBytes := []byte(`"` + expectedH.String() + `"`) var h Hash err := h.UnmarshalJSON(jsonBytes) if err != nil { t.Fatal(err) } if !bytes.Equal(h[:], expectedH[:]) { t.Errorf("Hash %s unmarshalled incorrectly: got %s\n", expectedH, h) } } // TestHashMarshalling checks that the marshalling of the hash type works as // expected. func TestHashMarshalling(t *testing.T) { h := HashObject("an object") hBytes, err := json.Marshal(h) if err != nil { t.Fatal(err) } var uMarH Hash err = uMarH.UnmarshalJSON(hBytes) if err != nil { t.Fatal(err) } if h != uMarH { t.Error("encoded and decoded hash do not match!") } } // TestHashLoadString checks that the LoadString method of the Hash type is // working properly. func TestHashLoadString(t *testing.T) { h1 := Hash{} h2 := HashObject("tame") h1e := h1.String() h2e := h2.String() var h1d, h2d Hash err := h1d.LoadString(h1e) if err != nil { t.Fatal(err) } err = h2d.LoadString(h2e) if err != nil { t.Fatal(err) } if h1d != h1 { t.Error("decoding h1 failed") } if h2d != h2 { t.Error("decoding h2 failed") } // Try some bogus strings. h1e = h1e + "a" err = h1.LoadString(h1e) if err == nil { t.Fatal("expecting error when decoding hash of too large length") } h1e = h1e[:60] err = h1.LoadString(h1e) if err == nil { t.Fatal("expecting error when decoding hash of too small length") } } Sia-1.3.0/crypto/merkle.go000066400000000000000000000107721313565667000154000ustar00rootroot00000000000000package crypto import ( "bytes" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/merkletree" ) const ( // SegmentSize is the chunk size that is used when taking the Merkle root // of a file. 64 is chosen because bandwidth is scarce and it optimizes for // the smallest possible storage proofs. Using a larger base, even 256 // bytes, would result in substantially faster hashing, but the bandwidth // tradeoff was deemed to be more important, as blockchain space is scarce. SegmentSize = 64 ) // MerkleTree wraps merkletree.Tree, changing some of the function definitions // to assume sia-specific constants and return sia-specific types.
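// A minimal usage sketch: // // t := NewTree() // t.PushObject("a") // t.PushObject("b") // root := t.Root()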
type MerkleTree struct { merkletree.Tree } // NewTree returns a MerkleTree, which can be used for getting Merkle roots and // Merkle proofs on data. See merkletree.Tree for more details. func NewTree() *MerkleTree { return &MerkleTree{*merkletree.New(NewHash())} } // PushObject encodes and adds the hash of the encoded object to the tree as a // leaf. func (t *MerkleTree) PushObject(obj interface{}) { t.Push(encoding.Marshal(obj)) } // Root is a redefinition of merkletree.Tree.Root, returning a Hash instead of // a []byte. func (t *MerkleTree) Root() (h Hash) { copy(h[:], t.Tree.Root()) return } // CachedMerkleTree wraps merkletree.CachedTree, changing some of the function // definitions to assume sia-specific constants and return sia-specific types. type CachedMerkleTree struct { merkletree.CachedTree } // NewCachedTree returns a CachedMerkleTree, which can be used for getting // Merkle roots and proofs from data that has cached subroots. See // merkletree.CachedTree for more details. func NewCachedTree(height uint64) *CachedMerkleTree { return &CachedMerkleTree{*merkletree.NewCachedTree(NewHash(), height)} } // Prove is a redefinition of merkletree.CachedTree.Prove, so that Sia-specific // types are used instead of the generic types used by the parent package. The // base is not a return value because the base is used as input. func (ct *CachedMerkleTree) Prove(base []byte, cachedHashSet []Hash) []Hash { // Turn the input in to a proof set that will be recognized by the high // level tree. cachedProofSet := make([][]byte, len(cachedHashSet)+1) cachedProofSet[0] = base for i := range cachedHashSet { cachedProofSet[i+1] = cachedHashSet[i][:] } _, proofSet, _, _ := ct.CachedTree.Prove(cachedProofSet) // convert proofSet to base and hashSet hashSet := make([]Hash, len(proofSet)-1) for i, proof := range proofSet[1:] { copy(hashSet[i][:], proof) } return hashSet } // Push is a redefinition of merkletree.CachedTree.Push, with the added type // safety of only accepting a hash. func (ct *CachedMerkleTree) Push(h Hash) { ct.CachedTree.Push(h[:]) } // Root is a redefinition of merkletree.CachedTree.Root, returning a Hash // instead of a []byte. func (ct *CachedMerkleTree) Root() (h Hash) { copy(h[:], ct.CachedTree.Root()) return } // CalculateLeaves calculates the number of leaves that would be pushed from // data of size 'dataSize'. func CalculateLeaves(dataSize uint64) uint64 { numSegments := dataSize / SegmentSize if dataSize == 0 || dataSize%SegmentSize != 0 { numSegments++ } return numSegments } // MerkleRoot returns the Merkle root of the input data. func MerkleRoot(b []byte) Hash { t := NewTree() buf := bytes.NewBuffer(b) for buf.Len() > 0 { t.Push(buf.Next(SegmentSize)) } return t.Root() } // MerkleProof builds a Merkle proof that the data at segment 'proofIndex' is a // part of the Merkle root formed by 'b'. func MerkleProof(b []byte, proofIndex uint64) (base []byte, hashSet []Hash) { // Create the tree. t := NewTree() t.SetIndex(proofIndex) // Fill the tree. buf := bytes.NewBuffer(b) for buf.Len() > 0 { t.Push(buf.Next(SegmentSize)) } // Get the proof and convert it to a base + hash set. _, proof, _, _ := t.Prove() if len(proof) == 0 { // There's no proof, because there's no data. Return blank values. return nil, nil } base = proof[0] hashSet = make([]Hash, len(proof)-1) for i, p := range proof[1:] { copy(hashSet[i][:], p) } return base, hashSet } // VerifySegment will verify that a segment, given the proof, is a part of a // Merkle root. 
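// A round-trip sketch using the helpers in this file (i is a hypothetical // segment index within data): // // root := MerkleRoot(data) // base, hashSet := MerkleProof(data, i) // ok := VerifySegment(base, hashSet, CalculateLeaves(uint64(len(data))), i, root)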
func VerifySegment(base []byte, hashSet []Hash, numSegments, proofIndex uint64, root Hash) bool { // convert base and hashSet to proofSet proofSet := make([][]byte, len(hashSet)+1) proofSet[0] = base for i := range hashSet { proofSet[i+1] = hashSet[i][:] } return merkletree.VerifyProof(NewHash(), root[:], proofSet, proofIndex, numSegments) } Sia-1.3.0/crypto/merkle_test.go000066400000000000000000000110501313565667000164270ustar00rootroot00000000000000package crypto import ( "testing" "github.com/NebulousLabs/fastrand" ) // TestTreeBuilder builds a tree and gets the merkle root. func TestTreeBuilder(t *testing.T) { tree := NewTree() tree.PushObject("a") tree.PushObject("b") _ = tree.Root() // Correctness is assumed, as it's tested by the merkletree package. This // function is really for code coverage. } // TestCalculateLeaves probes the CalculateLeaves function. func TestCalculateLeaves(t *testing.T) { tests := []struct { size, expSegs uint64 }{ {0, 1}, {63, 1}, {64, 1}, {65, 2}, {127, 2}, {128, 2}, {129, 3}, } for i, test := range tests { if segs := CalculateLeaves(test.size); segs != test.expSegs { t.Errorf("miscalculation for test %v: expected %v, got %v", i, test.expSegs, segs) } } } // TestStorageProof builds a storage proof and checks that it verifies // correctly. func TestStorageProof(t *testing.T) { // Generate proof data. numSegments := uint64(7) data := fastrand.Bytes(int(numSegments * SegmentSize)) rootHash := MerkleRoot(data) // Create and verify proofs for all indices. for i := uint64(0); i < numSegments; i++ { baseSegment, hashSet := MerkleProof(data, i) if !VerifySegment(baseSegment, hashSet, numSegments, i, rootHash) { t.Error("Proof", i, "did not pass verification") } } // Try an incorrect proof. baseSegment, hashSet := MerkleProof(data, 3) if VerifySegment(baseSegment, hashSet, numSegments, 4, rootHash) { t.Error("Verified a bad proof") } } // TestNonMultipleLeafSizeStorageProof builds a storage proof that has // a last leaf of size less than SegmentSize. func TestNonMultipleLeafSizeStorageProof(t *testing.T) { // Generate proof data. data := fastrand.Bytes((2 * SegmentSize) + 10) rootHash := MerkleRoot(data) // Create and verify a proof for the last index. baseSegment, hashSet := MerkleProof(data, 2) if !VerifySegment(baseSegment, hashSet, 3, 2, rootHash) { t.Error("padded segment proof failed") } } // TestCachedTree tests the cached tree functions of the package. func TestCachedTree(t *testing.T) { if testing.Short() { t.SkipNow() } // Build a cached tree out of 4 subtrees, each subtree of height 2 (4 // elements). tree1Bytes := fastrand.Bytes(SegmentSize * 4) tree2Bytes := fastrand.Bytes(SegmentSize * 4) tree3Bytes := fastrand.Bytes(SegmentSize * 4) tree4Bytes := fastrand.Bytes(SegmentSize * 4) tree1Root := MerkleRoot(tree1Bytes) tree2Root := MerkleRoot(tree2Bytes) tree3Root := MerkleRoot(tree3Bytes) tree4Root := MerkleRoot(tree4Bytes) fullRoot := MerkleRoot(append(tree1Bytes, append(tree2Bytes, append(tree3Bytes, tree4Bytes...)...)...)) // Get a cached proof for index 0.
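// Each subtree covers 4 segments, so the full tree has 4*4 = 16 leaves; // global segment index 0 falls at index 0 of tree1.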
base, cachedHashSet := MerkleProof(tree1Bytes, 0) if !VerifySegment(base, cachedHashSet, 4, 0, tree1Root) { t.Fatal("the proof for the subtree was invalid") } ct := NewCachedTree(2) ct.SetIndex(0) ct.Push(tree1Root) ct.Push(tree2Root) ct.Push(tree3Root) ct.Push(tree4Root) hashSet := ct.Prove(base, cachedHashSet) if !VerifySegment(base, hashSet, 4*4, 0, fullRoot) { t.Fatal("cached proof construction appears unsuccessful") } if ct.Root() != fullRoot { t.Fatal("cached Merkle root is not matching the full Merkle root") } // Get a cached proof for index 6. base, cachedHashSet = MerkleProof(tree2Bytes, 2) if !VerifySegment(base, cachedHashSet, 4, 2, tree2Root) { t.Fatal("the proof for the subtree was invalid") } ct = NewCachedTree(2) ct.SetIndex(6) ct.Push(tree1Root) ct.Push(tree2Root) ct.Push(tree3Root) ct.Push(tree4Root) hashSet = ct.Prove(base, cachedHashSet) if !VerifySegment(base, hashSet, 4*4, 6, fullRoot) { t.Fatal("cached proof construction appears unsuccessful") } if ct.Root() != fullRoot { t.Fatal("cached Merkle root is not matching the full Merkle root") } } // TestOddDataSize checks that MerkleRoot and MerkleProof still // function correctly if you provide data which does not have a size evenly // divisible by SegmentSize. func TestOddDataSize(t *testing.T) { if testing.Short() { t.SkipNow() } // Create some random data that's not evenly padded. for i := 0; i < 25; i++ { randFullSegments := fastrand.Intn(65) randOverflow := fastrand.Intn(63) + 1 randProofIndex := fastrand.Intn(randFullSegments + 1) data := fastrand.Bytes(SegmentSize*randFullSegments + randOverflow) root := MerkleRoot(data) base, hashSet := MerkleProof(data, uint64(randProofIndex)) if !VerifySegment(base, hashSet, uint64(randFullSegments)+1, uint64(randProofIndex), root) { t.Error("Padded data proof failed for", randFullSegments, randOverflow, randProofIndex) } } } Sia-1.3.0/crypto/signatures.go000066400000000000000000000063411313565667000163020ustar00rootroot00000000000000package crypto import ( "bytes" "errors" "io" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/fastrand" "golang.org/x/crypto/ed25519" ) const ( // EntropySize defines the amount of entropy necessary to do secure // cryptographic operations, in bytes. EntropySize = 32 // PublicKeySize defines the size of public keys in bytes. PublicKeySize = ed25519.PublicKeySize // SecretKeySize defines the size of secret keys in bytes. SecretKeySize = ed25519.PrivateKeySize // SignatureSize defines the size of signatures in bytes. SignatureSize = ed25519.SignatureSize ) var ( // ErrInvalidSignature is returned if a signature is provided that does not // match the data and public key. ErrInvalidSignature = errors.New("invalid signature") ) type ( // PublicKey is an object that can be used to verify signatures. PublicKey [PublicKeySize]byte // SecretKey can be used to sign data for the corresponding public key. SecretKey [SecretKeySize]byte // Signature proves that data was signed by the owner of a particular // public key's corresponding secret key. Signature [SignatureSize]byte ) // PublicKey returns the public key that corresponds to a secret key. func (sk SecretKey) PublicKey() (pk PublicKey) { copy(pk[:], sk[SecretKeySize-PublicKeySize:]) return } // GenerateKeyPair creates a public-secret keypair that can be used to sign and verify // messages.
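// A typical round trip (sketch; h stands in for some Hash to be signed): // // sk, pk := GenerateKeyPair() // sig := SignHash(h, sk) // err := VerifyHash(h, pk, sig) // nil iff the signature is valid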
func GenerateKeyPair() (sk SecretKey, pk PublicKey) { // no error possible when using fastrand.Reader epk, esk, _ := ed25519.GenerateKey(fastrand.Reader) copy(sk[:], esk) copy(pk[:], epk) return } // GenerateKeyPairDeterministic generates keys deterministically using the input // entropy. The input entropy must be 32 bytes in length. func GenerateKeyPairDeterministic(entropy [EntropySize]byte) (sk SecretKey, pk PublicKey) { // no error possible when using bytes.Reader epk, esk, _ := ed25519.GenerateKey(bytes.NewReader(entropy[:])) copy(sk[:], esk) copy(pk[:], epk) return } // ReadSignedObject reads a length-prefixed object prefixed by its signature, // and verifies the signature. func ReadSignedObject(r io.Reader, obj interface{}, maxLen uint64, pk PublicKey) error { // read the signature var sig Signature err := encoding.NewDecoder(r).Decode(&sig) if err != nil { return err } // read the encoded object encObj, err := encoding.ReadPrefix(r, maxLen) if err != nil { return err } // verify the signature if err := VerifyHash(HashBytes(encObj), pk, sig); err != nil { return err } // decode the object return encoding.Unmarshal(encObj, obj) } // SignHash signs a message using a secret key. func SignHash(data Hash, sk SecretKey) (sig Signature) { copy(sig[:], ed25519.Sign(sk[:], data[:])) return } // VerifyHash uses a public key and input data to verify a signature. func VerifyHash(data Hash, pk PublicKey, sig Signature) error { verifies := ed25519.Verify(pk[:], data[:], sig[:]) if !verifies { return ErrInvalidSignature } return nil } // WriteSignedObject writes a length-prefixed object prefixed by its signature. func WriteSignedObject(w io.Writer, obj interface{}, sk SecretKey) error { objBytes := encoding.Marshal(obj) sig := SignHash(HashBytes(objBytes), sk) return encoding.NewEncoder(w).EncodeAll(sig, objBytes) } Sia-1.3.0/crypto/signatures_test.go000066400000000000000000000116261313565667000173430ustar00rootroot00000000000000package crypto import ( "bytes" "testing" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/fastrand" ) // TestUnitSignatureEncoding creates and encodes a public key, verifies that it // decodes correctly, then does the same with a signature. func TestUnitSignatureEncoding(t *testing.T) { // Create a dummy key pair. var sk SecretKey sk[0] = 4 sk[32] = 5 pk := sk.PublicKey() // Marshal and unmarshal the public key. marshalledPK := encoding.Marshal(pk) var unmarshalledPK PublicKey err := encoding.Unmarshal(marshalledPK, &unmarshalledPK) if err != nil { t.Fatal(err) } // Test the public keys for equality. if pk != unmarshalledPK { t.Error("pubkey not the same after marshalling and unmarshalling") } // Create a signature using the secret key. var signedData Hash fastrand.Read(signedData[:]) sig := SignHash(signedData, sk) // Marshal and unmarshal the signature. marshalledSig := encoding.Marshal(sig) var unmarshalledSig Signature err = encoding.Unmarshal(marshalledSig, &unmarshalledSig) if err != nil { t.Fatal(err) } // Test signatures for equality. if sig != unmarshalledSig { t.Error("signature not same after marshalling and unmarshalling") } } // TestUnitSigning creates a bunch of keypairs and signs random data with each of // them. func TestUnitSigning(t *testing.T) { if testing.Short() { t.SkipNow() } // Try a bunch of signatures because at one point there was a library that // worked around 98% of the time. Tests would usually pass, but 200 // iterations would normally cause a failure.
iterations := 200 for i := 0; i < iterations; i++ { // Create dummy key pair. sk, pk := GenerateKeyPair() // Generate and sign the data. var randData Hash fastrand.Read(randData[:]) sig := SignHash(randData, sk) // Verify the signature. err := VerifyHash(randData, pk, sig) if err != nil { t.Fatal(err) } // Attempt to verify after the data has been altered. randData[0] += 1 err = VerifyHash(randData, pk, sig) if err != ErrInvalidSignature { t.Fatal(err) } // Restore the data and make sure the signature is valid again. randData[0] -= 1 err = VerifyHash(randData, pk, sig) if err != nil { t.Fatal(err) } // Attempt to verify after the signature has been altered. sig[0] += 1 err = VerifyHash(randData, pk, sig) if err != ErrInvalidSignature { t.Fatal(err) } } } // TestIntegrationSigKeyGeneration is an integration test checking that // GenerateKeyPair and GenerateKeyPairDeterministic accurately create keys. func TestIntegrationSigKeyGeneration(t *testing.T) { if testing.Short() { t.SkipNow() } message := HashBytes([]byte{'m', 's', 'g'}) // Create a random key and use it. randSecKey, randPubKey := GenerateKeyPair() sig := SignHash(message, randSecKey) err := VerifyHash(message, randPubKey, sig) if err != nil { t.Error(err) } // Corrupt the signature sig[0]++ err = VerifyHash(message, randPubKey, sig) if err == nil { t.Error("corruption failed") } // Create a deterministic key and use it. var detEntropy [EntropySize]byte detEntropy[0] = 35 detSecKey, detPubKey := GenerateKeyPairDeterministic(detEntropy) sig = SignHash(message, detSecKey) err = VerifyHash(message, detPubKey, sig) if err != nil { t.Error(err) } // Corrupt the signature sig[0]++ err = VerifyHash(message, detPubKey, sig) if err == nil { t.Error("corruption failed") } } // TestReadWriteSignedObject tests the ReadSignedObject and WriteSignedObject // functions, which are inverses of each other. func TestReadWriteSignedObject(t *testing.T) { sk, pk := GenerateKeyPair() // Write signed object into buffer. b := new(bytes.Buffer) err := WriteSignedObject(b, "foo", sk) if err != nil { t.Fatal(err) } // Keep a copy of b's bytes. buf := b.Bytes() // Read and verify object. var read string err = ReadSignedObject(b, &read, 11, pk) if err != nil { t.Fatal(err) } if read != "foo" { t.Fatal("encode/decode mismatch: expected 'foo', got", []byte(read)) } // Check that maxlen is being respected. b = bytes.NewBuffer(buf) // reset b err = ReadSignedObject(b, &read, 10, pk) if err == nil || err.Error() != "length 11 exceeds maxLen of 10" { t.Fatal("expected length error, got", err) } // Disrupt the decoding to get coverage on the failure branch. err = ReadSignedObject(b, &read, 11, pk) if err == nil || err.Error() != "could not decode type crypto.Signature: unexpected EOF" { t.Fatal(err) } // Try with an invalid signature. buf[0]++ // alter the first byte of the signature, invalidating it. b = bytes.NewBuffer(buf) // reset b err = ReadSignedObject(b, &read, 11, pk) if err != ErrInvalidSignature { t.Fatal(err) } } // TestUnitPublicKey tests the PublicKey method func TestUnitPublicKey(t *testing.T) { for i := 0; i < 1000; i++ { sk, pk := GenerateKeyPair() if sk.PublicKey() != pk { t.Error("PublicKey does not match actual public key:", pk, sk.PublicKey()) } } } Sia-1.3.0/doc/000077500000000000000000000000001313565667000130105ustar00rootroot00000000000000Sia-1.3.0/doc/API.md000066400000000000000000001374571313565667000137570ustar00rootroot00000000000000Siad API ======== Sia uses semantic versioning and is backwards compatible to version v1.0.0.
API calls return either JSON or no content. Success is indicated by 2xx HTTP status codes, while errors are indicated by 4xx and 5xx HTTP status codes. If an endpoint does not specify its expected status code, refer to [#standard-responses](#standard-responses). There may be functional API calls which are not documented. These are not guaranteed to be supported beyond the current release, and should not be used in production. Notes: - Requests must set their User-Agent string to contain the substring "Sia-Agent". - By default, siad listens on "localhost:9980". This can be changed using the `--api-addr` flag when running siad. - **Do not bind or expose the API to a non-loopback address unless you are aware of the possible dangers.** Example GET curl call: ``` curl -A "Sia-Agent" "localhost:9980/wallet/transactions?startheight=1&endheight=250" ``` Example POST curl call: ``` curl -A "Sia-Agent" --data "amount=123&destination=abcd" "localhost:9980/wallet/siacoins" ``` Standard responses ------------------ #### Success The standard response indicating the request was successfully processed is HTTP status code `204 No Content`. If the request was successfully processed and the server responded with JSON, the HTTP status code is `200 OK`. Specific endpoints may specify other 2xx status codes on success. #### Error The standard error response indicating the request failed for any reason is a 4xx or 5xx HTTP status code with an error JSON object describing the error. ```javascript { "message": String // There may be additional fields depending on the specific error. } ``` Authentication -------------- API authentication can be enabled with the `--authenticate-api` siad flag. Authentication is HTTP Basic Authentication as described in [RFC 2617](https://tools.ietf.org/html/rfc2617), however, the username is the empty string. The flag does not enforce authentication on all API endpoints. Only endpoints that expose sensitive information or modify state require authentication. For example, if the API password is "foobar" the request header should include ``` Authorization: Basic OmZvb2Jhcg== ``` Units ----- Unless otherwise specified, all parameters should be specified in their smallest possible unit. For example, size should always be specified in bytes and Siacoins should be specified in hastings. JSON values returned by the API will also use the smallest possible unit, unless otherwise specified. If a number is returned as a string in JSON, it should be treated as an arbitrary-precision number (bignum), and it should be parsed with your language's corresponding bignum library. Currency values are the most common example where this is necessary. Table of contents ----------------- - [Daemon](#daemon) - [Consensus](#consensus) - [Gateway](#gateway) - [Host](#host) - [Host DB](#host-db) - [Miner](#miner) - [Renter](#renter) - [Transaction Pool](#transaction-pool) - [Wallet](#wallet) Daemon ------ | Route | HTTP verb | | ----------------------------------------- | --------- | | [/daemon/constants](#daemonconstants-get) | GET | | [/daemon/stop](#daemonstop-get) | GET | | [/daemon/version](#daemonversion-get) | GET | For examples and detailed descriptions of request and response parameters, refer to [Daemon.md](/doc/api/Daemon.md). #### /daemon/constants [GET] returns the set of constants in use.
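An example call, following the conventions above (assuming the default API address):

```
curl -A "Sia-Agent" "localhost:9980/daemon/constants"
```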
###### JSON Response [(with comments)](/doc/api/Daemon.md#json-response) ```javascript { "blockfrequency": 600, // seconds per block "blocksizelimit": 2000000, // bytes "extremefuturethreshold": 10800, // seconds "futurethreshold": 10800, // seconds "genesistimestamp": 1257894000, // Unix time "maturitydelay": 144, // blocks "mediantimestampwindow": 11, // blocks "siafundcount": "10000", "siafundportion": "39/1000", "targetwindow": 1000, // blocks "initialcoinbase": 300000, // Siacoins (see note in Daemon.md) "minimumcoinbase": 30000, // Siacoins (see note in Daemon.md) "roottarget": [0,0,0,0,32,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "rootdepth": [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255], "maxadjustmentup": "5/2", "maxadjustmentdown": "2/5", "siacoinprecision": "1000000000000000000000000" // hastings per siacoin } ``` #### /daemon/stop [GET] cleanly shuts down the daemon. May take a few seconds. ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /daemon/version [GET] returns the version of the Sia daemon currently running. ###### JSON Response [(with comments)](/doc/api/Daemon.md#json-response-1) ```javascript { "version": "1.0.0" } ``` Consensus --------- | Route | HTTP verb | | --------------------------------------------------------------------------- | --------- | | [/consensus](#consensus-get) | GET | | [/consensus/validate/transactionset](#consensusvalidatetransactionset-post) | POST | For examples and detailed descriptions of request and response parameters, refer to [Consensus.md](/doc/api/Consensus.md). #### /consensus [GET] returns information about the consensus set, such as the current block height. ###### JSON Response [(with comments)](/doc/api/Consensus.md#json-response) ```javascript { "synced": true, "height": 62248, "currentblock": "00000000000008a84884ba827bdc868a17ba9c14011de33ff763bd95779a9cf1", "target": [0,0,0,0,0,0,11,48,125,79,116,89,136,74,42,27,5,14,10,31,23,53,226,238,202,219,5,204,38,32,59,165], "difficulty": "1234" } ``` #### /consensus/validate/transactionset [POST] validates a set of transactions using the current utxo set. ###### Request Body Bytes Since transactions may be large, the transaction set is supplied in the POST body, encoded in JSON format. ###### Response standard success or error response. See [#standard-responses](#standard-responses). Gateway ------- | Route | HTTP verb | | ---------------------------------------------------------------------------------- | --------- | | [/gateway](#gateway-get-example) | GET | | [/gateway/connect/:___netaddress___](#gatewayconnectnetaddress-post-example) | POST | | [/gateway/disconnect/:___netaddress___](#gatewaydisconnectnetaddress-post-example) | POST | For examples and detailed descriptions of request and response parameters, refer to [Gateway.md](/doc/api/Gateway.md). #### /gateway [GET] [(example)](/doc/api/Gateway.md#gateway-info) returns information about the gateway, including the list of connected peers. ###### JSON Response [(with comments)](/doc/api/Gateway.md#json-response) ```javascript { "netaddress": String, "peers": []{ "netaddress": String, "version": String, "inbound": Boolean } } ``` #### /gateway/connect/:___netaddress___ [POST] [(example)](/doc/api/Gateway.md#connecting-to-a-peer) connects the gateway to a peer. The peer is added to the node list if it is not already present. 
The node list is the list of all nodes the gateway knows about, but is not necessarily connected to. ###### Path Parameters [(with comments)](/doc/api/Gateway.md#path-parameters) ``` :netaddress ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /gateway/disconnect/:___netaddress___ [POST] [(example)](/doc/api/Gateway.md#disconnecting-from-a-peer) disconnects the gateway from a peer. The peer remains in the node list. ###### Path Parameters [(with comments)](/doc/api/Gateway.md#path-parameters-1) ``` :netaddress ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). Host ---- | Route | HTTP verb | | ------------------------------------------------------------------------------------------ | --------- | | [/host](#host-get) | GET | | [/host](#host-post) | POST | | [/host/announce](#hostannounce-post) | POST | | [/host/estimatescore](#hostestimatescore-get) | GET | | [/host/storage](#hoststorage-get) | GET | | [/host/storage/folders/add](#hoststoragefoldersadd-post) | POST | | [/host/storage/folders/remove](#hoststoragefoldersremove-post) | POST | | [/host/storage/folders/resize](#hoststoragefoldersresize-post) | POST | | [/host/storage/sectors/delete/:___merkleroot___](#hoststoragesectorsdeletemerkleroot-post) | POST | For examples and detailed descriptions of request and response parameters, refer to [Host.md](/doc/api/Host.md). #### /host [GET] fetches status information about the host. ###### JSON Response [(with comments)](/doc/api/Host.md#json-response) ```javascript { "externalsettings": { "acceptingcontracts": true, "maxdownloadbatchsize": 17825792, // bytes "maxduration": 25920, // blocks "maxrevisebatchsize": 17825792, // bytes "netaddress": "123.456.789.0:9982", "remainingstorage": 35000000000, // bytes "sectorsize": 4194304, // bytes "totalstorage": 35000000000, // bytes "unlockhash": "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789ab", "windowsize": 144, // blocks "collateral": "57870370370", // hastings / byte / block "maxcollateral": "100000000000000000000000000000", // hastings "contractprice": "30000000000000000000000000", // hastings "downloadbandwidthprice": "250000000000000", // hastings / byte "storageprice": "231481481481", // hastings / byte / block "uploadbandwidthprice": "100000000000000", // hastings / byte "revisionnumber": 0, "version": "1.0.0" }, "financialmetrics": { "contractcount": 2, "contractcompensation": "123", // hastings "potentialcontractcompensation": "123", // hastings "lockedstoragecollateral": "123", // hastings "lostrevenue": "123", // hastings "loststoragecollateral": "123", // hastings "potentialstoragerevenue": "123", // hastings "riskedstoragecollateral": "123", // hastings "storagerevenue": "123", // hastings "transactionfeeexpenses": "123", // hastings "downloadbandwidthrevenue": "123", // hastings "potentialdownloadbandwidthrevenue": "123", // hastings "potentialuploadbandwidthrevenue": "123", // hastings "uploadbandwidthrevenue": "123" // hastings }, "internalsettings": { "acceptingcontracts": true, "maxdownloadbatchsize": 17825792, // bytes "maxduration": 25920, // blocks "maxrevisebatchsize": 17825792, // bytes "netaddress": "123.456.789.0:9982", "windowsize": 144, // blocks "collateral": "57870370370", // hastings / byte / block "collateralbudget": "2000000000000000000000000000000", // hastings "maxcollateral": "100000000000000000000000000000", // hastings "mincontractprice": "30000000000000000000000000", 
// hastings "mindownloadbandwidthprice": "250000000000000", // hastings / byte "minstorageprice": "231481481481", // hastings / byte / block "minuploadbandwidthprice": "100000000000000" // hastings / byte }, "networkmetrics": { "downloadcalls": 0, "errorcalls": 1, "formcontractcalls": 2, "renewcalls": 3, "revisecalls": 4, "settingscalls": 5, "unrecognizedcalls": 6 }, "connectabilitystatus": "checking", "workingstatus": "checking" } ``` #### /host [POST] configures hosting parameters. All parameters are optional; unspecified parameters will be left unchanged. ###### Query String Parameters [(with comments)](/doc/api/Host.md#query-string-parameters) ``` acceptingcontracts // Optional, true / false maxdownloadbatchsize // Optional, bytes maxduration // Optional, blocks maxrevisebatchsize // Optional, bytes netaddress // Optional windowsize // Optional, blocks collateral // Optional, hastings / byte / block collateralbudget // Optional, hastings maxcollateral // Optional, hastings mincontractprice // Optional, hastings mindownloadbandwidthprice // Optional, hastings / byte minstorageprice // Optional, hastings / byte / block minuploadbandwidthprice // Optional, hastings / byte ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /host/announce [POST] Announces the host to the network as a source of storage. Generally only needs to be called once. ###### Query String Parameters [(with comments)](/doc/api/Host.md#query-string-parameters-1) ``` netaddress string // Optional ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /host/storage [GET] gets a list of folders tracked by the host's storage manager. ###### JSON Response [(with comments)](/doc/api/Host.md#json-response-1) ```javascript { "folders": [ { "path": "/home/foo/bar", "capacity": 50000000000, // bytes "capacityremaining": 100000, // bytes "failedreads": 0, "failedwrites": 1, "successfulreads": 2, "successfulwrites": 3 } ] } ``` #### /host/storage/folders/add [POST] adds a storage folder to the manager. The manager may not check that there is enough space available on-disk to support as much storage as requested ###### Query String Parameters [(with comments)](/doc/api/Host.md#query-string-parameters-2) ``` path // Required size // bytes, Required ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /host/storage/folders/remove [POST] remove a storage folder from the manager. All storage on the folder will be moved to other storage folders, meaning that no data will be lost. If the manager is unable to save data, an error will be returned and the operation will be stopped. ###### Query String Parameters [(with comments)](/doc/api/Host.md#query-string-parameters-3) ``` path // Required force // bool, Optional, default is false ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /host/storage/folders/resize [POST] grows or shrink a storage folder in the manager. The manager may not check that there is enough space on-disk to support growing the storage folder, but should gracefully handle running out of space unexpectedly. When shrinking a storage folder, any data in the folder that needs to be moved will be placed into other storage folders, meaning that no data will be lost. If the manager is unable to migrate the data, an error will be returned and the operation will be stopped. 
###### Query String Parameters [(with comments)](/doc/api/Host.md#query-string-parameters-4) ``` path // Required newsize // bytes, Required ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /host/storage/sectors/delete/:___merkleroot___ [POST] deletes a sector, meaning that the manager will be unable to upload that sector and be unable to provide a storage proof on that sector. This endpoint is for removing the data entirely, and will remove instances of the sector appearing at all heights. The primary purpose is to comply with legal requests to remove data. ###### Path Parameters [(with comments)](/doc/api/Host.md#path-parameters) ``` :merkleroot ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /host/estimatescore [GET] returns the estimated HostDB score of the host using its current settings, combined with the provided settings. ###### JSON Response [(with comments)](/doc/api/Host.md#json-response-2) ```javascript { "estimatedscore": "123456786786786786786786786742133", "conversionrate": 95 } ``` ###### Query String Parameters [(with comments)](/doc/api/Host.md#query-string-parameters-5) ``` acceptingcontracts // Optional, true / false maxdownloadbatchsize // Optional, bytes maxduration // Optional, blocks maxrevisebatchsize // Optional, bytes netaddress // Optional windowsize // Optional, blocks collateral // Optional, hastings / byte / block collateralbudget // Optional, hastings maxcollateral // Optional, hastings mincontractprice // Optional, hastings mindownloadbandwidthprice // Optional, hastings / byte minstorageprice // Optional, hastings / byte / block minuploadbandwidthprice // Optional, hastings / byte ``` Host DB ------- | Route | HTTP verb | | ------------------------------------------------------- | --------- | | [/hostdb/active](#hostdbactive-get-example) | GET | | [/hostdb/all](#hostdball-get-example) | GET | | [/hostdb/hosts/:___pubkey___](#hostdbhostspubkey-get-example) | GET | For examples and detailed descriptions of request and response parameters, refer to [HostDB.md](/doc/api/HostDB.md). #### /hostdb/active [GET] [(example)](/doc/api/HostDB.md#active-hosts) lists all of the active hosts known to the renter, sorted by preference. ###### Query String Parameters [(with comments)](/doc/api/HostDB.md#query-string-parameters) ``` numhosts // Optional ``` ###### JSON Response [(with comments)](/doc/api/HostDB.md#json-response) ```javascript { "hosts": [ { "acceptingcontracts": true, "maxdownloadbatchsize": 17825792, // bytes "maxduration": 25920, // blocks "maxrevisebatchsize": 17825792, // bytes "netaddress": "123.456.789.2:9982", "remainingstorage": 35000000000, // bytes "sectorsize": 4194304, // bytes "totalstorage": 35000000000, // bytes "unlockhash": "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789ab", "windowsize": 144, // blocks "publickey": { "algorithm": "ed25519", "key": "RW50cm9weSBpc24ndCB3aGF0IGl0IHVzZWQgdG8gYmU=" }, "publickeystring": "ed25519:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", } ] } ``` #### /hostdb/all [GET] [(example)](/doc/api/HostDB.md#all-hosts) lists all of the hosts known to the renter. Hosts are not guaranteed to be in any particular order, and the order may change in subsequent calls.
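An example call (assuming the default API address):

```
curl -A "Sia-Agent" "localhost:9980/hostdb/all"
```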
###### JSON Response [(with comments)](/doc/api/HostDB.md#json-response-1) ```javascript { "hosts": [ { "acceptingcontracts": true, "maxdownloadbatchsize": 17825792, // bytes "maxduration": 25920, // blocks "maxrevisebatchsize": 17825792, // bytes "netaddress": "123.456.789.0:9982", "remainingstorage": 35000000000, // bytes "sectorsize": 4194304, // bytes "totalstorage": 35000000000, // bytes "unlockhash": "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789ab", "windowsize": 144, // blocks "publickey": { "algorithm": "ed25519", "key": "RW50cm9weSBpc24ndCB3aGF0IGl0IHVzZWQgdG8gYmU=" }, "publickeystring": "ed25519:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", } ] } ``` #### /hostdb/hosts/:___pubkey___ [GET] [(example)](/doc/api/HostDB.md#host-details) fetches detailed information about a particular host, including metrics regarding the score of the host within the database. It should be noted that each renter uses different metrics for selecting hosts, and that a good score in one hostdb does not mean that the host will be successful on the network overall. ###### Path Parameters [(with comments)](/doc/api/HostDB.md#path-parameters) ``` :pubkey ``` ###### JSON Response [(with comments)](/doc/api/HostDB.md#json-response-2) ```javascript { "entry": { "acceptingcontracts": true, "maxdownloadbatchsize": 17825792, // bytes "maxduration": 25920, // blocks "maxrevisebatchsize": 17825792, // bytes "netaddress": "123.456.789.0:9982", "remainingstorage": 35000000000, // bytes "sectorsize": 4194304, // bytes "totalstorage": 35000000000, // bytes "unlockhash": "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789ab", "windowsize": 144, // blocks "publickey": { "algorithm": "ed25519", "key": "RW50cm9weSBpc24ndCB3aGF0IGl0IHVzZWQgdG8gYmU=" }, "publickeystring": "ed25519:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", }, "scorebreakdown": { "score": 1, "ageadjustment": 0.1234, "burnadjustment": 0.1234, "collateraladjustment": 23.456, "interactionadjustment": 0.1234, "priceadjustment": 0.1234, "storageremainingadjustment": 0.1234, "uptimeadjustment": 0.1234, "versionadjustment": 0.1234, } } ``` Miner ----- | Route | HTTP verb | | ---------------------------------- | --------- | | [/miner](#miner-get) | GET | | [/miner/start](#minerstart-get) | GET | | [/miner/stop](#minerstop-get) | GET | | [/miner/header](#minerheader-get) | GET | | [/miner/header](#minerheader-post) | POST | For examples and detailed descriptions of request and response parameters, refer to [Miner.md](/doc/api/Miner.md). #### /miner [GET] returns the status of the miner. ###### JSON Response [(with comments)](/doc/api/Miner.md#json-response) ```javascript { "blocksmined": 9001, "cpuhashrate": 1337, "cpumining": false, "staleblocksmined": 0, } ``` #### /miner/start [GET] starts a single-threaded cpu miner. Does nothing if the cpu miner is already running. ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /miner/stop [GET] stops the cpu miner. Does nothing if the cpu miner is not running. ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /miner/header [GET] provides a block header that is ready to be mined on for work. ###### Byte Response For efficiency, the header for work is returned as a raw byte encoding of the header, rather than encoded to JSON.
Refer to [Miner.md#byte-response](/doc/api/Miner.md#byte-response) for a detailed description of the byte encoding. #### /miner/header [POST] submits a header that has passed the POW. ###### Request Body Bytes For efficiency, headers are submitted as raw byte encodings of the header in the body of the request, rather than as a query string parameter or path parameter. The request body should contain only the 80 bytes of the encoded header. The encoding is the same encoding used by the `/miner/header [GET]` endpoint. Refer to [Miner.md#byte-response](/doc/api/Miner.md#byte-response) for a detailed description of the byte encoding. Renter ------ | Route | HTTP verb | | ----------------------------------------------------------------------- | --------- | | [/renter](#renter-get) | GET | | [/renter](#renter-post) | POST | | [/renter/contracts](#rentercontracts-get) | GET | | [/renter/downloads](#renterdownloads-get) | GET | | [/renter/prices](#renterprices-get) | GET | | [/renter/files](#renterfiles-get) | GET | | [/renter/delete/*___siapath___](#renterdeletesiapath-post) | POST | | [/renter/download/*___siapath___](#renterdownloadsiapath-get) | GET | | [/renter/downloadasync/*___siapath___](#renterdownloadasyncsiapath-get) | GET | | [/renter/rename/*___siapath___](#renterrenamesiapath-post) | POST | | [/renter/upload/*___siapath___](#renteruploadsiapath-post) | POST | For examples and detailed descriptions of request and response parameters, refer to [Renter.md](/doc/api/Renter.md). #### /renter [GET] returns the current settings along with metrics on the renter's spending. ###### JSON Response [(with comments)](/doc/api/Renter.md#json-response) ```javascript { "settings": { "allowance": { "funds": "1234", // hastings "hosts": 24, "period": 6048, // blocks "renewwindow": 3024 // blocks } }, "financialmetrics": { "contractspending": "1234", // hastings "downloadspending": "5678", // hastings "storagespending": "1234", // hastings "uploadspending": "5678", // hastings "unspent": "1234" // hastings } } ``` #### /renter [POST] modifies settings that control the renter's behavior. ###### Query String Parameters [(with comments)](/doc/api/Renter.md#query-string-parameters) ``` funds // hastings hosts period // block height renewwindow // block height ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /renter/contracts [GET] returns active contracts. Expired contracts are not included. ###### JSON Response [(with comments)](/doc/api/Renter.md#json-response-1) ```javascript { "contracts": [ { // Amount of contract funds that have been spent on downloads. "downloadspending": "1234", // hastings // Block height that the file contract ends on. "endheight": 50000, // block height // Fees paid in order to form the file contract. "fees": "1234", // hastings // Public key of the host the contract was formed with. "hostpublickey": { "algorithm": "ed25519", "key": "RW50cm9weSBpc24ndCB3aGF0IGl0IHVzZWQgdG8gYmU=" }, // ID of the file contract. "id": "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef", // A signed transaction containing the most recent contract revision. "lasttransaction": {}, // Address of the host the file contract was formed with. "netaddress": "12.34.56.78:9", // Remaining funds left for the renter to spend on uploads & downloads. "renterfunds": "1234", // hastings // Size of the file contract, which is typically equal to the number of // bytes that have been uploaded to the host.
"size": 8192, // bytes // Block height that the file contract began on. "startheight": 50000, // block height // Amount of contract funds that have been spent on storage. "storagespending": "1234", // hastings // Total cost to the wallet of forming the file contract. // This includes both the fees and the funds allocated in the contract. "totalcost": "1234", // hastings // Amount of contract funds that have been spent on uploads. "uploadspending": "1234" // hastings } ] } ``` #### /renter/downloads [GET] lists all files in the download queue. ###### JSON Response [(with comments)](/doc/api/Renter.md#json-response-2) ```javascript { "downloads": [ { "siapath": "foo/bar.txt", "destination": "/home/users/alice/bar.txt", "filesize": 8192, // bytes "received": 4096, // bytes "starttime": "2009-11-10T23:00:00Z", // RFC 3339 time "error": "" } ] } ``` #### /renter/files [GET] lists the status of all files. ###### JSON Response [(with comments)](/doc/api/Renter.md#json-response-3) ```javascript { "files": [ { "siapath": "foo/bar.txt", "filesize": 8192, // bytes "available": true, "renewing": true, "redundancy": 5, "uploadprogress": 100, // percent "expiration": 60000 } ] } ``` #### /renter/prices [GET] lists the estimated prices of performing various storage and data operations. ###### JSON Response [(with comments)](/doc/api/Renter.md#json-response-4) ```javascript { "downloadterabyte": "1234", // hastings "formcontracts": "1234", // hastings "storageterabytemonth": "1234", // hastings "uploadterabyte": "1234" // hastings } ``` #### /renter/delete/*___siapath___ [POST] deletes a renter file entry. Does not delete any downloads or original files, only the entry in the renter. ###### Path Parameters [(with comments)](/doc/api/Renter.md#path-parameters) ``` *siapath ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /renter/download/*___siapath___ [GET] downloads a file to the local filesystem. The call will block until the file has been downloaded. ###### Path Parameters [(with comments)](/doc/api/Renter.md#path-parameters-1) ``` *siapath ``` ###### Query String Parameters [(with comments)](/doc/api/Renter.md#query-string-parameters-1) ``` destination ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /renter/downloadasync/*___siapath___ [GET] downloads a file to the local filesystem. The call will return immediately. ###### Path Parameters [(with comments)](/doc/api/Renter.md#path-parameters-2) ``` *siapath ``` ###### Query String Parameters [(with comments)](/doc/api/Renter.md#query-string-parameters-2) ``` destination ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /renter/rename/*___siapath___ [POST] renames a file. Does not rename any downloads or source files, only renames the entry in the renter. An error is returned if `siapath` does not exist or `newsiapath` already exists. ###### Path Parameters [(with comments)](/doc/api/Renter.md#path-parameters-3) ``` *siapath ``` ###### Query String Parameters [(with comments)](/doc/api/Renter.md#query-string-parameters-3) ``` newsiapath ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /renter/upload/*___siapath___ [POST] uploads a file to the network from the local filesystem. 
###### Path Parameters [(with comments)](/doc/api/Renter.md#path-parameters-4) ``` *siapath ``` ###### Query String Parameters [(with comments)](/doc/api/Renter.md#query-string-parameters-4) ``` datapieces // int paritypieces // int source // string - a filepath ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). Transaction Pool ------ | Route | HTTP verb | | ------------------------------- | --------- | | [/tpool/fee](#tpoolfee-get) | GET | | [/tpool/raw/:id](#tpoolraw-get) | GET | | [/tpool/raw](#tpoolraw-post) | POST | #### /tpool/fee [GET] returns the minimum and maximum estimated fees expected by the transaction pool. ###### JSON Response [(with comments)](/doc/api/Transactionpool.md#json-response-1) ```javascript { "minimum": "1234", // hastings / byte "maximum": "5678" // hastings / byte } ``` #### /tpool/raw/:id [GET] returns the ID for the requested transaction and its raw encoded parents and transaction data. ###### JSON Response [(with comments)](/doc/api/Transactionpool.md#json-response-2) ```javascript { // id of the transaction "id": "124302d30a219d52f368ecd94bae1bfb922a3e45b6c32dd7fb5891b863808788", // raw, base64 encoded transaction data "transaction": "AQAAAAAAAADBM1ca/FyURfizmSukoUQ2S0GwXMit1iNSeYgrnhXOPAAAAAAAAAAAAQAAAAAAAABlZDI1NTE5AAAAAAAAAAAAIAAAAAAAAACdfzoaJ1MBY7L0fwm7O+BoQlFkkbcab5YtULa6B9aecgEAAAAAAAAAAQAAAAAAAAAMAAAAAAAAAAM7Ljyf0IA86AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAACgAAAAAAAACe0ZTbGbI4wAAAAAAAAAAAAAABAAAAAAAAAMEzVxr8XJRF+LOZK6ShRDZLQbBcyK3WI1J5iCueFc48AAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAA+z4P1wc98IqKxykTSJxiVT+BVbWezIBnIBO1gRRlLq2x/A+jIc6G7/BA5YNJRbdnqPHrzsZvkCv4TKYd/XzwBA==", "parents": "AQAAAAAAAAABAAAAAAAAAJYYmFUdXXfLQ2p6EpF+tcqM9M4Pw5SLSFHdYwjMDFCjAAAAAAAAAAABAAAAAAAAAGVkMjU1MTkAAAAAAAAAAAAgAAAAAAAAAAHONvdzzjHfHBx6psAN8Z1rEVgqKPZ+K6Bsqp3FbrfjAQAAAAAAAAACAAAAAAAAAAwAAAAAAAAAAzvNDjSrme8gwAAA4w8ODnW8DxbOV/JribivvTtjJ4iHVOug0SXJc31BdSINAAAAAAAAAAPGHY4699vggx5AAAC2qBhm5vwPaBsmwAVPho/1Pd8ecce/+BGv4UimnEPzPQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAACWGJhVHV13y0NqehKRfrXKjPTOD8OUi0hR3WMIzAxQowAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAABnt64wN1qxym/CfiMgOx5fg/imVIEhY+4IiiM7gwvSx8qtqKniOx50ekrGv8B+gTKDXpmm2iJibWTI9QLZHWAY=", } ``` #### /tpool/raw [POST] submits a raw transaction to the transaction pool, broadcasting it to the transaction pool's peers. ###### Query String Parameters [(with comments)](/doc/api/Transactionpool.md#query-string-parameters) ``` parents string // raw base64 encoded transaction parents transaction string // raw base64 encoded transaction ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). 
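A sketch of rebroadcasting a transaction via /tpool/raw (the base64 blobs are placeholders for real encoded data, such as the output of `/tpool/raw/:id`):

```
curl -A "Sia-Agent" --data-urlencode "parents=<base64 parents>" --data-urlencode "transaction=<base64 transaction>" "localhost:9980/tpool/raw"
```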
Wallet ------ | Route | HTTP verb | | --------------------------------------------------------------- | --------- | | [/wallet](#wallet-get) | GET | | [/wallet/033x](#wallet033x-post) | POST | | [/wallet/address](#walletaddress-get) | GET | | [/wallet/addresses](#walletaddresses-get) | GET | | [/wallet/backup](#walletbackup-get) | GET | | [/wallet/init](#walletinit-post) | POST | | [/wallet/init/seed](#walletinitseed-post) | POST | | [/wallet/lock](#walletlock-post) | POST | | [/wallet/seed](#walletseed-post) | POST | | [/wallet/seeds](#walletseeds-get) | GET | | [/wallet/siacoins](#walletsiacoins-post) | POST | | [/wallet/siafunds](#walletsiafunds-post) | POST | | [/wallet/siagkey](#walletsiagkey-post) | POST | | [/wallet/sweep/seed](#walletsweepseed-post) | POST | | [/wallet/transaction/:___id___](#wallettransactionid-get) | GET | | [/wallet/transactions](#wallettransactions-get) | GET | | [/wallet/transactions/:___addr___](#wallettransactionsaddr-get) | GET | | [/wallet/unlock](#walletunlock-post) | POST | | [/wallet/verify/address/:___addr___](#walletverifyaddressaddr-get) | GET | | [/wallet/changepassword](#walletchangepassword-post) | POST | For examples and detailed descriptions of request and response parameters, refer to [Wallet.md](/doc/api/Wallet.md). #### /wallet [GET] returns basic information about the wallet, such as whether the wallet is locked or unlocked. ###### JSON Response [(with comments)](/doc/api/Wallet.md#json-response) ```javascript { "encrypted": true, "unlocked": true, "rescanning": false, "confirmedsiacoinbalance": "123456", // hastings, big int "unconfirmedoutgoingsiacoins": "0", // hastings, big int "unconfirmedincomingsiacoins": "789", // hastings, big int "siafundbalance": "1", // siafunds, big int "siacoinclaimbalance": "9001", // hastings, big int } ``` #### /wallet/033x [POST] loads a v0.3.3.x wallet into the current wallet, harvesting all of the secret keys. All spendable addresses in the loaded wallet will become spendable from the current wallet. ###### Query String Parameters [(with comments)](/doc/api/Wallet.md#query-string-parameters) ``` source encryptionpassword ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /wallet/address [GET] gets a new address from the wallet generated by the primary seed. An error will be returned if the wallet is locked. ###### JSON Response [(with comments)](/doc/api/Wallet.md#json-response-1) ```javascript { "address": "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789ab" } ``` #### /wallet/addresses [GET] fetches the list of addresses from the wallet. If the wallet has not been created or unlocked, no addresses will be returned. After the wallet is unlocked, this call will continue to return its addresses even after the wallet is locked again. ###### JSON Response [(with comments)](/doc/api/Wallet.md#json-response-2) ```javascript { "addresses": [ "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789ab", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" ] } ``` #### /wallet/backup [GET] creates a backup of the wallet settings file. Though this can easily be done manually, the settings file is often in an unknown or difficult to find location. The /wallet/backup call can spare users the trouble of needing to find their wallet file. 
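An example call (the destination path is hypothetical):

```
curl -A "Sia-Agent" "localhost:9980/wallet/backup?destination=/home/user/wallet.backup"
```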
###### Parameters [(with comments)](/doc/api/Wallet.md#query-string-parameters-1) ``` destination ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /wallet/init [POST] initializes the wallet. After the wallet has been initialized once, it does not need to be initialized again, and future calls to /wallet/init will return an error. The encryption password is provided by the API call. If the password is blank, then the password will be set to the same as the seed. ###### Query String Parameters [(with comments)](/doc/api/Wallet.md#query-string-parameters-2) ``` encryptionpassword dictionary // Optional, default is english. force // Optional, when set to true it will destroy an existing wallet and reinitialize a new one. ``` ###### JSON Response [(with comments)](/doc/api/Wallet.md#json-response-3) ```javascript { "primaryseed": "hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello" } ``` #### /wallet/init/seed [POST] initializes the wallet using a preexisting seed. After the wallet has been initialized once, it does not need to be initialized again, and future calls to /wallet/init/seed will return an error. The encryption password is provided by the API call. If the password is blank, then the password will be set to the same as the seed. Note that loading a preexisting seed requires scanning the blockchain to determine how many keys have been generated from the seed. For this reason, /wallet/init/seed can only be called if the blockchain is synced. ###### Query String Parameters [(with comments)](/doc/api/Wallet.md#query-string-parameters-3) ``` encryptionpassword dictionary // Optional, default is english. seed force // Optional, when set to true it will destroy an existing wallet and reinitialize a new one. ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /wallet/seed [POST] gives the wallet a seed to track when looking for incoming transactions. The wallet will be able to spend outputs related to addresses created by the seed. The seed is added as an auxiliary seed, and does not replace the primary seed. Only the primary seed will be used for generating new addresses. ###### Query String Parameters [(with comments)](/doc/api/Wallet.md#query-string-parameters-4) ``` encryptionpassword dictionary seed ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /wallet/seeds [GET] returns the list of seeds in use by the wallet. The primary seed is the only seed that gets used to generate new addresses. This call is unavailable when the wallet is locked.
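An example call (assuming an unlocked wallet and the default dictionary):

```
curl -A "Sia-Agent" "localhost:9980/wallet/seeds?dictionary=english"
```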
###### Query String Parameters [(with comments)](/doc/api/Wallet.md#query-string-parameters-5) ``` dictionary ``` ###### JSON Response [(with comments)](/doc/api/Wallet.md#json-response-4) ```javascript { "primaryseed": "hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello", "addressesremaining": 2500, "allseeds": [ "hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello", "foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo", ] } ``` #### /wallet/siacoins [POST] sends siacoins to an address or set of addresses. The outputs are arbitrarily selected from addresses in the wallet. If 'outputs' is supplied, 'amount' and 'destination' must be empty. ###### Query String Parameters [(with comments)](/doc/api/Wallet.md#query-string-parameters-6) ``` amount // hastings destination // address outputs // JSON array of {unlockhash, value} pairs ``` ###### JSON Response [(with comments)](/doc/api/Wallet.md#json-response-5) ```javascript { "transactionids": [ "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" ] } ``` #### /wallet/siafunds [POST] sends siafunds to an address. The outputs are arbitrarily selected from addresses in the wallet. Any siacoins available in the siafunds being sent (as well as the siacoins available in any siafunds that end up in a refund address) will become available to the wallet as siacoins after 144 confirmations. To access all of the siacoins in the siacoin claim balance, send all of the siafunds to an address in your control (this will give you all the siacoins, while still letting you control the siafunds). ###### Query String Parameters [(with comments)](/doc/api/Wallet.md#query-string-parameters-7) ``` amount // siafunds destination // address ``` ###### JSON Response [(with comments)](/doc/api/Wallet.md#json-response-6) ```javascript { "transactionids": [ "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" ] } ``` #### /wallet/siagkey [POST] loads a key into the wallet that was generated by siag. Most siafunds are currently in addresses created by siag. ###### Query String Parameters [(with comments)](/doc/api/Wallet.md#query-string-parameters-8) ``` encryptionpassword keyfiles ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /wallet/sweep/seed [POST] Function: Scan the blockchain for outputs belonging to a seed and send them to an address owned by the wallet. ###### Query String Parameters [(with comments)](/doc/api/Wallet.md#query-string-parameters-9) ``` dictionary // Optional, default is english. seed ``` ###### JSON Response [(with comments)](/doc/api/Wallet.md#json-response-7) ```javascript { "coins": "123456", // hastings, big int "funds": "1", // siafunds, big int } ``` #### /wallet/lock [POST] locks the wallet, wiping all secret keys. After being locked, the keys are encrypted. Queries for the seed, to send siafunds, and related queries become unavailable. 
Queries concerning transaction history and balance are still available. ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /wallet/transaction/:___id___ [GET] gets the transaction associated with a specific transaction id. ###### Path Parameters [(with comments)](/doc/api/Wallet.md#path-parameters) ``` :id ``` ###### JSON Response [(with comments)](/doc/api/Wallet.md#json-response-8) ```javascript { "transaction": { "transaction": { // See types.Transaction in https://github.com/NebulousLabs/Sia/blob/master/types/transactions.go }, "transactionid": "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef", "confirmationheight": 50000, "confirmationtimestamp": 1257894000, "inputs": [ { "parentid": "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef", "fundtype": "siacoin input", "walletaddress": false, "relatedaddress": "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789ab", "value": "1234", // hastings or siafunds, depending on fundtype, big int } ], "outputs": [ { "id": "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef", "fundtype": "siacoin output", "maturityheight": 50000, "walletaddress": false, "relatedaddress": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "value": "1234", // hastings or siafunds, depending on fundtype, big int } ] } } ``` #### /wallet/transactions [GET] returns a list of transactions related to the wallet in chronological order. ###### Query String Parameters [(with comments)](/doc/api/Wallet.md#query-string-parameters-10) ``` startheight // block height endheight // block height ``` ###### JSON Response [(with comments)](/doc/api/Wallet.md#json-response-9) ```javascript { "confirmedtransactions": [ { // See the documentation for '/wallet/transaction/:id' for more information. } ], "unconfirmedtransactions": [ { // See the documentation for '/wallet/transaction/:id' for more information. } ] } ``` #### /wallet/transactions/:___addr___ [GET] returns all of the transactions related to a specific address. ###### Path Parameters [(with comments)](/doc/api/Wallet.md#path-parameters-1) ``` :addr ``` ###### JSON Response [(with comments)](/doc/api/Wallet.md#json-response-10) ```javascript { "transactions": [ { // See the documentation for '/wallet/transaction/:id' for more information. } ] } ``` #### /wallet/unlock [POST] unlocks the wallet. The wallet is capable of knowing whether the correct password was provided. ###### Query String Parameters [(with comments)](/doc/api/Wallet.md#query-string-parameters-11) ``` encryptionpassword ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /wallet/verify/address/:addr [GET] takes the address specified by :addr and returns a JSON response indicating if the address is valid. ###### JSON Response [(with comments)](/doc/api/Wallet.md#json-response-11) ```javascript { "valid": true } ``` #### /wallet/changepassword [POST] changes the wallet's encryption key. ###### Query String Parameters [(with comments)](/doc/api/Wallet.md#query-string-parameters-12) ``` encryptionpassword newpassword ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). 
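To make the request and response shapes above concrete, here is a minimal Go sketch of a client for a few of these endpoints. It assumes a local siad listening on the default API port 9980 and requiring the `Sia-Agent` user agent; the helper name and the password are illustrative, not part of the API.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

// apiCall is a hypothetical helper for the wallet endpoints documented
// above. It assumes a local siad on the default port 9980, which requires
// the 'Sia-Agent' user agent on every request.
func apiCall(method, route string, params url.Values) (string, error) {
	req, err := http.NewRequest(method, "http://localhost:9980"+route+"?"+params.Encode(), nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("User-Agent", "Sia-Agent")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	return string(body), err
}

func main() {
	// Check whether the wallet is encrypted and unlocked.
	status, _ := apiCall("GET", "/wallet", url.Values{})

	// Unlock the wallet, then generate a fresh address from the primary seed.
	apiCall("POST", "/wallet/unlock", url.Values{"encryptionpassword": {"mypassword"}})
	addr, _ := apiCall("GET", "/wallet/address", url.Values{})

	fmt.Println(status, addr)
}
```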
Sia-1.3.0/doc/Consensus.md000066400000000000000000000443221313565667000153170ustar00rootroot00000000000000Consensus Rules =============== This document is meant to provide a good high level overview of the Sia cryptosystem, but does not fully explain all of the small details. The most accurate explanation of the consensus rules is the consensus package (and all dependencies). This document will be more understandable if you have a general understanding of proof of work blockchains, and does not try to build up from first principles. Cryptographic Algorithms ------------------------ Sia uses cryptographic hashing and cryptographic signing, each of which has many potentially secure algorithms that can be used. We acknowledge our inexperience, and that we have chosen these algorithms not because of our own confidence in their properties, but because other people seem confident in their properties. For hashing, our primary goal is to use an algorithm that cannot be merge mined with Bitcoin, even partially. A secondary goal is hashing speed on consumer hardware, including phones and other low power devices. For signing, our primary goal is verification speed. A secondary goal is an algorithm that supports HD keys. A tertiary goal is an algorithm that supports threshold signatures. #### Hashing: blake2b [blake2b](http://en.wikipedia.org/wiki/BLAKE_%28hash_function%29#BLAKE2 "Wiki page") has been chosen as a hashing algorithm because it is fast, it has had substantial review, and it is invulnerable to length extension attacks. Another particularly important feature of BLAKE2b is that it is not SHA-2. We wish to avoid merge mining with Bitcoin, because that may result in many apathetic Bitcoin miners mining on our blockchain, which may make soft forks harder to coordinate. #### Signatures: variable type signatures Each public key will have a specifier (a 16 byte array) and a byte slice containing an encoding of the public key. The specifier will tell the signature verification which signing algorithm to use when verifying a signature. Each signature will be a byte slice; the encoding can be determined by looking at the specifier of the corresponding public key. This method allows new signature types to be easily added to the currency in a way that does not invalidate existing outputs and keys. Adding a new signature type requires a hard fork, but allows easy protection against cryptographic breaks, and easy migration to new cryptography if there are any breakthroughs in areas like verification speed, ring signatures, etc. Allowed algorithms: ed25519: The specifier must match the string "ed25519". The public key must be encoded into 32 bytes. Signatures and public keys will need to follow the ed25519 specification. More information can be found at ed25519.cr.yp.to entropy: The specifier must match the string "entropy". The signature will always be invalid. This provides a way to add entropy buffers to SpendCondition objects to protect low entropy information, while being able to prove that the entropy buffers are invalid public keys. There are plans to also add ECDSA secp256k1 and Schnorr secp256k1. New signing algorithms can be added to Sia through a soft fork, because unrecognized algorithm types are always considered to have valid signatures. Currency -------- The Sia cryptosystem has two types of currency. The first is the Siacoin. Siacoins are generated every block and distributed to the miners.
These miners can then use the siacoins to fund file contracts, or can send the siacoins to other parties. The siacoin is represented by an infinite precision unsigned integer. The second currency in the Sia cryptosystem is the Siafund, which is a special asset limited to 10,000 indivisible units. Each time a file contract payout is made, 3.9% of the payout is put into the siafund pool. The number of siacoins in the siafund pool must always be divisible by 10,000; the number of coins taken from the payout is rounded down to the nearest 10,000. The siafund is also represented by an infinite precision unsigned integer. Siafund owners can collect the siacoins in the siafund pool. For every 10,000 siacoins added to the siafund pool, a siafund owner can withdraw 1 siacoin. Approximately 8790 siafunds are owned by Nebulous Inc. The remaining siafunds are owned by early backers of the Sia project. There are future plans to enable sidechain compatibility with Sia. This would allow other currencies such as Bitcoin to be spent in all the same places that the Siacoin can be spent. Marshalling ----------- Many of the Sia types need to be hashed at some point, which requires having a consistent algorithm for marshalling types into a set of bytes that can be hashed. The following rules are used for hashing: - Integers are little-endian, and are always encoded as 8 bytes. - Bools are encoded as one byte, where zero is false and one is true. - Variable length types such as strings are prefaced by 8 bytes containing their length. - Arrays and structs are encoded as their individual elements concatenated together. The ordering of the struct is determined by the struct definition. There is only one way to encode each struct. - The Currency type (an infinite precision integer) is encoded in big endian using as many bytes as necessary to represent the underlying number. As it is a variable length type, it is prefixed by 8 bytes containing the length. Block Size ---------- The maximum block size is 2e6 bytes. There is no limit on transaction size, though it must fit inside of the block. Most miners enforce a size limit of 16e3 bytes per transaction. Block Timestamps ---------------- Each block has a minimum allowed timestamp. The minimum timestamp is found by taking the median timestamp of the previous 11 blocks. If there are not 11 previous blocks, the genesis timestamp is used repeatedly. Blocks will be rejected if they are timestamped more than three hours in the future, but can be accepted again once enough time has passed. Block ID -------- The ID of a block is derived using: Hash(Parent Block ID + 64 bit Nonce + Block Merkle Root) The block Merkle root is obtained by creating a Merkle tree whose leaves are the hash of the timestamp, the hashes of the miner outputs (one leaf per miner output), and the hashes of the transactions (one leaf per transaction). Block Target ------------ For a block to be valid, the id of the block must be below a certain target. The target is adjusted once every 500 blocks, and it is adjusted by looking at the timestamps of the previous 1000 blocks. The expected amount of time passed between the most recent block and the 1000th previous block is 10e3 minutes. If more time has passed, the target is lowered. If less time has passed, the target is increased. Each adjustment can adjust the target by up to 2.5x. The target is changed in proportion to the difference in time (If the time was half of what was expected, the new target is 1/2 the old target). There is a clamp on the adjustment. In one block, the target cannot adjust upwards by more than 1001/1000, and cannot adjust downwards by more than 999/1000. The new target is calculated using (expected time passed in seconds) / (actual time passed in seconds) * (current target). The division and multiplication should be done using infinite precision, and the result should be truncated. If there are not 1000 blocks, the genesis timestamp is used for comparison. The expected time is (10 minutes * block height).
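The arithmetic above can be made concrete with a short sketch. This is illustrative only (not the consensus package's actual code): it scales the current target by the ratio of expected to actual time, applies the per-block clamp, and uses math/big for the infinite-precision step.

```go
package main

import (
	"fmt"
	"math/big"
)

// adjustTarget applies the retargeting rule described above: scale the
// current target by (expected seconds / actual seconds), clamped to the
// per-block bounds of 1001/1000 (up) and 999/1000 (down). All names here
// are illustrative.
func adjustTarget(current *big.Int, expectedSecs, actualSecs int64) *big.Int {
	ratio := new(big.Rat).SetFrac64(expectedSecs, actualSecs)

	// Clamp the adjustment.
	maxUp := big.NewRat(1001, 1000)
	maxDown := big.NewRat(999, 1000)
	if ratio.Cmp(maxUp) > 0 {
		ratio = maxUp
	} else if ratio.Cmp(maxDown) < 0 {
		ratio = maxDown
	}

	// newTarget = current * ratio, truncated.
	result := new(big.Int).Mul(current, ratio.Num())
	return result.Div(result, ratio.Denom())
}

func main() {
	current := new(big.Int).Lsh(big.NewInt(1), 200) // an arbitrary example target
	// 1000 blocks are expected to take 10e3 minutes (600000 seconds);
	// suppose they only took 540000 seconds, so the ratio is clamped.
	fmt.Println(adjustTarget(current, 600000, 540000))
}
```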
Block Subsidy ------------- The coinbase for a block is (300,000 - height) * 10^24, with a minimum of 30,000 * 10^24. Any miner fees get added to the coinbase to create the block subsidy. The block subsidy is then given to multiple outputs, called the miner payouts. The total value of the miner payouts must equal the block subsidy. The ids of the outputs created by the miner payouts are determined by taking the block id and concatenating the index of the payout that the output corresponds to. The outputs created by the block subsidy cannot be spent for 50 blocks, and are not considered a part of the consensus set until 50 blocks have transpired. This limitation is in place because a simple blockchain reorganization is enough to invalidate the output, which would otherwise make double spend attacks and false spend attacks much easier to execute. Transactions ------------ A Transaction is composed of the following: - Siacoin Inputs - Siacoin Outputs - File Contracts - File Contract Revisions - Storage Proofs - Siafund Inputs - Siafund Outputs - Miner Fees - Arbitrary Data - Transaction Signatures The sum of all the siacoin inputs must equal the sum of all the miner fees, siacoin outputs, and file contract payouts. There can be no leftovers. The sum of all siafund inputs must equal the sum of all siafund outputs. Several objects have unlock hashes. An unlock hash is the Merkle root of the 'unlock conditions' object. The unlock conditions contain a timelock, a number of required signatures, and a set of public keys that can be used during signing. The Merkle root of the unlock condition objects is formed by taking the Merkle root of a tree whose leaves are the timelock, the public keys (one leaf per key), and the number of signatures. This ordering is chosen specifically because the timelock and the number of signatures are low entropy. By using random data as the first and last public key, you can make it safe to reveal any of the public keys without revealing the low entropy items. The unlock conditions cannot be satisfied until enough signatures have been provided, and until the height of the blockchain is at least equal to the value of the timelock. The unlock conditions contain a set of public keys which can each be used only once when providing signatures. The same public key can be listed twice, which means that it can be used twice. The number of required signatures indicates how many public keys must be used to validate the input. If required signatures is '0', the input is effectively 'anyone can spend'. If the required signature count is greater than the number of public keys, the input is unspendable. There must be exactly enough signatures. For example, if there are 3 public keys and only two required signatures, then only two signatures can be included in the transaction. Siacoin Inputs -------------- Each input spends an output. The output being spent must exist in the consensus set. The 'value' field of the output indicates how many siacoins must be used in the outputs of the transaction.
Valid outputs are miner fees, siacoin outputs, and contract payouts. Siacoin Outputs --------------- Siacoin outputs contain a value and an unlock hash (also called a coin address). The unlock hash is the Merkle root of the spend conditions that must be met to spend the output. File Contracts -------------- A file contract is an agreement by some party to prove they have a file at a given point in time. The contract contains the Merkle root of the data being stored, and the size in bytes of the data being stored. The Merkle root is formed by breaking the file into 64 byte segments and hashing each segment to form the leaves of the Merkle tree. The final segment is not padded out. The storage proof must be submitted between the 'WindowStart' and 'WindowEnd' fields of the contract. There is a 'Payout', which indicates how many siacoins are given out when the storage proof is provided. 3.9% of this payout (rounded down to the nearest 10,000) is put aside for the owners of siafunds. If the storage proof is provided and is valid, the remaining payout is put in an output spendable by the 'valid proof spend hash', and if a valid storage proof is not provided to the blockchain by 'end', the remaining payout is put in an output spendable by the 'missed proof spend hash'. All contracts must have a non-zero payout, 'start' must be before 'end', and 'start' must be greater than the current height of the blockchain. A storage proof is acceptable if it is submitted in the block of height 'end'. File contracts are created with a 'Revision Hash', which is the Merkle root of an unlock conditions object. A 'file contract revision' can be submitted which fulfills the unlock conditions object, resulting in the file contract being replaced by a new file contract, as specified in the revision. File Contract Revisions ----------------------- A file contract revision modifies a contract. File contracts have a revision number, and any revision submitted to the blockchain must have a higher revision number in order to be valid. Any field can be changed except for the payout - siacoins cannot be added to or removed from the file contract during a revision, though the destination upon a successful or unsuccessful storage proof can be changed. The greatest application for file contract revisions is file-diff channels - a file contract can be edited many times off-blockchain as a user uploads new or different content to the host. This improves the overall scalability of Sia. Storage Proofs -------------- A storage proof transaction is any transaction containing a storage proof. Storage proof transactions are not allowed to have siacoin or siafund outputs, and are not allowed to have file contracts. When creating a storage proof, you only prove that you have a single 64 byte segment of the file. The piece that you must prove you have is chosen randomly using the contract id and the id of the 'trigger block'. The trigger block is the block at height 'Start' - 1, where 'Start' is the value 'Start' in the contract that the storage proof is fulfilling. The file is composed of 64 byte segments whose hashes compose the leaves of a Merkle tree. When proving you have the file, you must prove you have one of the leaves. To determine which leaf, take the hash of the contract id concatenated to the trigger block id, then take the numerical value of the result modulo the number of segments: Hash(file contract id + trigger block id) % num segments The proof is formed by providing the 64 byte segment, and then the missing hashes required to fill out the remaining tree. The total size of the proof will be 64 bytes + 32 bytes * log(num segments), and can be verified by anybody who knows the root hash and the file size.
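A minimal sketch of the leaf-selection rule above, assuming blake2b-256 as the hash (per the hashing section); the helper names are illustrative, not the actual consensus package API.

```go
package main

import (
	"fmt"
	"math/big"

	"golang.org/x/crypto/blake2b"
)

// proofIndex implements the rule described above:
// Hash(file contract id + trigger block id) % num segments.
// blake2b-256 is assumed per the hashing section of this document.
func proofIndex(fcID, triggerID [32]byte, numSegments uint64) uint64 {
	h := blake2b.Sum256(append(fcID[:], triggerID[:]...))
	// Interpret the hash as a big integer and reduce it mod numSegments.
	n := new(big.Int).SetBytes(h[:])
	return n.Mod(n, new(big.Int).SetUint64(numSegments)).Uint64()
}

func main() {
	var fcID, triggerID [32]byte
	fcID[0], triggerID[0] = 1, 2
	// A file of 4e6 bytes contains 4e6/64 = 62500 segments of 64 bytes each.
	fmt.Println(proofIndex(fcID, triggerID, 62500))
}
```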
Storage proof transactions are not allowed to have siacoin outputs, siafund outputs, or contracts. All outputs created by the storage proofs cannot be spent for 50 blocks. These limits are in place because a simple blockchain reorganization can change the trigger block, which will invalidate the storage proof and therefore the entire transaction. Without these limits, double spend attacks and false spend attacks would be significantly easier to execute. Siafund Inputs -------------- A siafund input works similarly to a siacoin input. It contains the id of a siafund output being spent, and the unlock conditions required to spend the output. A special output is created when a siafund output is used as input. All of the siacoins that have accrued in the siafund since its last spend are sent to the 'claim spend hash' found in the siafund output, which is a normal siacoin address. The value of the siacoin output is determined by taking the size of the siafund pool when the output was created and comparing it to the current size of the siafund pool. The equation is: ((Current Pool Size - Previous Pool Size) / 10,000) * siafund quantity Like the miner outputs and the storage proof outputs, the siafund output cannot be spent for 50 blocks because the value of the output can change if the blockchain reorganizes. Reorganizations will not, however, cause the transaction to be invalidated, so the ban on contracts and outputs does not need to be in place. Siafund Outputs --------------- Like siacoin outputs, siafund outputs contain a value and an unlock hash. The value indicates the number of siafunds that are put into the output, and the unlock hash is the Merkle root of the unlock conditions object which allows the output to be spent. Siafund outputs also contain a claim unlock hash field, which indicates the unlock hash of the siacoin output that is created when the siafund output is spent. The value of the output that gets created will depend on the growth of the siafund pool between the creation and the spending of the output. This growth is measured by storing a 'claim start', which indicates the size of the siafund pool at the moment the siafund output was created. Miner Fees ---------- A miner fee is a volume of siacoins that gets added to the block subsidy. Arbitrary Data -------------- Arbitrary data is a set of data that is ignored by consensus. In the future, it may be used for soft forks, paired with 'anyone can spend' transactions. In the meantime, it is an easy way for third party applications to make use of the siacoin blockchain. Transaction Signatures ---------------------- Each signature points to a single public key index in a single unlock conditions object. No two signatures can point to the same public key index for the same set of unlock conditions. Each signature also contains a timelock, and is not valid until the blockchain has reached a height equal to the timelock height. Signatures also have a 'covered fields' object, which indicates which parts of the transaction get included in the signature.
There is a 'whole transaction' flag, which indicates that every part of the transaction except for the signatures gets included, which eliminates any malleability outside of the signatures. The signatures can also be individually included, to enforce that your signature is only valid if certain other signatures are present. If the 'whole transaction' is not set, all fields need to be added manually, and additional parties can add new fields, meaning the transaction will be malleable. This does however allow other parties to add additional inputs, fees, etc. after you have signed the transaction without invalidating your signature. If the whole transaction flag is set, all other elements in the covered fields object must be empty except for the signatures field. The covered fields object contains a slice of indexes for each element of the transaction (siacoin inputs, miner fees, etc.). The slice must be sorted, and there can be no repeated elements. Entirely nonmalleable transactions can be achieved by setting the 'whole transaction' flag and then providing the last signature, including every other signature in your signature. Because no frivolous signatures are allowed, the transaction cannot be changed without your signature being invalidated. Sia-1.3.0/doc/Developers.md000066400000000000000000000230241313565667000154430ustar00rootroot00000000000000Developer Environment ===================== Sia is written in Go. To build and test Sia, you are going to need a working go environment, including having both $GOROOT/bin and $GOPATH/bin in your $PATH. For most Linux distributions, Go will be in the package manager, though it may be an old version that is incompatible with Sia. Once you have a working Go environment, you are set to build the project. If you plan on cross compiling Sia, you may need to install Go from source. You can find information on that [here](http://golang.org/doc/install/source). Sia has a development build, an automated testing build, and a release build. The release build is the only one that can synchronize to the full network. To get the release build, it is usually sufficient to run `go get -u github.com/NebulousLabs/Sia/...`. This will download Sia and its dependencies and install binaries in $GOPATH/bin. After downloading, you can find the Sia source code in $GOPATH/src/github.com/NebulousLabs/Sia. To build the release binary, run `make release-std` from this directory. To build the release binary with a (slow) race detector and an array of debugging asserts, run `make release`. To build the developer binary (which has a different genesis block, faster block times, and a few other tweaks), just run `make`. If you intend to contribute to Sia, you should start by forking the project on GitHub, and then adding your fork as a "remote" in the Sia git repository via `git remote add [fork name] [fork url]`. Now you can develop by pulling changes from `origin`, pushing your modifications to `[fork name]`, and then making a pull request on GitHub. If you see an error like the one below, it means that you either forgot to run `make dependencies`, or you cloned the project into a path that the go tool does not recognize (usually the wrong path, or symbolic links were somehow involved). 
``` consensus/fork.go:4:2: cannot find package "github.com/NebulousLabs/Sia/crypto" in any of: /usr/lib/go/src/github.com/NebulousLabs/Sia/crypto (from $GOROOT) /home/user/gopath/src/github.com/NebulousLabs/Sia/crypto (from $GOPATH) ``` Developer Conventions ===================== This file is meant to help a developer navigate the codebase and develop clean, maintainable code. Knowing all of these conventions will also make it easier to read and code review the Sia project. The primary purpose of the conventions within Sia is to keep the codebase simple. Simpler constructions mean easier code reviews, greater accessibility to newcomers, and less potential for mistakes. The conventions also keep things uniform, much in the spirit of 'go fmt'. When everything looks the same, everyone has an easier time reading and reviewing code they did not write themselves. Documentation ------------- All structs, functions, and interfaces must have a docstring. Anytime that something is left unfinished, place a comment containing the string 'TODO:'. This sends a clear message to other developers, and creates a greppable way to find unfinished parts of the codebase. 'TODO' statements are currently discouraged. As the codebase matures, 'TODO' statements will become increasingly frowned upon. 'TODO' statements should not document feature requests, but instead document incompleteness where the incompleteness causes disruption to user experience or causes a security vulnerability. Documentation should give a sense of what each function does, but should also give a sense of the overall architecture of the code. Where useful, examples should be provided, and common pitfalls should be explained. Anything that breaks other conventions in any way needs to have a comment, even if it is obvious why the convention had to be broken. The goal of the codebase is to be accessible to newbies. Anything more advanced than what you would expect to remember from an 'Intro to Data Structures' class should have an explanation about what the concept is and why it was picked over other potential choices. Code that exists purely to be compatible with previous versions of the software should be tagged with a 'COMPATvX.X.X' comment. Examples below. ```go // Find and sort the outputs. outputs := getOutputs() // TODO: actually sort the outputs. ``` ```go // Disallow unknown agents. // // COMPATv0.4.0: allow a blank agent to preserve compatibility with // 'siac' v0.4.0, which did not set an agent. if agent != "SiaAgent" && agent != "" { return errors.New("unrecognized agent!") } ``` Naming ------ Names are used to give readers and reviewers a sense of what is happening in the code. When naming variables, you should assume that the person reading your code is unfamiliar with the codebase. Short names (like 'cs' instead of 'consensusSet') should only be used when the context is immediately obvious. For example, 'cs := new(ConsensusSet)' is immediately obvious context for 'cs', and so 'cs' is appropriate for the rest of the function. Data structures should never have shortened names. 'FileContract.mr' is confusing to anyone who has not used the data structure extensively. The code should be accessible to people who are unfamiliar with the codebase. One exception is for the variable called 'mu', which is short for 'mutex'. This exception is made because 'mu' appears in many data structures. When calling functions with obscure parameters, named variables should be used to indicate what the parameters do.
For example, 'm := NewMiner(1)' is confusing. Instead, use 'threads := 1; m := NewMiner(threads)'. The name gives readers a sense of what the parameter within 'NewMiner' does even when they are not familiar with the 'NewMiner' function. Where possible, functions with obscure, untyped inputs should be avoided. The most important thing to remember when choosing names is to cater to people who are unfamiliar with the code. A reader should never have to ask 'What is `cs`?' on their first pass through the code, even though to experienced developers it is obvious that `cs` refers to a consensus.ConsensusSet. Control Flow ------------ Where possible, control structures should be minimized or avoided. This includes avoiding nested if statements, and avoiding else statements where possible. Sometimes, complex control structures are necessary, but where possible use alternative code patterns and insert functions to break things up. Example: ```go // Do not do this: if err != nil { return } else { forkBlockchain(node) } // Instead do this: if err != nil { return } forkBlockchain(node) ``` Mutexes ------- All exported functions from a package and/or object need to be thread safe. Usually, this means that the first lines of the function contain a `Lock(); defer Unlock()`. Simple locking schemes should be preferred over performant locking schemes. As with everything else, anything unusual or convention breaking should have a comment. Non-exported functions should not do any locking, unless they have a special prefix to the name (explained below). The responsibility for thread-safety comes from the exported functions which call the non-exported functions. Maintaining this convention minimizes developer overhead when working with complex objects. Functions prefixed 'threaded' (example 'threadedMine') are meant to be called in their own goroutine (`go threadedMine()`) and will manage their own thread-safety. Error Handling -------------- All errors need to be checked as soon as they are received, even if they are known to not cause problems. The statement that checks the error needs to be `if err != nil`, and if there is a good reason to use an alternative statement (such as `err == nil`), it must be documented. The body of the if statement should be at most 4 lines, but usually only one. Anything requiring more lines needs to be its own function. Example: ```go block, err := s.AcceptBlock() if err != nil { handleAcceptBlockErr(block, err) return } ``` Sanity Checks ------------- Some functions make assumptions. For example, the 'addTransaction' function assumes that the transaction being added is not in conflict with any other transactions. Where possible, these explicit assumptions should be validated. Example: ```go if build.DEBUG { _, exists := tp.usedOutputs[input.OutputID] if exists { panic("incorrect use of addTransaction") } } ``` In the example, a panic is called for incorrect use of the function, but only in debug mode. This failure will be invisible in production code, but the code will have higher performance because the code should never fail anyway. If the code is continually checking items that should be universally true, mistakes are easier to catch during testing, and side effects are less likely to go unnoticed. Sanity checks and panics are purely to check for developer mistakes. A user should not be able to trigger a panic, and no set of network communications or real-world conditions should be able to trigger a panic.
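A minimal sketch combining the mutex and sanity-check conventions above. The object and its methods are hypothetical, used only to illustrate the conventions; they are not the real miner module.

```go
package miner

import (
	"sync"

	"github.com/NebulousLabs/Sia/build"
)

// Miner is a hypothetical object used only to illustrate the conventions
// above.
type Miner struct {
	mu      sync.Mutex
	threads int
}

// SetThreads is exported, so it is responsible for thread safety.
func (m *Miner) SetThreads(threads int) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.setThreads(threads)
}

// setThreads is not exported and does no locking; its callers are
// responsible for thread safety. The sanity check validates an explicit
// assumption, and only runs in debug builds.
func (m *Miner) setThreads(threads int) {
	if build.DEBUG {
		if threads < 1 {
			panic("incorrect use of setThreads")
		}
	}
	m.threads = threads
}

// threadedMine is prefixed 'threaded', so it is meant to be called in its
// own goroutine (`go m.threadedMine()`) and manages its own thread safety.
func (m *Miner) threadedMine() {
	m.mu.Lock()
	threads := m.threads
	m.mu.Unlock()
	_ = threads // mining logic would go here
}
```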
Testing ------- The test suite code should be the same quality as the rest of the codebase. When writing new code in a pull request, the pull request should include test coverage for the code. Most modules have a tester object, which can be created by calling `createXXXTester`. Module testers typically have a consensus set, a miner, a wallet, and a few other relevant modules that can be used to build transactions, mine blocks, etc. In general, testing that uses exclusively exported functions to achieve full coverage is preferred. These types of tests seem to find more bugs and trigger more asserts. Any testing provided by a third party which is both maintainable and reasonably quick will be accepted. There is little downside to more testing, even when the testing is largely redundant. Sia-1.3.0/doc/Encoding.md000066400000000000000000000056171313565667000150670ustar00rootroot00000000000000Encoding ======== The encoding package converts arbitrary objects into byte slices, and vice versa. Objects are encoded as binary data, without type information. The decoder will attempt to decode its input bytes into whatever type it is passed. For example: ```go Marshal(int64(3)) == []byte{3, 0, 0, 0, 0, 0, 0, 0} var x int64 Unmarshal([]byte{3, 0, 0, 0, 0, 0, 0, 0}, &x) // x == 3 ``` Note that this leads to some ambiguity. Since an `int64` and a `uint64` are both 8 bytes long, it is possible to encode an `int64` and successfully decode it as a `uint64`. As a result, it is imperative that *the decoder knows exactly what it is decoding*. Developers must rely on context to determine what type to decode into. The specific rules for encoding Go's builtin types are as follows: Integers are little-endian, and are always encoded as 8 bytes, i.e. their `int64` or `uint64` equivalent. Booleans are encoded as one byte, either zero (false) or one (true). No other values may be used. Nil pointers are equivalent to "false," i.e. a single zero byte. Valid pointers are represented by a "true" byte (0x01) followed by the encoding of the dereferenced value. Variable-length types, such as strings and slices, are represented by an 8-byte unsigned length prefix followed by the encoded value. Strings are encoded as their literal UTF-8 bytes. Slices are encoded as the concatenation of their encoded elements. For example: ```go // slice len: 1 string len: 3 string data Marshal([]string{"foo"}) == []byte{1,0,0,0,0,0,0,0, 3,0,0,0,0,0,0,0, 'f','o','o'} ``` Maps are not supported; attempting to encode a map will cause `Marshal` to panic. This is because their elements are not ordered in a consistent way, and it is imperative that this encoding scheme be deterministic. To encode a map, either convert it to a slice of structs, or define a `MarshalSia` method (see below). Arrays and structs are simply the concatenation of their encoded elements. Byte slices are not subject to the 8-byte integer rule; they are encoded as their literal representation, one byte per byte. All struct fields must be exported. (For some types this is a bit awkward, so this rule is subject to change.) The ordering of struct fields is determined by their type definition. For example: ```go type foo struct { S string I int } Marshal(foo{"bar", 3}) == append(Marshal("bar"), Marshal(3)...) ``` Finally, if a type implements the SiaMarshaler interface, its MarshalSia method will be used to encode the type. Similarly, if a type implements the SiaUnmarshaler interface, its UnmarshalSia method will be used to decode the type. Note that unless a type implements both interfaces, it must conform to the spec above. Otherwise, it may encode and decode itself however desired. This may be an attractive option where speed is critical, since it allows for more compact representations, and bypasses the use of reflection.
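The rules above can be checked by hand-encoding a value. This sketch applies only the documented rules (8-byte little-endian integers, length-prefixed strings, struct fields concatenated in order) and does not use the encoding package itself; the helper names are illustrative.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// marshalString encodes a string per the rules above: an 8-byte
// little-endian length prefix followed by the literal UTF-8 bytes.
func marshalString(s string) []byte {
	buf := make([]byte, 8+len(s))
	binary.LittleEndian.PutUint64(buf, uint64(len(s)))
	copy(buf[8:], s)
	return buf
}

// marshalInt64 encodes an integer as 8 little-endian bytes.
func marshalInt64(i int64) []byte {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, uint64(i))
	return buf
}

func main() {
	// A struct is the concatenation of its encoded fields, so
	// foo{"bar", 3} encodes as Marshal("bar") || Marshal(3).
	enc := append(marshalString("bar"), marshalInt64(3)...)
	want := []byte{3, 0, 0, 0, 0, 0, 0, 0, 'b', 'a', 'r', 3, 0, 0, 0, 0, 0, 0, 0}
	fmt.Println(bytes.Equal(enc, want)) // true
}
```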
Sia-1.3.0/doc/File Contract Negotiation.md000066400000000000000000000343641313565667000202200ustar00rootroot00000000000000File Contract Negotiation ========================= Securing data on Sia requires creating and revising file contracts in an untrusted environment. Managing data on Sia happens through several protocols: + Settings Request - the host sends the renter its settings. + Revision Request - the renter will send the host a file contract id, and the host will send the most recent file contract revision that it knows of for that file contract, with the signatures. A challenge and response is also performed to verify that the renter is able to create the signatures to modify the file contract revision. + File Contract Creation - no data is uploaded during the initial creation of a file contract, but funds are allocated so that the file contract can be iteratively revised in the future. + File Contract Revision - an existing file contract is revised so that data can be added to an arbitrary place, or removed from an arbitrary place. + File Contract Renewal - an existing file contract is renewed, meaning that a new file contract with a different id is created, but that has the same data. New funds are added to this file contract, and it can now be modified separately from the previous contract. + Data Request - data is requested from the host by hash. + (planned for later) Storage Proof Request - the renter requests that the host perform an out-of-band storage proof. + (planned for later) Metadata Request - the renter requests some metadata about the file contract from the host, namely the list of hashes that compose the file. This list of hashes is provided along with a cryptographic proof that the hashes are valid. The proof is only needed if only a subset of hashes are being sent. A frequently seen construction is 'acceptance'. The renter or host may have the opportunity to accept or reject a communication, which takes the form of a string. The acceptance string is always the same, and any string that is not the acceptance string is a rejection. The rejection string can include reasons why the rejection occurred, but must not exceed 255 bytes. After a rejection, the connection is always closed. The protocols described below are numbered. The number indicates when the communicator is switching. Each pair of numbers is a full round trip of communications. All communications attempt to support slow connections and Tor connections. Any connection with a throughput below 100kbps may struggle to perform the uploads and downloads, and any connection with a roundtrip latency greater than 2 minutes may struggle to complete the protocols. Settings Request ---------------- The host signs the settings request to prove that the connection has opened to the right party. Hosts announce on the blockchain and perform burn; therefore, identity is important. 1. The renter makes an RPC to the host, opening a connection. The connection deadline should be at least 120 seconds. 2. The host sends the renter the most recent copy of its external settings, signed by the host public key. The connection is then closed. Revision Request ---------------- The renter requests a recent revision from the host.
Often, this request precedes modifications. A file contract can only be open for revision with one party at a time. To prevent DoS attacks, the party must authenticate here by performing a challenge-response protocol during the revision request. Putting this challenge-response requirement in the revision request can help improve privacy, though the host is under no cryptographic or incentive-based obligation to preserve the privacy of the revision. 1. The renter makes an RPC to the host, opening a connection. The connection deadline should be at least 120 seconds. The renter sends the file contract id for the revision being requested. 2. The host writes 32 bytes of random data that the renter must sign for the renter key in the corresponding file contract. 3. The renter returns the signed challenge. 4. The host verifies the signature from the renter and then sends the renter the most recent file contract revision, along with the transaction signatures from both the renter and the host. The connection is then closed. File Contract Creation ---------------------- A few decisions were made regarding the file contract protocol. The first is that the renter should not sign the file contract until the host has formally accepted the file contract. The second is that the host should be the last one to sign the file contract, as the renter is the party with the strong reputation system. Instead of sending a whole transaction each time, the transaction is sent piecemeal, and only the new parts at each step are sent to the other party. This minimizes the surface area of data for a malicious party to manipulate, which means less verification code, which means fewer chances of having a bug in the verification code. The renter pays for the siafund fee on the host's collateral and contract fee. If a renter opens a file contract and then never uses it, the host does not lose money. This does put the renter at risk, as they may open up a file contract and then watch the host leave, but the renter is spreading the risk over communications with many hosts, and has a reputation system that will help ensure that the renter is only dealing with upstanding hosts. 1. The renter makes an RPC to the host, opening a connection. The connection deadline should be at least 360 seconds. 2. The host sends the renter the most recent copy of its settings, signed. If the host is not accepting new file contracts, the connection is closed. 3. The renter sends a notice of acceptance or rejection. If the renter accepts, the renter then sends a funded file contract transaction without a signature, followed by the public key that will be used to create the renter's portion of the UnlockConditions for the file contract. 4. The host sends an acceptance or rejection of the file contract. If the host accepts, the host will add collateral to the file contract, and will send the renter the inputs + outputs for the collateral, followed by any new parent transactions. The length of any of these may be zero. 5. The renter indicates acceptance or rejection of the file contract. If the renter accepts, the renter will sign the file contract and send the transaction signatures to the host. The renter will also send a signature for a no-op file contract revision that follows the file contract. 6. The host may only reject the file contract in the event that the renter has sent invalid signatures, so the acceptance step is skipped. The host signs the file contract and sends the transaction signatures to the renter, and the host creates and sends a signature for the no-op revision that follows the file contract. The connection is closed.
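A minimal sketch of reading the acceptance/rejection construction described earlier, using the length-prefixed string encoding from the encoding rules. The exact acceptance constant is an assumption here; the real constant lives in the Sia modules package.

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"strings"
)

// acceptResponse is the acceptance string; the exact constant is an
// assumption in this sketch. Any other string is a rejection reason.
const acceptResponse = "accept"

// readAcceptance reads a length-prefixed string (an 8-byte little-endian
// length followed by the literal bytes) and returns nil if it is the
// acceptance string, or the rejection reason as an error otherwise.
// Rejection strings must not exceed 255 bytes.
func readAcceptance(r io.Reader) error {
	var lenBuf [8]byte
	if _, err := io.ReadFull(r, lenBuf[:]); err != nil {
		return err
	}
	n := binary.LittleEndian.Uint64(lenBuf[:])
	if n > 255 {
		return errors.New("response exceeds 255 bytes")
	}
	resp := make([]byte, n)
	if _, err := io.ReadFull(r, resp); err != nil {
		return err
	}
	if string(resp) == acceptResponse {
		return nil // accepted
	}
	return fmt.Errorf("rejected: %s", resp)
}

func main() {
	// "accept" encoded with its 8-byte little-endian length prefix.
	fmt.Println(readAcceptance(strings.NewReader("\x06\x00\x00\x00\x00\x00\x00\x00accept")))
}
```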
File Contract Revision ---------------------- 1. The renter makes an RPC to the host, opening a connection. The minimum deadline for the connection is 600 seconds. The renter then sends a file contract ID, indicating the file contract that is getting revised during the RPC. 2. The host will respond with a 32 byte challenge - a random 32 bytes that the renter will need to sign. 3. The renter will sign the challenge with the renter key that protects the file contract. This is to prove that the renter has access to the file contract. 4. The host will verify the challenge signature, then send an acceptance or rejection. If accepted, the host will send the most recent file contract revision for the file contract along with the transaction signatures that validate the revision. The host will lock the file contract, meaning no other changes can be made to the revision file contract until this connection has closed. 5. A loop begins. The host sends the most recent revision of the host settings to the renter, signed. The settings are sent after each iteration of the loop to enable high resolution dynamic pricing for the host, especially for bandwidth. 6. The renter may reject or accept the settings + revision. A specific rejection message will gracefully terminate the loop here. The renter will send an unsigned file contract revision followed by a batch of modification actions which the revision pays for. Batching allows the renter to send a lot of data in a single, one-way connection, improving throughput. The renter will send a number indicating how many modifications will be made in a batch, and then sends each modification in order. A single modification can either be an insert, a modify, or a delete. An insert is an index, indicating the index where the data is going to be inserted. '0' indicates that the data is inserted at the very beginning, '1' indicates that the data will be inserted between the first and second existing sectors, etc. The index is followed by the 4MB of data. A modify is an index indicating which sector is being modified, followed by an offset indicating which data within the sector is being modified. Finally, some data is provided indicating what the data in the sector should be replaced with starting from that offset. The offset + len of the data should not exceed the sector size of 4MB. A delete is an index indicating the index of the sector that is being deleted. Each operation within a batch is applied in order, meaning if you are inserting 3 sectors at the front of the file contract, the indexes of each should be '0', '1', '2'. 7. The host indicates either acceptance or rejection of the new revision. 8. The renter signs the revision and sends the signature to the host. 9. The host signs the revision and sends the signature to the renter. Both parties submit the new revision to the transaction pool. The connection deadline is reset to 600 seconds (unless the maximum deadline has been reached), and the loop restarts. File Contract Renewal --------------------- 1. The renter makes an RPC to the host, opening a connection. The minimum deadline for the connection is 600 seconds. The renter then sends a file contract ID, indicating the file contract that is getting revised during the RPC. 2.
The host will respond with a 32 byte challenge - a random 32 bytes that the renter will need to sign. 3. The renter will sign the challenge with the renter key that protects the file contract. This is to prove that the renter has access to the file contract. 4. The host will verify the challenge signature, then send an acceptance or rejection. If accepted, the host will send the most recent file contract revision for the file contract along with the transaction signatures that validate the revision. The host will lock the file contract, meaning no other changes can be made to the revision file contract until this connection has closed. The host sends the most recent revision of the host settings to the renter, signed. If the host is not accepting new file contracts, the connection is closed. 5. The renter either accepts or rejects the settings. If accepted, the renter sends a funded, unsigned file contract to the host, containing the same Merkle root as the previous file contract, and also containing a renewed payout with conditional payments to the host to cover the host storing the data for the extended duration. 6. The host will accept or reject the renewed file contract. If accepted, the host will add collateral (and miner fees if desired) and send the inputs + outputs for the collateral, along with any new parent transactions. The length of any of these may be zero. 7. The renter will accept or reject the host's additions. If accepting, the renter will send signatures for the transaction to the host. The renter will also send a signature for a no-op file contract revision that follows the file contract. 8. The host may only reject the file contract in the event that the renter has sent invalid signatures, so the acceptance step is skipped. The host signs the file contract and sends the transaction signatures to the renter, and the host creates and sends a signature for the no-op revision that follows the file contract. The connection is closed. Data Request ------------ 1. The renter makes an RPC to the host, opening a connection. The connection deadline is at least 600 seconds. The renter will send a file contract id corresponding to the file contract that will be used to pay for the download. 2. The host will respond with a 32 byte challenge - a random 32 bytes that the renter will need to sign. 3. The renter will sign the challenge with the public key that protects the file contract being used to pay for the download. This proves that the renter has access to the payment. 4. The host will verify the challenge signature, and then send an acceptance or rejection. If accepted, the host will send the most recent file contract revision, followed by the signatures that validate the revision. The host will lock the file contract, preventing other connections from making changes to the underlying storage obligation. 5. A loop begins, which will allow the renter to download multiple batches of data from the same connection. At the start of each iteration, the host sends the renter its most recent external settings, signed, along with the most recent file contract revision transaction; the settings are sent each iteration to provide high resolution dynamic bandwidth pricing. If there is no revision yet, the host will send a blank transaction. The host is expected to always have the most recent revision (the host signs last); the renter may not have the most recent revision. 6. The renter will accept or reject the host's settings. If accepting, the renter will send a file contract revision, unsigned, to pay for the download request. The renter will then send the download request itself. 7. The host will either accept or reject the revision. 8. The renter will send a signature for the file contract revision. 9. The host sends a signature for the file contract revision, followed by the data that was requested by the download request. The loop starts over, and the connection deadline is reset to a minimum of 600 seconds.
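A minimal sketch of the challenge-response step used throughout these protocols: the host sends 32 random bytes and the renter signs them with the key that protects the file contract. ed25519 is assumed per the consensus rules; this sketch uses Go's standard crypto/ed25519 package for illustration rather than the project's own crypto package.

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	// The renter key that protects the file contract (illustrative).
	renterPub, renterPriv, _ := ed25519.GenerateKey(rand.Reader)

	// Host side: generate a 32 byte challenge.
	challenge := make([]byte, 32)
	if _, err := rand.Read(challenge); err != nil {
		panic(err)
	}

	// Renter side: sign the challenge to prove access to the contract key.
	sig := ed25519.Sign(renterPriv, challenge)

	// Host side: verify before unlocking the file contract for revision.
	fmt.Println(ed25519.Verify(renterPub, challenge, sig)) // true
}
```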
Sia-1.3.0/doc/Guide to Contributing to Sia.md000066400000000000000000000220001313565667000205160ustar00rootroot00000000000000# Contributing to Sia #### Table of Contents * [Get started with Go](#go) * [Install Go](#install-go) * [Learn Go](#learn-go) * [Build Sia](#build-sia) * [Contribute to the codebase](#contribute) * [Set up git](#setup-git) * [Fork the Sia repository](#fork) * [Write some code](#write) * [Submit your code for review](#pull) * [More git resources](#git-resources) * [Where to start](#where-to-start) * [Contact us](#contact) ## Get started with Go ### Install Go To install Go on your computer, follow the [official installation guide][install-go]. You should install the latest [official Go binary][binary] for your system (if not available, [install from source][source]). If you plan to cross compile Sia, see [Cross Compilation with Go 1.5][cross] by Dave Cheney. Now make a workspace directory in which you will store source code and dependencies. You can choose any filepath except where you installed Go (don't choose `/usr/local`). ```bash # make a working directory called golang in your home directory $ mkdir $HOME/golang # store base path in an environment variable $ echo 'export GOPATH=$HOME/golang' >> $HOME/.profile # add bin subdirectory to PATH environment variable $ echo 'export PATH=$PATH:$GOPATH/bin' >> $HOME/.profile ``` ### Learn Go * To get familiar with the language, start with the official [Tour of Go][tour]. * Move on to [How to Write Go Code][how] to learn how to organize Go packages and use the go tool. * Finish with the [Effective Go][effective] guide. ## Build Sia To build Sia on your machine, enter the following on the command line: ```bash # Download Sia and its dependencies # Binaries will be installed in $GOPATH/bin $ go get -u github.com/NebulousLabs/Sia/... # Switch to directory containing Sia source code $ cd $GOPATH/src/github.com/NebulousLabs/Sia # You have three Sia builds to choose from. # To build the standard release binary: $ make release-std # Or to build the release binary with race detection and an array of debugging # asserts: $ make release # Or to build the developer binary (with a different genesis block, faster # block times, and other changes): $ make ``` ## Contribute to the codebase ### Set up git Install git on your machine according to [these instructions][install-git] in the Pro Git book. You will first need to set up global settings using the command line. ```bash $ git config --global user.name "Your Name" $ git config --global user.email you@somedomain.com # Tell git to remember your login information for a certain amount of time.
# Default time is 15 minutes: $ git config --global credential.helper cache # Or you can choose a different amount of time: $ git config --global credential.helper "cache --timeout=[seconds]" ``` ### Fork the Sia repository While logged into your GitHub account, navigate to the [Sia repository][sia] and click the 'Fork' button in the upper right-hand corner. Your account now has a 'forked' copy of the original repo at `https://github.com/<username>/Sia`. When you installed Sia using `go get`, the go tool put the Sia source code in $GOPATH/src/github.com/NebulousLabs/Sia. Change to that directory and set up your fork as a git [remote][remote]: ```bash $ cd $GOPATH/src/github.com/NebulousLabs/Sia # Add your fork as a remote. Name it whatever is convenient, # e.g. your GitHub username $ git remote add <fork name> https://github.com/<username>/Sia.git ``` ### Write some code Right now your local git repository only has one branch (called 'master' by default). If you want to make changes, add a new branch and make your changes there. You should maintain master as an up-to-date copy of the NebulousLabs/Sia repository's master branch. To create and check out a new branch: ```bash # If you're not already in the right directory: $ cd $GOPATH/src/NebulousLabs/Sia # Make sure you're on branch master $ git checkout master # Create and checkout a new branch $ git checkout -b <branch name> ``` Now write some code while the new branch is checked out. Only implement one logical change per branch. If you're working on several things at once, make multiple branches. To switch between branches you're working on, you have to stash the changes in the branch you're switching from by running `git stash`, which tucks away all changes since the last commit. ```bash # Stash changes to current branch. $ git stash # Checkout other branch. $ git checkout <other branch> ... # Make changes ... # Return to first branch: $ git checkout <branch name> # View a list of stashes and their corresponding hashes. $ git stash list # Reapply changes from the stash you want to recover and remove that stash from # the list. $ git stash pop ``` To learn more about branching, see [Using the Fork-and-Branch Git Workflow][branch] and [Pro Git - Branches in a Nutshell][nutshell]. For more on stashing, see [Pro Git - Stashing and Cleaning][stashing]. Be sure to follow the conventions detailed in [docs/Developers.md][developers.md]. We will reject pull requests that do not satisfy these best practices. Once you've finished making changes, stage and commit your changes, then update your fork on GitHub: ```bash # Make sure the code is up to date with the original repo: $ git checkout master $ git pull origin master # Checkout branch with changes. $ git checkout <branch name> $ git rebase master # Before every pull request, you should run `make test-long` # to test your code and fix formatting and style problems. $ make test-long # If all goes well, proceed to staging your changed files: $ git add <changed files> # Use `git status` to see what files have been staged. $ git status # Commit your changes. If you just run `commit`, a text editor will pop up for # you to enter a description of your changes. $ git commit -m "Add new tests for CommitSync method" # Push the changes to your fork on GitHub, which you should have set up as a # remote already. $ git push <fork name> <branch name> ``` ### Submit your code for review Once you've tested your new code and pushed changes to your fork, navigate to your fork at `https://github.com/<username>/Sia` in your browser. Switch to the branch you've made changes on by selecting it from the list on the upper left.
Then click 'New pull request' on the upper right. Once you have made the pull request, we will review your code. We will reject code that is unsafe, difficult to read, or that otherwise violates the conventions outlined in [docs/Developers.md][developers.md]. Here's a sample code review comment: ![Screenshot](assets/codereview.png) If you want to tweak code for which you've already submitted a pull request, push the updated code to your fork with `git push -f <fork> <branch-name>` and summarize the changes you've made in a comment on the pull request page on GitHub. Once we have accepted your changes and merged them into the original repo, you have some cleanup to do: ```bash # Update local master branch to reflect changes in origin (the original # repo). $ git pull origin master # Delete the branch you made the pull request from. $ git branch -d <branch-name> # Delete the remote branch on your fork. $ git push <fork> :<branch-name> # Update your fork. $ git push <fork> master ``` ### More Git resources * [How to into git (and Github)][luke] by Luke Champine * [Official resources for learning Git][git] ## Where to start If you'd like to contribute to Sia but don't have any specific ideas, writing tests is a good way to get your feet wet. See [doc/Running and Writing Tests for Sia.md](Running%20and%20Writing%20Tests%20for%20Sia.md) to get started. ## Contact us Feel free to ask for help on the #dev channel on [Slack][slack]. [cross]: http://dave.cheney.net/2015/08/22/cross-compilation-with-go-1-5 [binary]: https://golang.org/dl/ [source]: https://golang.org/doc/install/source [tour]: https://tour.golang.org/welcome/1 [how]: https://golang.org/doc/code.html [luke]: https://gist.github.com/lukechampine/6418449 [git]: https://git-scm.com/doc [cheney]: http://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go [install-go]: https://golang.org/doc/install [signup]: https://github.com/join?source=header-home [effective]: https://golang.org/doc/effective_go.html [sia]: https://github.com/NebulousLabs/Sia [branch]: http://blog.scottlowe.org/2015/01/27/using-fork-branch-git-workflow/ [developers.md]: https://github.com/NebulousLabs/Sia/blob/master/doc/Developers.md [gofmt]: https://golang.org/cmd/gofmt/ [nutshell]: https://git-scm.com/book/en/v2/Git-Branching-Branches-in-a-Nutshell [slack]: http://slackin.siacoin.com [install-git]: https://git-scm.com/book/en/v2/Getting-Started-Installing-Git [test-doc]: https://github.com/NebulousLabs/Sia/blob/master/doc/Testing.md [stashing]: https://git-scm.com/book/en/v2/Git-Tools-Stashing-and-Cleaning [remote]: https://git-scm.com/book/en/v2/Git-Basics-Working-with-Remotes Sia-1.3.0/doc/Modules.md000066400000000000000000000037301313565667000147450ustar00rootroot00000000000000Module Conventions ================== Each module has a file/directory where it stores persistent data (if necessary). When module.New is called, the module is responsible for creating and populating that directory. The logic for saving and loading data belongs in persist.go. Modules that depend on external information (such as the state of consensus) have an update.go to manage fetching and integrating the external information. If that information is coming from another module, a subscription should be used. Module subscription uses a ModuleSubscriber interface (which the subscriber must satisfy) and a ModuleSubscribe method (implemented by the parent module).
As the parent module gets updates, it will call ReceiveModuleUpdate (the only method of the ModuleSubscriber interface) on all subscribers, taking care that each subscriber always receives the updates in the correct order. This method of subscription is chosen to keep information flow simple and synchronized - a child module should never have information that the parent module does not (it just causes problems). For testing, it is often important to know that an update has propagated to all modules. Any module that subscribes to another must also implement a ModuleNotify function in subscriptions.go. ModuleNotify returns a channel down which a struct{} will be sent every time that module receives an update from a parent module. To keep things simple, a module should not subscribe to the parent of another module that it is subscribed to. For example, the transaction pool is subscribed to the consensus set. Therefore, no module should subscribe to both the transaction pool and the consensus set. All consensus set updates should be received through the transaction pool. This helps with synchronization and ensures that no child module ever has information that the parent module has not yet received (desynchronization). #### Module Update Flow consensus -> (host, hostdb, renter, (transaction pool -> miner, wallet)) Sia-1.3.0/doc/RPC.md000066400000000000000000000074201313565667000137610ustar00rootroot00000000000000RPC === Sia peers communicate with each other via Remote Procedure Calls. An RPC consists of a unique ID and a pair of functions, one on the calling end and one on the receiving end. After the ID is written/received, both peers hand the connection off to their respective functions. Typically, the calling end writes an object to the receiver and then reads a response. RPC IDs are always 8 bytes and contain a human-readable name for the RPC. If the name is shorter than 8 bytes, the remainder is padded with zeros. If the name is longer than 8 bytes, it is truncated. ### Call Listing Unless otherwise specified, these calls follow a request/response pattern and use the [encoding](./Encoding.md) package to serialize data. **All data received via RPC should be considered untrusted and potentially malicious.** #### ShareNodes ShareNodes requests peer addresses from a peer. The gateway calls this RPC regularly to update its list of potential peers. ID: `"ShareNod"` Request: None Response: ```go []modules.NetAddress ``` Recommendations: + Requesting peers should limit the request to 3000 bytes. + Responding peers should send no more than 10 peers, and should not send peers that are unlikely to be reachable. #### SendBlocks SendBlocks requests blocks from a peer. The blocks are added to the requesting peer's blockchain, and optionally rebroadcast to other peers. Unlike most RPCs, the SendBlocks call is a loop of requests and responses that continues until the responding peer has no more blocks to send. ID: `"SendBloc"` Request: ```go // Exponentially-spaced IDs of most-recently-seen blocks, // ordered from most recent to least recent. // Less than 32 elements may be present, but the last element // (index 31) is always the ID of the genesis block. [32]types.BlockID ``` Response: ```go struct { // sequential list of blocks, beginning with the first // block in the main chain not seen by the requesting peer. blocks []types.Block // true if the responding peer can send more blocks more bool } ``` Recommendations: + Requesting peers should limit the request to 20MB. 
+ Responding peers should identify the most recent BlockID that is in their blockchain, and send up to 10 blocks following that block. + Responding peers should set `more = true` if they have not sent the most recent block in their chain. #### RelayHeader RelayHeader sends a block header to a peer, with the expectation that the peer will relay the header to its own peers. ID: `"RelayHea"` Request: ```go types.BlockHeader ``` Response: None Recommendations: + Requesting (sending) peers should call this RPC on all of their peers as soon as they mine or receive a block via `SendBlocks` or `SendBlk`. + Responding (receiving) peers should use the `SendBlk` RPC to download the actual block content. If the block is an orphan, `SendBlocks` should be used to discover the block's parent(s). + Responding peers should not rebroadcast the received ID until they have downloaded and verified the actual block. #### SendBlk SendBlk requests a block's contents from a peer, given the block's ID. ID: `"SendBlk\0"` Request: ```go types.BlockID ``` Response: ```go types.Block ``` Recommendations: + Requesting peers should limit the received block to 2 MB (the maximum block size). + Requesting peers should broadcast the block's ID using `RelayHeader` once the received block has been verified. + Responding peers may simply close the connection if the block ID does not match a known block. #### RelayTransactionSet RelayTransactionSet sends a transaction set to a peer. ID: `"RelayTra"` Request: ```go []types.Transaction ``` Response: None Recommendations: + Requesting peers should limit the request to 2 MB (the maximum block size). + Responding peers should broadcast the received transaction set once it has been verified. Sia-1.3.0/doc/Running and Writing Tests for Sia.md000066400000000000000000000227701313565667000214400ustar00rootroot00000000000000# Running and Writing Tests for Sia Improving test coverage is a great way to start contributing to Sia. This guide focuses on how to write tests. To learn about making pull requests to submit the code you've written, see [doc/Guide to Contributing to Sia.md][guide]. You should also read [doc/Developers.md][developers] to learn about Sia code conventions and quality standards. #### Table of Contents * [Running tests for Sia](#existing) * [Updating code before testing](#update) * [Testing the entire build](#entire) * [Testing a particular package](#particular) * [Writing new tests for Sia](#write) * [A few guidelines](#naming) * [Basic test format](#basic) * [Table-driven tests](#table) * [Questions?](#questions) ## Running tests for Sia Go's comprehensive [test package][pkg/testing] makes testing straightforward, particularly when you use the bundled tools included in the [Sia makefile][makefile], including `make test`, `make cover`, `make bench`, and their variants. ### Updating code before testing If you just want to run existing tests on the codebase as is, you just need to pull the latest version of the original repo to your master branch. (If that sentence didn't make sense, go read [doc/Guide to Contributing to Sia.md][guide].) ```bash # Make sure you are in the right directory. $ cd $GOPATH/src/github.com/<username>/Sia # Also make sure you're working with the right branch. $ git checkout master # Pull latest changes from origin, the original Sia repo. $ git pull origin master # Update your fork of the repo, which should be set up as a remote. $ git push <fork> master ``` If you want to run tests on the new code you've added, first make sure the rest of the code is up to date.
New code should be on its own branch (again, see [doc/Guide to Contributing to Sia.md][guide]). ```bash # Make sure you are in the right directory. $ cd $GOPATH/src/github.com/<username>/Sia # Check out the branch you made the changes on. $ git checkout <branch-name> # Stash any tracked but uncommitted changes. $ git stash # Then switch back to `master` and update it to match the original repo. $ git checkout master $ git pull origin master # Update your fork of the repo, which you should have set up as a remote. $ git push <fork> master # Make the updated `master` the new base of the branch you made the changes on, # which involves reapplying all the commits made to that branch. Without the # `--ignore-date` flag, git rebase changes the date on all the commits to the # current date. $ git checkout <branch-name> $ git rebase master --ignore-date # Restore the changes you stashed earlier. $ git stash pop ``` When you call `rebase`, you may run into some merge conflicts. Luke Champine's ['How to into git and GitHub'][luke] has more details (and many useful tricks). Once the branch you want to test is up to date, you're ready to run some tests. ### Testing the entire build The `make test` command runs all tests (functions starting with `Test` in `_test.go` files) for each package, setting off a panic for any test that runs longer than 5s. For verbose output, run `make test-v` (which panics after 15s instead of 5s). Finally, `make test-long` has verbose output, only panics when a test takes 5 minutes, and also cleans up your code using `gofmt` and `golint`. **You should run** `make test-long` **before each pull request.** Run `make cover` to run all tests for each package and generate color-coded .html visualizations of test coverage by function for each source file. Open `cover/<package>.html` in a browser to inspect a module's test coverage. For example, here's part of the html file generated for the persist package: ![Screenshot](assets/covertool.png) Meanwhile, `make bench` will call `gofmt` on all packages, then run all benchmarks (functions starting with `Benchmark` in `_test.go` files). ### Testing a particular package or function To run tests for just a certain package, run `make test pkgs=./<package>`. To run a certain test function, run `make test pkgs=./<package> run=<test>`. The same goes for `make test-long`, `make cover`, and `make bench`. For example, running `test-long` on the package persist produces this output: ```bash $ make test-long pkgs=./persist rm -rf release doc/whitepaper.aux doc/whitepaper.log doc/whitepaper.pdf gofmt -s -l -w ./persist go install ./persist go vet ./persist go test -v -race -tags='testing debug' -timeout=300s ./persist -run=Test === RUN TestOpenDatabase --- PASS: TestOpenDatabase (0.42s) === RUN TestSaveLoad --- PASS: TestSaveLoad (0.00s) === RUN TestSaveLoadFile --- PASS: TestSaveLoadFile (0.01s) === RUN TestSaveLoadFileSync --- PASS: TestSaveLoadFileSync (0.00s) === RUN TestLogger --- PASS: TestLogger (0.00s) === RUN TestLoggerCritical --- PASS: TestLoggerCritical (0.00s) === RUN TestIntegrationRandomSuffix --- PASS: TestIntegrationRandomSuffix (0.01s) === RUN TestAbsolutePathSafeFile --- PASS: TestAbsolutePathSafeFile (0.00s) === RUN TestRelativePathSafeFile --- PASS: TestRelativePathSafeFile (0.00s) PASS ok github.com/NebulousLabs/Sia/persist 1.485s $ ``` ## Writing new tests for Sia When you run `make cover`, you'll notice that many files have pretty low coverage. We're working on fixing that, but we could use your help.
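Coverage isn't the only gap: `make bench` (described above) picks up any function whose name starts with `Benchmark`, and few packages have them. Here is a minimal, self-contained sketch of one — it lives in a `_test.go` file like any test; the 4 MiB buffer merely mirrors the Sia sector size, and hashing it with the standard library's SHA-256 is an arbitrary workload chosen for illustration, not something tied to a real Sia package:

```go
package demo

import (
	"crypto/sha256"
	"testing"
)

// BenchmarkHashSector measures SHA-256 throughput over a 4 MiB buffer.
// Benchmark functions receive a *testing.B and must run the operation
// under test b.N times; the testing package chooses N automatically.
func BenchmarkHashSector(b *testing.B) {
	buf := make([]byte, 1<<22)  // 4 MiB of zeroes is fine for timing
	b.SetBytes(int64(len(buf))) // report throughput (MB/s) as well as ns/op
	b.ResetTimer()              // exclude the setup above from the timing
	for i := 0; i < b.N; i++ {
		sha256.Sum256(buf)
	}
}
```
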
### A few guidelines * The test functions for `filename.go` should go in `filename_test.go` in the same directory and package. * A test function name should start with `Test` and clearly convey what is being tested. * You should declare function-specific variables and constants locally (inside the test function) instead of globally (outside the test function). [That holds in general][global], not just for tests. * As always, code should adhere to the standards and conventions laid out in [doc/Developers.md][developers]. ### Basic test format Suppose we'd like to test the Bar method belonging to type Foo. ```go // TestFoo checks that the Bar method on type Foo responds correctly to a normal // input and returns the expected error when given a bad input. func TestFoo(t *testing.T) { foo, err := NewFoo() if err != nil { // If NewFoo failed, we can't continue testing. t.Fatal(err) } // Try a normal input; should succeed. err = foo.Bar(3) if err != nil { // Report the error, but don't abort the test. t.Error(err) } // Try a bad input; should return an error. // NOTE: Always prefer to compare to a specific error, rather than // err == nil err = foo.Bar(0) if err != errDivideByZero { t.Errorf("expected errDivideByZero, got %v", err) } } ``` ### Table-driven tests in Go If you're looking to test a bunch of inputs, write a [table-driven test][table] with a slice of anonymous structs. For example, see `TestParseFilesize` in [siac/parse_test.go][parse_test]: ```go func TestParseFilesize(t *testing.T) { // Define a table of test cases in the form of a slice of anonymous structs. tests := []struct { in, out string err error }{ {"1b", "1", nil}, {"1KB", "1000", nil}, {"1MB", "1000000", nil}, {"1GB", "1000000000", nil}, {"1TB", "1000000000000", nil}, {"1KiB", "1024", nil}, {"1MiB", "1048576", nil}, {"1GiB", "1073741824", nil}, {"1TiB", "1099511627776", nil}, {"", "", errUnableToParseSize}, {"123", "123", nil}, {"123TB", "123000000000000", nil}, {"123GiB", "132070244352", nil}, {"123BiB", "", errUnableToParseSize}, {"GB", "", errUnableToParseSize}, {"123G", "", errUnableToParseSize}, {"123B99", "", errUnableToParseSize}, {"12A3456", "", errUnableToParseSize}, {"1.23KB", "1230", nil}, {"1.234KB", "1234", nil}, {"1.2345KB", "1234", nil}, } // Loop through the table of test cases to make sure parseFilesize returns // the expected output and error for each. for _, test := range tests { res, err := parseFilesize(test.in) if res != test.out || err != test.err { t.Errorf("parseFilesize(%v): expected %v %v, got %v %v", test.in, test.out, test.err, res, err) } } } ``` ## Questions? Read these if you haven't already: * [doc/Guide to Contributing to Sia.md][guide]: getting started with Go, Sia, and git * [doc/Developers.md][developers]: conventions and quality standards for Sia code Some other useful resources, some of which have been linked to already: * [Golang.org page on the go testing package][pkg/testing] * [Writing Table-Driven Tests in Go][table] * [How to Write Benchmarks in Go][cheney-benchmarks] * [How to into git and GitHub][luke]: an essential introduction to git And feel free to ask questions on the [#dev channel][slack] on the Sia Slack. Odds are, someone else is wondering the same thing.
[pkg/testing]: https://golang.org/pkg/testing/ [makefile]: https://github.com/NebulousLabs/Sia/blob/master/Makefile [luke]: https://gist.github.com/lukechampine/6418449 [guide]: https://github.com/NebulousLabs/Sia/blob/master/doc/Guide%20to%20Contributing%20to%20Sia.md [developers]: https://github.com/NebulousLabs/Sia/blob/master/doc/Developers.md [table]: http://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go [boltdb_test.go]: https://github.com/NebulousLabs/Sia/blob/master/persist/boltdb_test.go [cheney-benchmarks]: http://dave.cheney.net/2013/06/30/how-to-write-benchmarks-in-go [pkg/testing]: https://golang.org/pkg/testing/ [slack]: https://siatalk.slack.com/messages/dev/ [parse_test]: https://github.com/NebulousLabs/Sia/blob/master/siac/parse_test.go [global]: http://c2.com/cgi/wiki?GlobalVariablesAreBad Sia-1.3.0/doc/Standard.md000066400000000000000000000051161313565667000150750ustar00rootroot00000000000000Standard Transaction Rules ========================== Some transactions will not be accepted by miners unless they appear in a block. This is equivalent to the 'IsStandard' function in Bitcoin. This file dictates the rules for standard Sia transactions. Transaction Size ---------------- Consensus rules limit the size of a block, but not the size of a transaction. Standard rules however limit the size of a single transaction to 16kb. A chain of dependent transactions cannot exceed 500kb. Double Spend Rules ------------------ When two conflicting transactions are seen, the first transaction is the only one that is kept. If the blockchain reorganizes, the transaction that is kept is the transaction that was most recently in the blockchain. This is to discourage double spending, and enforce that the first transaction seen is the one that should be kept by the network. Other conflicts are thrown out. Transactions are currently included into blocks using a first-come first-serve algorithm. Eventually, transactions will be rejected if the fee does not meet a certain minimum. For the near future, there are no plans to prioritize transactions with substantially higher fees. Other mining software may take alternative approaches. File Contract Rules ------------------- File Contracts that start in less than 10 blocks time are not accepted into the transaction pool. This is because a file contract becomes invalid if it is not accepted into the blockchain by the start block, and this might result in a cascade of invalidated unconfirmed transactions, which may make it easier to launch double spend attacks on zero confirmation outputs. 10 blocks is plenty of time on the other hand for a file contract to make it into the blockchain. Signature Algorithms -------------------- Miners will reject transactions that have public keys using algorithms that the miner does not understand. Arbitrary Data Usage -------------------- Arbitrary data can be used to make verifiable announcements, or to have other protocols sit on top of Sia. The arbitrary data can also be used for soft forks, and for protocol relevant information. Any arbitrary data is allowed by consensus, but only certain arbitrary data is considered standard. Arbitrary data that is prefixed by the string 'NonSia' is always allowed. This indicates that the remaining data has no relevance to Sia protocol rules, and never will. 
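To make the rule concrete, here is a hedged Go sketch of how a transaction pool might screen arbitrary data. The helper and variable names are hypothetical, not identifiers from the Sia codebase; the 'HostAnnouncement' case it references is described next:

```go
package demo

import "bytes"

// Hypothetical copies of the standardness prefixes described in this section.
var (
	prefixNonSia           = []byte("NonSia")
	prefixHostAnnouncement = []byte("HostAnnouncement")
)

// isStandardArbitraryData sketches the rule above: data tagged 'NonSia' is
// always acceptable, data tagged 'HostAnnouncement' needs further
// validation, and anything else is rejected as non-standard.
func isStandardArbitraryData(data []byte) bool {
	if bytes.HasPrefix(data, prefixNonSia) {
		return true
	}
	if bytes.HasPrefix(data, prefixHostAnnouncement) {
		// A real implementation would decode the remainder into the
		// HostAnnouncement struct and reject any trailing bytes.
		return decodesToHostAnnouncement(data[len(prefixHostAnnouncement):])
	}
	return false
}

// decodesToHostAnnouncement stands in for the decoding check described in
// the next paragraph; it is a placeholder, not a real Sia function.
func decodesToHostAnnouncement(b []byte) bool { return len(b) > 0 }
```
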
Arbitrary data that is prefixed by the string 'HostAnnouncement' is allowed, but only if the data within accurately decodes to the HostAnnouncement struct found in modules/hostdb.go, and contains no extra information. Sia-1.3.0/doc/api/000077500000000000000000000000001313565667000135615ustar00rootroot00000000000000Sia-1.3.0/doc/api/Consensus.md000066400000000000000000000041711313565667000160660ustar00rootroot00000000000000Consensus API ============= This document contains detailed descriptions of the consensus's API routes. For an overview of the consensus' API routes, see [API.md#consensus](/doc/API.md#consensus). For an overview of all API routes, see [API.md](/doc/API.md) There may be functional API calls which are not documented. These are not guaranteed to be supported beyond the current release, and should not be used in production. Overview -------- The consensus set manages everything related to consensus and keeps the blockchain in sync with the rest of the network. The consensus set's API endpoint returns information about the state of the blockchain. Index ----- | Route | HTTP verb | | --------------------------------------------------------------------------- | --------- | | [/consensus](#consensus-get) | GET | | [/consensus/validate/transactionset](#consensusvalidatetransactionset-post) | POST | #### /consensus [GET] returns information about the consensus set, such as the current block height. ###### JSON Response ```javascript { // True if the consensus set is synced with the network, i.e. it has downloaded the entire blockchain. "synced": true, // Number of blocks preceding the current block. "height": 62248, // Hash of the current block. "currentblock": "00000000000008a84884ba827bdc868a17ba9c14011de33ff763bd95779a9cf1", // An immediate child block of this block must have a hash less than this // target for it to be valid. "target": [0,0,0,0,0,0,11,48,125,79,116,89,136,74,42,27,5,14,10,31,23,53,226,238,202,219,5,204,38,32,59,165], // The difficulty of the current block target. "difficulty": "1234" // arbitrary-precision integer } ``` #### /consensus/validate/transactionset [POST] validates a set of transactions using the current utxo set. ###### Request Body Bytes Since transactions may be large, the transaction set is supplied in the POST body, encoded in JSON format. ###### Response standard success or error response. See [#standard-responses](#standard-responses). Sia-1.3.0/doc/api/Daemon.md000066400000000000000000000072321313565667000153120ustar00rootroot00000000000000Daemon API =========== This document contains detailed descriptions of the daemon's API routes. For an overview of the daemon's API routes, see [API.md#daemon](/doc/API.md#daemon). For an overview of all API routes, see [API.md](/doc/API.md) There may be functional API calls which are not documented. These are not guaranteed to be supported beyond the current release, and should not be used in production. Overview -------- The daemon is responsible for starting and stopping the modules which make up the rest of Sia. It also provides endpoints for viewing build constants. Index ----- | Route | HTTP verb | | ----------------------------------------- | --------- | | [/daemon/constants](#daemonconstants-get) | GET | | [/daemon/stop](#daemonstop-get) | GET | | [/daemon/version](#daemonversion-get) | GET | #### /daemon/constants [GET] returns the set of constants in use. ###### JSON Response ```javascript { // Timestamp of the genesis block. 
"genesistimestamp": 1433600000, // Unix time // Maximum size, in bytes, of a block. Blocks larger than this will be // rejected by peers. "blocksizelimit": 2000000, // bytes // Target for how frequently new blocks should be mined. "blockfrequency": 600, // seconds per block // Farthest a block's timestamp can be in the future before the block is // rejected outright. "extremefuturethreshold": 10800, // seconds // Height of the window used to adjust the difficulty. "targetwindow": 1000, // blocks // Duration of the window used to adjust the difficulty. "mediantimestampwindow": 11, // blocks // How far in the future a block can be without being rejected. A block // further into the future will not be accepted immediately, but the daemon // will attempt to accept the block as soon as it is valid. "futurethreshold": 10800, // seconds // Total number of siafunds. "siafundcount": "10000", // Fraction of each file contract payout given to siafund holders. "siafundportion": "39/1000", // Number of children a block must have before it is considered "mature." "maturitydelay": 144, // blocks // Number of coins given to the miner of the first block. Note that elsewhere // in the API currency is typically returned in hastings and as a bignum. // This is not the case here. "initialcoinbase": 300000, // Siacoins // Minimum number of coins paid out to the miner of a block (the coinbase // decreases with each block). Note that elsewhere in the API currency is // typically returned in hastings and as a bignum. This is not the case // here. "minimumcoinbase": 30000, // Siacoins // Initial target. "roottarget": [0,0,0,0,32,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], // Initial depth. "rootdepth": [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255], // Largest allowed ratio between the old difficulty and the new difficulty. "maxadjustmentup": "5/2", // Smallest allowed ratio between the old difficulty and the new difficulty. "maxadjustmentdown": "2/5", // Number of Hastings in one siacoin. "siacoinprecision": "1000000000000000000000000" // hastings per siacoin } ``` #### /daemon/stop [GET] cleanly shuts down the daemon. May take a few seconds. ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /daemon/version [GET] returns the version of the Sia daemon currently running. ###### JSON Response ```javascript { // Version number of the running Sia Daemon. This number is visible to its // peers on the network. "version": "1.0.0" } ``` Sia-1.3.0/doc/api/Gateway.md000066400000000000000000000115361313565667000155120ustar00rootroot00000000000000Gateway API =========== This document contains detailed descriptions of the gateway's API routes. For an overview of the gateway's API routes, see [API.md#gateway](/doc/API.md#gateway). For an overview of all API routes, see [API.md](/doc/API.md) There may be functional API calls which are not documented. These are not guaranteed to be supported beyond the current release, and should not be used in production. Overview -------- The gateway maintains a peer to peer connection to the network and provides a method for calling RPCs on connected peers. The gateway's API endpoints expose methods for viewing the connected peers, manually connecting to peers, and manually disconnecting from peers. The gateway may connect or disconnect from peers on its own. 
Index ----- | Route | HTTP verb | Examples | | ---------------------------------------------------------------------------------- | --------- | ------------------------------------------------------- | | [/gateway](#gateway-get-example) | GET | [Gateway info](#gateway-info) | | [/gateway/connect/___:netaddress___](#gatewayconnectnetaddress-post-example) | POST | [Connecting to a peer](#connecting-to-a-peer) | | [/gateway/disconnect/___:netaddress___](#gatewaydisconnectnetaddress-post-example) | POST | [Disconnecting from a peer](#disconnecting-from-a-peer) | #### /gateway [GET] [(example)](#gateway-info) returns information about the gateway, including the list of connected peers. ###### JSON Response ```javascript { // netaddress is the network address of the gateway as seen by the rest of // the network. The address consists of the external IP address and the // port Sia is listening on. It represents a `modules.NetAddress`. "netaddress": String, // peers is an array of peers the gateway is connected to. It represents // an array of `modules.Peer`s. "peers": []{ // netaddress is the address of the peer. It represents a // `modules.NetAddress`. "netaddress": String, // version is the version number of the peer. "version": String, // inbound is true when the peer initiated the connection. This field // is exposed as outbound peers are generally trusted more than inbound // peers, as inbound peers are easily manipulated by an adversary. "inbound": Boolean, // local is true if the peer's IP address belongs to a local address // range such as 192.168.x.x or 127.x.x.x "local": Boolean } } ``` #### /gateway/connect/{netaddress} [POST] [(example)](#connecting-to-a-peer) connects the gateway to a peer. The peer is added to the node list if it is not already present. The node list is the list of all nodes the gateway knows about, but is not necessarily connected to. ###### Path Parameters ``` // netaddress is the address of the peer to connect to. It should be a // reachable ip address and port number, of the form 'IP:port'. IPV6 addresses // must be enclosed in square brackets. // // Example IPV4 address: 123.456.789.0:123 // Example IPV6 address: [123::456]:789 :netaddress ``` ###### Response standard success or error response. See [API.md#standard-responses](/doc/API.md#standard-responses). #### /gateway/disconnect/{netaddress} [POST] [(example)](#disconnecting-from-a-peer) disconnects the gateway from a peer. The peer remains in the node list. Disconnecting from a peer does not prevent the gateway from automatically connecting to the peer in the future. ###### Path Parameters ``` // netaddress is the address of the peer to connect to. It should be a // reachable ip address and port number, of the form 'IP:port'. IPV6 addresses // must be enclosed in square brackets. // // Example IPV4 address: 123.456.789.0:123 // Example IPV6 address: [123::456]:789 :netaddress ``` ###### Response standard success or error response. See [API.md#standard-responses](/doc/API.md#standard-responses). 
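Before the raw request examples below, here is a small Go sketch of driving these routes programmatically. It assumes a daemon listening on the default `localhost:9980` API address and sets the `Sia-Agent` user agent that the siad API expects; the struct mirrors only the response fields it needs:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// gatewayInfo mirrors just the fields of the /gateway response that this
// example prints; the full response is documented above.
type gatewayInfo struct {
	NetAddress string `json:"netaddress"`
	Peers      []struct {
		NetAddress string `json:"netaddress"`
		Version    string `json:"version"`
		Inbound    bool   `json:"inbound"`
	} `json:"peers"`
}

func main() {
	req, err := http.NewRequest("GET", "http://localhost:9980/gateway", nil)
	if err != nil {
		panic(err)
	}
	// siad rejects API requests that do not identify themselves as Sia-Agent.
	req.Header.Set("User-Agent", "Sia-Agent")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var info gatewayInfo
	if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
		panic(err)
	}
	fmt.Printf("listening on %s with %d peers\n", info.NetAddress, len(info.Peers))
}
```
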
Examples -------- #### Gateway info ###### Request ``` /gateway ``` ###### Expected Response Code ``` 200 OK ``` ###### Example JSON Response ```json { "netaddress":"333.333.333.333:9981", "peers":[ { "netaddress":"222.222.222.222:9981", "version":"1.0.0", "inbound":false }, { "netaddress":"111.111.111.111:9981", "version":"0.6.0", "inbound":true } ] } ``` #### Connecting to a peer ###### Request ``` /gateway/connect/123.456.789.0:123 ``` ###### Expected Response Code ``` 204 No Content ``` #### Disconnecting from a peer ###### Request ``` /gateway/disconnect/123.456.789.0:123 ``` ###### Expected Response Code ``` 204 No Content ``` Sia-1.3.0/doc/api/Host.md000066400000000000000000000575221313565667000150330ustar00rootroot00000000000000Host API -------- This document contains detailed descriptions of the host's API routes. For an overview of the host's API routes, see [API.md#host](/doc/API.md#host). For an overview of all API routes, see [API.md](/doc/API.md) There may be functional API calls which are not documented. These are not guaranteed to be supported beyond the current release, and should not be used in production. Overview -------- The host provides storage from local disks to the network. The host negotiates file contracts with remote renters to earn money for storing other users' files. The host's endpoints expose methods for viewing and modifying host settings, announcing to the network, and managing how files are stored on disk. Index ----- | Route | HTTP verb | | ------------------------------------------------------------------------------------------ | --------- | | [/host](#host-get) | GET | | [/host](#host-post) | POST | | [/host/announce](#hostannounce-post) | POST | | [/host/estimatescore](#hostestimatescore-get) | GET | | [/host/storage](#hoststorage-get) | GET | | [/host/storage/folders/add](#hoststoragefoldersadd-post) | POST | | [/host/storage/folders/remove](#hoststoragefoldersremove-post) | POST | | [/host/storage/folders/resize](#hoststoragefoldersresize-post) | POST | | [/host/storage/sectors/delete/:___merkleroot___](#hoststoragesectorsdeletemerkleroot-post) | POST | #### /host [GET] fetches status information about the host. ###### JSON Response ```javascript { // The settings that get displayed to untrusted nodes querying the host's // status. "externalsettings": { // Whether or not the host is accepting new contracts. "acceptingcontracts": true, // The maximum size of a single download request from a renter. Each // download request has multiple round trips of communication that // exchange money. Larger batch sizes mean fewer round trips, but more // financial risk for the host - the renter can get a free batch when // downloading by refusing to provide a signature. "maxdownloadbatchsize": 17825792, // bytes // The maximum duration that a host will allow for a file contract. The // host commits to keeping files for the full duration under the threat // of facing a large penalty for losing or dropping data before the // duration is complete. The storage proof window of an incoming file // contract must end before the current height + maxduration. "maxduration": 25920, // blocks // The maximum size of a single batch of file contract revisions. The // renter can perform DoS attacks on the host by uploading a batch of // data then refusing to provide a signature to pay for the data. The // host can reduce this exposure by limiting the batch size. 
Larger // batch sizes allow for higher throughput as there is significant // communication overhead associated with performing a batch upload. "maxrevisebatchsize": 17825792, // bytes // The IP address or hostname (including port) that the host should be // contacted at. "netaddress": "123.456.789.0:9982", // The amount of unused storage capacity on the host in bytes. It // should be noted that the host can lie. "remainingstorage": 35000000000, // bytes // The smallest amount of data in bytes that can be uploaded or // downloaded when performing calls to the host. "sectorsize": 4194304, // bytes // The total amount of storage capacity on the host. It should be noted // that the host can lie. "totalstorage": 35000000000, // bytes // The unlock hash is the address at which the host can be paid when // forming file contracts. "unlockhash": "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789ab", // The storage proof window is the number of blocks that the host has // to get a storage proof onto the blockchain. The window size is the // minimum size of window that the host will accept in a file contract. "windowsize": 144, // blocks // The maximum amount of money that the host will put up as collateral // for storage that is contracted by the renter. "collateral": "57870370370", // hastings / byte / block // The maximum amount of collateral that the host will put into a // single file contract. "maxcollateral": "100000000000000000000000000000", // hastings // The price that a renter has to pay to create a contract with the // host. The payment is intended to cover transaction fees // for the file contract revision and the storage proof that the host // will be submitting to the blockchain. "contractprice": "30000000000000000000000000", // hastings // The price that a renter has to pay when downloading data from the // host. "downloadbandwidthprice": "250000000000000", // hastings / byte // The price that a renter has to pay to store files with the host. "storageprice": "231481481481", // hastings / byte / block // The price that a renter has to pay when uploading data to the host. "uploadbandwidthprice": "100000000000000", // hastings / byte // The revision number indicates to the renter what iteration of // settings the host is currently at. Settings are generally signed. // If the renter has multiple conflicting copies of settings from the // host, the renter can expect the one with the higher revision number // to be more recent. "revisionnumber": 0, // The version of external settings being used. This field helps // coordinate updates while preserving compatibility with older nodes. "version": "1.0.0" }, // The financial status of the host. "financialmetrics": { // Number of open file contracts. "contractcount": 2, // The amount of money that renters have given to the host to pay for // file contracts. The host is required to submit a file contract // revision and a storage proof for every file contract that gets created, // and the renter pays for the miner fees on these objects. "contractcompensation": "123", // hastings // The amount of money that renters have given to the host to pay for // file contracts which have not been confirmed yet. The potential // compensation becomes compensation after the storage proof is // submitted. "potentialcontractcompensation": "123", // hastings // The amount of storage collateral which the host has tied up in file // contracts. 
The host has to commit collateral to a file contract even // if there is no storage, but the locked collateral will be returned // even if the host does not submit a storage proof - the collateral is // not at risk; it is merely set aside so that it can be put at risk // later. "lockedstoragecollateral": "123", // hastings // The amount of revenue, including storage revenue and bandwidth // revenue, that has been lost due to failed file contracts and // failed storage proofs. "lostrevenue": "123", // hastings // The amount of collateral that was put up to protect data which has // been lost due to failed file contracts and missed storage proofs. "loststoragecollateral": "123", // hastings // The amount of revenue that the host stands to earn if all storage // proofs are submitted correctly and in time. "potentialstoragerevenue": "123", // hastings // The amount of money that the host has risked on file contracts. If // the host starts missing storage proofs, the host can forfeit up to // this many coins. In the event of a missed storage proof, locked // storage collateral gets returned, but risked storage collateral // does not get returned. "riskedstoragecollateral": "123", // hastings // The amount of money that the host has earned from storing data. This // money has been locked down by successful storage proofs. "storagerevenue": "123", // hastings // The amount of money that the host has spent on transaction fees when // submitting host announcements, file contract revisions, and storage // proofs. "transactionfeeexpenses": "123", // hastings // The amount of money that the host has made from renters downloading // their files. This money has been locked in by successful storage // proofs. "downloadbandwidthrevenue": "123", // hastings // The amount of money that the host stands to make from renters that // downloaded their files. The host will only realize this revenue if // the host successfully submits storage proofs for the related file // contracts. "potentialdownloadbandwidthrevenue": "123", // hastings // The amount of money that the host stands to make from renters that // uploaded files. The host will only realize this revenue if the host // successfully submits storage proofs for the related file contracts. "potentialuploadbandwidthrevenue": "123", // hastings // The amount of money that the host has made from renters uploading // their files. This money has been locked in by successful storage // proofs. "uploadbandwidthrevenue": "123" // hastings }, // The settings of the host. Most interactions between the user and the // host occur by changing the internal settings. "internalsettings": { // When set to true, the host will accept new file contracts if the // terms are reasonable. When set to false, the host will not accept new // file contracts at all. "acceptingcontracts": true, // The maximum size of a single download request from a renter. Each // download request has multiple round trips of communication that // exchange money. Larger batch sizes mean fewer round trips, but more // financial risk for the host - the renter can get a free batch when // downloading by refusing to provide a signature. "maxdownloadbatchsize": 17825792, // bytes // The maximum duration of a file contract that the host will accept. // The storage proof window must end before the current height + // maxduration. "maxduration": 25920, // blocks // The maximum size of a single batch of file contract revisions.
The // renter can perform DoS attacks on the host by uploading a batch of // data then refusing to provide a signature to pay for the data. The // host can reduce this exposure by limiting the batch size. Larger // batch sizes allow for higher throughput as there is significant // communication overhead associated with performing a batch upload. "maxrevisebatchsize": 17825792, // bytes // The IP address or hostname (including port) that the host should be // contacted at. If left blank, the host will automatically figure out // its ip address and use that. If given, the host will use the address // given. "netaddress": "123.456.789.0:9982", // The storage proof window is the number of blocks that the host has // to get a storage proof onto the blockchain. The window size is the // minimum size of window that the host will accept in a file contract. "windowsize": 144, // blocks // The maximum amount of money that the host will put up as collateral // per byte per block of storage that is contracted by the renter. "collateral": "57870370370", // hastings / byte / block // The total amount of money that the host will allocate to collateral // across all file contracts. "collateralbudget": "2000000000000000000000000000000", // hastings // The maximum amount of collateral that the host will put into a // single file contract. "maxcollateral": "100000000000000000000000000000", // hastings // The minimum price that the host will demand from a renter when // forming a contract. Typically this price is to cover transaction // fees on the file contract revision and storage proof, but can also // be used if the host has a low amount of collateral. The price is a // minimum because the host may automatically adjust the price upwards // in times of high demand. "mincontractprice": "30000000000000000000000000", // hastings // The minimum price that the host will demand from a renter when the // renter is downloading data. If the host is saturated, the host may // increase the price from the minimum. "mindownloadbandwidthprice": "250000000000000", // hastings / byte // The minimum price that the host will demand when storing data for // extended periods of time. If the host is low on space, the price of // storage may be set higher than the minimum. "minstorageprice": "231481481481", // hastings / byte / block // The minimum price that the host will demand from a renter when the // renter is uploading data. If the host is saturated, the host may // increase the price from the minimum. "minuploadbandwidthprice": "100000000000000" // hastings / byte }, // Information about the network, specifically various ways in which // renters have contacted the host. "networkmetrics": { // The number of times that a renter has attempted to download // something from the host. "downloadcalls": 0, // The number of calls that have resulted in errors. A small number of // errors are expected, but a large number of errors indicate either // buggy software or malicious network activity. Usually buggy // software. "errorcalls": 1, // The number of times that a renter has tried to form a contract with // the host. "formcontractcalls": 2, // The number of times that a renter has tried to renew a contract with // the host. "renewcalls": 3, // The number of times that the renter has tried to revise a contract // with the host. "revisecalls": 4, // The number of times that a renter has queried the host for the // host's settings. The settings include the price of bandwidth, which // is a price that can adjust every few minutes. 
This value is usually // very high compared to the others. "settingscalls": 5, // The number of times that a renter has attempted to use an // unrecognized call. Larger numbers typically indicate buggy software. "unrecognizedcalls": 6 }, // Information about the health of the host. // connectabilitystatus is one of "checking", "connectable", // or "not connectable", and indicates if the host can connect to // itself on its configured NetAddress. "connectabilitystatus": "checking", // workingstatus is one of "checking", "working", or "not working" // and indicates if the host is being actively used by renters. "workingstatus": "checking" } ``` #### /host [POST] configures hosting parameters. All parameters are optional; unspecified parameters will be left unchanged. ###### Query String Parameters ``` // When set to true, the host will accept new file contracts if the // terms are reasonable. When set to false, the host will not accept new // file contracts at all. acceptingcontracts // Optional, true / false // The maximum size of a single download request from a renter. Each // download request has multiple round trips of communication that // exchange money. Larger batch sizes mean fewer round trips, but more // financial risk for the host - the renter can get a free batch when // downloading by refusing to provide a signature. maxdownloadbatchsize // Optional, bytes // The maximum duration of a file contract that the host will accept. // The storage proof window must end before the current height + // maxduration. maxduration // Optional, blocks // The maximum size of a single batch of file contract revisions. The // renter can perform DoS attacks on the host by uploading a batch of // data then refusing to provide a signature to pay for the data. The // host can reduce this exposure by limiting the batch size. Larger // batch sizes allow for higher throughput as there is significant // communication overhead associated with performing a batch upload. maxrevisebatchsize // Optional, bytes // The IP address or hostname (including port) that the host should be // contacted at. If left blank, the host will automatically figure out // its ip address and use that. If given, the host will use the address // given. netaddress // Optional // The storage proof window is the number of blocks that the host has // to get a storage proof onto the blockchain. The window size is the // minimum size of window that the host will accept in a file contract. windowsize // Optional, blocks // The maximum amount of money that the host will put up as collateral // per byte per block of storage that is contracted by the renter. collateral // Optional, hastings / byte / block // The total amount of money that the host will allocate to collateral // across all file contracts. collateralbudget // Optional, hastings // The maximum amount of collateral that the host will put into a // single file contract. maxcollateral // Optional, hastings // The minimum price that the host will demand from a renter when // forming a contract. Typically this price is to cover transaction // fees on the file contract revision and storage proof, but can also // be used if the host has a low amount of collateral. The price is a // minimum because the host may automatically adjust the price upwards // in times of high demand. mincontractprice // Optional, hastings // The minimum price that the host will demand from a renter when the // renter is downloading data. 
If the host is saturated, the host may // increase the price from the minimum. mindownloadbandwidthprice // Optional, hastings / byte // The minimum price that the host will demand when storing data for // extended periods of time. If the host is low on space, the price of // storage may be set higher than the minimum. minstorageprice // Optional, hastings / byte / block // The minimum price that the host will demand from a renter when the // renter is uploading data. If the host is saturated, the host may // increase the price from the minimum. minuploadbandwidthprice // Optional, hastings / byte ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /host/announce [POST] Announce the host to the network as a source of storage. Generally only needs to be called once. ###### Query String Parameters ``` // The address to be announced. If no address is provided, the automatically // discovered address will be used instead. netaddress string // Optional ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /host/storage [GET] gets a list of folders tracked by the host's storage manager. ###### JSON Response ```javascript { "folders": [ { // Absolute path to the storage folder on the local filesystem. "path": "/home/foo/bar", // Maximum capacity of the storage folder. The host will not store more // than this many bytes in the folder. This capacity is not checked // against the drive's remaining capacity. Therefore, you must manually // ensure the disk has sufficient capacity for the folder at all times. // Otherwise you risk losing renter's data and failing storage proofs. "capacity": 50000000000, // bytes // Unused capacity of the storage folder. "capacityremaining": 100000, // bytes // Number of failed disk read & write operations. A large number of // failed reads or writes indicates a problem with the filesystem or // drive's hardware. "failedreads": 0, "failedwrites": 1, // Number of successful read & write operations. "successfulreads": 2, "successfulwrites": 3 } ] } ``` #### /host/storage/folders/add [POST] adds a storage folder to the manager. The manager may not check that there is enough space available on-disk to support as much storage as requested ###### Query String Parameters ``` // Local path on disk to the storage folder to add. path // Required // Initial capacity of the storage folder. This value isn't validated so it is // possible to set the capacity of the storage folder greater than the capacity // of the disk. Do not do this. size // bytes, Required ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /host/storage/folders/remove [POST] remove a storage folder from the manager. All storage on the folder will be moved to other storage folders, meaning that no data will be lost. If the manager is unable to save data, an error will be returned and the operation will be stopped. ###### Query String Parameters ``` // Local path on disk to the storage folder to remove. path // Required // If `force` is true, the storage folder will be removed even if the data in // the storage folder cannot be moved to other storage folders, typically // because they don't have sufficient capacity. If `force` is true and the data // cannot be moved, data will be lost. force // bool, Optional, default is false ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). 
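For reference, here is a hedged Go sketch of driving the folder endpoints above programmatically. The helper name is hypothetical; the sketch assumes a daemon on the default `localhost:9980` API address and the `Sia-Agent` user agent the siad API expects:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// addStorageFolder POSTs to /host/storage/folders/add. The query parameters
// carry the folder path and its capacity in bytes, as documented above.
func addStorageFolder(path string, sizeBytes uint64) error {
	vals := url.Values{}
	vals.Set("path", path)
	vals.Set("size", fmt.Sprint(sizeBytes))

	req, err := http.NewRequest("POST",
		"http://localhost:9980/host/storage/folders/add?"+vals.Encode(), nil)
	if err != nil {
		return err
	}
	req.Header.Set("User-Agent", "Sia-Agent") // required by the siad API

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}

func main() {
	// A 50 GB folder; make sure the disk actually has this much free space.
	if err := addStorageFolder("/home/foo/bar", 50e9); err != nil {
		fmt.Println("add folder failed:", err)
	}
}
```

The resize and delete endpoints below follow the same pattern, differing only in the route and query parameters.
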
#### /host/storage/folders/resize [POST] grows or shrinks a storage folder in the manager. The manager may not check that there is enough space on-disk to support growing the storage folder, but should gracefully handle running out of space unexpectedly. When shrinking a storage folder, any data in the folder that needs to be moved will be placed into other storage folders, meaning that no data will be lost. If the manager is unable to migrate the data, an error will be returned and the operation will be stopped. ###### Query String Parameters ``` // Local path on disk to the storage folder to resize. path // Required // Desired new size of the storage folder. This will be the new capacity of the // storage folder. newsize // bytes, Required ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /host/storage/sectors/delete/:___merkleroot___ [POST] deletes a sector, meaning that the manager will be unable to upload that sector and be unable to provide a storage proof on that sector. This endpoint is for removing the data entirely, and will remove instances of the sector appearing at all heights. The primary purpose is to comply with legal requests to remove data. ###### Path Parameters ``` // Merkleroot of the sector to delete. :merkleroot ``` ###### Response standard success or error response. See [#standard-responses](#standard-responses). #### /host/estimatescore [GET] returns the estimated HostDB score of the host using its current settings, combined with the provided settings. ###### JSON Response ```javascript { // estimatedscore is the estimated HostDB score of the host given the // settings passed to estimatescore. "estimatedscore": "123456786786786786786786786742133", // conversionrate is the likelihood given the settings passed to // estimatescore that the host will be selected by renters forming contracts. "conversionrate": 95 } ``` ###### Query String Parameters ``` acceptingcontracts // Optional, true / false maxdownloadbatchsize // Optional, bytes maxduration // Optional, blocks maxrevisebatchsize // Optional, bytes netaddress // Optional windowsize // Optional, blocks collateral // Optional, hastings / byte / block collateralbudget // Optional, hastings maxcollateral // Optional, hastings mincontractprice // Optional, hastings mindownloadbandwidthprice // Optional, hastings / byte minstorageprice // Optional, hastings / byte / block minuploadbandwidthprice // Optional, hastings / byte ``` Sia-1.3.0/doc/api/HostDB.md000066400000000000000000000415151313565667000152340ustar00rootroot00000000000000Host DB API =========== This document contains detailed descriptions of the hostdb's API routes. For an overview of the hostdb's API routes, see [API.md#host-db](/doc/API.md#host-db). For an overview of all API routes, see [API.md](/doc/API.md) There may be functional API calls which are not documented. These are not guaranteed to be supported beyond the current release, and should not be used in production. Overview -------- The hostdb maintains a database of all hosts known to the network. The database identifies hosts by their public key and keeps track of metrics such as price.
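As a quick orientation before the route listing, here is a hedged Go sketch of querying the first route below (`/hostdb/active`) with its `numhosts` parameter. Like the other API sketches in these docs, it assumes the default `localhost:9980` address and the `Sia-Agent` user agent, and its struct mirrors only the fields it prints:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// activeHosts mirrors only the fields of the /hostdb/active response that
// this example prints; the full host entry is documented below.
type activeHosts struct {
	Hosts []struct {
		NetAddress       string `json:"netaddress"`
		RemainingStorage uint64 `json:"remainingstorage"`
	} `json:"hosts"`
}

func main() {
	// Ask for the 5 highest-ranked active hosts.
	req, err := http.NewRequest("GET", "http://localhost:9980/hostdb/active?numhosts=5", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("User-Agent", "Sia-Agent") // required by the siad API

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var ah activeHosts
	if err := json.NewDecoder(resp.Body).Decode(&ah); err != nil {
		panic(err)
	}
	for _, h := range ah.Hosts {
		fmt.Printf("%s: %d bytes free\n", h.NetAddress, h.RemainingStorage)
	}
}
```
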
Index ----- | Request | HTTP Verb | Examples | | ------------------------------------------------------- | --------- | ----------------------------- | | [/hostdb/active](#hostdbactive-get-example) | GET | [Active hosts](#active-hosts) | | [/hostdb/all](#hostdball-get-example) | GET | [All hosts](#all-hosts) | | [/hostdb/hosts/___:pubkey___](#hostdbhosts-get-example) | GET | [Hosts](#hosts) | #### /hostdb/active [GET] [(example)](#active-hosts) lists all of the active hosts known to the renter, sorted by preference. ###### Query String Parameters ``` // Number of hosts to return. The actual number of hosts returned may be less // if there are insufficient active hosts. Optional, the default is all active // hosts. numhosts ``` ###### JSON Response ```javascript { "hosts": [ { // true if the host is accepting new contracts. "acceptingcontracts": true, // Maximum number of bytes that the host will allow to be requested by a // single download request. "maxdownloadbatchsize": 17825792, // Maximum duration in blocks that a host will allow for a file contract. // The host commits to keeping files for the full duration under the // threat of facing a large penalty for losing or dropping data before // the duration is complete. The storage proof window of an incoming file // contract must end before the current height + maxduration. // // There is a block approximately every 10 minutes. // e.g. 1 day = 144 blocks "maxduration": 25920, // Maximum size in bytes of a single batch of file contract // revisions. Larger batch sizes allow for higher throughput as there is // significant communication overhead associated with performing a batch // upload. "maxrevisebatchsize": 17825792, // Remote address of the host. It can be an IPv4, IPv6, or hostname, // along with the port. IPv6 addresses are enclosed in square brackets. "netaddress": "123.456.789.0:9982", // Unused storage capacity the host claims it has, in bytes. "remainingstorage": 35000000000, // Smallest amount of data in bytes that can be uploaded or downloaded to // or from the host. "sectorsize": 4194304, // Total amount of storage capacity the host claims it has, in bytes. "totalstorage": 35000000000, // Address at which the host can be paid when forming file contracts. "unlockhash": "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789ab", // A storage proof window is the number of blocks that the host has to // get a storage proof onto the blockchain. The window size is the // minimum size of window that the host will accept in a file contract. "windowsize": 144, // Public key used to identify and verify hosts. "publickey": { // Algorithm used for signing and verification. Typically "ed25519". "algorithm": "ed25519", // Key used to verify signed host messages. "key": "RW50cm9weSBpc24ndCB3aGF0IGl0IHVzZWQgdG8gYmU=" } } ] } ``` #### /hostdb/all [GET] [(example)](#all-hosts) lists all of the hosts known to the renter. Hosts are not guaranteed to be in any particular order, and the order may change in subsequent calls. ###### JSON Response ```javascript { "hosts": [ { // true if the host is accepting new contracts. "acceptingcontracts": true, // Maximum number of bytes that the host will allow to be requested by a // single download request. "maxdownloadbatchsize": 17825792, // Maximum duration in blocks that a host will allow for a file contract. // The host commits to keeping files for the full duration under the // threat of facing a large penalty for losing or dropping data before // the duration is complete. 
The storage proof window of an incoming file
      // contract must end before the current height + maxduration.
      //
      // There is a block approximately every 10 minutes.
      // e.g. 1 day = 144 blocks
      "maxduration": 25920,

      // Maximum size in bytes of a single batch of file contract
      // revisions. Larger batch sizes allow for higher throughput as there is
      // significant communication overhead associated with performing a batch
      // upload.
      "maxrevisebatchsize": 17825792,

      // Remote address of the host. It can be an IPv4, IPv6, or hostname,
      // along with the port. IPv6 addresses are enclosed in square brackets.
      "netaddress": "123.456.789.0:9982",

      // Unused storage capacity the host claims it has, in bytes.
      "remainingstorage": 35000000000,

      // Smallest amount of data in bytes that can be uploaded or downloaded to
      // or from the host.
      "sectorsize": 4194304,

      // Total amount of storage capacity the host claims it has, in bytes.
      "totalstorage": 35000000000,

      // Address at which the host can be paid when forming file contracts.
      "unlockhash": "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789ab",

      // A storage proof window is the number of blocks that the host has to
      // get a storage proof onto the blockchain. The window size is the
      // minimum size of window that the host will accept in a file contract.
      "windowsize": 144,

      // Public key used to identify and verify hosts.
      "publickey": {
        // Algorithm used for signing and verification. Typically "ed25519".
        "algorithm": "ed25519",

        // Key used to verify signed host messages.
        "key": "RW50cm9weSBpc24ndCB3aGF0IGl0IHVzZWQgdG8gYmU="
      }
    }
  ]
}
```

#### /hostdb/hosts/___:pubkey___ [GET] [(example)](#hosts)

fetches detailed information about a particular host, including metrics regarding the score of the host within the database. It should be noted that each renter uses different metrics for selecting hosts, and that a good score in one hostdb does not mean that the host will be successful on the network overall.

###### Path Parameters
```
// The public key of the host. Each public key identifies a single host.
//
// Example Pubkey: ed25519:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef
:pubkey
```

###### JSON Response
```javascript
{
  "entry": {
    // true if the host is accepting new contracts.
    "acceptingcontracts": true,

    // Maximum number of bytes that the host will allow to be requested by a
    // single download request.
    "maxdownloadbatchsize": 17825792,

    // Maximum duration in blocks that a host will allow for a file contract.
    // The host commits to keeping files for the full duration under the
    // threat of facing a large penalty for losing or dropping data before
    // the duration is complete. The storage proof window of an incoming file
    // contract must end before the current height + maxduration.
    //
    // There is a block approximately every 10 minutes.
    // e.g. 1 day = 144 blocks
    "maxduration": 25920,

    // Maximum size in bytes of a single batch of file contract
    // revisions. Larger batch sizes allow for higher throughput as there is
    // significant communication overhead associated with performing a batch
    // upload.
    "maxrevisebatchsize": 17825792,

    // Remote address of the host. It can be an IPv4, IPv6, or hostname,
    // along with the port. IPv6 addresses are enclosed in square brackets.
    "netaddress": "123.456.789.0:9982",

    // Unused storage capacity the host claims it has, in bytes.
    "remainingstorage": 35000000000,

    // Smallest amount of data in bytes that can be uploaded or downloaded to
    // or from the host.
"sectorsize": 4194304, // Total amount of storage capacity the host claims it has, in bytes. "totalstorage": 35000000000, // Address at which the host can be paid when forming file contracts. "unlockhash": "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789ab", // A storage proof window is the number of blocks that the host has to // get a storage proof onto the blockchain. The window size is the // minimum size of window that the host will accept in a file contract. "windowsize": 144, // Public key used to identify and verify hosts. "publickey": { // Algorithm used for signing and verification. Typically "ed25519". "algorithm": "ed25519", // Key used to verify signed host messages. "key": "RW50cm9weSBpc24ndCB3aGF0IGl0IHVzZWQgdG8gYmU=" }, // The string representation of the full public key, used when calling // /hostdb/hosts. "publickeystring": "ed25519:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" }, // A set of scores as determined by the renter. Generally, the host's final // final score is all of the values multiplied together. Modified renters may // have additional criteria that they use to judge a host, or may ignore // certin criteia. In general, these fields should only be used as a loose // guide for the score of a host, as every renter sees the world differently // and uses different metrics to evaluate hosts. "scorebreakdown": { // The overall score for the host. Scores are entriely relative, and are // consistent only within the current hostdb. Between different machines, // different configurations, and different versions the absolute scores for // a given host can be off by many orders of magnitude. When displaying to a // human, some form of normalization with respect to the other hosts (for // example, divide all scores by the median score of the hosts) is // recommended. "score": 123456, // The multiplier that gets applied to the host based on how long it has // been a host. Older hosts typically have a lower penalty. "ageadjustment": 0.1234, // The multiplier that gets applied to the host based on how much // proof-of-burn the host has performed. More burn causes a linear increase // in score. "burnadjustment": 23.456, // The multiplier that gets applied to a host based on how much collateral // the host is offering. More collateral is typically better, though above // a point it can be detrimental. "collateraladjustment": 23.456, // The multipler that gets applied to a host based on previous interactions // with the host. A high ratio of successful interactions will improve this // hosts score, and a high ratio of failed interactions will hurt this // hosts score. This adjustment helps account for hosts that are on // unstable connections, don't keep their wallets unlocked, ran out of // funds, etc. "interactionadjustment": 0.1234, // The multiplier that gets applied to a host based on the host's price. // Lower prices are almost always better. Below a certain, very low price, // there is no advantage. "priceadjustment": 0.1234, // The multiplier that gets applied to a host based on how much storage is // remaining for the host. More storage remaining is better, to a point. "storageremainingadjustment": 0.1234, // The multiplier that gets applied to a host based on the uptime percentage // of the host. The penalty increases extremely quickly as uptime drops // below 90%. "uptimeadjustment": 0.1234, // The multiplier that gets applied to a host based on the version of Sia // that they are running. 
Versions get penalties if there are known bugs,
    // scaling limitations, performance limitations, etc. Generally, the most
    // recent version is always the one with the highest score.
    "versionadjustment": 0.1234
  }
}
```

Examples
--------

#### Active hosts

###### Request
```
/hostdb/active?numhosts=2
```

###### Expected Response Code
```
200 OK
```

###### Example JSON Response
```javascript
{
  "hosts": [
    {
      "acceptingcontracts": true,
      "maxdownloadbatchsize": 17825792,
      "maxduration": 25920,
      "maxrevisebatchsize": 17825792,
      "netaddress": "123.456.789.0:9982",
      "remainingstorage": 35000000000,
      "sectorsize": 4194304,
      "totalstorage": 35000000000,
      "unlockhash": "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789ab",
      "windowsize": 144,
      "publickey": {
        "algorithm": "ed25519",
        "key": "RW50cm9weSBpc24ndCB3aGF0IGl0IHVzZWQgdG8gYmU="
      },
      "publickeystring": "ed25519:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"
    },
    {
      "acceptingcontracts": true,
      "maxdownloadbatchsize": 17825792,
      "maxduration": 25920,
      "maxrevisebatchsize": 17825792,
      "netaddress": "123.456.789.1:9982",
      "remainingstorage": 314,
      "sectorsize": 4194304,
      "totalstorage": 314159265359,
      "unlockhash": "ba9876543210fedcba9876543210fedcba9876543210fedcba9876543210fedcba9876543210",
      "windowsize": 144,
      "publickey": {
        "algorithm": "ed25519",
        "key": "WWVzIEJydWNlIFNjaG5laWVyIGNhbiByZWFkIHRoaXM="
      },
      "publickeystring": "ed25519:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"
    }
  ]
}
```

#### All hosts

###### Request
```
/hostdb/all
```

###### Expected Response Code
```
200 OK
```

###### Example JSON Response
```javascript
{
  "hosts": [
    {
      "acceptingcontracts": false,
      "maxdownloadbatchsize": 17825792,
      "maxduration": 25920,
      "maxrevisebatchsize": 17825792,
      "netaddress": "123.456.789.2:9982",
      "remainingstorage": 314,
      "sectorsize": 4194304,
      "totalstorage": 314159265359,
      "unlockhash": "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
      "windowsize": 144,
      "publickey": {
        "algorithm": "ed25519",
        "key": "SSByYW4gb3V0IG9mIDMyIGNoYXIgbG9uZyBqb2tlcy4="
      },
      "publickeystring": "ed25519:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"
    },
    {
      "acceptingcontracts": true,
      "maxdownloadbatchsize": 17825792,
      "maxduration": 25920,
      "maxrevisebatchsize": 17825792,
      "netaddress": "123.456.789.0:9982",
      "remainingstorage": 35000000000,
      "sectorsize": 4194304,
      "totalstorage": 35000000000,
      "unlockhash": "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789ab",
      "windowsize": 144,
      "publickey": {
        "algorithm": "ed25519",
        "key": "RW50cm9weSBpc24ndCB3aGF0IGl0IHVzZWQgdG8gYmU="
      },
      "publickeystring": "ed25519:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"
    },
    {
      "acceptingcontracts": true,
      "maxdownloadbatchsize": 17825792,
      "maxduration": 25920,
      "maxrevisebatchsize": 17825792,
      "netaddress": "123.456.789.1:9982",
      "remainingstorage": 314,
      "sectorsize": 4194304,
      "totalstorage": 314159265359,
      "unlockhash": "ba9876543210fedcba9876543210fedcba9876543210fedcba9876543210fedcba9876543210",
      "windowsize": 144,
      "publickey": {
        "algorithm": "ed25519",
        "key": "WWVzIEJydWNlIFNjaG5laWVyIGNhbiByZWFkIHRoaXM="
      },
      "publickeystring": "ed25519:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"
    }
  ]
}
```

#### Hosts

###### Request
```
/hostdb/hosts/ed25519:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef
```

###### Expected Response Code
```
200 OK
```

###### Example JSON Response
```javascript
{
  "entry": {
    "acceptingcontracts": false,
    "maxdownloadbatchsize": 17825792,
"maxduration": 25920, "maxrevisebatchsize": 17825792, "netaddress": "123.456.789.2:9982", "remainingstorage": 314, "sectorsize": 4194304, "totalstorage": 314159265359, "unlockhash": "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", "windowsize": 144, "publickey": { "algorithm": "ed25519", "key": "SSByYW4gb3V0IG9mIDMyIGNoYXIgbG9uZyBqb2tlcy4=" } "publickeystring": "ed25519:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", }, "scorebreakdown": { "ageadjustment": 0.1234, "burnadjustment": 0.1234, "collateraladjustment": 23.456, "priceadjustment": 0.1234, "storageremainingadjustment": 0.1234, "uptimeadjustment": 0.1234, "versionadjustment": 0.1234, } } ``` Sia-1.3.0/doc/api/Miner.md000066400000000000000000000115761313565667000151670ustar00rootroot00000000000000Miner API ========= This document contains detailed descriptions of the miner's API routes. For an overview of the miner's API routes, see [API.md#miner](/doc/API.md#miner). For an overview of all API routes, see [API.md](/doc/API.md) There may be functional API calls which are not documented. These are not guaranteed to be supported beyond the current release, and should not be used in production. Overview -------- The miner provides endpoints for getting headers for work and submitting solved headers to the network. The miner also provides endpoints for controlling a basic CPU mining implementation. Index ----- | Route | HTTP verb | | ---------------------------------- | --------- | | [/miner](#miner-get) | GET | | [/miner/start](#minerstart-get) | GET | | [/miner/stop](#minerstop-get) | GET | | [/miner/header](#minerheader-get) | GET | | [/miner/header](#minerheader-post) | POST | #### /miner [GET] returns the status of the miner. ###### JSON Response ```javascript { // Number of mined blocks. This value is remembered after restarting. "blocksmined": 9001, // How fast the cpu is hashing, in hashes per second. "cpuhashrate": 1337, // true if the cpu miner is active. "cpumining": false, // Number of mined blocks that are stale, indicating that they are not // included in the current longest chain, likely because some other block at // the same height had its chain extended first. "staleblocksmined": 0, } ``` #### /miner/start [GET] starts a single threaded cpu miner. Does nothing if the cpu miner is already running. ###### Response standard success or error response. See [API.md#standard-responses](/doc/API.md#standard-responses). #### /miner/stop [GET] stops the cpu miner. Does nothing if the cpu miner is not running. ###### Response standard success or error response. See [API.md#standard-responses](/doc/API.md#standard-responses). #### /miner/header [GET] provides a block header that is ready to be grinded on for work. ###### Byte Response For efficiency the header for work is returned as a raw byte encoding of the header, rather than encoded to JSON. Blocks are mined by repeatedly changing the nonce of the header, hashing the header's bytes, and comparing the resulting hash to the target. The block with that nonce is valid if the hash is less than the target. If none of the 2^64 possible nonces result in a header with a hash less than the target, call `/miner/header [GET]` again to get a new block header with a different merkle root. The above process can then be repeated for the new block header. The other fields can generally be ignored. The parent block ID field is the hash of the parent block's header. Modifying this field will result in an orphan block. 
The timestamp is the time at which the block was mined and is set by the Sia Daemon. Modifying this field can result in an invalid block.

The merkle root is the merkle root of a merkle tree consisting of the timestamp, the miner outputs (one leaf per payout), and the transactions (one leaf per transaction). Modifying this field will result in an invalid block.

| Field           | Byte range within response | Byte range within header |
| --------------- | -------------------------- | ------------------------ |
| target          | [0-32)                     |                          |
| header          | [32-112)                   |                          |
| parent block ID | [32-64)                    | [0-32)                   |
| nonce           | [64-72)                    | [32-40)                  |
| timestamp       | [72-80)                    | [40-48)                  |
| merkle root     | [80-112)                   | [48-80)                  |

```
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx (returned bytes)
tttttttttttttttttttttttttttttttt (target)
                                hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh (header)
                                pppppppppppppppppppppppppppppppp (parent block ID)
                                                                nnnnnnnn (nonce)
                                                                        ssssssss (timestamp)
                                                                                mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm (merkle root)
```

#### /miner/header [POST]

submits a header that has passed the POW.

###### Request Body Bytes
For efficiency headers are submitted as raw byte encodings of the header in the body of the request, rather than as a query string parameter or path parameter. The request body should contain only the 80 bytes of the encoded header. The encoding is the same encoding used in the `/miner/header [GET]` endpoint. Refer to [#byte-response](#byte-response) for a detailed description of the byte encoding.

Sia-1.3.0/doc/api/Renter.md000066400000000000000000000243051313565667000153460ustar00rootroot00000000000000Renter API
==========

This document contains detailed descriptions of the renter's API routes. For an overview of the renter's API routes, see [API.md#renter](/doc/API.md#renter). For an overview of all API routes, see [API.md](/doc/API.md)

There may be functional API calls which are not documented. These are not guaranteed to be supported beyond the current release, and should not be used in production.

Overview
--------

The renter manages the user's files on the network. The renter's API endpoints expose methods for managing files on the network and managing the renter's allocated funds.

Index
-----

| Route                                                                   | HTTP verb |
| ----------------------------------------------------------------------- | --------- |
| [/renter](#renter-get)                                                  | GET       |
| [/renter](#renter-post)                                                 | POST      |
| [/renter/contracts](#rentercontracts-get)                               | GET       |
| [/renter/downloads](#renterdownloads-get)                               | GET       |
| [/renter/files](#renterfiles-get)                                       | GET       |
| [/renter/prices](#renter-prices-get)                                    | GET       |
| [/renter/delete/___*siapath___](#renterdeletesiapath-post)              | POST      |
| [/renter/download/___*siapath___](#renterdownloadsiapath-get)           | GET       |
| [/renter/downloadasync/___*siapath___](#renterdownloadasyncsiapath-get) | GET       |
| [/renter/rename/___*siapath___](#renterrenamesiapath-post)              | POST      |
| [/renter/upload/___*siapath___](#renteruploadsiapath-post)              | POST      |

#### /renter [GET]

returns the current settings along with metrics on the renter's spending.

###### JSON Response
```javascript
{
  // Settings that control the behavior of the renter.
  "settings": {
    // Allowance dictates how much the renter is allowed to spend in a given
    // period. Note that funds are spent on both storage and bandwidth.
    "allowance": {
      // Amount of money allocated for contracts. Funds are spent on both
      // storage and bandwidth.
"funds": "1234", // hastings // Number of hosts that contracts will be formed with. "hosts":24, // Duration of contracts formed, in number of blocks. "period": 6048, // blocks // If the current blockheight + the renew window >= the height the // contract is scheduled to end, the contract is renewed automatically. // Is always nonzero. "renewwindow": 3024 // blocks } }, // Metrics about how much the Renter has spent on storage, uploads, and // downloads. "financialmetrics": { // How much money, in hastings, the Renter has spent on file contracts, // including fees. "contractspending": "1234", // hastings // Amount of money spent on downloads. "downloadspending": "5678", // hastings // Amount of money spend on storage. "storagespending": "1234", // hastings // Amount of money spent on uploads. "uploadspending": "5678", // hastings // Amount of money in the allowance that has not been spent. "unspent": "1234" // hastings } } ``` #### /renter [POST] modify settings that control the renter's behavior. ###### Query String Parameters ``` // Number of hastings allocated for file contracts in the given period. funds // hastings // Number of hosts that contracts should be formed with. Files cannot be // uploaded to more hosts than you have contracts with, and it's generally good // to form a few more contracts than you need. hosts // Duration of contracts formed. Must be nonzero. period // block height // Renew window specifies how many blocks before the expiration of the current // contracts the renter will wait before renewing the contracts. A smaller // renew window means that Sia must be run more frequently, but also means // fewer total transaction fees. Storage spending is not affected by the renew // window size. renewwindow // block height ``` ###### Response standard success or error response. See [API.md#standard-responses](/doc/API.md#standard-responses). #### /renter/contracts [GET] returns active contracts. Expired contracts are not included. ###### JSON Response ```javascript { "contracts": [ { // Block height that the file contract ends on. "endheight": 50000, // block height // ID of the file contract. "id": "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef", // Address of the host the file contract was formed with. "netaddress": "12.34.56.78:9", // A signed transaction containing the most recent contract revision. "lasttransaction": {}, // Remaining funds left for the renter to spend on uploads & downloads. "renterfunds": "1234", // hastings // Size of the file contract, which is typically equal to the number of // bytes that have been uploaded to the host. "size": 8192 // bytes } ] } ``` #### /renter/downloads [GET] lists all files in the download queue. ###### JSON Response ```javascript { "downloads": [ { // Siapath given to the file when it was uploaded. "siapath": "foo/bar.txt", // Local path that the file will be downloaded to. "destination": "/home/users/alice", // Size, in bytes, of the file being downloaded. "filesize": 8192, // bytes // Number of bytes downloaded thus far. "received": 4096, // bytes // Time at which the download was initiated. "starttime": "2009-11-10T23:00:00Z", // RFC 3339 time // Error encountered while downloading, if it exists. "error": "" } ] } ``` #### /renter/files [GET] lists the status of all files. ###### JSON Response ```javascript { "files": [ { // Path to the file in the renter on the network. "siapath": "foo/bar.txt", // Size of the file in bytes. "filesize": 8192, // bytes // true if the file is available for download. 
Files may be available // before they are completely uploaded. "available": true, // true if the file's contracts will be automatically renewed by the // renter. "renewing": true, // Average redundancy of the file on the network. Redundancy is // calculated by dividing the amount of data uploaded in the file's open // contracts by the size of the file. Redundancy does not necessarily // correspond to availability. Specifically, a redundancy >= 1 does not // indicate the file is available as there could be a chunk of the file // with 0 redundancy. "redundancy": 5, // Percentage of the file uploaded, including redundancy. Uploading has // completed when uploadprogress is 100. Files may be available for // download before upload progress is 100. "uploadprogress": 100, // percent // Block height at which the file ceases availability. "expiration": 60000 } ] } ``` #### /renter/prices [GET] lists the estimated prices of performing various storage and data operations. ###### JSON Response ```javascript { // The estimated cost of downloading one terabyte of data from the // network. "downloadterabyte": "1234", // hastings // The estimated cost of forming a set of contracts on the network. This // cost also applies to the estimated cost of renewing the renter's set of // contracts. "formcontracts": "1234", // hastings // The estimated cost of storing one terabyte of data on the network for // a month, including accounting for redundancy. "storageterabytemonth": "1234", // hastings // The estimated cost of uploading one terabyte of data to the network, // including accounting for redundancy. "uploadterabyte": "1234", // hastings } ``` #### /renter/delete/___*siapath___ [POST] deletes a renter file entry. Does not delete any downloads or original files, only the entry in the renter. ###### Path Parameters ``` // Location of the file in the renter on the network. *siapath ``` ###### Response standard success or error response. See [API.md#standard-responses](/doc/API.md#standard-responses). #### /renter/download/___*siapath___ [GET] downloads a file to the local filesystem. The call will block until the file has been downloaded. ###### Path Parameters ``` // Location of the file in the renter on the network. *siapath ``` ###### Query String Parameters ``` // Location on disk that the file will be downloaded to. destination ``` ###### Response standard success or error response. See [API.md#standard-responses](/doc/API.md#standard-responses). #### /renter/downloadasync/___*siapath___ [GET] downloads a file to the local filesystem. The call will return immediately. ###### Path Parameters ``` *siapath ``` ###### Query String Parameters ``` destination ``` ###### Response standard success or error response. See [API.md#standard-responses](/doc/API.md#standard-responses). #### /renter/rename/___*siapath___ [POST] renames a file. Does not rename any downloads or source files, only renames the entry in the renter. An error is returned if `siapath` does not exist or `newsiapath` already exists. ###### Path Parameters ``` // Current location of the file in the renter on the network. *siapath ``` ###### Query String Parameters ``` // New location of the file in the renter on the network. newsiapath ``` ###### Response standard success or error response. See [API.md#standard-responses](/doc/API.md#standard-responses). #### /renter/upload/___*siapath___ [POST] uploads a file to the network from the local filesystem. ###### Path Parameters ``` // Location where the file will reside in the renter on the network. 
*siapath ``` ###### Query String Parameters ``` // The number of data pieces to use when erasure coding the file. datapieces // int // The number of parity pieces to use when erasure coding the file. Total // redundancy of the file is (datapieces+paritypieces)/datapieces. paritypieces // int // Location on disk of the file being uploaded. source // string - a filepath ``` ###### Response standard success or error response. See [API.md#standard-responses](/doc/API.md#standard-responses). Sia-1.3.0/doc/api/Transactionpool.md000066400000000000000000000062101313565667000172610ustar00rootroot00000000000000Transaction Pool API ========= This document contains detailed descriptions of the transaction pool's API routes. For an overview of the transaction pool's API routes, see [API.md#transactionpool](/doc/API.md#transactionpool). For an overview of all API routes, see [API.md](/doc/API.md) There may be functional API calls which are not documented. These are not guaranteed to be supported beyond the current release, and should not be used in production. Overview -------- The transaction pool provides endpoints for getting transactions currently in the transaction pool and submitting transactions to the transaction pool. Index ----- | Route | HTTP verb | | ------------------------------- | --------- | | [/tpool/fee](#tpoolfee-get) | GET | | [/tpool/raw/:id](#tpoolraw-get) | GET | | [/tpool/raw](#tpoolraw-post) | POST | #### /tpool/fee [GET] returns the minimum and maximum estimated fees expected by the transaction pool. ###### JSON Response ```javascript { "minimum": "1234", // hastings / byte "maximum": "5678" // hastings / byte } ``` #### /tpool/raw/:id [GET] returns the ID for the requested transaction and its raw encoded parents and transaction data. ###### JSON Response ```javascript { // id of the transaction "id": "124302d30a219d52f368ecd94bae1bfb922a3e45b6c32dd7fb5891b863808788", // raw, base64 encoded transaction data "transaction": "AQAAAAAAAADBM1ca/FyURfizmSukoUQ2S0GwXMit1iNSeYgrnhXOPAAAAAAAAAAAAQAAAAAAAABlZDI1NTE5AAAAAAAAAAAAIAAAAAAAAACdfzoaJ1MBY7L0fwm7O+BoQlFkkbcab5YtULa6B9aecgEAAAAAAAAAAQAAAAAAAAAMAAAAAAAAAAM7Ljyf0IA86AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAACgAAAAAAAACe0ZTbGbI4wAAAAAAAAAAAAAABAAAAAAAAAMEzVxr8XJRF+LOZK6ShRDZLQbBcyK3WI1J5iCueFc48AAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAA+z4P1wc98IqKxykTSJxiVT+BVbWezIBnIBO1gRRlLq2x/A+jIc6G7/BA5YNJRbdnqPHrzsZvkCv4TKYd/XzwBA==", "parents": "AQAAAAAAAAABAAAAAAAAAJYYmFUdXXfLQ2p6EpF+tcqM9M4Pw5SLSFHdYwjMDFCjAAAAAAAAAAABAAAAAAAAAGVkMjU1MTkAAAAAAAAAAAAgAAAAAAAAAAHONvdzzjHfHBx6psAN8Z1rEVgqKPZ+K6Bsqp3FbrfjAQAAAAAAAAACAAAAAAAAAAwAAAAAAAAAAzvNDjSrme8gwAAA4w8ODnW8DxbOV/JribivvTtjJ4iHVOug0SXJc31BdSINAAAAAAAAAAPGHY4699vggx5AAAC2qBhm5vwPaBsmwAVPho/1Pd8ecce/+BGv4UimnEPzPQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAACWGJhVHV13y0NqehKRfrXKjPTOD8OUi0hR3WMIzAxQowAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAABnt64wN1qxym/CfiMgOx5fg/imVIEhY+4IiiM7gwvSx8qtqKniOx50ekrGv8B+gTKDXpmm2iJibWTI9QLZHWAY=", } ``` #### /tpool/raw [POST] submits a raw transaction to the transaction pool, broadcasting it to the transaction pool's peers. 
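As a hedged illustration (assuming a local siad API at the default `127.0.0.1:9980`, the `Sia-Agent` user agent that siad requires, and that siad accepts these POST parameters in a form-encoded body as well as in the query string), a minimal Go sketch of submitting the two parameters documented below might look like:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strings"
)

// broadcast submits the base64-encoded parents and transaction blobs to
// /tpool/raw, mirroring the two query string parameters documented below.
func broadcast(parents, transaction string) error {
	form := url.Values{}
	form.Set("parents", parents)
	form.Set("transaction", transaction)

	req, err := http.NewRequest("POST", "http://127.0.0.1:9980/tpool/raw",
		strings.NewReader(form.Encode()))
	if err != nil {
		return err
	}
	// siad rejects requests that do not use the Sia-Agent user agent.
	req.Header.Set("User-Agent", "Sia-Agent")
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		body, _ := ioutil.ReadAll(resp.Body)
		return fmt.Errorf("tpool/raw: %s: %s", resp.Status, body)
	}
	return nil
}

func main() {
	// Placeholder blobs; real callers pass raw base64 data such as the
	// values shown in the /tpool/raw/:id example above.
	if err := broadcast("<base64 parents>", "<base64 transaction>"); err != nil {
		fmt.Println(err)
	}
}
```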
###### Query String Parameters [(with comments)](/doc/api/Transactionpool.md#query-string-parameters)
```
parents     string // raw base64 encoded transaction parents
transaction string // raw base64 encoded transaction
```

###### Response
standard success or error response. See [#standard-responses](#standard-responses).

Sia-1.3.0/doc/api/Wallet.md000066400000000000000000000566511313565667000153470ustar00rootroot00000000000000Wallet
======

This document contains detailed descriptions of the wallet's API routes. For an overview of the wallet's API routes, see [API.md#wallet](/doc/API.md#wallet). For an overview of all API routes, see [API.md](/doc/API.md)

There may be functional API calls which are not documented. These are not guaranteed to be supported beyond the current release, and should not be used in production.

Overview
--------

The wallet stores and manages siacoins and siafunds. The wallet's API endpoints expose methods for creating and loading wallets, locking and unlocking, sending siacoins and siafunds, and getting the wallet's balance.

You must create a wallet before you can use the wallet's API endpoints. You can create a wallet with the `/wallet/init` endpoint. Wallets are always encrypted on disk. Calls to some wallet API endpoints will fail until the wallet is unlocked. The wallet can be unlocked with the `/wallet/unlock` endpoint. Once the wallet is unlocked, calls to the API endpoints will succeed until the wallet is locked again with `/wallet/lock`, or Siad is restarted. The host and renter require the wallet to be unlocked.

Index
-----

| Route                                                            | HTTP verb |
| ---------------------------------------------------------------- | --------- |
| [/wallet](#wallet-get)                                           | GET       |
| [/wallet/033x](#wallet033x-post)                                 | POST      |
| [/wallet/address](#walletaddress-get)                            | GET       |
| [/wallet/addresses](#walletaddresses-get)                        | GET       |
| [/wallet/backup](#walletbackup-get)                              | GET       |
| [/wallet/init](#walletinit-post)                                 | POST      |
| [/wallet/init/seed](#walletinitseed-post)                        | POST      |
| [/wallet/lock](#walletlock-post)                                 | POST      |
| [/wallet/seed](#walletseed-post)                                 | POST      |
| [/wallet/seeds](#walletseeds-get)                                | GET       |
| [/wallet/siacoins](#walletsiacoins-post)                         | POST      |
| [/wallet/siafunds](#walletsiafunds-post)                         | POST      |
| [/wallet/siagkey](#walletsiagkey-post)                           | POST      |
| [/wallet/sweep/seed](#walletsweepseed-post)                      | POST      |
| [/wallet/transaction/___:id___](#wallettransactionid-get)        | GET       |
| [/wallet/transactions](#wallettransactions-get)                  | GET       |
| [/wallet/transactions/___:addr___](#wallettransactionsaddr-get)  | GET       |
| [/wallet/unlock](#walletunlock-post)                             | POST      |
| [/wallet/verify/address/:___addr___](#walletverifyaddress-get)   | GET       |
| [/wallet/changepassword](#walletchangepassword-post)             | POST      |

#### /wallet [GET]

returns basic information about the wallet, such as whether the wallet is locked or unlocked.

###### JSON Response
```javascript
{
  // Indicates whether the wallet has been encrypted or not. If the wallet
  // has not been encrypted, then no data has been generated at all, and the
  // first time the wallet is unlocked, the password given will be used as
  // the password for encrypting all of the data. 'encrypted' will only be
  // set to false if the wallet has never been unlocked before (the unlocked
  // wallet is still encrypted - but the encryption key is in memory).
  "encrypted": true,

  // Indicates whether the wallet is currently locked or unlocked. Some calls
  // become unavailable when the wallet is locked.
  "unlocked": true,

  // Indicates whether the wallet is currently rescanning the blockchain. This
  // will be true for the duration of calls to /unlock, /seeds, /init/seed,
  // and /sweep/seed.
  "rescanning": false,

  // Number of siacoins, in hastings, available to the wallet as of the most
  // recent block in the blockchain.
  "confirmedsiacoinbalance": "123456", // hastings, big int

  // Number of siacoins, in hastings, that are leaving the wallet according
  // to the set of unconfirmed transactions. Often this number appears
  // inflated, because outputs are frequently larger than the number of coins
  // being sent, and there is a refund. These coins are counted as outgoing,
  // and the refund is counted as incoming. The difference in balance can be
  // calculated using 'unconfirmedincomingsiacoins' - 'unconfirmedoutgoingsiacoins'
  "unconfirmedoutgoingsiacoins": "0", // hastings, big int

  // Number of siacoins, in hastings, that are entering the wallet according to
  // the set of unconfirmed transactions. This number is often inflated by
  // outgoing siacoins, because outputs are frequently larger than the amount
  // being sent. The refund will be included in the unconfirmed incoming
  // siacoins balance.
  "unconfirmedincomingsiacoins": "789", // hastings, big int

  // Number of siafunds available to the wallet as of the most recent block
  // in the blockchain.
  "siafundbalance": "1", // big int

  // Number of siacoins, in hastings, that can be claimed from the siafunds
  // as of the most recent block. Because the claim balance increases every
  // time a file contract is created, it is possible that the balance will
  // increase before any claim transaction is confirmed.
  "siacoinclaimbalance": "9001", // hastings, big int
}
```

#### /wallet/033x [POST]

loads a v0.3.3.x wallet into the current wallet, harvesting all of the secret keys. All spendable addresses in the loaded wallet will become spendable from the current wallet. An error will be returned if the given `encryptionpassword` is incorrect.

###### Query String Parameters
```
// Path on disk to the v0.3.3.x wallet to be loaded.
source

// Encryption key of the wallet.
encryptionpassword
```

###### Response
standard success or error response. See [API.md#standard-responses](/doc/API.md#standard-responses).

#### /wallet/address [GET]

gets a new address from the wallet generated by the primary seed. An error will be returned if the wallet is locked.

###### JSON Response
```javascript
{
  // Wallet address that can receive siacoins or siafunds. Addresses are 76 character long hex strings.
  "address": "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789ab"
}
```

#### /wallet/addresses [GET]

fetches the list of addresses from the wallet. If the wallet has not been created or unlocked, no addresses will be returned. After the wallet is unlocked, this call will continue to return its addresses even after the wallet is locked again.

###### JSON Response
```javascript
{
  // Array of wallet addresses owned by the wallet.
  "addresses": [
    "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789ab",
    "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
    "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
  ]
}
```

#### /wallet/backup [GET]

creates a backup of the wallet settings file. Though this can easily be done manually, the settings file is often in an unknown or difficult to find location. The /wallet/backup call can spare users the trouble of needing to find their wallet file. The destination file is overwritten if it already exists.
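For illustration, a minimal Go sketch of triggering a backup is shown below. It assumes a local siad API at the default `127.0.0.1:9980`, the `Sia-Agent` user agent that siad requires, and a hypothetical backup path; the `destination` parameter is documented below.

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Hypothetical backup location; the destination parameter is
	// documented below.
	dest := url.QueryEscape("/home/user/wallet.backup")

	req, err := http.NewRequest("GET",
		"http://127.0.0.1:9980/wallet/backup?destination="+dest, nil)
	if err != nil {
		panic(err)
	}
	// siad rejects requests that do not use the Sia-Agent user agent.
	req.Header.Set("User-Agent", "Sia-Agent")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("backup request status:", resp.Status)
}
```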
###### Query String Parameters
```
// path to the location on disk where the backup file will be saved.
destination
```

###### Response
standard success or error response. See [API.md#standard-responses](/doc/API.md#standard-responses).

#### /wallet/init [POST]

initializes the wallet. After the wallet has been initialized once, it does not need to be initialized again, and future calls to /wallet/init will return an error, unless the force flag is set. The encryption password is provided by the API call. If the password is blank, then the password will be set to the same as the seed.

###### Query String Parameters
```
// Password that will be used to encrypt the wallet. All subsequent calls
// should use this password. If left blank, the seed that gets returned will
// also be the encryption password.
encryptionpassword

// Name of the dictionary that should be used when encoding the seed. 'english'
// is the most common choice when picking a dictionary.
dictionary // Optional, default is english.

// boolean, when set to true /wallet/init will reset the wallet if one exists
// instead of returning an error. This allows API callers to reinitialize a new
// wallet.
force
```

###### JSON Response
```javascript
{
  // Wallet seed used to generate addresses that the wallet is able to spend.
  "primaryseed": "hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello"
}
```

#### /wallet/init/seed [POST]

initializes the wallet using a preexisting seed. After the wallet has been initialized once, it does not need to be initialized again, and future calls to /wallet/init/seed will return an error unless the force flag is set. The encryption password is provided by the API call. If the password is blank, then the password will be set to the same as the seed. Note that loading a preexisting seed requires scanning the blockchain to determine how many keys have been generated from the seed. For this reason, /wallet/init/seed can only be called if the blockchain is synced.

###### Query String Parameters
```
// Password that will be used to encrypt the wallet. All subsequent calls
// should use this password. If left blank, the seed that gets returned will
// also be the encryption password.
encryptionpassword

// Name of the dictionary that should be used when encoding the seed. 'english'
// is the most common choice when picking a dictionary.
dictionary // Optional, default is english.

// Dictionary-encoded phrase that corresponds to the seed being used to
// initialize the wallet.
seed

// boolean, when set to true /wallet/init/seed will reset the wallet if one exists
// instead of returning an error. This allows API callers to reinitialize a new
// wallet.
force
```

###### Response
standard success or error response. See [API.md#standard-responses](/doc/API.md#standard-responses).

#### /wallet/seed [POST]

gives the wallet a seed to track when looking for incoming transactions. The wallet will be able to spend outputs related to addresses created by the seed. The seed is added as an auxiliary seed, and does not replace the primary seed. Only the primary seed will be used for generating new addresses.

###### Query String Parameters
```
// Key used to encrypt the new seed when it is saved to disk.
encryptionpassword

// Name of the dictionary that should be used when encoding the seed. 'english'
// is the most common choice when picking a dictionary.
dictionary

// Dictionary-encoded phrase that corresponds to the seed being added to the
// wallet.
seed
```

###### Response
standard success or error response. See [API.md#standard-responses](/doc/API.md#standard-responses).

#### /wallet/seeds [GET]

returns a list of seeds in use by the wallet. The primary seed is the only seed that gets used to generate new addresses. This call is unavailable when the wallet is locked.

A seed is an encoded version of a 128 bit random seed. The output is 15 words chosen from a small dictionary as indicated by the input. The most common choice for the dictionary is going to be 'english'. The underlying seed is the same no matter what dictionary is used for the encoding. The encoding also contains a small checksum of the seed, to help catch simple mistakes when copying. The library [entropy-mnemonics](https://github.com/NebulousLabs/entropy-mnemonics) is used when encoding.

###### Query String Parameters
```
// Name of the dictionary that should be used when encoding the seed. 'english'
// is the most common choice when picking a dictionary.
dictionary
```

###### JSON Response
```javascript
{
  // Seed that is actively being used to generate new addresses for the wallet.
  "primaryseed": "hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello",

  // Number of addresses that remain in the primary seed until exhaustion has
  // been reached. Once exhaustion has been reached, new addresses will
  // continue to be generated but they will be more difficult to recover in the
  // event of a lost wallet file or encryption password.
  "addressesremaining": 2500,

  // Array of all seeds that the wallet references when scanning the blockchain
  // for outputs. The wallet is able to spend any output generated by any of
  // the seeds, however only the primary seed is being used to generate new
  // addresses.
  "allseeds": [
    "hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello",
    "foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo",
  ]
}
```

#### /wallet/siacoins [POST]

Function: Send siacoins to an address or set of addresses. The outputs are arbitrarily selected from addresses in the wallet. If 'outputs' is supplied, 'amount' and 'destination' must be empty. The number of outputs should not exceed 400; a transaction with more outputs may be too large to fit in the transaction pool.

###### Query String Parameters
```
// Number of hastings being sent. A hasting is the smallest unit in Sia. There
// are 10^24 hastings in a siacoin.
amount // hastings

// Address that is receiving the coins.
destination // address

// JSON array of outputs. The structure of each output is:
// {"unlockhash": "<address>", "value": "<hastings>"}
outputs
```

###### JSON Response
```javascript
{
  // Array of IDs of the transactions that were created when sending the coins.
  // The last transaction contains the output headed to the 'destination'.
  // Transaction IDs are 64 character long hex strings.
  "transactionids": [
    "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
    "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
    "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
  ]
}
```

#### /wallet/siafunds [POST]

sends siafunds to an address. The outputs are arbitrarily selected from addresses in the wallet.
Any siacoins available in the siafunds being sent (as well as the siacoins available in any siafunds that end up in a refund address) will become available to the wallet as siacoins after 144 confirmations. To access all of the siacoins in the siacoin claim balance, send all of the siafunds to an address in your control (this will give you all the siacoins, while still letting you control the siafunds).

###### Query String Parameters
```
// Number of siafunds being sent.
amount // siafunds

// Address that is receiving the funds.
destination // address
```

###### JSON Response
```javascript
{
  // Array of IDs of the transactions that were created when sending the coins.
  // The last transaction contains the output headed to the 'destination'.
  // Transaction IDs are 64 character long hex strings.
  "transactionids": [
    "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
    "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
    "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
  ]
}
```

#### /wallet/siagkey [POST]

Function: Load a key into the wallet that was generated by siag. Most siafunds are currently in addresses created by siag.

###### Query String Parameters
```
// Key that is used to encrypt the siag key when it is imported to the wallet.
encryptionpassword

// List of filepaths that point to the keyfiles that make up the siag key.
// There should be at least one keyfile per required signature. The filenames
// need to be comma separated (no spaces), which means filepaths that contain
// a comma are not allowed.
keyfiles
```

###### Response
standard success or error response. See [API.md#standard-responses](/doc/API.md#standard-responses).

#### /wallet/sweep/seed [POST]

Function: Scan the blockchain for outputs belonging to a seed and send them to an address owned by the wallet.

###### Query String Parameters
```
// Name of the dictionary that should be used when decoding the seed. 'english'
// is the most common choice when picking a dictionary.
dictionary // Optional, default is english.

// Dictionary-encoded phrase that corresponds to the seed being added to the
// wallet.
seed
```

###### JSON Response
```javascript
{
  // Number of siacoins, in hastings, transferred to the wallet as a result of
  // the sweep.
  "coins": "123456", // hastings, big int

  // Number of siafunds transferred to the wallet as a result of the sweep.
  "funds": "1", // siafunds, big int
}
```

#### /wallet/lock [POST]

locks the wallet, wiping all secret keys. After being locked, the keys are encrypted. Queries for the seed, to send siafunds, and related queries become unavailable. Queries concerning transaction history and balance are still available.

###### Response
standard success or error response. See [API.md#standard-responses](/doc/API.md#standard-responses).

#### /wallet/transaction/___:id___ [GET]

gets the transaction associated with a specific transaction id.

###### Path Parameters
```
// ID of the transaction being requested.
:id
```

###### JSON Response
```javascript
{
  "transaction": {
    // Raw transaction. The rest of the fields in the response are determined
    // from this raw transaction. It is left undocumented here as the processed
    // transaction (the rest of the fields in this object) is usually what is
    // desired.
    "transaction": {
      // See types.Transaction in https://github.com/NebulousLabs/Sia/blob/master/types/transactions.go
    },

    // ID of the transaction from which the wallet transaction was derived.
"transactionid": "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef", // Block height at which the transaction was confirmed. If the transaction // is unconfirmed the height will be the max value of an unsigned 64-bit // integer. "confirmationheight": 50000, // Time, in unix time, at which a transaction was confirmed. If the // transaction is unconfirmed the timestamp will be the max value of an // unsigned 64-bit integer. "confirmationtimestamp": 1257894000, // Array of processed inputs detailing the inputs to the transaction. "inputs": [ { // The id of the output being spent. "parentid": "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef", // Type of fund represented by the input. Possible values are // 'siacoin input' and 'siafund input'. "fundtype": "siacoin input", // true if the address is owned by the wallet. "walletaddress": false, // Address that is affected. For inputs (outgoing money), the related // address is usually not important because the wallet arbitrarily // selects which addresses will fund a transaction. "relatedaddress": "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789ab", // Amount of funds that have been moved in the input. "value": "1234", // hastings or siafunds, depending on fundtype, big int } ], // Array of processed outputs detailing the outputs of the transaction. // Outputs related to file contracts are excluded. "outputs": [ { // The id of the output that was created. "id": "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef", // Type of fund is represented by the output. Possible values are // 'siacoin output', 'siafund output', 'claim output', and 'miner // payout'. Siacoin outputs and claim outputs both relate to siacoins. // Siafund outputs relate to siafunds. Miner payouts point to siacoins // that have been spent on a miner payout. Because the destination of // the miner payout is determined by the block and not the transaction, // the data 'maturityheight', 'walletaddress', and 'relatedaddress' are // left blank. "fundtype": "siacoin output", // Block height the output becomes available to be spent. Siacoin // outputs and siafund outputs mature immediately - their maturity // height will always be the confirmation height of the transaction. // Claim outputs cannot be spent until they have had 144 confirmations, // thus the maturity height of a claim output will always be 144 larger // than the confirmation height of the transaction. "maturityheight": 50000, // true if the address is owned by the wallet. "walletaddress": false, // Address that is affected. For outputs (incoming money), the related // address field can be used to determine who has sent money to the // wallet. "relatedaddress": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // Amount of funds that have been moved in the output. "value": "1234", // hastings or siafunds, depending on fundtype, big int } ] } } ``` #### /wallet/transactions [GET] returns a list of transactions related to the wallet. ###### Query String Parameters ``` // Height of the block where transaction history should begin. startheight // block height // Height of of the block where the transaction history should end. If // 'endheight' is greater than the current height, all transactions up to and // including the most recent block will be provided. 
endheight // block height
```

###### JSON Response
```javascript
{
  // All of the confirmed transactions appearing between height 'startheight'
  // and height 'endheight' (inclusive).
  "confirmedtransactions": [
    {
      // See the documentation for '/wallet/transaction/:id' for more information.
    }
  ],

  // All of the unconfirmed transactions.
  "unconfirmedtransactions": [
    {
      // See the documentation for '/wallet/transaction/:id' for more information.
    }
  ]
}
```

#### /wallet/transactions/___:addr___ [GET]

returns all of the transactions related to a specific address.

###### Path Parameters
```
// Unlock hash (i.e. wallet address) whose transactions are being requested.
:addr
```

###### JSON Response
```javascript
{
  // Array of processed transactions that relate to the supplied address.
  "transactions": [
    {
      // See the documentation for '/wallet/transaction/:id' for more information.
    }
  ]
}
```

#### /wallet/unlock [POST]

unlocks the wallet. The wallet is capable of knowing whether the correct password was provided.

###### Query String Parameters
```
// Password that gets used to decrypt the file. Most frequently, the encryption
// password is the same as the primary wallet seed.
encryptionpassword string
```

###### Response
standard success or error response. See [API.md#standard-responses](/doc/API.md#standard-responses).

#### /wallet/verify/address/:addr [GET]

takes the address specified by :addr and returns a JSON response indicating if the address is valid.

###### JSON Response
```javascript
{
  // valid indicates if the address supplied to :addr is a valid UnlockHash.
  "valid": true
}
```

#### /wallet/changepassword [POST]

changes the wallet's encryption password.

###### Query String Parameter
```
// encryptionpassword is the wallet's current encryption password.
encryptionpassword

// newpassword is the new password for the wallet.
newpassword
```

###### Response
standard success or error response. See [#standard-responses](#standard-responses).

[Sia-1.3.0/doc/assets/codereview.png: binary PNG image data omitted]
Zv\d |˘}IsIm^)T0B5tüoc^f0 JIGx1o!y0F?k󝎾{q3Uun] 38fK=3 ԃOB>-wb[slYW%=U\|cX S)_<\].eİ ̺[Ƞ xry>ԯZXp`y9L!a .|vCAG'Ob+xnlTǑ>x&Gt*t=~mgEV?rk761ڎ;MY\?'CYژM'拸oͥ);ߊwq@Q 뺱!%N+xskV7L>H37 ߭{edZ]oKc `^z!o7Q~74塏on].k0&4}"oP}TƬ;I5tՓDgo(Vɋ þ;8Ida:ٜD?'$)+luL2o l~v|u:qo%rֺf8_[|ngV*DD>dsexXHl]FѶI6?ڀnK9VJ>K.nA,q׾%Klg` -(CaPEu+-SVeԄXbc_B_XE} k .B՗L$O.(XŹy |`~ӏ,,:fPY(iބK#3)3۠p||{a6R{ӋG^_j>!Zn9f]Y뾁m!9BgV´,d\d<PFi8pR\a_ ?pNe8gia=wXO;aɷwK+x܂r_!=^yͷ/9uj 4}1 ȴdP 킫k ͢vq)_5_\'^E/UF t^ >bO04eE1NeueEM딑y>SW޲hqK%.,݁޿yYͼ,+Ŵi)&!niڎnʷ!'J8K9Nny%F lC,&yEvTb]4d[Ȋ;nَr34'Ï.tUɫ6^䣭~Njs^ F~E\Ɋk8:0AkkU3 $Ľ{^㪵7CdO12I+N/mӮBaq{  , "ĉ)2eFc/kV}tќc˖M$v^˖^6O/8pvrJ(+*xI ];| lx~K`(TmL}6uɼsPQ^bn k6SpcK]H8r7sFSC xi?\.y0 fO%dQ9HNMO=t>0r#γimfSb5jVgcb_=N|9e|U;Tؒ 9PQ-1Ĝ Yp SJV#<}o& 'ѷo#W 쏩X3WۇwI=i_2}Jp6BCzG玿E׷nD\YUA~.gpQ'g.I +ذ3zͶ*RZš{w1vsrMghy KGXNy f9?Iu4gameWp{WE ٱ`gpB ˑQ#=R47=\~eFnX4иd|/&̫1-< o.FD\Ω z>R? J= e%VܒpbRdZxMKiQ=|_A8. [O ox#2!@ɦKn?$N#EA}%8"[XRk`pO㰕<$WHc.9Fd"X!F3ٸ(.lNy;~л㷖PW:VRtf22ql ^dn13q,,Iio J@8Hn+kPKEJkGqrr<|Fa{؉%yS_G}",y,"߲<01G1W"M4W+Z,>v滒w2 UeaGޣ*Q'w5MʳJEbE5j[D}h)$} 2F0e͏AaIм9wP mvntC[."SM$#l+SG5T Ńl7g :.omeD]2gÖK0Y!HsZA0SrfpzW2{ӟ1c茆0;$XgU˔}Y(:p% 9 EeerQ`#K-EpDپKV2=gOq4258C"[!=࢟c]aJ=lQd-.y.b?~;$ F#S9 3s*ƍ+Op)g0aA*J 70Xgfӝr4梂yCgfт#,ȳ;C͌O˸b1/Ex3^d,=QR0QdzVDw8ndYO.7 (J ׄ `z~۟lv9pgFtMNu]vކŎ+Mul?'0= ;L}[wP}qr߱p8{oO\>edAu&pI0яpAI>Y ; U6Hvr.qwi>Af{mґlw{q0*nwt#(}84OyqkNۋYsZ ^yT|ժkvͤW#-iIӢw%|qǒ萔RSg+*D'R| C2ZZd]Axec 7gxmI^ .ajz7a8qjվDY(yfKK^7m1z `@;%Ay%h8:$lhsDp!6dn-XQMV.Wy[QíHٯ\ODnj&b5a ߋl+PE/徏xt;)]0oG |*5Ɖf$"da~Ԇ`>.΂cő͐1"nosTso24p$3_:>wL'cnXзEx0W=◰on<6Ẇb |a} Xl'xn.. 㝎X\GN"--'!䡩Xz(dw6d:Si{:!Ă'6ࣳө0OVW۬hws)e/{qYfW3=; `^=9򸝠/xĭ7q?:yB%#{e)Ag[o7keZ! K棦60l%q,#17rŠB9eIJ_DX)2b}kgE~YM&q[Rny85A?k ^E+&u"́eK]bj"ҲAgtV%ƚs /W]$қx__ [i}btl-Xm_.ԙx<|˷Mnu]y'MۄLx-h1ʰ<^_ma7ml6S%D lhS S+/ tj⽂V!Iĭv*Hb3^|•lf@ƛd]5hb%TUkvxؽֳm2;WI<0oP1%g&lVn}f Z"m"pD>9 fOF0Z=J[I!?>ufMaXّ/8ƲǸC2eׇ [ 1kRJd~y" YqnA+Ѭ ʠ"9G{JEBBNGY<>bU|=ړ~Uȫdk!= wx]&vsYK%v2)!˓ǻ=h#y'u$yKDY!yshzr* ܘIKvK$r%qBoބTKh@[9 W\s+łkyPfHmRFcģ莈OvO}p[%k 7Uՠ\^P,\9v *r7466WӞG61QrG7xϡU׏#A <#⶚JKUfC||=\<cGJdwEB]PO۫{m?o%ѕg*a} ^ʗ?`IƏǿ/xKbZ:Mߵd84x_ӛpfd~Td80Sv#yVىG|W&_w~3}w|e0:VXc<ؑ;=8~sɗ_#6ݗѝ-; 'W}1W/bz{RGi/:mґJs^lq<^9{.Cܛy޲ߗ^~z? z܏[Bh8-p?6gA|F{*Gn .x#s gD{"S<=4]+ʋTb!Η,lm rߤNZP{޽:9&o>) 2\po@0m_Vj 0L&ef P{5;{ 6Q6 q<#噬cTqi>H-hɮB<Տ<!snEdhCXA"]4p*IQu+&ZbH@U>ž, [M\PboNsW5>~<êd| /$ $f5mG'-DEѸ"Ԏ~46two!V# ~*R_n'(\ՋfY! 2+3,_:*x LɯWT@ ̀&4EpyaT`$<4X$%hq6󺚗L9]-bfܧ&ɶL%?7;tE"dIq=e5"e.;0rp&x3}.YwOĹ&C$[l ̇!3x=U%&lҍ{ֲvE-b6mE=yp[V;-D^eTf tMu*2u(A%!nṚͧ&7u OxL[3~Z5!e#0ƻp9ީ=_[VĞ8i{p aћr  jmOɔ윬6E䪻4fپ ?9bA{[ KNP,`o+ůMx0 nsv_ຄ(᲌xM .NG;Y[F:>`Rg\wa`v,ʈ=43a'5PHf)T\b$q7a?g̣b-($QEɺM^xZA%imwXw)^IqnvMgEvQ&yv3\@ad2P #M˺g4ïc7!9YosI2/q%yd\&L: <Ļ{>6gK1+d9&A/Qz^ ƗB⥬)H$|l* *NhqkA`h d 5s#3 ^AF?PZ05Q1DTq'So Ž,C"OPHEJw~E~)Ek3C 5n: gDi݄i 5u+tp;,&3jqy<mopr%.ɃyxV*nXthV! -gH3pS*yXߎO(--=DP~4U P ;,gz=uƺw:QQlN"(W[>&!+qd\!#Ƒ"(C`Snϸ3D[.~ DMwV_D;@'w]]]7a\Si"|Ԉjέa^Kpz;o!scl0ɬD U R`綑<ʆ" 3l8ODߒ1¯Q }(D\&|A"("7@jQWɫ:E@>3 "_ơ#k(8d{܋}4p. xWzzOϊ"("("("L@T$("("("(@L1D"("("("p-UE@PE@PE@PPCL0)"("("("ă*AKiE@PE@PE@PE &TLJ("("("( xRZE@PE@PE@PE@ U8)"("("(@<!VPE@PE@PE@PbB@1D"("("("p-UE@PE@PE@PPCL0)"("("("ă*AKiE@PE@PE@PE &TLJ("("("( xRZE@PE@PE@PE@ U8)"("("(@<!v"@IDATVPE@PE@PE@PbB@1D"("("("p-UE@PE@PE@PPCL0)"("("("ă*AKiE@PE@PE@PE &RcR"EC9,\^ ?t)"("(""F _c>tv d̓enzz|gX/*99ޣG)%Ґ_Bý8;z,Û@Q'd 7w&C2]IZϦwdKGX]=Htm9yYy[hf rlQЇp|B_('.4wD}"KbV8x|b0^xIj;s){=(Z2맭?Ӵ( cl?zdWEk6::Oq[h4..D:7 vhƗ5!u@bYvz"dbrB¡b9OǓbS1v4E !t_oKwAy?Ub|jeYQΠ.r M=d<[vVb,21rfپ<]xMo3v?L'Kʋɹ~@b<(X9$;,գ("0)1*<Мw|a! գc@qROD`d ry. 
F)HK3 }~L 4 ><߲Rkח@MY ah0'qU"0Ѕ^*8eJ E:fO`tmrOVJAb_'qWڰ3 l>,oG3V)g`/XNXN`4TuRQk?,lSM]jC!ՈxʔȀsg" mcnL #5E$t♓k{FDTؒ 2%z ̢˖б>z1*}hdT\T,Jkjb\2J5(:Ą _5`<#Գd"n砝+]"ڃ2$g26]˻0y/1PUSM!RQbf*S"Et1: Qaei*:%ݎTݍpXd](NPE|"!\Aw LC=fC*M{<΃EKQ{g,$DE&t _%%>p3E ]hھ 26\2r/fO2ʟnGDa|@ZRad29{R*[hM++ b0dh2rbRdY cطw:ra$OF.ʖ,BŽ Eki91/)Hp$Y|C^J־:my?w>>'kmaX;9FhA0w~ sinF(@O{+’|,",.A_Uwdb |v1\V=i~E$'9JObBl<[\VE>Rp`7w=.FlN([zxwNcga鳊hRnAY Zi81aN|4]>qsҥlczTbaE@P>p:?c_n[zqC&] &Y _i !++>@V2~\0r[ LXSK̿Q6Hsp,.V6Hd -Y iBQ1N0#ݚ̭jvH9J9`&A.EyRP -XAS2` "JyXspG t)0++–L;C/-A\yɅRґ܉F/;3$42FrJUb\%aeÉA&X]K,/gd,Rgy,E N)_,XM͘k*E)śN](+?k(_fȗ4ŎAb*P!Me bmb*'릏uSa~<ݍYyJTQV6)^.Jp\xy\ *̊#Wb7kNއQG(jAҦ`V,sڽHўR? ݈A\ S O^,ZR:] y{bk[HEl "N-YE ^n aRK a)}z,+׭V6̼|qAgKf/DyOkClOpbn|BfD9OWڬBK a2;cN畘 G pgeaxTM_oأI.YD\]9W]\ًOw~E@P>LkPhv`љ06]' iw˯< |sയǃ +{0;?B!1:;q]Cp뛋UzyQ.*XڶoG'i}eWl yCx袠 |jME@ :d@xy)yua̗o)'hJs@p^FWKD9e/ؾBpeE6th=0J6l-b{49|_)E(hEPnjí88#2L),Fޞ) >2&K|C6V0$86`sr90nl%ax}li3g=XnamՄL,BK`kiPp6&B I*Êtl[{R1gtZ4dR,)rc1O}9bsMKF:2pT"+far-礍:tA Tctl϶yP*ëh4eh(,a;i= nfADl(c}-m.kb9v8 m)3ڼk)>ZMmBKs]1!K91֡p^>Zz:bg=A٧sU9wI{Ob"vF'Y(nIB" . i p1{clBQnJMX A/]"[ bm("Xt' ثEK+`4<+('}XA{cn5bIB{Gal )`cKݞXW%r'Ruξq\{ Mh޷k ^n?a_睖R=h/\kDljx̝W+1Y呁typ;4`NzsBq\%Of"U|1 qF%NnENǍ q PNO qi}Ȃ<2xȟ̎r<0 $٘, ae W 7prjc& bRa5I il%0$4.g fS0+7@'8?› ApA.l9k*}m>1ֺ*U#l[CӁ};bX\!$wR`)DʕeT'b-Db;Rˊ( awk!VB\d1Vrڹ9IT8ov1̓3Q|&wPYŒ1Ii(+'W-E{gzk8\ٲ%gibَh CV{v"_~TxMZ_'m&RS3!ϑ/Ũp=r}%t[aŚ'Ԍfdױ~wu``n1\v_w):)|mH׿{[54O|Jo&eZ!'&tTSkQ}\(Z]f0#A;<#ӷͱWc(4њ}wؼ0B9\T sV[~m"Y,u5\,CgEo2'GaΒ/C<֊,͖a/.p~>k ̘EQ82N(r:^S`f[%HVEd2Q#=fWe{w1O4U-VpV^L&O]nG?EsZ>cqEqs|I }ԘEt wK[byӍTbPyjtyŵer37h QiC%v@Y5,Jfӹ-m30uۆXtE@#0CĜM%Jit6"#ʪ[G%48 $ؙr.!yBQ+H*w sr80V&NܟܻG5\ q]C]CS@-DSdc%K&}o3@sɏPv4ENcɀjXEs߈!g "ޝ)Rhjؠ0aU԰hF_*Tɲ3Va䆸G9Ŵnp -Co$RxjOL'ss1jLDk8ʃ#sy[An!.*BVO*q֍ UÝu3evGѨf8Nsp f񳴆㶨53+&[)(y2]?m/79 P(R,ogX1I3'2fgUh%U}B~$EgTggSL=MnˡzҢU#<37e_Xp%v2Gt#J3=OwM9>pйNŃ]<3Y<)Xn>ݦ ߣ`jX֘d58V{402;GA"IrƘMqh&-X#(nf<=e9y&?8b:;Zw3E/l[z/1O("F r+ ntgLK˗`5|g̚,.?fgD7gs9AqvElL#D6;S 4]x2<%ņ*935y%.h|ܤOS;Nyfv(ࡑ1cA3-dɫ(C-x,܃3 sYC6[\6D|*8/N/3C '{c:a)d].z-;NymCIq}: ]YfuզsyH,~inSMs-DH/v]M/֤0FY<.{OWϦ) 2qO/#jHpT@g[ܺ?5+^7U{ Kqc|T3my[*Y*錶)LdZm]įLlG5}uJcxX1scfbD_ΜE͐|g~>n7eqX*03*));lbNOkHnDI÷Vvu;5cGͶhOBZ2ҧ㭏"|Xn@3*s =]Nb/> ԉg7),}1לk?59'2?Edz ̎s!3] OPW(zaGށ.Zy4UyYfa9>+…ڑ4EMxy0KI݅(8>C"8o) -LMhl~aהB|ZZdX̸[TRU3/Fm>5bbZx<Ŵx|EKWTM/Z 4Y<qMa҄WBŽ+טD|/P[cN_[]dr3vXCTR ua I-nr6K*:d7,b$Ŝe_aV+²"{"1$Ҏg,<}|#ɿtWI}uoYp!I0EإP~cG6Z0YRt< cߖLx.m֩ m( Qы6);٧i7d S$o`/u֡[/K*YGоC`֝16}5v(eY?ٚl)%~e[lymk7`}֚t21*^p|p-ݪ[:Cq__NjU > csN7 d,'A$iRcM],i"l7x,fg[ 3;4*(mOS8ɚ1 qf;K[q6x5MAdnY7.ctH$W7ynvLO8a!xsn|`O 0S<5dך ߋQ957w>LѺW OƍPȈťC8@d?,ZS"\'~d0j^h2\F ;MLaoߨ{k|C|2 9&X\ϊeNġf2†4,TcF;[j(T,j X<ϴ"(FOѮpAӏhPvY#fy޸:#EWLN*zez浫d5T'%+:Ⱥg_- 7{DY/G) :$ *&f0^KFRۼO}_虷階+S~-+dg;0fAT7^&ͣx^q;5YB9 Ưz(crs1ʫWlzCZP\Vy.=KfHO^ev.㜯֯%Kw\2p'Z^D7<%*|9TM" H$D@" H$W6WZ^" H$D@" H$Ew*wJ'i!84g42D@" H$D@"  "p \^lV} ы(qpP;0-L8V#)JWsq LjD-wE]E%H$D@" H$%"`[Ǯ ~/;^~WWv~.|E|c!bE,+.o6DM3(rD"2ʊRƋ]h;H^M$`^/J*DC0hvc Mw!(!P:_%ͣ׀o2|&>PšBHD@" H$D@"AA "#O6C" J i$I\)RpQ]LPvO!pXA+Axj qBr˃6%鱙e8P2x2q)'{hӄ53a  a9˰D@" H$D@"pI%ppx"ODptBp]Oӓ(,d73}/o7/Ǭu3_lSDf)O|qB+TRY [CgD@" H$D@" !pp t&E ^ `hh`B6mqmUء^LNn`jZ%N&W( $#ׇr=8uʉ(BBS}ѱE8;|QW@rע]:ª戂EW?Ne\1B}zyB%6a:P~#6ԉ !G[ߌVyT6HƦ04@!B́PQ?Um݊P *$ѼZxTt:hhTD@" H$D@" \ GAxaq̸>*/dşϞYJ:4hAݍ$A#ؤ@<4a}FuU@aH1Lfqk&S :=vnv5N@BPɧhk"nA!o] ՁJ%[6/SPhޯ Uztmێz5:xYY9yꧽ U@yCmMAB}KU%D@" H$DDC])X S&3vnDR?NՇۻ7d1:6#hܸMBK>8fl&nh64#882[(Иa#MC| }avuEQGS9'#U jE#jX߭j E_O".4vm*x!"&3N-?t+qӓCx}hRjg"1L0C.j r6a{D5Zq Bv}B!c"F7c;KnD(A[ЮKhWhJ+d ! 
jҤibX@i`Hs$%D@" H$DrD xaqCuwEx˂DYN"&N 8MbCG#Ha<Cr#GXd~ P8bT:MA< C`&E(R3 b0M *" 23 #UW5,(pA?3L0M!uPRib"Y\&X -m]өg!@(\I.D(JCvA}V("ShniÖNfҩ JjR*(2/u"g Lc-^^X,6 S|nFުTKL(H$D@" H$;Eyg GFs"nlÈ*_*Η: 9Cmj8u= jq,T,L?)!PSe~ؕ$ZOYfPCOjйAK:ृbQتZzO" H$D@" H \ n*#т%rQIF K% ƿ5$/y兗jb^\ r+=.)Yz$֙{\(#'@ ztF7/ X 0Ȁ1 NQo749q%M"t Ѱ^؏S3H9ؾ[Pu0 ?%̬a0fxD߾Ag\p _ޱi"$D@" H$@A Bܐ a+[M @x`tQJds4֝S謦̻%Lq*U0:~3=/Z|KuC(F&w 6CLg|n#ìr0̗.`1DtJU ֢iD|Z55pʑD4]K!=( :`OT+[?ޏY_+ZuY#!Hb2JaW ,@SI7|5nQm]e:1p#ؘNmm@/\DF#%D@" H$DR!_@C.s S&nSPxt#eN"PwU6ҴqTq">b0f>¼ͯ3ڷ)UoGh!m3!LZUL`] a~<^auJ #`&pRqLBh.إ$CQt3x|[!rHff0NDxǮP_ XvދjZx3"ԌQڑm;ԅI/871xInLj,D[VY" H$D@" H.t}ockq¬Oޜk"Uv-|L+gHI2i )gh*̉zn.}B=!nj+2>w#VÙI E4W`hQLjJ>a.>b[4&XT@9-C=4̨WkzҰlj(М!꜊!i![TtF#Hf (I``q SI3ͼQ Yz4+H$D@" H$ cJJMh|T=dngoAMػ`Ir{QJތɈ+#Kg.# ,)DMr.*.CHa0Qe2`,8|ŕH$D@" H$D`ka%D@" H$D@"  ! ږeH$D@" H$EB@ .2D@" H$D@" |(YX5r+HduIL3pɲ64@5 CՍc:2IDSc`MEd"zqS5۠1;A4qqh6_}=*b傤7`~rwu{WӘL26WI:֚fTEBq}8 I_" H$ 7nk\z,իk[7p߯0uyе!|KDHOt+:UK"yEnCq1^.4o߅6aKHVډ",҉%;2^ s6D-%._3vmm 7dl #CG1-p%tbi)7E_g%!h2h8(Fƶv{]/MPǶ]*1S +A[vLje,7_.ncY} ~&KDB]X!P\n{ha0&5qq[mRc3= h; Qv[Ǣ# ,/"H$E@&hd!"ٙw%<[;ac8)NEhO鴈#8t#ƅ}p:=mD"6Xxj:xE1 v(.8dڪ*!h9QdžzCo8GJ/B˥LI!aC{/)r/Gw5 ͈d'J#XwxݘV6O9 <Șr̒>P Cao9);ͥ%gICZS88e{.۔Q>=6HQ7mlW:y {f0~dvP߀F8.ap.Ls7.>W" H$@ѕ{*{{18hBFsPU<ʔ`J|+%\6Axhz(}J᳍M{ BvTq6imMȞlUJ(eZwm36m%c8? _\3>ݥ 5Fdwבwg( RFcx&ؤF {>.pvy=WR!  4E1\lPeg}CP +VO.jd_DG$*hi OBC7(OXչܵhD@UaS#48B4P1 CRs:1呗OX@FR&]ܱMuMs;ٟ9! CF]M-h;BQ /*գX-˃6?pB-o9 :u+ƶ4&C&4&~;SN̨R£A:ThL>fvV|Էj/l9m5+=ØeVڙ6PYfʥ[cИBy#6Bx$wpߵaѱ#̝O8Mljb݆e#Ce3 @B޷Me}cۈ6!y#;B+siǦN+)T)֟C8E/TiiеsE6!l\IřKb2l;3wP MTz;EQ{cBn PisWD@" H`Rp&@{ eݷ :?^/+}炏^0JE@[eθzhS-q7:Ar[)Z89#~<u&l>U~\vi \wޠnn;NE,:yK@gk uovd D*u:b*4#iS6 mqjzdxlΧ*hkD] l:}iѹ]ؠG9mGx=yPnb[օ */ZV'&crǏV MЅ 1[]A t5jt~]n<̖>AfŶ\'M[M x݆&˷G64)7f:;UqZs+ =Y&OUiE÷~fo"li ][u&3;f"ĝYԕ ^A)Pg;[;lх\ӬulVֳ͆VJ8xEual2UY5U4Ig癹alRY UO Wmq?lL#t62q=(}&Kb3q 0Y~}/,f8aRS\Eps_E.jOMq!Nu853 'Uu푂 fuKzˆ "#ڣ\p$ڑ=5xmDpz\3dA|3D)l0`-iyJq}즶A d sۮJ<rS}ڭ:PS~ |<dy]x34jWUxYV7(^fVBz1VG%u6* 덫3T*@!DZlgf8fZ, ०Δ(̲sgܒeg5S_1L(]]*2jVW-ۆ]TӉ)E`cV[ 3AJ/D*SڐT[5c>JJwtjFkMiK$S6(o|x}1"F'hb5UPqFl}ݔq/9C 5ҨFLg"e.QZ)vBλsN{;`fXt!T6m ZA!+ޘkJrĵ u3jԛX2e}*"^,L_݇Z+X4M" H$ 5x]h!%nvݷPPӢyA^ūB!\q8Nr YpiZ[) 夭CeT{wc\ ,W LDG3K73\L&n&aĝB9<m]xOS5FH̉FjZ(d}eDզ$D:j]h!2 CYg']1+ #(hXn|e 61޾/qmjvc8lz=pcָcNܴ 4օpIi{Y;Ҧ5D{*HgLA6]ā_low8OuGctz@H9XK&ᵵ l3jp|wc|Qh8Ξc$k*!H$ Px]dwmPiӵ,KC7hإVU%ƧF1}{͒ǹkZygx@ŝ"c;YI } 2SFFiTPODdEyW{ >Zr"+$}5zf&v=!2V7g}QC8rG&Oa\Lq `ha jmŴrQS%vK3>~՚6$-2ZUO0 d0(lHHጶʶkp'65 ޾QZPGZ =AZxtפMHO ftC1,҄Vkض1HMϣWkvo3.0>y{F '-U7\LlfdgBkCΠQrȰ} `)Tqb'z`L[:~g# ,0P(4V(M^DKT}b~Isx:T:D@" \Tqop76Ww_pW>t|e+|֔ΏΥ5FfwVW0~f2tV`erpFo>S~90L>ۆkXSOcٯO%~sD=ޏ3ha6č{y6;_exO-nʏ278)p82x3S|o=bO3^J)1R^_1Rу:K'#+J~0݌~ lbqO׏ptlBfVĶV2v4D@" |p| eNo.%/`.=$]BUnCsG'>|gw\{&MvՅO]Y D+PYyR թ~Zz7t܃;u҉塐N|oF '?هU]$fd~i$Հõ E KP(ߕ?њELbsRsg) qnB%;? ¿yZ71//;/c=8:<}is7?-\Q) zv!H8a.|U2nXDc1/RV? 
O{jPۘo}%4w*CߴN^H$S.w*i$"p)n,–w}DK]lR *VrH0˟⮡!\nTs Oh% s>^AڛNAj&[ιM$7/&J̱qT\6Ѐk5WaBz8):0}1kSz=-k]oĈ!SFR2D@" \E\\U(&h:S?[?| ԳĞ8TgzfL| Ce4nڶN9!q!RH|h}>2X쉯>qqkō Wdޏ/Pe_ZTUDOIMd0,&D 䟊;{p[.9p0&mJܼM1SEQ+K~kyX,<vmFVST\i !9= 4GXŠA fq>OEF;sH{QN3)Wn2<xli| {om@ ==ͳ}0oك4>ҡ1~c8s8 Ǘ )eir`KR;X ãI>ڎq˿ 1F1"Km5?yN@ { fu|FñĩZ<8:QX/ۭXcRO`~~)ͶA/}IMߧ8pc+ocZ;kn-?mGr٧Y%?7<M1?i g0؂+38o3 k_D;v"s8wxԃaWi1VBmyQr~ϕ6fR}v54i=PUAV|Ïy]^UJ-܃lG;;=o=Ӈ)M=TIpvv OZ[N-֫Fq_<#Xk{(ն$OV8LXx'ھmAy(5|O"+9)BLoHz7w͜86v(ʿ >yci%&00G,O6Z3mO'>Q[} "⯎kuTᤍm32D@" \H2WڵZhv}zG!kuTגqᮁ~⌱K,Wtס \ G~Tg]24B[3(O[?_8 dtSkr]O=R~sYD9чp]58u/T}n 5&xmN{+p`^}Hsу_1EY-υW0Jz ]36=\%#v\ö1{v5 :X&svѶS꒾vڶ%:zX׊u:C8'@zWo Y;sʰb%m~KA&ZRN!?gU:7|ċ OS@0۩.߯+hQOL( {^[Xw'/w֩X]X=GZEcgޱ?CkZcS@ρgU;~W> f]zpýt7ǐ9ߟ쌵z$5?7D D3\zg;H>J$)pU?Bx {o +.<0펝q*Ow}ی/΃=5aC /rG{}󸥋 .x#>Cy%~m3OH0Q %Ι0i ~x%Ș]n}BC#=s 3g7vƩ7~Sq\y WdceQv (Up#;C͞ox}C5'9$4q Ob|WUlz PoךK QDB-֦haY[ 3{lG@÷q1~ >)Lc2Y&1ÚgU9v+?$>mP$ޛZ/>eBǔ4i%Wj+jcO?jY/,诮"l8# jۦ2Y PÿUn!_ tmx;iG lssaϿMg-g|wxuLh?Vf apw܅[e(:G5?G%KzLKm~U#G7!c?اKÌv,Ln U߅ƝYĞ.c<~Vh/13 USA7#=FLв?q*^{> ~e{.ӏ9w*[sݶ6sԚ}w[>8 -պHa;hP@ڜwlAH!6HX87K=!l8G]`f}8DZBd]Jfv5r;DZĂw'1OnlVkùaz6aayH$ 㸚Ju)BK#s} 7#@5\XYH˪VMu!_tw_|y.9V4|&F䢎:`vB H@b 129d~X&֢uQ]۱f3SK[[~#{[3NZ~zxB*!= f4$AA|k~ў;,c\yCxqgƲ8aԅ}.2?|ޫNz&检&4#t+lmX\0vO@ZB# gb1Hsf71Jqٮ#J9 O*Ǻ&?_E kܥ It[qj[[cr;Vj]>̄UX'VZrRa%Dj@ w~tIp6lX8s3,Sg} zgZEɳ_a8m]ᗽ #ZKf=^ IUsy0nϏWS~d]f W,?LA"`]s~G;0ĄvܤT)y 6MP`v(-4AS׮ mAiO>\Qx׎)]+mӞ0}wɹuQ^ ـUOџXJe+[C=|H0xؚQ]f 1;p }jE^4VY9IN|׈6jxOQC`Uy/{=!Cccf ޛH?jfq5KsXsL-p1j -QýWV%k-Is<~Lk_o"-'Om9ė*MQsLz;E q5[0f ,iҼiYu" =ł̱v!%IID@"p#gtg`98"X563^'4 2NO:*pRzNq=i\M_mO$Ə % F/T`wͰ7A{j̀/HiC8 G9W[9!\q#*3,}TynڢT= ywF>rǏ ռF ]XqZ?;b֌UQ _ 44'W9BOW$oBhP!x'F~ddߤQ3l;{1ڽd Khm(.m38N f4W0~,-C ~qλ l4 7EP^ =OCl%N5v@y%0,t.2 fJuK󣟦Fڣ}qŗ{x+Vc\b-U}tOH'>Lu[Qnj57{82]G'~4<0]pJ-88(m-TgCyVf-ƨ:.XxCW{xE u s=Ee9iY\-cܝ6"j-M"3'K<9y@u+~CX#Z 7 h/Əȿ^OA2/qWӾg Y!8&Heh⺚L'FaAeOr[K՚VT(r6 TQS[Ej\;_7ۼvg=go O!3+vnAs/é/Pi7Er1+6*P}&Z߂:=m!v-=<SƇBТ'AWϿhDz\Ǽ+ˏ6nA@-$hm׵Z1k4Uj PP f|8_n|-mDxY,ͼCJ[@iűØ|_+>O-8ND^ƉA}yZ+m۳Z]*as;?T~Gr؆xJ#o[5z'1֞C`5\KFTSUE'%$"/Çz[au+>W,4gSt=K"|X$ ʝwA-cF#=ΞƸaySҠp3ǖi! 
hQq]Ax(ޟk cA Sa*>cg]" H>`3r/oq-;im+/BE;33v& >4~U@IkN߉s絟48w;~ƿk9V"鐞mgpŷᔣ!x+93Oe<>mx1F=yvU3 Z9ӗ7 5~&ciԭWqC.hmdrX.v Z d)מ=n9]\ׅ֟ L۟=@m;;_)̄_~][4c_f Z̗_g2G/1-J[l33bmCĬFMXӼAYԿY3=/#l3i86S$%fV'[d#l  grmAb" MֶB쌓!=_ Z8ٟDJbhh.NDmLRS["#q\KjRʯË_;+y?'!¸E)J$5=2A +.[CsPCA ULVbm|-2qڦ c+$4wYT՘ nFt+x5%h]2I$0R|YtD@" *x3P`w=?xuR,[D@" H$KG@  H$D@" H$@҆C K~uk]~slH- A@Ӆ@UҔH$D@" H$R`@L֡Ip#F*mQn-,}a J2Dq2Eiq:a1yv7fx'kn؀UJ$D@" H$)pXzY2+q|kVY;;=E]Ft ;YVVKPLxd U`^w e8D@" H$D@"pE!pqWxy3;soWj'Rm OKjL](RO'Z?ƎG;zwKg#o[$!D@" H$D@"p)9K"mp#7VCŊ zۘcՊ wj?57㖟S#j>2՟cS)yD^0ͼCםy6VK+ȓ!%3tt|b !}IwC&_|9dkϞc ϟƌf j'̼&ny,yˉ%H$D@" \Hg*Q@YԮu 5+ᴆZ[W +[֡E:uh{_[ ycZ?{h#@0(#F)ͻE2&KfX3;NvO&T.LfM51jWj&:FFI!xPhyMw ~s9g7䤣!eHNIMeO̬$" x KAcOMSUq2ʘ0_^׽%k$@$@$@$@$@$@Q K6ޣ_/Ģͥڤat{iۺ7UC_QTW"N*5İWP6 +k=9oMhJP㌤T7\>@${oβՈ 9/t* * bAITmFrd+K"9NC>"'3 d8FFG$@$@$@$@$@@-ĤYiղxR8SY+ѽe6QQo)5 2 A C2Yݢ]Y!R3Oa{OU_#]v ^ʵ5- <}IP}/Fm .geGTO|eIJ,2-^.Ź"L Ln6Ilrgt.Fz@zo_H(x>zPy'S,V@IDATBC_8N0Y1MY:| 1;p lyθ@}C3"     IJ&MNlه+ĸ 6a|8jzPB -ˀ^t OƇ߱_)DGq/Jk?ieX|_ATF}szenHmDQpo-GNvi2 \}T8Ĩj@H9iVz\\j= vŀ]{Qy|*R6^\kR&bf"Zg]uB{ <"yB~JlJ+KTuCzR=}܈»O"ReG3xHHHHH @C,YoxS2#؋X(sKݥfƾNS| \x.]<кǧp88ىѵyf2xLwBAㆉzF$@$@$@$@$0|'>k(E/zF(M_ )O‹@M,#]G0eC;Q1}0V鎽C6j'zgۏöJmOy4^ \!5]p2"Qv%7{m4;=:}ފKpm#.7p&"%d,D$@$@$@$@$@ L[;mޙ=wC uS"U&qXjj٘?QAt]5HHHHpN*Ŝ lY!f QfPq}4R09j     %*bIq h8i$ @ PsHHHHHHH        iH!n,Ln܇K\ک\HHHHH`JaJU3;;`Aj^4~( H!xHHHHHH`<pHxPeWc.1uxrÄIHHHHH%e1ߺ%/{A(.%Hr'{eo <{.P.^Vc&q6c,g͂iu_[n;p{;<){{ze- AQ77L"zXJ\r*Da nn%̂AKz<-'o*qIſuX`B^r}]o-ES5c$@$@$@$@$@$3l DC qVSeK {db@ =0e}Cb[FDdeZ9Rfavi2xV,HrJY!iY,z Ӱ =EE&NKZﭫK{UF~}a2 33oYJ*!S#     IX1rqbaȠpٷz~KpCOL;D! O~1* YҚ ވ; Em.|!q%£ũ1S3kV6v?OO\pd[wx؀ˮl(X $ܲ/?܅w?9ebAqG(Dl3F\~9L$^|D'     %*F bOm!z_hw<(XVVt+A^3*ȓǸX/'ĵElp0 uq'.C(tW/>σ~FO"\e˰@3zњlӿR %_,/`|w?,/ RΘ#b$06n'*0"󊕃 2`u.ֱٶowt<艖-Aa; wQl%T6yK!wS;r f̓vQHHHHHH rh 18 ')| jߜ;DKH^ٛ_Pa?oF-?息^~xn?tz1Svq(?<"^!     D(D!}@, xjX>1"| 4pdcwBO 9%@ U/)X ^&,PU.SCWLYr#'RdΈ$'     XWTƁ@܋%CdS4ַy55/)΋ʗ 5Ƈ⟙wsOÃ}{?-oXrfC4֪L+勿< p<#     &7 0~cB¶]K~_> DG#A~k emI-0h3&z 3/K5,|q:6Ko}żW!2xF$@$@$@$@$@1% /18pd KpExdB0%D;R1K,\2ZDj¼ȼD-"ńTlLX cFHHHHHHH pHE  cFHHHHHHH PH$@$@$@$@$@$@$@c&@Ø2       @ <%wcA,|ྌKgP[&`iP$@$@$@$@$@$@$0 PZeːgnW% IHHHHHH!0MWI=|e7R(8҆pZWHHHHHH&1Z8ĨrWl,LCDPQHHHHHHN:#s7.TĘ.#     *puMy}}yD$@$@$@$@$@$@*b]ɖUYXHHHHHHH`Rऑ1l,#q} ,ij1FHHHHHH&1Z8ĸrgja⠢NEK3ft$@$@$@$@$@$@mc\G) V"ψD%vå*bё LrT8ĺTn| \!ք  !1$/LsbLё LT8ĸoPHHHHHH:#@8F>+)lA 1ѐ #@CL*l}o5YK L=T8Ĥ8I-kzk6$HHHHHHHH` /S(*  I#@%1$@$@$@$@$@$@$0P0j%      )@s8LJ4Y)DB(-UJtx' kN8q:ʎ Hs$~5f\x~t9qB {(5_DOYr5S|iأŋpcq([t iqKfpGڬt+Usߎ/|~6?uHHNwj mC~vQwDy:ߞ]9) @$ph+H;>tDqCStI`0U{R"g>V=GuBt˿4wJIX7]8q,-eHs/'rU;Vu(3 k]T7u ilQZ4X{TgNqszw9RщϾ$:犇誩?WyJżO>`3mwϫxnJJ.8yk\kP4?ޗF9|t}>p ݮ1 Q8ۼ-\Te(cc9ޣSk*EpB(L5a؈BL{}.wq|WcrB-W۟ǥiێ+^KɪB;g'(IybM)%w;1MAL 2=! 
7߅+bu.YzfUn)Yd 眏+ *]8j>x]-16gT: G jV֖S@)xlPG` ʻy Rs IqD>c&(*Q׮,|kF#tف+=%N`{gDgx)nZ"J1;'6ʆqf>voX6})%[.VNƣ;¶Ua p}!0o #TˊNk)1Mflv>GcS/;1, X–-b|]9bg?JhDB+BzB6ۘ}kΛf ^XDp}q;B;9p{zӗcv~8pOƜ}qBTϮҞ+Z%th+mt߱y-3=CmhW TyQFضjK7k#1#  tYD}O)tbH(|sf땺:xd=/K7rA)K}- -]CO>ZOysQr/KO܋?ͻ^5Pemǟ RGe@ك{m2lH3W 7W-Hw"`Кp~K!_6ҩ#824AVâJ0M(xUH]w;^G:+:/L-)J!؊$1 ̗6ə_df+㑐nHBDd!OW5ğEͦwFsCD{)ܒ+:.T?n\b[[-l^z~YJ2sŗDm.{} _ ?A*Hvyߍd9H]r#   @T8o/i)}Hu>/w³Δ4MyZR,G2əG#],~h>YIhT(>[9"3,vj 0XVp|{[^ĥ#Ux%XHchsXW>`p+ !WJRj|O/+ra񛪬.c o9o'4>ㅯ~͙oQcuQr!NL $k3 25A %j IޮpH*2jW ɹbNnqN'!UX׼f%ɼr?=sEb fTGs$K Nh0F3Q3 FY^[+ڙRn9dH-g&Q~~̟Q|.mn۶0Mm-`R1MK4$Ȱ"" %p`lDdl$Q6nZJiErivB|zSe̅󪔻UL__d^Ne֙$sH{T<2Y!ԱE 3KRb2+CQFҐ~Ecwe^;/o@rLKM܉9­06D\ ^77y=AZmqIP 5 E᠔t$@$@$@!RRFa6~R&'}<HXMh"#{Q(ǴWbODz#Nw{ogjjy=B3+3gOeFfN/D|܉AA^Cܚ{mʆNIk d"4ߥ?bՏ=6C؞Makp>ЗPŸ+'n9\MKߖ㨪^9PjO04d?Z&mG^Ob֔IVIfoJ#3;qNڊMCzg'.J;'(b(4gmo}!ݣ F³c8_KcNQ8CbrK> ͝t{Ʀ 0Ҋ{ѧں>kyĩrJO~U;v!!?̴X]Q;{4/ {Dkם[qr#%# m^m](^c%]i0FY52^YOCD[r7h7SB8|1"j5sy6&)dף_͚Qk'ƽ*>)i:y芾KUCp_!ѨS+o  _j^ktHڐxR`dH(tplrї<XʆvԼ*.VaE@IKU]FYlsp&|sGBQ84↍>eÅuK!JD᧔}*Qm{6t'C!V7t$@$@$@_r};V&Ĭ|f,&˄{3y㝔#7~B9gbBZcr*Զ3O _p N6O?W>Tdɹ&իً$8Ĝ^zs %P i-.~KbѠπ~񄘃[wIFOjDh՗\C0^) 1PP>^gK_[i)o|!* JYՆզc"[#OHu2BXS2h]|JCuG{lNTƊGLȰjz MUIz) K _\PC 2$KP8LbdێQ۰ QF/^)`(ts] %S*O[ũp%teR2ܓn@Bzm?)͎{(?XlO}(kdmۡY88O:   H P)p)E\==9G>vG~Zrj}{a]hH>?D]':ʆ*|.jKs/Re)|cHNŭܠSDn=twQbXsrPՐ݉0 f̣w7syp~W Qr&r$FO ;T}35X\;&QR{&91o6}90%"l/6ʶmn}3ke/eqd>[SQ21 "2alm>N;(Im2 V>e[`l":EE̬QBPsuVeS#U0rwLi_ LL$WxHHH`8~ zb`**P–C"H'$^AD$ڄ Q#G٫ԯKVUtnG ղR*OCb4^$Ѝ@Lku'sZN ʄbg!WLwSe"kp ̘QWGzԇVopR(GƔ ?P ȟ'#ʪX)ֱOϗqCڌFϷvEMvD $ȖeGMרrVey2h<bZ4R=ʊyݷREcor\^LU x ttfϭ/=92XH &|GѶm 頶䢿]N$g\'.-G $@RJrြ[8rk>.f]j ܅CG>SCv!Xb¶ ïz_%p oftHCT2CGX($< Z=H=6p#B|Jkw|++U#.]~T=хXU++|s1am{z<}mr_#jG.k,[.yH$@$@$>/G@+SwUՄħ"esn.vfqٷ]VІ KCz<2#(x-W-2n] C82WY8HW(޳);@g_3zo4ė&4=C>efZ#'w[ϗGso{;Rqd>  mN.qF0?ed90FY_7M{NMܖR? Rх6.OkO"CzhsL.g&ڀD}2Q7 >!ȥtꂬGm 穧QQ(H,Q3Ir])*93=/CH]?);mi(3s ea!Lmbω`y2p>wPI_{^\9է SE6Ĭ̦PM(U2狴!0;HQmRC !+6& [p҆dۼWds>QOjP/DdI^ o]R8X\P𩃪NVf/IfƖbnMEVh#kۈ54. 
?*y^,/zɒsKݥԅ Ӂ|:QZx;^NA%2Jo0nzwF8&rSLfnwY+F=J*ZmXr *}n}\MZ-[eΘ3ɫ2<-})X;6~UڱRs2-@d7 MWY`;x˦p&}]gk-Ki~PPeR8}#%S5D.]8Qr0@6D&?]FX1^+lC!Kzz !1c ˊ +W( UGLk([> j%Y3UM=SM"){T ъ)anK@34%_EG5{4mن[}{аm}˭y+ %o '0702>awԅK5AMF0# 1~ДmE.zT*2&}J|vӖDP;ڰnՃ=s^k,ɫ7dė,7K{*qLGF?nȪ_tzdXyIa#\='maF%nhBݹբtUρsa猼,BuoKA8vO7WYh69X|m;Ի?a*((0S N굞Qi̶Q;kXu>MS6zsߚ{uf ھK/Y4f{{_oҁdkȔXSY{6 2._׶(.\>{stIϾZiEYOm>h#skJҍmHN\h8~a?}#_]\ϋ/'5E22|GV91Uʆ>{6޵ ~d'ϱ/mPfXE8-: xy4>8]LHc0M4S"3 ҋ%&7(Z琕!⤷k 0t5Ej#v('Bg%iK\1]vL|~#FD1iD;oxZ[/6g zƸtNdB˚ _ nG4{lN8id5|&ޗ?B i:z^_prx{Ua3 uB'd5!QDy÷Ց oAMX!NJNcm9 8n _baVߧռ/W%7@9ԤH|M.~?!m+K/Ɂ̷^ԗUm{;J*˜\dd%: ގ5ѐFxF93=oE3v IbȄxHH{ 0Lu]8?e})ĈX4_lhDx֜۳$_v/y^@*c"!E.jE3a9'5g f#' ژmeͭK v4H{k)-Esem4 3r;HGh@] 44c:CoIY5[HS6ؔ z)ittc #0jܼ\;vy/^:o?ۆLz|*6e)r(mh1F΍<ӉL!0M>ߊHFڶ?+iGeb@MUyHHH -x8U;Z[ĉFPDM89%ϰ@$rm|*skLs NPV$ªeb dGVRȶe%:$ge% ٦ ĉo(y9˱JTkD$&{z6c#F ⶭ_Gվ% $@$@$p}wbK@Ĥ Fb[mB bF#dIG5&F$@$@$@*d1HHHHHHH`2idKi` ;\pF\jeHHHHH&*b]YE(޼Ahx^/r00 кT Pbz=L=%_RYn:^ߋ')?&ٿ:R'     )H XWLtTey/òL?g!8IżOKtŴa)yZ͔UuSsuG?-+GT8X8x@$@$@$@$@$p !E/zCY+8US6t_zѯ4LNX'O.ZO#+&j|qGK)LeZ*lHHHHHEM ;u#J%`#eaSϳg*ew>)f13"     |pQĭYE[U#3 qmc 3PnZ",b!fo{ Oݩũ<[Q_}yťfb-)K}- -]R\'N>Z{^v,.ҙg>5Iz2 H03&c'     pQĥ4ؒSfcnz}%it}ti/#E۷܅3Cv5Y 8fbb(HHHHHH`2Ib9S)o 0҃@FS/;uoG|Wų jabb/( er$qQΧ̈&r7ک)eEa|$@$@$@$@$@$0L5 >1ٱWq6a|8jzPRHCbЋi;+6(^܁EIb'ñ +:j6o\OCQ ɦJGV6:     V Pu "Y=qsӪ{c,8{}tUGH\zqIe.SjfuU 27DL* 5z)uf,ESq )뉦xg;hCmJO"w$@$@$@$@$@$p !,7w<`ͩ`F}Ϗ}~SrW {R, ɹR3Yc_)>Niw$@$@$@$@$@$p-ZMe=MhpO$@$@$@$@$@*E%ʲlM^vt$@$@$@$@$@$p5W, \}pu 5G kJY       pu 5Gq͕ |[JA]٫ HHHHHH&-&9SGwHm8èIHHHHH!<ޝbܶڏmyH$@$@$@$@$@$0,@f ?_6l:!A6>qxBjU3W *^QrM xA6RFlM]Ȧ** ]vT<)nMYP|O6Zd3+6I8?өUq/0)Mf"ܓ 8pC"&%>M%R]q]T%x+S)Ji`wf)+ʟg:uMݙy W{4Me ٕHR<Ќ̗J'ʓLq$= xH' m-۷e3julz\P{U63vA2+ɦN?WL{8u_z3r8SJF9ei^6~d23yQoHHHHHƃ-ƃ*J)NT^jS)+]) blq-5e}R Ӛv{Ce*R@]vu,M 8a2P :{/ĕr!3D{ϞhF{J`W@4CY`q9 *Ƌ,9%\a))!%CN j@ P3Iv*ʗf/g$ LT8L`>jM) @6%.{jx(ggAK,j)%ƉdFyaHHHHH***~&n'`WߕP+.B6 ?Aͥ8'eS P}fͽ~0ܓ X |C"X#axj4R JBѺorb9eᠬ9T9EFRE.ё '@ä"fHHHHHHzu '@ä"fHHHHHH*^1$@$@$@$@$@$@$0 P0髈$      G1<9s|d[e 3#      WQWH<Բkl<$      TĨCX6e\lwhGG$@$@$@$@$@$@*bTJ<˦SC,HHHHHHHz$@Ck-1IHHHHHHz"@8ֶǁHHHHHHGT8cc܌HHHHHH&3*b\;Mi٠pHHHHHHH`!5.T8ؗ˴! \lɮRRA)I6U %      E q$m_bb$@$@$@$@$@$@$0pHEC 0]y= \gpq/G IHHHHH+RV)L& IHHHHH7T8Ĩx"(0'      1p5Clb !      )A˿L2$@$@$@$@$@$@$@S'2UŌ !!SS 뜀cf%p`I |8פ^tW @L$@$@$0pwP9\eӽ5 (_܅cҜ:I$_E;=3?]sy8t텃}| G_#TTT)xk>4Sgc^{22a(=*_wѼ gbFA$@$@p̵3f}C9<Ɏ+܉[5ф4T|Vѻ˵|RY;ѕ Jq`ݠugI|A%Nàq˯nmqČYjmmɨ8j` d*GԵ瀷T$#$ъ>uyq܋*ӏu{砻sdxKg2qㅾydkm<!)Al{a}n3mo%yV% 4eqѺ~D-I/Sل[ *K?9[*4ժMXV stWgJ0_S1,/NOQjl9i J엷bQ%J4>!a_N(BdӲjʥ~$bfR9ytt*y8j-uӓoӚ#F{H^R 3KJ1x/e 2UXR`YR ֺJo;~CH~\c zN~Z Dy'?"d=o\el16T}/n|g]/nwqtW Gh&t$@$@$p@ʸ Ĩxi/7#i)}Hu>/w³Δ4)U>.5wfzs,~ůS>|. 3ep£9>[-%"ZBz$sJڼ%my{"fo%lǫGZyDBӅ[ uT8 IϡR͖Rvz]ِTewexG[ SⅯ~͙[ٽoFxri# 36 ,q3Ot$Ix!HOX{%0[sJh6= Rܸ}42@|\2ct)XQ IW&WG<_ǒ|S/+,iG2. !mqzV?>jOڙ!D cޛiB=o+;Rs =ZsaYۛ^/\mxZQ9sh~6}M ՛-б8_KcN?喐|fAx;Ns 9ݨ+Ƣy$TVbPa ~."H5l%H%!U:KL\WF4E ϰL[Wq^'!NƱy\NB >|r*'. bMmߍūT{G2|(E{fnĕcajO阽6JQE̖2 Ag:YԶ#}lێBC&| "ZfK wF2!P4e™{MS $avq"Rl9PP.'喲R^GoJ6/$~ k`'ZҵE T k3qyBdp>B$@$p-/^ jQd^.e$~U(f;)G FM>r*CO<-~˽.8x".RM"<+vmbN/=bz#u)k}cz%h'(Fd||OҎ/=˅APzgxqZ)Q8cOk=|2dCVy*YX+ڐNtjǥDYu!]?QdX T,*Pm[,qm}&dL/X8.yF~Z.TʵD\" )CN*igC22vsT%M`HB*iha&a;,|}ۥ)KehO*5cۨ?:V[yq&kzUv^u/jȚ'u"V2 A𢰏%6NNw-`Cfn_sPb v$pm79'-kC툾SbUSh'Õ3)dťGK5kgIޱJvLΓaIz * Om@[ojk(!pHPŔrjw. 
Sia-1.3.0/doc/whitepaper.tex000066400000000000000000000765541313565667000157130ustar00rootroot00000000000000\documentclass[twocolumn]{article} \usepackage{amsmath} \begin{document} \frenchspacing \title{Sia: Simple Decentralized Storage} \author{ {\rm David Vorick}\\ Nebulous Inc.\\ david@nebulouslabs.com \and {\rm Luke Champine}\\ Nebulous Inc.\\ luke@nebulouslabs.com } \maketitle \subsection*{Abstract} The authors introduce Sia, a platform for decentralized storage. Sia enables the formation of storage contracts between peers.
Contracts are agreements between a storage provider and their client, defining what data will be stored and at what price. They require the storage provider to prove, at regular intervals, that they are still storing their client's data. Contracts are stored in a blockchain, making them publicly auditable. In this respect, Sia can be viewed as a Bitcoin derivative that includes support for such contracts. Sia will initially be implemented as an altcoin, and later financially connected to Bitcoin via a two-way peg. \section{Introduction} Sia is a decentralized cloud storage platform that intends to compete with existing storage solutions, at both the P2P and enterprise level. Instead of renting storage from a centralized provider, peers on Sia rent storage from each other. Sia itself stores only the storage contracts formed between parties, defining the terms of their arrangement. A blockchain, similar to Bitcoin \cite{btc, btcdg}, is used for this purpose. By forming a contract, a storage provider (also known as a \textit{host}) agrees to store a client's data, and to periodically submit proof of their continued storage until the contract expires. The host is compensated for every proof they submit, and penalized for missing a proof. Since these proofs are publicly verifiable (and are publicly available in the blockchain), network consensus can be used to automatically enforce storage contracts. Importantly, this means that clients do not need to personally verify storage proofs; they can simply upload their file and let the network do the rest. We acknowledge that storing data on a single untrusted host guarantees little in the way of availability, bandwidth, or general quality of service. Instead, we recommend storing data redundantly across multiple hosts. In particular, the use of erasure codes can enable high availability without excessive redundancy. Sia will initially be implemented as a blockchain-based altcoin. Future support for a two-way peg with Bitcoin is planned, as discussed in ``Enabling Blockchain Innovations with Pegged Sidechains'' \cite{side}. The Sia protocol largely resembles Bitcoin except for the changes noted below. \section{General Structure} Sia's primary departure from Bitcoin lies in its transactions. Bitcoin uses a scripting system to enable a range of transaction types, such as pay-to-public-key-hash and pay-to-script-hash. Sia opts instead to use an $M$--of--$N$ multi-signature scheme for all transactions, eschewing the scripting system entirely. This reduces complexity and attack surface. Sia also extends transactions to enable the creation and enforcement of storage contracts. Three extensions are used to accomplish this: contracts, proofs, and contract updates. Contracts declare the intention of a host to store a file with a certain size and hash. They define the regularity with which a host must submit storage proofs. Once established, contracts can be modified later via contract updates. The specifics of these transaction types are defined in sections \ref{sec:contracts} and \ref{sec:storage}. 
\section{Transactions} A transaction contains the following fields: \\ \noindent \begin{tabular}{ l | l } \textbf{Field} & \textbf{Description} \\ \hline Version & Protocol version number \\ Arbitrary Data & Used for metadata or otherwise \\ Miner Fee & Reward given to miner \\ Inputs & Incoming funds \\ Outputs & Outgoing funds (optional) \\ File Contract & See: File Contracts (optional) \\ Storage Proof & See: Proof of Storage (optional) \\ Signatures & Signatures from each input \\ \end{tabular} \subsection{Inputs and Outputs} An output comprises a volume of coins. Each output has an associated identifier, which is derived from the transaction that the output appeared in. The ID of output $i$ in transaction $t$ is defined as: \[ H(t || \text{``output''} || i) \] where $H$ is a cryptographic hashing function, and ``output'' is a string literal. The block reward and miner fees have special output IDs, given by: \[ H(H(\text{Block Header}) || \text{``blockreward''}) \] Every input must come from a prior output, so an input is simply an output ID. Inputs and outputs are also paired with a set of \textit{spend conditions}. Inputs contain the spend conditions themselves, while outputs contain their Merkle root hash \cite{merkle}. \subsection{Spend Conditions} Spend conditions are properties that must be met before coins are ``unlocked'' and can be spent. The spend conditions include a time lock, a set of public keys, and the number of signatures required. An output cannot be spent until the time lock has expired and enough of the specified keys have added their signature. The spend conditions are hashed into a Merkle tree, using the time lock, the number of signatures required, and the public keys as leaves. The root hash of this tree is used as the address to which the coins are sent. In order to spend the coins, the spend conditions corresponding to the address hash must be provided. The use of a Merkle tree allows parties to selectively reveal information in the spend conditions. For example, the time lock can be revealed without revealing the number of public keys or the number of signatures required. It should be noted that the time lock and number of signatures have low entropy, making their hashes vulnerable to brute-forcing. This could be resolved by adding a random nonce to these fields, increasing their entropy at the cost of space efficiency. \subsection{Signatures} Each input in a transaction must be signed. The cryptographic signature itself is paired with an input ID, a time lock, and a set of flags indicating which parts of the transaction have been signed. The input ID indicates which input the signature is being applied to. The time lock specifies when the signature becomes valid. Any subset of fields in the transaction can be signed, with the exception of the signature itself (as this would be impossible). There is also a flag to indicate that the whole transaction should be signed, except for the signatures. This allows for more nuanced transaction schemes. The actual data being signed, then, is a concatenation of the time lock, input ID, flags, and every flagged field. Every such signature in the transaction must be valid for the transaction to be accepted. \section{File Contracts} \label{sec:contracts} A file contract is an agreement between a storage provider and their client. At the core of a file contract is the file's Merkle root hash. To construct this hash, the file is split into segments of constant size and hashed into a Merkle tree.
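For concreteness, consider a file of four segments $s_1, \ldots, s_4$ (a simplified illustration; real files will have many more segments). Assuming a balanced binary tree, the root hash would be \[ H(\,H(H(s_1) \,||\, H(s_2)) \;||\; H(H(s_3) \,||\, H(s_4))\,), \] and each doubling of the file size adds one further level to the tree.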
The root hash, along with the total size of the file, can be used to verify storage proofs. File contracts also specify a duration, challenge frequency, and payout parameters, including the reward for a valid proof, the reward for an invalid or missing proof, and the maximum number of proofs that can be missed. The challenge frequency specifies how often a storage proof must be submitted, and creates discrete \textit{challenge windows} during which a host must submit storage proofs (one proof per window). Submitting a valid proof during the challenge window triggers an automatic payment to the ``valid proof'' address (presumably the host). If, at the end of the challenge window, no valid proof has been submitted, coins are instead sent to the ``missed proof'' address (likely an unspendable address in order to disincentivize DoS attacks; see section \ref{subsec:host-protect}). Contracts define a maximum number of proofs that can be missed; if this number is exceeded, the contract becomes invalid. If the contract is still valid at the end of the contract duration, it \textit{successfully terminates} and any remaining coins are sent to the valid proof address. Conversely, if the contract funds are exhausted before the duration elapses, or if the maximum number of missed proofs is exceeded, the contract \textit{unsuccessfully terminates} and any remaining coins are sent to the missed proof address. Completing or missing a proof results in a new transaction output belonging to the recipient specified in the contract. The output ID of a proof depends on the contract ID, defined as: \[ H(\text{transaction} || \text{``contract''} || i) \] where $i$ is the index of the contract within the transaction. The output ID of the proof can then be determined from: \[ H(\text{contract ID} || \text{outcome} || W_i) \] where $W_i$ is the window index, i.e. the number of windows that have elapsed since the contract was formed. The outcome is a string literal: either ``validproof'' or ``missedproof'', corresponding to the validity of the proof. The output ID of a contract termination is defined as: \[ H(\text{contract ID} || \text{outcome}) \] where outcome has the potential values ``successfultermination'' and ``unsuccessfultermination'', corresponding to the termination status of the contract. File contracts are also created with a list of ``edit conditions,'' analogous to the spend conditions of a transaction. If the edit conditions are fulfilled, the contract may be modified. Any of the values can be modified, including the contract funds, file hash, and output addresses. As these modifications can affect the validity of subsequent storage proofs, contract edits must specify a future challenge window at which they will become effective. Theoretically, peers could create ``micro-edit channels'' to facilitate frequent edits; see discussion of \mbox{micropayment} channels, section \ref{sec:uptime}. \section{Proof of Storage} \label{sec:storage} Storage proof transactions are periodically submitted in order to fulfill file contracts. Each storage proof targets a specific file contract. A storage proof does not need to have any inputs or outputs; only a contract ID and the proof data are required. \subsection{Algorithm} Hosts prove their storage by providing a segment of the original file and a list of hashes from the file's Merkle tree. This information is sufficient to prove that the segment came from the original file.
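To give a sense of scale: such a proof consists of one segment plus one hash per level of the tree, or roughly $\log_2 n$ hashes for a file of $n$ segments. Assuming, purely for illustration, 64-byte segments and 32-byte hashes, a proof for a 1 GiB file ($n = 2^{24}$) occupies only about $64 + 32 \cdot 24 = 832$ bytes, so proofs remain cheap to place in the blockchain even for very large files.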
Because proofs are submitted to the blockchain, anyone can verify their validity or invalidity. Each storage proof uses a randomly selected segment. The random seed for challenge window $W_i$ is given by: \[ H(\text{contract ID} || H(B_{i-1})) \] where $B_{i-1}$ is the block immediately prior to the beginning of $W_i$. If the host is consistently able to demonstrate possession of a random segment, then they are very likely storing the whole file. A host storing only 50\% of the file will be unable to complete approximately 50\% of the proofs. \subsection{Block Withholding Attacks} The random number generator is subject to manipulation via block withholding attacks, in which the attacker withholds blocks until they find one that will produce a favorable random number. However, the attacker has only one chance to manipulate the random number for a particular challenge. Furthermore, withholding a block to manipulate the random number will cost the attacker the block reward. If an attacker is able to mine 50\% of the blocks, then 50\% of the challenges can be manipulated. Nevertheless, the remaining 50\% are still random, so the attacker will still fail some storage proofs. Specifically, they will fail half as many as they would without the withholding attack. To protect against such attacks, clients can specify a high challenge frequency and large penalties for missing proofs. These precautions should be sufficient to deter any financially-motivated attacker that controls less than 50\% of the network's hashing power. Regardless, clients are advised to plan around potential Byzantine attacks, which may not be financially motivated. \subsection{Closed Window Attacks} Hosts can only complete a storage proof if their proof transaction makes it into the blockchain. Miners could maliciously exclude storage proofs from blocks, depriving themselves of transaction fees but forcing a penalty on hosts. Alternatively, miners could extort hosts by requiring large fees to include storage proofs, knowing that they are more important than the average transaction. This is termed a \textit{closed window attack}, because the malicious miner has artificially ``closed the window.'' The defense for this is to use a large window size. Hosts can reasonably assume that some percentage of miners will include their proofs in return for a transaction fee. Because hosts consent to all file contracts, they are free to reject any contract that they feel leaves them vulnerable to closed window attacks. \section{Arbitrary Transaction Data} Each transaction has an arbitrary data field which can be used for any type of information. Nodes will be required to store the arbitrary data if it is signed by any signature in the transaction. Nodes will initially accept up to 64 KB of arbitrary data per block. This arbitrary data provides hosts and clients with a decentralized way to organize themselves. It can be used to advertise available space or files seeking a host, or to create a decentralized file tracker. Arbitrary data could also be used to implement other types of soft forks. This would be done by creating an ``anyone-can-spend'' output but with restrictions specified in the arbitrary data. Miners that understand the restrictions can block any transaction that spends the output without satisfying the necessary stipulations. Naive nodes will stay synchronized without needing to be able to parse the arbitrary data. \section{Storage Ecosystem} Sia relies on an ecosystem that facilitates decentralized storage. 
Storage providers can use the arbitrary data field to announce themselves to the network. This can be done using a standardized template that clients will be able to read. Clients can use these announcements to create a database of potential hosts, and form contracts with only those they trust. \subsection{Host Protections} \label{subsec:host-protect} A contract requires consent from both the storage provider and their client, allowing the provider to reject unfavorable terms or unwanted (e.g. illegal) files. The provider may also refuse to sign a contract until the entire file has been uploaded to them. Contract terms give storage providers some flexibility. They can advertise themselves as minimally reliable, offering a low price and agreeing to minimal penalties for losing files; or they can advertise themselves as highly reliable, offering a higher price and agreeing to harsher penalties for losing files. An efficient market will optimize storage strategies. Hosts are vulnerable to denial of service attacks, which could prevent them from submitting storage proofs or transferring files. It is the responsibility of the host to protect themselves from such attacks. \subsection{Client Protections} \label{subsec:client-protect} Clients can use erasure codes, such as regenerating codes \cite{reg}, to safeguard against hosts going offline. These codes typically operate by splitting a file into $n$ pieces, such that the file can be recovered from any subset of $m$ unique pieces. (The values of $n$ and $m$ vary based on the specific erasure code and redundancy factor.) Each piece is then encrypted and stored across many hosts. This allows a client to attain high file availability even if the average network reliability is low. As an extreme example, if only 10 out of 100 pieces are needed to recover the file, then the client is actually relying on the 10 most reliable hosts, rather than the average reliability. Availability can be further improved by rehosting file pieces whose hosts have gone offline. Other metrics benefit from this strategy as well; the client can reduce latency by downloading from the closest 10 hosts, or increase download speed by downloading from the 10 fastest hosts. These downloads can be run in parallel to maximize available bandwidth. \subsection{Uptime Incentives} \label{sec:uptime} The storage proofs contain no mechanism to enforce constant uptime. There are also no provisions that require hosts to transfer files to clients upon request. One might expect, then, to see hosts holding their clients' files hostage and demanding exorbitant fees to download them. However, this attack is mitigated through the use of erasure codes, as described in section \ref{subsec:client-protect}. The strategy gives clients the freedom to ignore uncooperative hosts and work only with those that are cooperative. As a result, power shifts from the host to the client, and the ``download fee'' becomes an ``upload incentive.'' In this scenario, clients offer a reward for being sent a file, and hosts must compete to provide the best quality of service. Clients may request a file at any time, which incentivizes hosts to maximize uptime in order to collect as many rewards as possible. Clients can also incentivize greater throughput and lower latency via proportionally larger rewards. Clients could even perform random ``checkups'' that reward hosts simply for being online, even if they do not wish to download anything.
However, we reiterate that uptime incentives are not part of the Sia protocol; they are entirely dependent on client behavior. Payment for downloads is expected to be offered through preexisting micropayment channels \cite{mpc}. Micropayment channels allow clients to make many consecutive small payments with minimal latency and blockchain bloat. Hosts could transfer a small segment of the file and wait to receive a micropayment before proceeding. The use of many consecutive payments allows each party to minimize the risk of being cheated. Micropayments are small enough and fast enough that payments could be made every few seconds without having any major effect on throughput. \subsection{Basic Reputation System} Clients need a reliable method for picking quality hosts. Analyzing their history is insufficient, because the history could be spoofed. A host could repeatedly form contracts with itself, agreeing to store large ``fake'' files, such as a file containing only zeros. It would be trivial to perform storage proofs on such data without actually storing anything. To mitigate this Sybil attack, clients can require that hosts that announce themselves in the arbitrary data section also include a large volume of time-locked coins. If 10 coins are time-locked 14 days into the future, then the host can be said to have created a lock valued at 140 coin-days. By favoring hosts that have created high-value locks, clients can mitigate the risk of Sybil attacks, as valuable locks are not trivial to create. Each client can choose their own equation for picking hosts, and can use a large number of factors, including price, lock value, volume of storage being offered, and the penalties hosts are willing to pay for losing files. More complex systems, such as those that use human review or other metrics, could be implemented out-of-band in a more centralized setting. \section{Siafunds} Sia is a product of Nebulous Incorporated. Nebulous is a for-profit company, and Sia is intended to become a primary source of income for the company. Currency premining is not a stable source of income, as it requires creating a new currency and tethering the company's revenue to the currency's increasing value. When the company needs to spend money, it must trade away portions of its source of income. Additionally, premining means that one entity has control over a large volume of the currency, and therefore potentially large and disruptive control over the market. Instead, Nebulous intends to generate revenue from Sia in a manner proportional to the value added by Sia, as determined by the value of the contracts set up between clients and hosts. This is accomplished by imposing a fee on all contracts. When a contract is created, 3.9\% of the contract fund is removed and distributed to the holders of \textit{siafunds}. Nebulous Inc. will initially hold approximately 88\% of the siafunds, and the early crowd-fund backers of Sia will hold the rest. Siafunds can be sent to other addresses, in the same way that siacoins can be sent to other addresses. They cannot, however, be used to fund contracts or miner fees. When siafunds are transferred to a new address, an additional unspent output is created, containing all of the siacoins that have been earned by the siafunds since their previous transfer. These siacoins are sent to the same address as the siafunds. \section{Economics of Sia} The primary currency of Sia is the siacoin.
The supply of siacoins will increase permanently, and all fresh supply will be given to miners as a block subsidy. The first block will have 300,000 coins minted. This number will decrease by 1 coin per block, until a minimum of 30,000 coins per block is reached. Equivalently, the subsidy of block $h$ is $\max(300{,}000 - (h - 1),\ 30{,}000)$ coins, so the minimum is reached at block 270{,}001 (roughly five years after launch). Following a target of 10 minutes between blocks, the annual growth in supply is:\\ \tabcolsep=0.11cm \noindent \begin{tabular}{ l | *{7}{c} } Year & 1 & 2 & 3 & 4 & 5 & 8 & 20 \\ \hline Growth & 90\% & 39\% & 21\% & 11.5\% & 4.4\% & 3.2\% & 2.3\% \end{tabular} \\ There are inefficiencies within the Sia incentive scheme. The primary goal of Sia is to provide a blockchain that enforces storage contracts. The mining reward, however, is only indirectly linked to the total value of contracts being created. The siacoin, especially initially, is likely to have high volatility. Hosts can be adversely affected if the value of the currency shifts mid-contract. As a result, we expect to see hosts increasing the price of long-term contracts as a hedge against volatility. Additionally, hosts can advertise their prices in a more stable currency (like USD) and convert to siacoin immediately before finalizing a contract. Eventually, the use of two-way pegs with other crypto-assets will give hosts additional means to insulate themselves from volatility. % \section{Under Consideration} % The primary foundation of Sia has been established above. % Other considerations, such as mining algorithms, block time, etc., can be assumed to mirror those found in Bitcoin. % Giving careful attention to ``A Treatise on Altcoins'' \cite{alts}, we are considering the following changes to Sia for the overall improvement of the cryptocurrency. % We caution that these propositions have not yet been rigorously examined from a security standpoint. % \subsection{Flexible Contracts} % Contracts, in their present form, are fairly strict. % There is a set penalty for each missed storage proof, and a termination upon $n$ total missed storage proofs. % Increased flexibility in the penalty schedule may be desirable. % Contracts are also permanent, creating what is essentially an uneditable file on the network. % There may be value in enabling clients and hosts to negotiate an updated file hash or other updated contract terms. % Updating the terms of the contract would require consent from all parties. % \subsection{Proof of Existence Windows} % In an attempt to partially resolve the closed window attacks, we could use a proof of existence strategy. % A host can create a hash of the storage proof which they submit to the blockchain within the window. % The host then has a greatly extended window in which they can demonstrate that the proof of storage was created during the required window. % This has two advantages. % First, an attacker cannot selectively exclude proof of existence hashes, because there's no way to figure out who owns each hash. % Either the attacker doesn't include any unknown proof of existence hashes, or the attacker risks including undesired proof of existence hashes. % Second, this allows hosts to submit small transactions to the network during peak hours and then the larger transactions when the traffic has died down. % A further improvement would enable Merkle Tree proofs of existence. % This would enable a host to submit multiple proofs of storage in a single proof of existence hash. % % delete/expand this section?
% \subsection{Siafund the Miner fees} % Have some portion of siafunds contribute to the miner fees, which ensures that miners have compensation so long as Sia is being used for its core purpose - storage. % \subsection{Miner Fee Adjustments} % If a block has miner fees which are significantly higher than the fees in the current block, there is incentive for miners to re-mine the previous block and change who gets the miner fees. % This can be mitigated by putting all of the fees into a pool which pays out 50\% every block, making re-mining unprofitable for any party with less than 50\% of the network hashing power. % Link to discussion threads of this potential change. % Additionally, miners have incentives not to propagate high-fee transactions, because this will prevent other miners from mining the transaction and collecting the fees. % It may be possible to construct a system using fee deterioration that means a miner has the highest expected total reward when the transaction is mined as soon as possible - regardless of who mines the transaction. % Link to discussion threads of this potential change. % \subsection{More Frequent Target Adjustments} % % This section could use a few citations, but the discussion on this seems pretty well scattered. I could find things like the Kimoto Gravity Well, but other than alts.pdf I couldn't find any comments by respected individuals. I know that these discussions are out there, I've seen them before, just can't find them. % Bitcoin adjusts its block difficulty every 2016 blocks, with a target of 10 minutes per block. % This often means that the final blocks in a difficulty window are produced faster than 10 minutes (sometimes substantially) on account of rapid growth in available mining power. % To mitigate this effect, we are considering adjusting the difficulty after every block. % % I don't understand this paragraph % The bi-weekly adjustments to the Bitcoin difficulty can also cause coordinated drops in mining power---all at once, miners lose a percentage of their dollars-per-energy efficiency. % Difficulty adjustments every block create a much smoother function for when mining rigs are no longer profitable. % The clamp on mining growth can also be increased. % The clamp serves to prevent an attacker from easily manipulating the difficulty. % % 2,016,000 what? % In Bitcoin, raising the difficulty from 1000 to 4000 requires a minimum of 2,016,000 of work, and the difficulty can adjust by a maximum of 4x every week. % If the difficulty is being adjusted every block, however, and clamped at 1.001\% per block, an attacker will need 3,000,050 work to raise the difficulty from 1000 to 4000. % At this rate, the difficulty can shift by a maximum of 7.5x every week, which both increases the flexibility of the difficulty and makes difficulty-raising attacks more difficult. % Though the difficulty will be adjusted every block, it will still be adjusted according to the amount of time taken to produce the previous 2016 blocks, preventing randomly fast or slow blocks from having a large impact on the network. % \subsection{Committing to State} % One thing that could allow for substantially lighter-weight clients is if the miners committed to the current state of the network, instead of just to the new transactions. % This would mean creating a structure for a database that represents the state of the network and hashing it. % We could follow suggestions similar to those presented in ``Ultimate blockchain compression'' \cite{ubc}.
% \subsection{Variance Enforced Merge Mining} % Bitcoin enforces a number of leading 0s on a winning block. % Sia could enforce something like a single leading 1, followed by a bunch of leading 0s. % This creates the property that no hash is ever valid for both Bitcoin and Sia. % The value to this is that the number of payouts a miner gets from finding blocks goes up. % The total payout is still the same, but the number of payouts increases by the number of blocks that would have been valid for both. % A block that solves the coin with the highest difficulty will always be valid for both blockchains. % (I need to read more about merge mining before I publish this section) \section{Conclusion} Sia is a variant on the Bitcoin protocol that enables decentralized file storage via cryptographic contracts. These contracts can be used to enforce storage agreements between clients and hosts. After agreeing to store a file, a host must regularly submit storage proofs to the network. The host will automatically be compensated for storing the file regardless of the behavior of the client. Importantly, contracts do not require hosts to transfer files back to their client when requested. Instead, an out-of-band ecosystem must be created to reward hosts for uploading. Clients and hosts must also find a way to coordinate; one mechanism would be the arbitrary data field in the blockchain. Various precautions have been enumerated which mitigate Sybil attacks and the unreliability of hosts. Siafunds are used as a mechanism of generating revenue for Nebulous Inc., the company responsible for the release and maintenance of Sia. By using Siafunds instead of premining, Nebulous more directly correlates revenue to actual use of the network, and is largely unaffected by market games that malicious entities may play with the network currency. Miners may also derive a part of their block subsidy from siafunds, with similar benefits. Long term, we hope to add support for two-way pegs with various currencies, which would enable consumers to insulate themselves from the instability of a single currency. We believe Sia will provide a fertile platform for decentralized cloud storage in trustless environments. \onecolumn \begin{thebibliography}{9} \bibitem{btc} Satoshi Nakamoto, \emph{Bitcoin: A Peer-to-Peer Electronic Cash System}. \bibitem{merkle} R.C. Merkle, \emph{Protocols for public key cryptosystems}, In Proc. 1980 Symposium on Security and Privacy, IEEE Computer Society, pages 122-133, April 1980. \bibitem{cpr} Hovav Shacham, Brent Waters, \emph{Compact Proofs of Retrievability}, Proc. of Asiacrypt 2008, vol. 5350, Dec 2008, pp. 90-107. \bibitem{reg} K. V. Rashmi, Nihar B. Shah, and P. Vijay Kumar, \emph{Optimal Exact-Regenerating Codes for Distributed Storage at the MSR and MBR Points via a Product-Matrix Construction}. \bibitem{side} Adam Back, Matt Corallo, Luke Dashjr, Mark Friedenbach, Gregory Maxwell, Andrew Miller, Andrew Poelstra, Jorge Timon, Pieter Wuille, \emph{Enabling Blockchain Innovations with Pegged Sidechains}. \bibitem{alts} Andrew Poelstra, \emph{A Treatise on Altcoins}.
\bibitem{ibf} Gavin Andresen, \emph{O(1) Block Propagation}, https://gist.github.com/gavinandresen/e20c3b5a1d4b97f79ac2 \bibitem{hdw} Gregory Maxwell, \emph{Deterministic Wallets}, https://bitcointalk.org/index.php?topic=19137.0 \bibitem{ubc} etotheipi, \emph{Ultimate blockchain compression w/ trust-free lite nodes}, \newline https://bitcointalk.org/index.php?topic=88208.0 \bibitem{poc} Gregory Maxwell, \emph{Proof of Storage to make distributed resource consumption costly.} \newline https://bitcointalk.org/index.php?topic=310323.0 \bibitem{mpc} Mike Hearn, \emph{Rapidly-adjusted (micro)payments to a pre-determined party},\newline https://en.bitcoin.it/wiki/Contracts\#Example\_7:\_Rapidly-adjusted\_.28micro.29payments\_to\_a\_pre-determined\_party \bibitem{btcdg} Bitcoin Developer Guide, https://bitcoin.org/en/developer-guide \end{thebibliography} \end{document} Sia-1.3.0/encoding/000077500000000000000000000000001313565667000140315ustar00rootroot00000000000000Sia-1.3.0/encoding/integers.go000066400000000000000000000020331313565667000161760ustar00rootroot00000000000000package encoding import ( "encoding/binary" "io" ) // EncInt64 encodes an int64 as a slice of 8 bytes. func EncInt64(i int64) (b []byte) { b = make([]byte, 8) binary.LittleEndian.PutUint64(b, uint64(i)) return } // DecInt64 decodes a slice of 8 bytes into an int64. // If len(b) < 8, the slice is padded with zeros. func DecInt64(b []byte) int64 { b2 := make([]byte, 8) copy(b2, b) return int64(binary.LittleEndian.Uint64(b2)) } // EncUint64 encodes a uint64 as a slice of 8 bytes. func EncUint64(i uint64) (b []byte) { b = make([]byte, 8) binary.LittleEndian.PutUint64(b, i) return } // DecUint64 decodes a slice of 8 bytes into a uint64. // If len(b) < 8, the slice is padded with zeros. func DecUint64(b []byte) uint64 { b2 := make([]byte, 8) copy(b2, b) return binary.LittleEndian.Uint64(b2) } // WriteUint64 writes u to w. func WriteUint64(w io.Writer, u uint64) error { _, err := w.Write(EncUint64(u)) return err } // WriteInt writes i to w as a uint64. func WriteInt(w io.Writer, i int) error { return WriteUint64(w, uint64(i)) } Sia-1.3.0/encoding/marshal.go000066400000000000000000000227211313565667000160130ustar00rootroot00000000000000// Package encoding converts arbitrary objects into byte slices, and vice // versa. It also contains helper functions for reading and writing length- // prefixed data. See doc/Encoding.md for the full encoding specification. package encoding import ( "bytes" "errors" "fmt" "io" "os" "reflect" ) const ( maxDecodeLen = 12e6 // 12 MB maxSliceLen = 5e6 // 5 MB ) var ( errBadPointer = errors.New("cannot decode into invalid pointer") ) type ( // A SiaMarshaler can encode and write itself to a stream. SiaMarshaler interface { MarshalSia(io.Writer) error } // A SiaUnmarshaler can read and decode itself from a stream. SiaUnmarshaler interface { UnmarshalSia(io.Reader) error } // An Encoder writes objects to an output stream. Encoder struct { w io.Writer } ) // Encode writes the encoding of v to the stream. For encoding details, see // the package docstring. func (e *Encoder) Encode(v interface{}) error { return e.encode(reflect.ValueOf(v)) } // EncodeAll encodes a variable number of arguments. func (e *Encoder) EncodeAll(vs ...interface{}) error { for _, v := range vs { if err := e.Encode(v); err != nil { return err } } return nil } // write catches instances where short writes do not return an error.
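// Some io.Writer implementations incorrectly return n < len(p) together with // a nil error; write normalizes any such short write to io.ErrShortWrite, so // callers always receive a non-nil error when p was not fully written.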
func (e *Encoder) write(p []byte) error { n, err := e.w.Write(p) if n != len(p) && err == nil { return io.ErrShortWrite } return err } // Encode writes the encoding of val to the stream. For encoding details, see // the package docstring. func (e *Encoder) encode(val reflect.Value) error { // check for MarshalSia interface first if val.CanInterface() { if m, ok := val.Interface().(SiaMarshaler); ok { return m.MarshalSia(e.w) } } switch val.Kind() { case reflect.Ptr: // write either a 1 or 0 if err := e.Encode(!val.IsNil()); err != nil { return err } if !val.IsNil() { return e.encode(val.Elem()) } case reflect.Bool: if val.Bool() { return e.write([]byte{1}) } else { return e.write([]byte{0}) } case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return e.write(EncInt64(val.Int())) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return WriteUint64(e.w, val.Uint()) case reflect.String: return WritePrefix(e.w, []byte(val.String())) case reflect.Slice: // slices are variable length, so prepend the length and then fallthrough to array logic if err := WriteInt(e.w, val.Len()); err != nil { return err } if val.Len() == 0 { return nil } fallthrough case reflect.Array: // special case for byte arrays if val.Type().Elem().Kind() == reflect.Uint8 { // if the array is addressable, we can optimize a bit here if val.CanAddr() { return e.write(val.Slice(0, val.Len()).Bytes()) } // otherwise we have to copy into a newly allocated slice slice := reflect.MakeSlice(reflect.SliceOf(val.Type().Elem()), val.Len(), val.Len()) reflect.Copy(slice, val) return e.write(slice.Bytes()) } // normal slices/arrays are encoded by sequentially encoding their elements for i := 0; i < val.Len(); i++ { if err := e.encode(val.Index(i)); err != nil { return err } } return nil case reflect.Struct: for i := 0; i < val.NumField(); i++ { if err := e.encode(val.Field(i)); err != nil { return err } } return nil } // Marshalling should never fail. If it panics, you're doing something wrong, // like trying to encode a map or an unexported struct field. panic("could not marshal type " + val.Type().String()) } // NewEncoder returns a new encoder that writes to w. func NewEncoder(w io.Writer) *Encoder { return &Encoder{w} } // Marshal returns the encoding of v. For encoding details, see the package // docstring. func Marshal(v interface{}) []byte { b := new(bytes.Buffer) NewEncoder(b).Encode(v) // no error possible when using a bytes.Buffer return b.Bytes() } // MarshalAll encodes all of its inputs and returns their concatenation. func MarshalAll(vs ...interface{}) []byte { b := new(bytes.Buffer) enc := NewEncoder(b) // Error from EncodeAll is ignored as encoding cannot fail when writing // to a bytes.Buffer. _ = enc.EncodeAll(vs...) return b.Bytes() } // WriteFile writes v to a file. The file will be created if it does not exist. func WriteFile(filename string, v interface{}) error { file, err := os.Create(filename) if err != nil { return err } defer file.Close() err = NewEncoder(file).Encode(v) if err != nil { return errors.New("error while writing " + filename + ": " + err.Error()) } return nil } // A Decoder reads and decodes values from an input stream. type Decoder struct { r io.Reader n int } // Read implements the io.Reader interface. It also keeps track of the total // number of bytes decoded, and panics if that number exceeds a global // maximum. 
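// In normal use the panic does not escape the package: Decode installs a // deferred recover that converts any panic raised while decoding, including // this size-limit panic, into an ordinary error for the caller.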
func (d *Decoder) Read(p []byte) (int, error) { n, err := d.r.Read(p) // enforce an absolute maximum size limit if d.n += n; d.n > maxDecodeLen { panic("encoded type exceeds size limit") } return n, err } // Decode reads the next encoded value from its input stream and stores it in // v, which must be a pointer. The decoding rules are the inverse of those // specified in the package docstring. func (d *Decoder) Decode(v interface{}) (err error) { // v must be a pointer pval := reflect.ValueOf(v) if pval.Kind() != reflect.Ptr || pval.IsNil() { return errBadPointer } // catch decoding panics and convert them to errors // note that this allows us to skip boundary checks during decoding defer func() { if r := recover(); r != nil { err = fmt.Errorf("could not decode type %s: %v", pval.Elem().Type().String(), r) } }() // reset the read count d.n = 0 d.decode(pval.Elem()) return } // DecodeAll decodes a variable number of arguments. func (d *Decoder) DecodeAll(vs ...interface{}) error { for _, v := range vs { if err := d.Decode(v); err != nil { return err } } return nil } // readN reads n bytes and panics if the read fails. func (d *Decoder) readN(n int) []byte { b := make([]byte, n) _, err := io.ReadFull(d, b) if err != nil { panic(err) } return b } // readPrefix reads a length-prefixed byte slice and panics if the read fails. func (d *Decoder) readPrefix() []byte { b, err := ReadPrefix(d, maxSliceLen) if err != nil { panic(err) } return b } // decode reads the next encoded value from its input stream and stores it in // val. The decoding rules are the inverse of those specified in the package // docstring. func (d *Decoder) decode(val reflect.Value) { // check for UnmarshalSia interface first if val.CanAddr() && val.Addr().CanInterface() { if u, ok := val.Addr().Interface().(SiaUnmarshaler); ok { err := u.UnmarshalSia(d) if err != nil { panic(err) } return } } switch val.Kind() { case reflect.Ptr: var valid bool d.decode(reflect.ValueOf(&valid).Elem()) // nil pointer, nothing to decode if !valid { return } // make sure we aren't decoding into nil if val.IsNil() { val.Set(reflect.New(val.Type().Elem())) } d.decode(val.Elem()) case reflect.Bool: b := d.readN(1) if b[0] > 1 { panic("boolean value was not 0 or 1") } val.SetBool(b[0] == 1) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: val.SetInt(DecInt64(d.readN(8))) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: val.SetUint(DecUint64(d.readN(8))) case reflect.String: val.SetString(string(d.readPrefix())) case reflect.Slice: // slices are variable length, but otherwise the same as arrays. // just have to allocate them first, then we can fallthrough to the array logic. sliceLen := DecUint64(d.readN(8)) // sanity-check the sliceLen, otherwise you can crash a peer by making // them allocate a massive slice if sliceLen > 1<<31-1 || sliceLen*uint64(val.Type().Elem().Size()) > maxSliceLen { panic("slice is too large") } else if sliceLen == 0 { return } val.Set(reflect.MakeSlice(val.Type(), int(sliceLen), int(sliceLen))) fallthrough case reflect.Array: // special case for byte arrays (e.g. 
hashes) if val.Type().Elem().Kind() == reflect.Uint8 { // convert val to a slice and read into it directly b := val.Slice(0, val.Len()) _, err := io.ReadFull(d, b.Bytes()) if err != nil { panic(err) } return } // arrays are unmarshalled by sequentially unmarshalling their elements for i := 0; i < val.Len(); i++ { d.decode(val.Index(i)) } return case reflect.Struct: for i := 0; i < val.NumField(); i++ { d.decode(val.Field(i)) } return default: panic("unknown type") } } // NewDecoder returns a new decoder that reads from r. func NewDecoder(r io.Reader) *Decoder { return &Decoder{r, 0} } // Unmarshal decodes the encoded value b and stores it in v, which must be a // pointer. The decoding rules are the inverse of those specified in the // package docstring for marshaling. func Unmarshal(b []byte, v interface{}) error { r := bytes.NewReader(b) return NewDecoder(r).Decode(v) } // UnmarshalAll decodes the encoded values in b and stores them in vs, which // must be pointers. func UnmarshalAll(b []byte, vs ...interface{}) error { dec := NewDecoder(bytes.NewReader(b)) return dec.DecodeAll(vs...) } // ReadFile reads the contents of a file and decodes them into v. func ReadFile(filename string, v interface{}) error { file, err := os.Open(filename) if err != nil { return err } defer file.Close() err = NewDecoder(file).Decode(v) if err != nil { return errors.New("error while reading " + filename + ": " + err.Error()) } return nil } Sia-1.3.0/encoding/marshal_test.go000066400000000000000000000257201313565667000170540ustar00rootroot00000000000000package encoding import ( "bytes" "io" "io/ioutil" "os" "reflect" "strings" "testing" "github.com/NebulousLabs/Sia/build" ) // dummy types to test encoding type ( // basic test0 struct { B bool I int32 U uint16 S string } // slice/array test1 struct { Is []int32 Bs []byte Sa [3]string Ba [3]byte } // nested test2 struct { T test0 } // embedded test3 struct { test2 } // pointer test4 struct { P *test1 } // private field -- need to implement MarshalSia/UnmarshalSia test5 struct { s string } // private field with pointer receiver test6 struct { s string } ) func (t test5) MarshalSia(w io.Writer) error { return WritePrefix(w, []byte(t.s)) } func (t *test5) UnmarshalSia(r io.Reader) error { b, err := ReadPrefix(r, 256) t.s = string(b) return err } // same as above methods, but with a pointer receiver func (t *test6) MarshalSia(w io.Writer) error { return WritePrefix(w, []byte(t.s)) } func (t *test6) UnmarshalSia(r io.Reader) error { b, err := ReadPrefix(r, 256) t.s = string(b) return err } var testStructs = []interface{}{ test0{false, 65537, 256, "foo"}, test1{[]int32{1, 2, 3}, []byte("foo"), [3]string{"foo", "bar", "baz"}, [3]byte{'f', 'o', 'o'}}, test2{test0{false, 65537, 256, "foo"}}, test3{test2{test0{false, 65537, 256, "foo"}}}, test4{&test1{[]int32{1, 2, 3}, []byte("foo"), [3]string{"foo", "bar", "baz"}, [3]byte{'f', 'o', 'o'}}}, test5{"foo"}, &test6{"foo"}, } var testEncodings = [][]byte{ {0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 'f', 'o', 'o'}, {3, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 'f', 'o', 'o', 3, 0, 0, 0, 0, 0, 0, 0, 'f', 'o', 'o', 3, 0, 0, 0, 0, 0, 0, 0, 'b', 'a', 'r', 3, 0, 0, 0, 0, 0, 0, 0, 'b', 'a', 'z', 'f', 'o', 'o'}, {0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 'f', 'o', 'o'}, {0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 'f', 'o', 'o'}, {1, 3, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 
0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 'f', 'o', 'o', 3, 0, 0, 0, 0, 0, 0, 0, 'f', 'o', 'o', 3, 0, 0, 0, 0, 0, 0, 0, 'b', 'a', 'r', 3, 0, 0, 0, 0, 0, 0, 0, 'b', 'a', 'z', 'f', 'o', 'o'}, {3, 0, 0, 0, 0, 0, 0, 0, 'f', 'o', 'o'}, {3, 0, 0, 0, 0, 0, 0, 0, 'f', 'o', 'o'}, } // TestEncode tests the Encode function. func TestEncode(t *testing.T) { // use Marshal for convenience for i := range testStructs { b := Marshal(testStructs[i]) if !bytes.Equal(b, testEncodings[i]) { t.Errorf("bad encoding of testStructs[%d]: \nexp:\t%v\ngot:\t%v", i, testEncodings[i], b) } } // bad type defer func() { if recover() == nil { t.Error("expected panic, got nil") } }() NewEncoder(ioutil.Discard).Encode(map[int]int{}) } // TestDecode tests the Decode function. func TestDecode(t *testing.T) { if testing.Short() { t.SkipNow() } // use Unmarshal for convenience var emptyStructs = []interface{}{&test0{}, &test1{}, &test2{}, &test3{}, &test4{}, &test5{}, &test6{}} for i := range testEncodings { err := Unmarshal(testEncodings[i], emptyStructs[i]) if err != nil { t.Error(err) } } // bad boolean err := Unmarshal([]byte{3}, new(bool)) if err == nil || err.Error() != "could not decode type bool: boolean value was not 0 or 1" { t.Error("expected bool error, got", err) } // non-pointer err = Unmarshal([]byte{1, 2, 3}, "foo") if err != errBadPointer { t.Error("expected errBadPointer, got", err) } // unknown type err = Unmarshal([]byte{1, 2, 3}, new(map[int]int)) if err == nil || err.Error() != "could not decode type map[int]int: unknown type" { t.Error("expected unknown type error, got", err) } // big slice (larger than maxSliceLen) err = Unmarshal(EncUint64(maxSliceLen+1), new([]byte)) if err == nil || err.Error() != "could not decode type []uint8: slice is too large" { t.Error("expected large slice error, got", err) } // massive slice (larger than MaxInt32) err = Unmarshal(EncUint64(1<<32), new([]byte)) if err == nil || err.Error() != "could not decode type []uint8: slice is too large" { t.Error("expected large slice error, got", err) } // many small slices (total larger than maxDecodeLen) bigSlice := strings.Split(strings.Repeat("0123456789abcdefghijklmnopqrstuvwxyz", (maxSliceLen/16)-1), "0") err = Unmarshal(Marshal(bigSlice), new([]string)) if err == nil || err.Error() != "could not decode type []string: encoded type exceeds size limit" { t.Error("expected size limit error, got", err) } // badReader should fail on every decode dec := NewDecoder(new(badReader)) for i := range testEncodings { err := dec.Decode(emptyStructs[i]) if err == nil { t.Error("expected error, got nil") } } // special case, not covered by testStructs err = dec.Decode(new([3]byte)) if err == nil || err.Error() != "could not decode type [3]uint8: EOF" { t.Error("expected EOF error, got", err) } } // TestMarshalUnmarshal tests the Marshal and Unmarshal functions, which are // inverses of each other. func TestMarshalUnmarshal(t *testing.T) { var emptyStructs = []interface{}{&test0{}, &test1{}, &test2{}, &test3{}, &test4{}, &test5{}, &test6{}} for i := range testStructs { b := Marshal(testStructs[i]) err := Unmarshal(b, emptyStructs[i]) if err != nil { t.Error(err) } } } // TestEncodeDecode tests the Encode and Decode functions, which are inverses // of each other. 
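// The test streams both directions through a single shared bytes.Buffer, so // each Encode is immediately consumed by the matching Decode; this exercises // the streaming path rather than the Marshal/Unmarshal convenience wrappers.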
func TestEncodeDecode(t *testing.T) { var emptyStructs = []interface{}{&test0{}, &test1{}, &test2{}, &test3{}, &test4{}, &test5{}, &test6{}} b := new(bytes.Buffer) enc := NewEncoder(b) dec := NewDecoder(b) for i := range testStructs { enc.Encode(testStructs[i]) err := dec.Decode(emptyStructs[i]) if err != nil { t.Error(err) } } } // TestEncodeAll tests the EncodeAll function. func TestEncodeAll(t *testing.T) { // EncodeAll should produce the same result as individually encoding each // object exp := new(bytes.Buffer) enc := NewEncoder(exp) for i := range testStructs { enc.Encode(testStructs[i]) } b := new(bytes.Buffer) NewEncoder(b).EncodeAll(testStructs...) if !bytes.Equal(b.Bytes(), exp.Bytes()) { t.Errorf("expected %v, got %v", exp.Bytes(), b.Bytes()) } // hardcoded check exp.Reset() exp.Write([]byte{1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 'f', 'o', 'o', 1}) b.Reset() NewEncoder(b).EncodeAll(1, 2, "foo", true) if !bytes.Equal(b.Bytes(), exp.Bytes()) { t.Errorf("expected %v, got %v", exp.Bytes(), b.Bytes()) } } // TestDecodeAll tests the DecodeAll function. func TestDecodeAll(t *testing.T) { b := new(bytes.Buffer) NewEncoder(b).EncodeAll(testStructs...) var emptyStructs = []interface{}{&test0{}, &test1{}, &test2{}, &test3{}, &test4{}, &test5{}, &test6{}} err := NewDecoder(b).DecodeAll(emptyStructs...) if err != nil { t.Error(err) } empty0 := *emptyStructs[0].(*test0) if !reflect.DeepEqual(empty0, testStructs[0]) { t.Error("deep equal:", empty0, testStructs[0]) } empty6 := emptyStructs[6].(*test6) if !reflect.DeepEqual(empty6, testStructs[6]) { t.Error("deep equal:", empty6, testStructs[6]) } // hardcoded check b.Reset() b.Write([]byte{1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 'f', 'o', 'o', 1}) var ( one, two uint64 foo string tru bool ) err = NewDecoder(b).DecodeAll(&one, &two, &foo, &tru) if err != nil { t.Fatal(err) } else if one != 1 || two != 2 || foo != "foo" || tru != true { t.Error("values were not decoded correctly:", one, two, foo, tru) } } // TestMarshalAll tests the MarshalAll function. func TestMarshalAll(t *testing.T) { // MarshalAll should produce the same result as individually marshalling // each object var expected []byte for i := range testStructs { expected = append(expected, Marshal(testStructs[i])...) } b := MarshalAll(testStructs...) if !bytes.Equal(b, expected) { t.Errorf("expected %v, got %v", expected, b) } // hardcoded check exp := []byte{1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 'f', 'o', 'o', 1} b = MarshalAll(1, 2, "foo", true) if !bytes.Equal(b, exp) { t.Errorf("expected %v, got %v", exp, b) } } // TestUnmarshalAll tests the UnmarshalAll function. func TestUnmarshalAll(t *testing.T) { b := MarshalAll(testStructs...) var emptyStructs = []interface{}{&test0{}, &test1{}, &test2{}, &test3{}, &test4{}, &test5{}, &test6{}} err := UnmarshalAll(b, emptyStructs...) 
if err != nil { t.Error(err) } empty1 := *emptyStructs[1].(*test1) if !reflect.DeepEqual(empty1, testStructs[1]) { t.Error("deep equal:", empty1, testStructs[1]) } empty5 := *emptyStructs[5].(*test5) if !reflect.DeepEqual(empty5, testStructs[5]) { t.Error("deep equal:", empty5, testStructs[5]) } // hardcoded check b = []byte{1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 'f', 'o', 'o', 1} var ( one, two uint64 foo string tru bool ) err = UnmarshalAll(b, &one, &two, &foo, &tru) if err != nil { t.Fatal(err) } else if one != 1 || two != 2 || foo != "foo" || tru != true { t.Error("values were not decoded correctly:", one, two, foo, tru) } } // TestReadWriteFile tests the ReadFile and WriteFile functions, which are // inverses of each other. func TestReadWriteFile(t *testing.T) { // standard os.MkdirAll(build.TempDir("encoding"), 0777) path := build.TempDir("encoding", t.Name()) err := WriteFile(path, testStructs[3]) if err != nil { t.Fatal(err) } var obj test4 err = ReadFile(path, &obj) if err != nil { t.Error(err) } // bad paths err = WriteFile("/foo/bar", "baz") if err == nil { t.Error("expected error, got nil") } err = ReadFile("/foo/bar", nil) if err == nil { t.Error("expected error, got nil") } } // i5-4670K, 9a90f86: 33 MB/s func BenchmarkEncode(b *testing.B) { buf := new(bytes.Buffer) enc := NewEncoder(buf) for i := 0; i < b.N; i++ { buf.Reset() for i := range testStructs { err := enc.Encode(testStructs[i]) if err != nil { b.Fatal(err) } } } b.SetBytes(int64(buf.Len())) } // i5-4670K, 9a90f86: 26 MB/s func BenchmarkDecode(b *testing.B) { var emptyStructs = []interface{}{&test0{}, &test1{}, &test2{}, &test3{}, &test4{}, &test5{}, &test6{}} var numBytes int64 for i := 0; i < b.N; i++ { numBytes = 0 for i := range testEncodings { err := Unmarshal(testEncodings[i], emptyStructs[i]) if err != nil { b.Fatal(err) } numBytes += int64(len(testEncodings[i])) } } b.SetBytes(numBytes) } // i5-4670K, 2059112: 44 MB/s func BenchmarkMarshalAll(b *testing.B) { for i := 0; i < b.N; i++ { _ = MarshalAll(testStructs...) } b.SetBytes(int64(len(bytes.Join(testEncodings, nil)))) } // i5-4670K, 2059112: 36 MB/s func BenchmarkUnmarshalAll(b *testing.B) { var emptyStructs = []interface{}{&test0{}, &test1{}, &test2{}, &test3{}, &test4{}, &test5{}, &test6{}} structBytes := bytes.Join(testEncodings, nil) for i := 0; i < b.N; i++ { err := UnmarshalAll(structBytes, emptyStructs...) if err != nil { b.Fatal(err) } } b.SetBytes(int64(len(structBytes))) } Sia-1.3.0/encoding/prefix.go000066400000000000000000000024111313565667000156530ustar00rootroot00000000000000package encoding import ( "fmt" "io" ) // ReadPrefix reads an 8-byte length prefix, followed by the number of bytes // specified in the prefix. The operation is aborted if the prefix exceeds a // specified maximum length. func ReadPrefix(r io.Reader, maxLen uint64) ([]byte, error) { prefix := make([]byte, 8) if _, err := io.ReadFull(r, prefix); err != nil { return nil, err } dataLen := DecUint64(prefix) if dataLen > maxLen { return nil, fmt.Errorf("length %d exceeds maxLen of %d", dataLen, maxLen) } // read dataLen bytes data := make([]byte, dataLen) _, err := io.ReadFull(r, data) return data, err } // ReadObject reads and decodes a length-prefixed and marshalled object. func ReadObject(r io.Reader, obj interface{}, maxLen uint64) error { data, err := ReadPrefix(r, maxLen) if err != nil { return err } return Unmarshal(data, obj) } // WritePrefix writes a length-prefixed byte slice to w.
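// // For example, WritePrefix(w, []byte("foo")) writes the 11 bytes // {3, 0, 0, 0, 0, 0, 0, 0, 'f', 'o', 'o'}: an 8-byte little-endian length // prefix followed by the data itself.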
func WritePrefix(w io.Writer, data []byte) error { err := WriteInt(w, len(data)) if err != nil { return err } n, err := w.Write(data) if err == nil && n != len(data) { err = io.ErrShortWrite } return err } // WriteObject writes a length-prefixed object to w. func WriteObject(w io.Writer, v interface{}) error { return WritePrefix(w, Marshal(v)) } Sia-1.3.0/encoding/prefix_test.go000066400000000000000000000070211313565667000167140ustar00rootroot00000000000000package encoding import ( "bytes" "io" "testing" ) // badReader/Writer used to test error handling type badReader struct{} func (br *badReader) Read([]byte) (int, error) { return 0, io.EOF } type badWriter struct{} func (bw *badWriter) Write([]byte) (int, error) { return 0, nil } func TestReadPrefix(t *testing.T) { b := new(bytes.Buffer) // standard b.Write(append(EncUint64(3), "foo"...)) data, err := ReadPrefix(b, 3) if err != nil { t.Error(err) } else if string(data) != "foo" { t.Errorf("expected foo, got %s", data) } // 0-length b.Write(EncUint64(0)) _, err = ReadPrefix(b, 0) if err != nil { t.Error(err) } // empty b.Write([]byte{}) _, err = ReadPrefix(b, 3) if err != io.EOF { t.Error("expected EOF, got", err) } // less than 8 bytes b.Write([]byte{1, 2, 3}) _, err = ReadPrefix(b, 3) if err != io.ErrUnexpectedEOF { t.Error("expected unexpected EOF, got", err) } // exceed maxLen b.Write(EncUint64(4)) _, err = ReadPrefix(b, 3) if err == nil || err.Error() != "length 4 exceeds maxLen of 3" { t.Error("expected maxLen error, got", err) } // no data after length prefix b.Write(EncUint64(3)) _, err = ReadPrefix(b, 3) if err != io.EOF { t.Error("expected EOF, got", err) } } func TestReadObject(t *testing.T) { b := new(bytes.Buffer) var obj string // standard b.Write(EncUint64(11)) b.Write(append(EncUint64(3), "foo"...)) err := ReadObject(b, &obj, 11) if err != nil { t.Error(err) } else if obj != "foo" { t.Errorf("expected foo, got %s", obj) } // empty b.Write([]byte{}) err = ReadObject(b, &obj, 0) if err != io.EOF { t.Error("expected EOF, got", err) } // bad object b.Write(EncUint64(3)) b.WriteString("foo") // strings need an additional length prefix err = ReadObject(b, &obj, 3) if err == nil || err.Error() != "could not decode type string: "+io.ErrUnexpectedEOF.Error() { t.Error("expected unexpected EOF, got", err) } } func TestWritePrefix(t *testing.T) { b := new(bytes.Buffer) // standard err := WritePrefix(b, []byte("foo")) expected := append(EncUint64(3), "foo"...) if err != nil { t.Error(err) } else if !bytes.Equal(b.Bytes(), expected) { t.Errorf("WritePrefix wrote wrong data: expected %v, got %v", expected, b.Bytes()) } // badWriter (returns nil error, but doesn't write anything) bw := new(badWriter) err = WritePrefix(bw, []byte("foo")) if err != io.ErrShortWrite { t.Error("expected ErrShortWrite, got", err) } } func TestWriteObject(t *testing.T) { b := new(bytes.Buffer) // standard err := WriteObject(b, "foo") expected := append(EncUint64(11), append(EncUint64(3), "foo"...)...)
if err != nil { t.Error(err) } else if !bytes.Equal(b.Bytes(), expected) { t.Errorf("WriteObject wrote wrong data: expected %v, got %v", expected, b.Bytes()) } // badWriter bw := new(badWriter) err = WriteObject(bw, "foo") if err != io.ErrShortWrite { t.Error("expected ErrShortWrite, got", err) } } func TestReadWritePrefix(t *testing.T) { b := new(bytes.Buffer) // WritePrefix -> ReadPrefix data := []byte("foo") err := WritePrefix(b, data) if err != nil { t.Fatal(err) } rdata, err := ReadPrefix(b, 100) if err != nil { t.Error(err) } else if !bytes.Equal(rdata, data) { t.Errorf("read/write mismatch: wrote %s, read %s", data, rdata) } // WriteObject -> ReadObject obj := "bar" err = WriteObject(b, obj) if err != nil { t.Fatal(err) } var robj string err = ReadObject(b, &robj, 100) if err != nil { t.Error(err) } else if robj != obj { t.Errorf("read/write mismatch: wrote %s, read %s", obj, robj) } } Sia-1.3.0/modules/000077500000000000000000000000001313565667000137135ustar00rootroot00000000000000Sia-1.3.0/modules/consensus.go000066400000000000000000000246621313565667000162720ustar00rootroot00000000000000package modules import ( "errors" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/types" ) const ( // ConsensusDir is the name of the directory used for all of the consensus // persistence files. ConsensusDir = "consensus" // DiffApply indicates that a diff is being applied to the consensus set. DiffApply DiffDirection = true // DiffRevert indicates that a diff is being reverted from the consensus // set. DiffRevert DiffDirection = false ) var ( // ConsensusChangeBeginning is a special consensus change id that tells the // consensus set to provide all consensus changes starting from the very // first diff, which includes the genesis block diff. ConsensusChangeBeginning = ConsensusChangeID{} // ConsensusChangeRecent is a special consensus change id that tells the // consensus set to provide the most recent consensus change, instead of // starting from a specific value (which may not be known to the caller). ConsensusChangeRecent = ConsensusChangeID{1} // ErrBlockKnown is an error indicating that a block is already in the // database. ErrBlockKnown = errors.New("block already present in database") // ErrBlockUnsolved indicates that a block did not meet the required POW // target. ErrBlockUnsolved = errors.New("block does not meet target") // ErrInvalidConsensusChangeID indicates that ConsensusSetPersistSubscribe // was called with a consensus change id that is not recognized. Most // commonly, this means that the consensus set was deleted or replaced and // now the module attempting the subscription has desynchronized. This error // should be handled by the module, and not reported to the user. ErrInvalidConsensusChangeID = errors.New("consensus subscription has invalid id - files are inconsistent") // ErrNonExtendingBlock indicates that a block is valid but does not result // in a fork that is the heaviest known fork - the consensus set has not // changed as a result of seeing the block. ErrNonExtendingBlock = errors.New("block does not extend the longest fork") ) type ( // ConsensusChangeID is the id of a consensus change. ConsensusChangeID crypto.Hash // A DiffDirection indicates the "direction" of a diff, either applied or // reverted. A bool is used to restrict the value to these two possibilities. DiffDirection bool // A ConsensusSetSubscriber is an object that receives updates to the consensus // set every time there is a change in consensus.
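// // A minimal subscriber might look like the following sketch (hypothetical, // not part of this package), which tracks the net number of blocks applied // to the chain: // //	type blockCounter struct{ n int } // //	func (bc *blockCounter) ProcessConsensusChange(cc ConsensusChange) { //		bc.n += len(cc.AppliedBlocks) - len(cc.RevertedBlocks) //	}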
ConsensusSetSubscriber interface { // ProcessConsensusChange sends a consensus update to a module through // a function call. Updates will always be sent in the correct order. // There may not be any reverted blocks, but there will always be // applied blocks. ProcessConsensusChange(ConsensusChange) } // A ConsensusChange enumerates a set of changes that occurred to the consensus set. ConsensusChange struct { // ID is a unique id for the consensus change derived from the reverted // and applied blocks. ID ConsensusChangeID // RevertedBlocks is the list of blocks that were reverted by the change. // The reverted blocks were always all reverted before the applied blocks // were applied. The reverted blocks are presented in the order that they // were reverted. RevertedBlocks []types.Block // AppliedBlocks is the list of blocks that were applied by the change. The // applied blocks are always all applied after all the reverted blocks were // reverted. The applied blocks are presented in the order that they were // applied. AppliedBlocks []types.Block // SiacoinOutputDiffs contains the set of siacoin diffs that were applied // to the consensus set in the recent change. The direction for the set of // diffs is 'DiffApply'. SiacoinOutputDiffs []SiacoinOutputDiff // FileContractDiffs contains the set of file contract diffs that were // applied to the consensus set in the recent change. The direction for the // set of diffs is 'DiffApply'. FileContractDiffs []FileContractDiff // SiafundOutputDiffs contains the set of siafund diffs that were applied // to the consensus set in the recent change. The direction for the set of // diffs is 'DiffApply'. SiafundOutputDiffs []SiafundOutputDiff // DelayedSiacoinOutputDiffs contains the set of delayed siacoin output // diffs that were applied to the consensus set in the recent change. DelayedSiacoinOutputDiffs []DelayedSiacoinOutputDiff // SiafundPoolDiffs are the siafund pool diffs that were applied to the // consensus set in the recent change. SiafundPoolDiffs []SiafundPoolDiff // ChildTarget defines the target of any block that would be the child // of the block most recently appended to the consensus set. ChildTarget types.Target // MinimumValidChildTimestamp defines the minimum allowed timestamp for // any block that is the child of the block most recently appended to // the consensus set. MinimumValidChildTimestamp types.Timestamp // Synced indicates whether or not the ConsensusSet is synced with its // peers. Synced bool // TryTransactionSet is an unlocked version of // ConsensusSet.TryTransactionSet. This allows the TryTransactionSet // function to be called by a subscriber during // ProcessConsensusChange. TryTransactionSet func([]types.Transaction) (ConsensusChange, error) } // A SiacoinOutputDiff indicates the addition or removal of a SiacoinOutput in // the consensus set. SiacoinOutputDiff struct { Direction DiffDirection ID types.SiacoinOutputID SiacoinOutput types.SiacoinOutput } // A FileContractDiff indicates the addition or removal of a FileContract in // the consensus set. FileContractDiff struct { Direction DiffDirection ID types.FileContractID FileContract types.FileContract } // A SiafundOutputDiff indicates the addition or removal of a SiafundOutput in // the consensus set. SiafundOutputDiff struct { Direction DiffDirection ID types.SiafundOutputID SiafundOutput types.SiafundOutput } // A DelayedSiacoinOutputDiff indicates the introduction of a siacoin output // that cannot be spent until after maturing for 144 blocks.
When the output // has matured, a SiacoinOutputDiff will be provided. DelayedSiacoinOutputDiff struct { Direction DiffDirection ID types.SiacoinOutputID SiacoinOutput types.SiacoinOutput MaturityHeight types.BlockHeight } // A SiafundPoolDiff contains the value of the siafundPool before the block // was applied, and after the block was applied. When applying the diff, set // siafundPool to 'Adjusted'. When reverting the diff, set siafundPool to // 'Previous'. SiafundPoolDiff struct { Direction DiffDirection Previous types.Currency Adjusted types.Currency } // A ConsensusSet accepts blocks and builds an understanding of network // consensus. ConsensusSet interface { // AcceptBlock adds a block to consensus. An error will be returned if the // block is invalid, has been seen before, is an orphan, or doesn't // contribute to the heaviest fork known to the consensus set. If the block // does not become the head of the heaviest known fork but is otherwise // valid, it will be remembered by the consensus set but an error will // still be returned. AcceptBlock(types.Block) error // BlockAtHeight returns the block found at the input height, with a // bool to indicate whether that block exists. BlockAtHeight(types.BlockHeight) (types.Block, bool) // ChildTarget returns the target required to extend the current heaviest // fork. This function is typically used by miners looking to extend the // heaviest fork. ChildTarget(types.BlockID) (types.Target, bool) // Close will shut down the consensus set, giving the module enough time to // run any required closing routines. Close() error // ConsensusSetSubscribe adds a subscriber to the list of subscribers // and gives them every consensus change that has occurred since the // change with the provided id. There are a few special cases, // described by the ConsensusChangeX variables in this package. ConsensusSetSubscribe(ConsensusSetSubscriber, ConsensusChangeID) error // CurrentBlock returns the latest block in the heaviest known // blockchain. CurrentBlock() types.Block // Flush will cause the consensus set to finish all in-progress // routines. Flush() error // Height returns the current height of consensus. Height() types.BlockHeight // Synced returns true if the consensus set is synced with the network. Synced() bool // InCurrentPath returns true if the block id presented is found in the // current path, false otherwise. InCurrentPath(types.BlockID) bool // MinimumValidChildTimestamp returns the earliest timestamp that is // valid on the current longest fork according to the consensus set. This is // a required piece of information for the miner, who could otherwise be at // risk of mining invalid blocks. MinimumValidChildTimestamp(types.BlockID) (types.Timestamp, bool) // StorageProofSegment returns the segment to be used in the storage proof for // a given file contract. StorageProofSegment(types.FileContractID) (uint64, error) // TryTransactionSet checks whether the transaction set would be valid if // it were added in the next block. A consensus change is returned // detailing the diffs that would result from the application of the // transaction. TryTransactionSet([]types.Transaction) (ConsensusChange, error) // Unsubscribe removes a subscriber from the list of subscribers, // allowing for garbage collection and rescanning. If the subscriber is // not found in the subscriber database, no action is taken. Unsubscribe(ConsensusSetSubscriber) } ) // Append takes two ConsensusChange objects and adds all of their diffs together.
// // NOTE: It is possible for diffs to overlap or be inconsistent. This function // should only be used with consecutive or disjoint consensus change objects. func (cc ConsensusChange) Append(cc2 ConsensusChange) ConsensusChange { return ConsensusChange{ RevertedBlocks: append(cc.RevertedBlocks, cc2.RevertedBlocks...), AppliedBlocks: append(cc.AppliedBlocks, cc2.AppliedBlocks...), SiacoinOutputDiffs: append(cc.SiacoinOutputDiffs, cc2.SiacoinOutputDiffs...), FileContractDiffs: append(cc.FileContractDiffs, cc2.FileContractDiffs...), SiafundOutputDiffs: append(cc.SiafundOutputDiffs, cc2.SiafundOutputDiffs...), DelayedSiacoinOutputDiffs: append(cc.DelayedSiacoinOutputDiffs, cc2.DelayedSiacoinOutputDiffs...), } } Sia-1.3.0/modules/consensus/000077500000000000000000000000001313565667000157335ustar00rootroot00000000000000Sia-1.3.0/modules/consensus/accept.go000066400000000000000000000307331313565667000175270ustar00rootroot00000000000000package consensus import ( "bytes" "errors" "os" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) var ( errDoSBlock = errors.New("block is known to be invalid") errNoBlockMap = errors.New("block map is not in database") errInconsistentSet = errors.New("consensus set is not in a consistent state") errOrphan = errors.New("block has no known parent") errNonLinearChain = errors.New("block set is not a contiguous chain") ) // managedBroadcastBlock will broadcast a block to the consensus set's peers. func (cs *ConsensusSet) managedBroadcastBlock(b types.Block) { // broadcast the block header to all peers go cs.gateway.Broadcast("RelayHeader", b.Header(), cs.gateway.Peers()) } // validateHeaderAndBlock does some early, low computation verification on the // block. Callers should not assume that validation will happen in a particular // order. func (cs *ConsensusSet) validateHeaderAndBlock(tx dbTx, b types.Block, id types.BlockID) (parent *processedBlock, err error) { // Check if the block is a DoS block - a known invalid block that is expensive // to validate. _, exists := cs.dosBlocks[id] if exists { return nil, errDoSBlock } // Check if the block is already known. blockMap := tx.Bucket(BlockMap) if blockMap == nil { return nil, errNoBlockMap } if blockMap.Get(id[:]) != nil { return nil, modules.ErrBlockKnown } // Check for the parent. parentID := b.ParentID parentBytes := blockMap.Get(parentID[:]) if parentBytes == nil { return nil, errOrphan } parent = new(processedBlock) err = cs.marshaler.Unmarshal(parentBytes, parent) if err != nil { return nil, err } // Check that the timestamp is not too far in the past to be acceptable. minTimestamp := cs.blockRuleHelper.minimumValidChildTimestamp(blockMap, parent) err = cs.blockValidator.ValidateBlock(b, id, minTimestamp, parent.ChildTarget, parent.Height+1, cs.log) if err != nil { return nil, err } return parent, nil } // checkHeaderTarget returns true if the header's ID meets the given target. func checkHeaderTarget(h types.BlockHeader, target types.Target) bool { blockHash := h.ID() return bytes.Compare(target[:], blockHash[:]) >= 0 } // validateHeader does some early, low computation verification on the header // to determine if the block should be downloaded. Callers should not assume // that validation will happen in a particular order. func (cs *ConsensusSet) validateHeader(tx dbTx, h types.BlockHeader) error { // Check if the block is a DoS block - a known invalid block that is expensive // to validate. 
id := h.ID() _, exists := cs.dosBlocks[id] if exists { return errDoSBlock } // Check if the block is already known. blockMap := tx.Bucket(BlockMap) if blockMap == nil { return errNoBlockMap } if blockMap.Get(id[:]) != nil { return modules.ErrBlockKnown } // Check for the parent. parentID := h.ParentID parentBytes := blockMap.Get(parentID[:]) if parentBytes == nil { return errOrphan } var parent processedBlock err := cs.marshaler.Unmarshal(parentBytes, &parent) if err != nil { return err } // Check that the target of the new block is sufficient. if !checkHeaderTarget(h, parent.ChildTarget) { return modules.ErrBlockUnsolved } // TODO: check if the block is a non extending block once headers-first // downloads are implemented. // Check that the timestamp is not too far in the past to be acceptable. minTimestamp := cs.blockRuleHelper.minimumValidChildTimestamp(blockMap, &parent) if minTimestamp > h.Timestamp { return errEarlyTimestamp } // Check if the block is in the extreme future. We make a distinction between // future and extreme future because there is an assumption that by the time // the extreme future arrives, this block will no longer be a part of the // longest fork because it will have been ignored by all of the miners. if h.Timestamp > types.CurrentTimestamp()+types.ExtremeFutureThreshold { return errExtremeFutureTimestamp } // We do not check if the header is in the near future here, because we want // to get the corresponding block as soon as possible, even if the block is in // the near future. return nil } // addBlockToTree inserts a block into the blockNode tree by adding it to its // parent's list of children. If the new blockNode is heavier than the current // node, the blockchain is forked to put the new block and its parents at the // tip. An error will be returned if block verification fails or if the block // does not extend the longest fork. // // addBlockToTree might need to modify the database while returning an error // on the block. Such errors are handled outside of the transaction by the // caller. Switching to a managed tx through bolt will make this complexity // unneeded. func (cs *ConsensusSet) addBlockToTree(tx *bolt.Tx, b types.Block, parent *processedBlock) (ce changeEntry, err error) { // Prepare the child processed block associated with the parent block. newNode := cs.newChild(tx, parent, b) // Check whether the new node is part of a chain that is heavier than the // current node. If not, return ErrNonExtending and don't fork the // blockchain. currentNode := currentProcessedBlock(tx) if !newNode.heavierThan(currentNode) { return changeEntry{}, modules.ErrNonExtendingBlock } // Fork the blockchain and put the new heaviest block at the tip of the // chain. var revertedBlocks, appliedBlocks []*processedBlock revertedBlocks, appliedBlocks, err = cs.forkBlockchain(tx, newNode) if err != nil { return changeEntry{}, err } for _, rn := range revertedBlocks { ce.RevertedBlocks = append(ce.RevertedBlocks, rn.Block.ID()) } for _, an := range appliedBlocks { ce.AppliedBlocks = append(ce.AppliedBlocks, an.Block.ID()) } err = appendChangeLog(tx, ce) if err != nil { return changeEntry{}, err } return ce, nil } // threadedSleepOnFutureBlock will sleep until the timestamp of a future block // has arrived. // // TODO: An attacker can broadcast a future block multiple times, resulting in a // goroutine spinup for each future block. Need to prevent that. // // TODO: An attacker could produce a very large number of future blocks, // consuming memory. Need to prevent that. 
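// // The soft-sleep below lasts b.Timestamp - (now + types.FutureThreshold) // seconds: the block is retried as soon as it is no longer more than // FutureThreshold seconds in the future, rather than at its full timestamp.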
func (cs *ConsensusSet) threadedSleepOnFutureBlock(b types.Block) { // Add this thread to the threadgroup. err := cs.tg.Add() if err != nil { return } defer cs.tg.Done() // Perform a soft-sleep while we wait for the block to become valid. select { case <-cs.tg.StopChan(): return case <-time.After(time.Duration(b.Timestamp-(types.CurrentTimestamp()+types.FutureThreshold)) * time.Second): _, err := cs.managedAcceptBlocks([]types.Block{b}) if err != nil { cs.log.Debugln("WARN: failed to accept a future block:", err) } cs.managedBroadcastBlock(b) } } // managedAcceptBlocks will try to add blocks to the consensus set. If the // blocks do not extend the longest currently known chain, an error is // returned but the blocks are still kept in memory. If the blocks extend a fork // such that the fork becomes the longest currently known chain, the consensus // set will reorganize itself to recognize the new longest fork. Accepted // blocks are not relayed. // // Typically AcceptBlock should be used so that the accepted block is relayed. // This method should typically only be used when there would otherwise be multiple // consecutive calls to AcceptBlock with each successive call accepting the // child block of the previous call. func (cs *ConsensusSet) managedAcceptBlocks(blocks []types.Block) (blockchainExtended bool, err error) { // Grab a lock on the consensus set. cs.mu.Lock() defer cs.mu.Unlock() // Make sure that blocks are consecutive. Though this isn't a strict // requirement, if blocks are not consecutive then it becomes a lot harder // to maintain correctness when adding multiple blocks in a single tx. // // This is the first time that IDs on the blocks have been computed. blockIDs := make([]types.BlockID, 0, len(blocks)) for i := 0; i < len(blocks); i++ { blockIDs = append(blockIDs, blocks[i].ID()) if i > 0 && blocks[i].ParentID != blockIDs[i-1] { return false, errNonLinearChain } } // Verify the headers for every block, throwing out known blocks and // invalid blocks (which includes the children of invalid blocks). chainExtended := false changes := make([]changeEntry, 0, len(blocks)) validBlocks := make([]types.Block, 0, len(blocks)) parents := make([]*processedBlock, 0, len(blocks)) setErr := cs.db.Update(func(tx *bolt.Tx) error { for i := 0; i < len(blocks); i++ { // Start by checking the header of the block. parent, err := cs.validateHeaderAndBlock(boltTxWrapper{tx}, blocks[i], blockIDs[i]) if err == modules.ErrBlockKnown { // Skip over known blocks. continue } if err == errFutureTimestamp { // Queue the block to be tried again if it is a future block. go cs.threadedSleepOnFutureBlock(blocks[i]) } if err != nil { return err } // Try adding the block to consensus. changeEntry, err := cs.addBlockToTree(tx, blocks[i], parent) if err == nil { changes = append(changes, changeEntry) chainExtended = true } if err == modules.ErrNonExtendingBlock { err = nil } if err != nil { return err } // Sanity check - If applied blocks is zero, reverted blocks should // also be zero. if build.DEBUG && len(changeEntry.AppliedBlocks) == 0 && len(changeEntry.RevertedBlocks) != 0 { panic("after adding a change entry, there are no applied blocks but there are reverted blocks") } // Append to the set of changes, and append the valid block.
validBlocks = append(validBlocks, blocks[i]) parents = append(parents, parent) } return nil }) if _, ok := setErr.(bolt.MmapError); ok { cs.log.Println("ERROR: Bolt mmap failed:", setErr) println("Blockchain database has run out of disk space!") os.Exit(1) } if setErr != nil { // Check if any blocks were valid. if len(validBlocks) < 1 { // Nothing more to do, the first block was invalid. return false, setErr } // At least some of the blocks were valid. Add the valid blocks before // returning, since we've already done the downloading and header // validation. verifyExtended := false err := cs.db.Update(func(tx *bolt.Tx) error { for i := 0; i < len(validBlocks); i++ { _, err := cs.addBlockToTree(tx, validBlocks[i], parents[i]) if err == nil { verifyExtended = true } if err != modules.ErrNonExtendingBlock && err != nil { return err } } return nil }) // Sanity check - verifyExtended should match chainExtended. if build.DEBUG && verifyExtended != chainExtended { panic("chain extension logic does not match up between first and last attempt") } // Something has gone wrong. Maybe the filesystem is having errors for // example. But under normal conditions, this code should not be // reached. If it is, return early because both attempts to add blocks // have failed. if err != nil { return false, err } } // Stop here if the blocks did not extend the longest blockchain. if !chainExtended { return false, modules.ErrNonExtendingBlock } // Sanity check - if we get here, len(changes) should be non-zero. if build.DEBUG && len(changes) == 0 { panic("changes is empty, but this code should not be reached if no blocks got added") } // Update the subscribers with all of the consensus changes. for _, change := range changes { cs.updateSubscribers(change) } // If there were valid blocks and invalid blocks in the set that was // provided, then the setErr is not going to be nil. Return the set error to // the caller. if setErr != nil { return chainExtended, setErr } return chainExtended, nil } // AcceptBlock will try to add a block to the consensus set. If the block does // not extend the longest currently known chain, an error is returned but the // block is still kept in memory. If the block extends a fork such that the // fork becomes the longest currently known chain, the consensus set will // reorganize itself to recognize the new longest fork. If a block is accepted // without error, it will be relayed to all connected peers. This function // should only be called for new blocks. func (cs *ConsensusSet) AcceptBlock(b types.Block) error { err := cs.tg.Add() if err != nil { return err } defer cs.tg.Done() chainExtended, err := cs.managedAcceptBlocks([]types.Block{b}) if err != nil { return err } if chainExtended { cs.managedBroadcastBlock(b) } return nil } Sia-1.3.0/modules/consensus/accept_bench_test.go000066400000000000000000000107251313565667000217240ustar00rootroot00000000000000package consensus import ( "path/filepath" "strconv" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/gateway" "github.com/NebulousLabs/Sia/types" ) // BenchmarkAcceptEmptyBlocks measures how quickly empty blocks are integrated // into the consensus set.
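// Each block is mined on the fully-subscribed tester with the benchmark timer // stopped; only the AcceptBlock call on a second, subscriber-free consensus // set is timed, isolating raw block acceptance from subscriber notification // costs.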
// // i7-4770, 1d60d69: 1.356 ms / op func BenchmarkAcceptEmptyBlocks(b *testing.B) { cst, err := createConsensusSetTester(b.Name() + strconv.Itoa(b.N)) if err != nil { b.Fatal("Error creating tester: " + err.Error()) } defer cst.Close() // Create an alternate testing consensus set, which does not // have any subscribers testdir := build.TempDir(modules.ConsensusDir, "BenchmarkEmptyBlocks - 2") g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { b.Fatal(err) } cs, err := New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { b.Fatal(err) } defer cs.Close() // Synchronize the cst and the subscriberless consensus set. h := cst.cs.dbBlockHeight() for i := types.BlockHeight(1); i <= h; i++ { id, err := cst.cs.dbGetPath(i) if err != nil { b.Fatal(err) } processedBlock, err := cst.cs.dbGetBlockMap(id) if err != nil { b.Fatal(err) } err = cs.AcceptBlock(processedBlock.Block) if err != nil { b.Fatal(err) } } b.ResetTimer() b.StopTimer() for j := 0; j < b.N; j++ { // Submit a block to the consensus set tester - which has many // subscribers. (untimed) block, err := cst.miner.AddBlock() if err != nil { b.Fatal(err) } // Submit a block to the consensus set which has no subscribers. // (timed) b.StartTimer() err = cs.AcceptBlock(block) if err != nil { b.Fatal("error accepting a block:", err) } b.StopTimer() } } // BenchmarkAcceptSmallBlocks measures how quickly smaller blocks are // integrated into the consensus set. // // i7-4770, 1d60d69: 3.579 ms / op func BenchmarkAcceptSmallBlocks(b *testing.B) { cst, err := createConsensusSetTester(b.Name() + strconv.Itoa(b.N)) if err != nil { b.Fatal(err) } defer cst.Close() // COMPAT v0.4.0 // // Push the height of the consensus set tester beyond the fork height. for i := 0; i < 10; i++ { _, err := cst.miner.AddBlock() if err != nil { b.Fatal(err) } } // Create an alternate testing consensus set, which does not // have any subscribers testdir := build.TempDir(modules.ConsensusDir, "BenchmarkAcceptSmallBlocks - 2") g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { b.Fatal(err) } cs, err := New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { b.Fatal("Error creating consensus: " + err.Error()) } defer cs.Close() // Synchronize the consensus set with the consensus set tester. h := cst.cs.dbBlockHeight() for i := types.BlockHeight(1); i <= h; i++ { id, err := cst.cs.dbGetPath(i) if err != nil { b.Fatal(err) } processedBlock, err := cst.cs.dbGetBlockMap(id) if err != nil { b.Fatal(err) } err = cs.AcceptBlock(processedBlock.Block) if err != nil { b.Fatal(err) } } b.ResetTimer() b.StopTimer() for j := 0; j < b.N; j++ { // Create a transaction with a miner fee, a normal siacoin output, and // a funded file contract. txnBuilder := cst.wallet.StartTransaction() err = txnBuilder.FundSiacoins(types.NewCurrency64(125e6)) if err != nil { b.Fatal(err) } // Add a small miner fee. txnBuilder.AddMinerFee(types.NewCurrency64(5e6)) // Add a siacoin output. txnBuilder.AddSiacoinOutput(types.SiacoinOutput{Value: types.NewCurrency64(20e6)}) // Add a file contract.
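// Note that the 96100e3 proof outputs below equal the 100e6 payout minus the // 3.9% siafund tax: 100e6 * (1 - 0.039) = 96.1e6.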
fc := types.FileContract{ WindowStart: 1000, WindowEnd: 10005, Payout: types.NewCurrency64(100e6), ValidProofOutputs: []types.SiacoinOutput{{ Value: types.NewCurrency64(96100e3), }}, MissedProofOutputs: []types.SiacoinOutput{{ Value: types.NewCurrency64(96100e3), }}, } txnBuilder.AddFileContract(fc) txnSet, err := txnBuilder.Sign(true) if err != nil { b.Fatal(err) } // Submit the transaction set and mine the block. err = cst.tpool.AcceptTransactionSet(txnSet) if err != nil { b.Fatal(err) } block, err := cst.miner.AddBlock() if err != nil { b.Fatal(err) } // Submit the block to the consensus set without subscribers, timing // how long it takes for the block to get accepted. b.StartTimer() err = cs.AcceptBlock(block) if err != nil { b.Fatal(err) } b.StopTimer() } } Sia-1.3.0/modules/consensus/accept_reorg_test.go000066400000000000000000000232621313565667000217630ustar00rootroot00000000000000package consensus import ( "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // reorgSets contains multiple consensus sets that share a genesis block, which // can be manipulated to cause full integration blockchain reorgs. // // cstBackup is a holding place for cstMain - the blocks originally in cstMain get moved // to cstBackup so that cstMain can be reorganized without that history being lost. // Extending cstBackup will allow cstMain to be reorg'd back to its original blocks. type reorgSets struct { cstMain *consensusSetTester cstAlt *consensusSetTester cstBackup *consensusSetTester } // Close will close all of the testers in the reorgSets. Because we don't yet // have a good way to check errors on a deferred statement, a panic will be // thrown if there are any problems closing the reorgSets. func (rs *reorgSets) Close() error { err := rs.cstMain.Close() if err != nil { panic(err) } err = rs.cstAlt.Close() if err != nil { panic(err) } err = rs.cstBackup.Close() if err != nil { panic(err) } return nil } // createReorgSets creates a reorg set that is ready to be manipulated. func createReorgSets(name string) *reorgSets { cstMain, err := createConsensusSetTester(name + " - 1") if err != nil { panic(err) } cstAlt, err := createConsensusSetTester(name + " - 2") if err != nil { panic(err) } cstBackup, err := createConsensusSetTester(name + " - 3") if err != nil { panic(err) } return &reorgSets{ cstMain: cstMain, cstAlt: cstAlt, cstBackup: cstBackup, } } // save takes all of the blocks in cstMain and moves them to cstBackup. func (rs *reorgSets) save() { mainHeight := rs.cstMain.cs.dbBlockHeight() for i := types.BlockHeight(1); i <= mainHeight; i++ { id, err := rs.cstMain.cs.dbGetPath(i) if err != nil { panic(err) } pb, err := rs.cstMain.cs.dbGetBlockMap(id) if err != nil { panic(err) } // err is not checked - block may already be in cstBackup. _ = rs.cstBackup.cs.AcceptBlock(pb.Block) } // Check that cstMain and cstBackup are even. if rs.cstMain.cs.dbCurrentProcessedBlock().Block.ID() != rs.cstBackup.cs.dbCurrentProcessedBlock().Block.ID() { panic("could not save cstMain into cstBackup") } if rs.cstMain.cs.dbConsensusChecksum() != rs.cstBackup.cs.dbConsensusChecksum() { panic("reorg checksums do not match after saving") } } // extend adds blocks to cstAlt until cstAlt has more weight than cstMain. Then // cstMain is caught up, causing cstMain to perform a reorg that extends all // the way to the genesis block. 
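// // AcceptBlock errors are deliberately ignored inside extend: most of cstAlt's // blocks do not individually extend cstMain's current fork, and the ID and // checksum comparisons at the end of extend verify that the reorg succeeded.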
func (rs *reorgSets) extend() { for rs.cstMain.cs.dbBlockHeight() >= rs.cstAlt.cs.dbBlockHeight() { _, err := rs.cstAlt.miner.AddBlock() if err != nil { panic(err) } } for i := types.BlockHeight(1); i <= rs.cstAlt.cs.dbBlockHeight(); i++ { id, err := rs.cstAlt.cs.dbGetPath(i) if err != nil { panic(err) } pb, err := rs.cstAlt.cs.dbGetBlockMap(id) if err != nil { panic(err) } _ = rs.cstMain.cs.AcceptBlock(pb.Block) } // Check that cstMain and cstAlt are even. if rs.cstMain.cs.dbCurrentProcessedBlock().Block.ID() != rs.cstAlt.cs.dbCurrentProcessedBlock().Block.ID() { panic("could not extend cstMain to match cstAlt") } if rs.cstMain.cs.dbConsensusChecksum() != rs.cstAlt.cs.dbConsensusChecksum() { panic("reorg checksums do not match after extending") } } // restore extends cstBackup until it is ahead of cstMain, and then adds all of // the blocks from cstBackup to cstMain, causing cstMain to reorg to the state // of cstBackup. func (rs *reorgSets) restore() { for rs.cstMain.cs.dbBlockHeight() >= rs.cstBackup.cs.dbBlockHeight() { _, err := rs.cstBackup.miner.AddBlock() if err != nil { panic(err) } } for i := types.BlockHeight(1); i <= rs.cstBackup.cs.dbBlockHeight(); i++ { id, err := rs.cstBackup.cs.dbGetPath(i) if err != nil { panic(err) } pb, err := rs.cstBackup.cs.dbGetBlockMap(id) if err != nil { panic(err) } _ = rs.cstMain.cs.AcceptBlock(pb.Block) } // Check that cstMain and cstBackup are even. if rs.cstMain.cs.dbCurrentProcessedBlock().Block.ID() != rs.cstBackup.cs.dbCurrentProcessedBlock().Block.ID() { panic("could not restore cstBackup into cstMain") } if rs.cstMain.cs.dbConsensusChecksum() != rs.cstBackup.cs.dbConsensusChecksum() { panic("reorg checksums do not match after restoring") } } // fullReorg saves all of the blocks from cstMain into cstBackup, then extends // cstAlt until cstMain joins cstAlt in structure. Then cstBackup is extended // and cstMain is reorg'd back to have all of the original blocks. func (rs *reorgSets) fullReorg() { rs.save() rs.extend() rs.restore() } // TestIntegrationSimpleReorg tries to reorganize a simple block out of, and // then back into, the consensus set. func TestIntegrationSimpleReorg(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() rs := createReorgSets(t.Name()) defer rs.Close() // Give a simple block to cstMain. rs.cstMain.testSimpleBlock() // Try to trigger consensus inconsistencies by doing a full reorg on the // simple block. rs.fullReorg() } // TestIntegrationSiacoinReorg tries to reorganize a siacoin output block out // of, and then back into, the consensus set. func TestIntegrationSiacoinReorg(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() rs := createReorgSets(t.Name()) defer rs.Close() // Give a siacoin block to cstMain. rs.cstMain.testSpendSiacoinsBlock() // Try to trigger consensus inconsistencies by doing a full reorg on the // siacoin block. rs.fullReorg() } // TestIntegrationValidStorageProofReorg tries to reorganize a valid storage // proof block out of, and then back into, the consensus set. func TestIntegrationValidStorageProofReorg(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() rs := createReorgSets(t.Name()) defer rs.Close() // Give a series of blocks containing a file contract and a valid storage // proof to cstMain. rs.cstMain.testValidStorageProofBlocks() // Try to trigger consensus inconsistencies by doing a full reorg on the // storage proof blocks.
rs.fullReorg() } // TestIntegrationMissedStorageProofReorg tries to reorganize a missed storage // proof block out of, and then back into, the consensus set. func TestIntegrationMissedStorageProofReorg(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() rs := createReorgSets(t.Name()) defer rs.Close() // Give a series of blocks containing a file contract and a missed storage // proof to cstMain. rs.cstMain.testMissedStorageProofBlocks() // Try to trigger consensus inconsistencies by doing a full reorg on those // blocks. rs.fullReorg() } // TestIntegrationFileContractRevisionReorg tries to reorganize a file // contract revision block out of, and then back into, the consensus set. func TestIntegrationFileContractRevisionReorg(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() rs := createReorgSets(t.Name()) defer rs.Close() // Give a series of blocks containing a file contract and a file contract // revision to cstMain. rs.cstMain.testFileContractRevision() // Try to trigger consensus inconsistencies by doing a full reorg on those // blocks. rs.fullReorg() } // TestIntegrationComplexReorg stacks up blocks of all types into a single // blockchain that undergoes a massive reorg as a stress test to the codebase. func TestIntegrationComplexReorg(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } t.Parallel() rs := createReorgSets(t.Name()) defer rs.Close() // Give a wide variety of block types to cstMain. for i := 0; i < 3; i++ { rs.cstMain.testBlockSuite() } // Give fewer blocks to cstAlt, while still using the same variety. for i := 0; i < 2; i++ { rs.cstAlt.testBlockSuite() } // Try to trigger consensus inconsistencies by doing a full reorg on the // full suite of blocks. rs.fullReorg() } /// All functions below this point are deprecated. /// // TestBuriedBadFork creates a block with an invalid transaction that's not on // the longest fork. The consensus set will not validate that block. Then valid // blocks are added on top of it to make it the longest fork. When it becomes // the longest fork, all the blocks should be fully validated and thrown out // because a parent is invalid. func TestBuriedBadFork(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() pb := cst.cs.dbCurrentProcessedBlock() // Create a bad block that builds on a parent, so that it is not part of // the longest fork. badBlock := types.Block{ ParentID: pb.Block.ParentID, Timestamp: types.CurrentTimestamp(), MinerPayouts: []types.SiacoinOutput{{Value: types.CalculateCoinbase(pb.Height)}}, Transactions: []types.Transaction{{ SiacoinInputs: []types.SiacoinInput{{}}, // Will trigger an error on full verification but not partial verification. }}, } parent, err := cst.cs.dbGetBlockMap(pb.Block.ParentID) if err != nil { t.Fatal(err) } badBlock, _ = cst.miner.SolveBlock(badBlock, parent.ChildTarget) err = cst.cs.AcceptBlock(badBlock) if err != modules.ErrNonExtendingBlock { t.Fatal(err) } // Build another block on top of the bad block that is fully valid; this // will cause a fork and full validation of the bad block, and both the bad // block and this block should be thrown away.
block := types.Block{ ParentID: badBlock.ID(), Timestamp: types.CurrentTimestamp(), MinerPayouts: []types.SiacoinOutput{{Value: types.CalculateCoinbase(pb.Height + 1)}}, } block, _ = cst.miner.SolveBlock(block, parent.ChildTarget) // okay because the target will not change err = cst.cs.AcceptBlock(block) if err == nil { t.Fatal("a bad block failed to cause an error") } } Sia-1.3.0/modules/consensus/accept_test.go000066400000000000000000001072441313565667000205700ustar00rootroot00000000000000package consensus import ( "bytes" "errors" "testing" "time" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) var ( // validateBlockParamsGot stores the parameters passed to the most recent call // to mockBlockValidator.ValidateBlock. validateBlockParamsGot validateBlockParams mockValidBlock = types.Block{ Timestamp: 100, ParentID: mockParentID(), } mockInvalidBlock = types.Block{ Timestamp: 500, ParentID: mockParentID(), } // parentBlockSerialized is a mock serialized form of a processedBlock. parentBlockSerialized = []byte{3, 2, 1} parentBlockUnmarshaler = mockBlockMarshaler{ []predefinedBlockUnmarshal{ {parentBlockSerialized, mockParent(), nil}, }, } parentBlockHighTargetUnmarshaler = mockBlockMarshaler{ []predefinedBlockUnmarshal{ {parentBlockSerialized, mockParentHighTarget(), nil}, }, } parentBlockLowTargetUnmarshaler = mockBlockMarshaler{ []predefinedBlockUnmarshal{ {parentBlockSerialized, mockParentLowTarget(), nil}, }, } unmarshalFailedErr = errors.New("mock unmarshal failed") failingBlockUnmarshaler = mockBlockMarshaler{ []predefinedBlockUnmarshal{ {parentBlockSerialized, processedBlock{}, unmarshalFailedErr}, }, } serializedParentBlockMap = []blockMapPair{ {mockValidBlock.ParentID[:], parentBlockSerialized}, } ) type ( // mockDbBucket is an implementation of dbBucket for unit testing. mockDbBucket struct { values map[string][]byte } // mockDbTx is an implementation of dbTx for unit testing. It uses an // in-memory key/value store to mock a database. mockDbTx struct { buckets map[string]dbBucket } // predefinedBlockUnmarshal is a predefined response from mockBlockMarshaler. // It defines the unmarshaled processedBlock and error code that // mockBlockMarshaler should return in response to an input serialized byte // slice. predefinedBlockUnmarshal struct { serialized []byte unmarshaled processedBlock err error } // mockBlockMarshaler is an implementation of the encoding.GenericMarshaler // interface for unit testing. It allows clients to specify mappings of // serialized bytes into unmarshaled blocks. mockBlockMarshaler struct { p []predefinedBlockUnmarshal } // mockBlockRuleHelper is an implementation of the blockRuleHelper interface // for unit testing. mockBlockRuleHelper struct { minTimestamp types.Timestamp } // mockBlockValidator is an implementation of the blockValidator interface for // unit testing. mockBlockValidator struct { err error } // validateBlockParams stores the set of parameters passed to ValidateBlock. validateBlockParams struct { called bool b types.Block minTimestamp types.Timestamp target types.Target height types.BlockHeight } // blockMapPair represents a key-value pair in the mock block map. blockMapPair struct { key []byte val []byte } ) // Get returns the value associated with a given key. func (bucket mockDbBucket) Get(key []byte) []byte { return bucket.values[string(key)] } // Set adds a named value to a mockDbBucket. 
func (bucket mockDbBucket) Set(key []byte, value []byte) { bucket.values[string(key)] = value } // Bucket returns a mock dbBucket object associated with the given bucket name. func (db mockDbTx) Bucket(name []byte) dbBucket { return db.buckets[string(name)] } // Marshal is not implemented and panics if called. func (m mockBlockMarshaler) Marshal(interface{}) []byte { panic("not implemented") } // Unmarshal unmarshals a byte slice into an object based on a pre-defined map // of deserialized objects. func (m mockBlockMarshaler) Unmarshal(b []byte, v interface{}) error { for _, pu := range m.p { if bytes.Equal(b[:], pu.serialized[:]) { pv, ok := v.(*processedBlock) if !ok { panic("mockBlockMarshaler.Unmarshal expected v to be of type processedBlock") } *pv = pu.unmarshaled return pu.err } } panic("unmarshal failed: predefined unmarshal not found") } // AddPredefinedUnmarshal adds a predefinedBlockUnmarshal to mockBlockMarshaler. func (m *mockBlockMarshaler) AddPredefinedUnmarshal(u predefinedBlockUnmarshal) { m.p = append(m.p, u) } // minimumValidChildTimestamp returns the minimum timestamp that a child of pb // can have while still being considered valid. func (brh mockBlockRuleHelper) minimumValidChildTimestamp(blockMap dbBucket, pb *processedBlock) types.Timestamp { return brh.minTimestamp } // ValidateBlock stores the parameters it receives and returns the mock error // defined by mockBlockValidator.err. func (bv mockBlockValidator) ValidateBlock(b types.Block, id types.BlockID, minTimestamp types.Timestamp, target types.Target, height types.BlockHeight, log *persist.Logger) error { validateBlockParamsGot = validateBlockParams{true, b, minTimestamp, target, height} return bv.err } // mockParentID returns a mock BlockID value. func mockParentID() (parentID types.BlockID) { parentID[0] = 42 return parentID } // mockParent returns a mock processedBlock with its ChildTarget member // initialized to a dummy value. func mockParent() (parent processedBlock) { var mockTarget types.Target mockTarget[0] = 56 parent.ChildTarget = mockTarget return parent } // mockParentHighTarget returns a mock processedBlock with its ChildTarget // member initialized to the maximum value. func mockParentHighTarget() (parent processedBlock) { parent.ChildTarget = types.RootDepth return parent } // mockParentLowTarget returns a mock processedBlock with its ChildTarget // member initialized to the minimum value. func mockParentLowTarget() (parent processedBlock) { return parent } // TestUnitValidateHeaderAndBlock runs a series of unit tests for validateHeaderAndBlock. func TestUnitValidateHeaderAndBlock(t *testing.T) { var tests = []struct { block types.Block dosBlocks map[types.BlockID]struct{} blockMapPairs []blockMapPair earliestValidTimestamp types.Timestamp marshaler mockBlockMarshaler useNilBlockMap bool validateBlockErr error errWant error msg string }{ { block: mockValidBlock, dosBlocks: make(map[types.BlockID]struct{}), useNilBlockMap: true, earliestValidTimestamp: mockValidBlock.Timestamp, marshaler: parentBlockUnmarshaler, errWant: errNoBlockMap, msg: "validateHeaderAndBlock should fail when no block map is found in the database", }, { block: mockValidBlock, // Create a dosBlocks map where mockValidBlock is marked as a bad block.
dosBlocks: map[types.BlockID]struct{}{ mockValidBlock.ID(): {}, }, earliestValidTimestamp: mockValidBlock.Timestamp, marshaler: parentBlockUnmarshaler, errWant: errDoSBlock, msg: "validateHeaderAndBlock should reject known bad blocks", }, { block: mockValidBlock, dosBlocks: make(map[types.BlockID]struct{}), earliestValidTimestamp: mockValidBlock.Timestamp, marshaler: parentBlockUnmarshaler, errWant: errOrphan, msg: "validateHeaderAndBlock should reject a block if its parent block does not appear in the block database", }, { block: mockValidBlock, dosBlocks: make(map[types.BlockID]struct{}), blockMapPairs: serializedParentBlockMap, earliestValidTimestamp: mockValidBlock.Timestamp, marshaler: failingBlockUnmarshaler, errWant: unmarshalFailedErr, msg: "validateHeaderAndBlock should fail when unmarshaling the parent block fails", }, { block: mockInvalidBlock, dosBlocks: make(map[types.BlockID]struct{}), blockMapPairs: []blockMapPair{ {mockInvalidBlock.ParentID[:], parentBlockSerialized}, }, earliestValidTimestamp: mockInvalidBlock.Timestamp, marshaler: parentBlockUnmarshaler, validateBlockErr: errBadMinerPayouts, errWant: errBadMinerPayouts, msg: "validateHeaderAndBlock should reject a block if ValidateBlock returns an error for the block", }, { block: mockValidBlock, dosBlocks: make(map[types.BlockID]struct{}), blockMapPairs: serializedParentBlockMap, earliestValidTimestamp: mockValidBlock.Timestamp, marshaler: parentBlockUnmarshaler, errWant: nil, msg: "validateHeaderAndBlock should accept a valid block", }, } for _, tt := range tests { // Initialize the blockmap in the tx. bucket := mockDbBucket{map[string][]byte{}} for _, mapPair := range tt.blockMapPairs { bucket.Set(mapPair.key, mapPair.val) } dbBucketMap := map[string]dbBucket{} if tt.useNilBlockMap { dbBucketMap[string(BlockMap)] = nil } else { dbBucketMap[string(BlockMap)] = bucket } tx := mockDbTx{dbBucketMap} mockParent := mockParent() cs := ConsensusSet{ dosBlocks: tt.dosBlocks, marshaler: tt.marshaler, blockRuleHelper: mockBlockRuleHelper{ minTimestamp: tt.earliestValidTimestamp, }, blockValidator: mockBlockValidator{tt.validateBlockErr}, } // Reset the stored parameters to ValidateBlock. validateBlockParamsGot = validateBlockParams{} _, err := cs.validateHeaderAndBlock(tx, tt.block, tt.block.ID()) if err != tt.errWant { t.Errorf("%s: expected to fail with `%v', got: `%v'", tt.msg, tt.errWant, err) } if err == nil || validateBlockParamsGot.called { if validateBlockParamsGot.b.ID() != tt.block.ID() { t.Errorf("%s: incorrect parameter passed to ValidateBlock - got: %v, want: %v", tt.msg, validateBlockParamsGot.b, tt.block) } if validateBlockParamsGot.minTimestamp != tt.earliestValidTimestamp { t.Errorf("%s: incorrect parameter passed to ValidateBlock - got: %v, want: %v", tt.msg, validateBlockParamsGot.minTimestamp, tt.earliestValidTimestamp) } if validateBlockParamsGot.target != mockParent.ChildTarget { t.Errorf("%s: incorrect parameter passed to ValidateBlock - got: %v, want: %v", tt.msg, validateBlockParamsGot.target, mockParent.ChildTarget) } } } } // TestCheckHeaderTarget probes the checkHeaderTarget function and checks that // the result matches the result of checkTarget. 
func TestCheckHeaderTarget(t *testing.T) { var b types.Block var h types.BlockHeader tests := []struct { target types.Target expected bool msg string }{ {types.RootDepth, true, "checkHeaderTarget failed for an easy target"}, {types.Target{}, false, "checkHeaderTarget passed for a hard target"}, {types.Target(h.ID()), true, "checkHeaderTarget failed for an equal target"}, } for _, tt := range tests { if checkHeaderTarget(h, tt.target) != tt.expected { t.Error(tt.msg) } if checkHeaderTarget(h, tt.target) != checkTarget(b, b.ID(), tt.target) { t.Errorf("checkHeaderTarget and checkTarget do not match for target %v", tt.target) } } } // TestUnitValidateHeader runs a series of unit tests for validateHeader. func TestUnitValidateHeader(t *testing.T) { mockValidBlockID := mockValidBlock.ID() var tests = []struct { header types.BlockHeader dosBlocks map[types.BlockID]struct{} blockMapPairs []blockMapPair earliestValidTimestamp types.Timestamp marshaler mockBlockMarshaler useNilBlockMap bool errWant error msg string }{ // Test that known DoS blocks are rejected. { header: mockValidBlock.Header(), // Create a dosBlocks map where mockValidBlock is marked as a bad block. dosBlocks: map[types.BlockID]struct{}{ mockValidBlock.ID(): {}, }, blockMapPairs: serializedParentBlockMap, earliestValidTimestamp: mockValidBlock.Timestamp, marshaler: parentBlockUnmarshaler, errWant: errDoSBlock, msg: "validateHeader should reject known bad blocks", }, // Test that blocks are rejected if a block map doesn't exist. { header: mockValidBlock.Header(), dosBlocks: make(map[types.BlockID]struct{}), blockMapPairs: serializedParentBlockMap, earliestValidTimestamp: mockValidBlock.Timestamp, marshaler: parentBlockUnmarshaler, useNilBlockMap: true, errWant: errNoBlockMap, msg: "validateHeader should fail when no block map is found in the database", }, // Test that known blocks are rejected. { header: mockValidBlock.Header(), dosBlocks: make(map[types.BlockID]struct{}), blockMapPairs: []blockMapPair{{mockValidBlockID[:], []byte{}}}, earliestValidTimestamp: mockValidBlock.Timestamp, marshaler: parentBlockUnmarshaler, errWant: modules.ErrBlockKnown, msg: "validateHeader should fail when the block has been seen before", }, // Test that blocks with unknown parents (orphans) are rejected. { header: mockValidBlock.Header(), dosBlocks: make(map[types.BlockID]struct{}), earliestValidTimestamp: mockValidBlock.Timestamp, marshaler: parentBlockUnmarshaler, errWant: errOrphan, msg: "validateHeader should reject a block if its parent block does not appear in the block database", }, // Test that blocks whose parents don't unmarshal are rejected. { header: mockValidBlock.Header(), dosBlocks: make(map[types.BlockID]struct{}), blockMapPairs: serializedParentBlockMap, earliestValidTimestamp: mockValidBlock.Timestamp, marshaler: failingBlockUnmarshaler, errWant: unmarshalFailedErr, msg: "validateHeader should fail when unmarshaling the parent block fails", }, // Test that blocks with too early of a timestamp are rejected. { header: mockValidBlock.Header(), dosBlocks: make(map[types.BlockID]struct{}), blockMapPairs: serializedParentBlockMap, earliestValidTimestamp: mockValidBlock.Timestamp + 1, marshaler: parentBlockHighTargetUnmarshaler, errWant: errEarlyTimestamp, msg: "validateHeader should fail when the header's timestamp is too early", }, // Test that headers in the extreme future are rejected.
{ header: types.BlockHeader{ Timestamp: types.CurrentTimestamp() + types.ExtremeFutureThreshold + 2, ParentID: mockParentID(), }, dosBlocks: make(map[types.BlockID]struct{}), blockMapPairs: serializedParentBlockMap, marshaler: parentBlockHighTargetUnmarshaler, errWant: errExtremeFutureTimestamp, msg: "validateHeader should fail when the header's timestamp is in the extreme future", }, // Test that headers in the future, but not the extreme future, are not // rejected. { header: types.BlockHeader{ Timestamp: types.CurrentTimestamp() + types.FutureThreshold + 2, ParentID: mockParentID(), }, dosBlocks: make(map[types.BlockID]struct{}), blockMapPairs: serializedParentBlockMap, marshaler: parentBlockHighTargetUnmarshaler, errWant: nil, msg: "validateHeader should not reject headers whose timestamps are in the non-extreme future", }, // Test that unsolved blocks (blocks whose IDs do not meet the target) are // rejected. { header: mockValidBlock.Header(), dosBlocks: make(map[types.BlockID]struct{}), blockMapPairs: serializedParentBlockMap, earliestValidTimestamp: mockValidBlock.Timestamp, marshaler: parentBlockLowTargetUnmarshaler, errWant: modules.ErrBlockUnsolved, msg: "validateHeader should reject blocks whose IDs do not meet the target", }, // Test that valid blocks are accepted. { header: mockValidBlock.Header(), dosBlocks: make(map[types.BlockID]struct{}), blockMapPairs: serializedParentBlockMap, earliestValidTimestamp: mockValidBlock.Timestamp, marshaler: parentBlockHighTargetUnmarshaler, errWant: nil, msg: "validateHeader should accept a valid block", }, } for _, tt := range tests { // Initialize the blockmap in the tx. bucket := mockDbBucket{map[string][]byte{}} for _, mapPair := range tt.blockMapPairs { bucket.Set(mapPair.key, mapPair.val) } dbBucketMap := map[string]dbBucket{} if tt.useNilBlockMap { dbBucketMap[string(BlockMap)] = nil } else { dbBucketMap[string(BlockMap)] = bucket } tx := mockDbTx{dbBucketMap} cs := ConsensusSet{ dosBlocks: tt.dosBlocks, marshaler: tt.marshaler, blockRuleHelper: mockBlockRuleHelper{ minTimestamp: tt.earliestValidTimestamp, }, } err := cs.validateHeader(tx, tt.header) if err != tt.errWant { t.Errorf("%s: expected to fail with `%v', got: `%v'", tt.msg, tt.errWant, err) } } } // TestIntegrationDoSBlockHandling checks that saved bad blocks are correctly // ignored. func TestIntegrationDoSBlockHandling(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Mine a block that is valid except for containing a buried invalid // transaction. The transaction has more siacoin inputs than outputs. txnBuilder := cst.wallet.StartTransaction() err = txnBuilder.FundSiacoins(types.NewCurrency64(50)) if err != nil { t.Fatal(err) } txnSet, err := txnBuilder.Sign(true) // true sets the 'wholeTransaction' flag if err != nil { t.Fatal(err) } // Mine and submit the invalid block to the consensus set. The first time // around, the complaint should be about the rule-breaking transaction. block, target, err := cst.miner.BlockForWork() if err != nil { t.Fatal(err) } block.Transactions = append(block.Transactions, txnSet...) dosBlock, _ := cst.miner.SolveBlock(block, target) err = cst.cs.AcceptBlock(dosBlock) if err != errSiacoinInputOutputMismatch { t.Fatalf("expected %v, got %v", errSiacoinInputOutputMismatch, err) } // Submit the same block a second time. The complaint should be that the // block is already known to be invalid.
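// (The consensus set caches the IDs of blocks that failed full validation in // its dosBlocks map, so the second submission should be rejected with // errDoSBlock rather than being re-validated.)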
err = cst.cs.AcceptBlock(dosBlock) if err != errDoSBlock { t.Fatalf("expected %v, got %v", errDoSBlock, err) } } // TestBlockKnownHandling submits known blocks to the consensus set. func TestBlockKnownHandling(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Get a block destined to be stale. block, target, err := cst.miner.BlockForWork() if err != nil { t.Fatal(err) } staleBlock, _ := cst.miner.SolveBlock(block, target) // Add two new blocks to the consensus set so that the stale block no // longer extends the longest fork. block1, err := cst.miner.AddBlock() if err != nil { t.Fatal(err) } block2, err := cst.miner.AddBlock() if err != nil { t.Fatal(err) } // Submit the stale block. err = cst.cs.AcceptBlock(staleBlock) if err != nil && err != modules.ErrNonExtendingBlock { t.Fatal(err) } // Submit all the blocks again, looking for a 'stale block' error. err = cst.cs.AcceptBlock(block1) if err == nil { t.Fatal("expected an error upon submitting the block") } err = cst.cs.AcceptBlock(block2) if err == nil { t.Fatal("expected an error upon submitting the block") } err = cst.cs.AcceptBlock(staleBlock) if err == nil { t.Fatal("expected an error upon submitting the block") } // Try submitting the genesis block. id, err := cst.cs.dbGetPath(0) if err != nil { t.Fatal(err) } genesisBlock, err := cst.cs.dbGetBlockMap(id) if err != nil { t.Fatal(err) } err = cst.cs.AcceptBlock(genesisBlock.Block) if err == nil { t.Fatal("expected an error upon submitting the block") } } // TestOrphanHandling passes an orphan block to the consensus set. func TestOrphanHandling(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Try submitting an orphan block to the consensus set. The empty block can // be used, because looking for a parent is one of the first checks the // consensus set performs. orphan := types.Block{} err = cst.cs.AcceptBlock(orphan) if err != errOrphan { t.Fatalf("expected %v, got %v", errOrphan, err) } err = cst.cs.AcceptBlock(orphan) if err != errOrphan { t.Fatalf("expected %v, got %v", errOrphan, err) } } // TestMissedTarget submits a block that does not meet the required target. func TestMissedTarget(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Mine a block that doesn't meet the target. block, target, err := cst.miner.BlockForWork() if err != nil { t.Fatal(err) } for checkTarget(block, block.ID(), target) && block.Nonce[0] != 255 { block.Nonce[0]++ } if checkTarget(block, block.ID(), target) { t.Fatal("unable to find a failing target") } err = cst.cs.AcceptBlock(block) if err != modules.ErrBlockUnsolved { t.Fatalf("expected %v, got %v", modules.ErrBlockUnsolved, err) } } // TestMinerPayoutHandling checks that blocks with incorrect payouts are // rejected. func TestMinerPayoutHandling(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Create a block with the wrong miner payout structure - testing can be // light here because there is heavier testing in the 'types' package, // where the logic is defined.
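// (Consensus requires a block's miner payouts to sum exactly to the coinbase // plus the fees in the block, so the extra 1-hasting payout appended below // makes the block invalid.)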
block, target, err := cst.miner.BlockForWork() if err != nil { t.Fatal(err) } block.MinerPayouts = append(block.MinerPayouts, types.SiacoinOutput{Value: types.NewCurrency64(1)}) solvedBlock, _ := cst.miner.SolveBlock(block, target) err = cst.cs.AcceptBlock(solvedBlock) if err != errBadMinerPayouts { t.Fatalf("expected %v, got %v", errBadMinerPayouts, err) } } // TestEarlyTimestampHandling checks that blocks too far in the past are // rejected. func TestEarlyTimestampHandling(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() minTimestamp := types.CurrentTimestamp() cst.cs.blockRuleHelper = mockBlockRuleHelper{ minTimestamp: minTimestamp, } // Submit a block with a timestamp in the past, before minTimestamp. block, target, err := cst.miner.BlockForWork() if err != nil { t.Fatal(err) } block.Timestamp = minTimestamp - 1 solvedBlock, _ := cst.miner.SolveBlock(block, target) err = cst.cs.AcceptBlock(solvedBlock) if err != errEarlyTimestamp { t.Fatalf("expected %v, got %v", errEarlyTimestamp, err) } } // TestFutureTimestampHandling checks that blocks in the future (but not // extreme future) are handled correctly. func TestFutureTimestampHandling(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Submit a block with a timestamp in the future, but not the extreme // future. block, target, err := cst.miner.BlockForWork() if err != nil { t.Fatal(err) } block.Timestamp = types.CurrentTimestamp() + 2 + types.FutureThreshold solvedBlock, _ := cst.miner.SolveBlock(block, target) err = cst.cs.AcceptBlock(solvedBlock) if err != errFutureTimestamp { t.Fatalf("expected %v, got %v", errFutureTimestamp, err) } // Poll the consensus set until the future block appears. for i := 0; i < 30; i++ { time.Sleep(time.Second * 3) _, err = cst.cs.dbGetBlockMap(solvedBlock.ID()) if err == nil { break } } _, err = cst.cs.dbGetBlockMap(solvedBlock.ID()) if err != nil { t.Errorf("Future block not added to consensus set.\nCurrent Timestamp %v\nFutureThreshold: %v\nBlock Timestamp %v\n", types.CurrentTimestamp(), types.FutureThreshold, block.Timestamp) } } // TestExtremeFutureTimestampHandling checks that blocks in the extreme future // are rejected. func TestExtremeFutureTimestampHandling(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Submit a block with a timestamp in the extreme future. block, target, err := cst.miner.BlockForWork() if err != nil { t.Fatal(err) } block.Timestamp = types.CurrentTimestamp() + 2 + types.ExtremeFutureThreshold solvedBlock, _ := cst.miner.SolveBlock(block, target) err = cst.cs.AcceptBlock(solvedBlock) if err != errExtremeFutureTimestamp { t.Fatalf("expected %v, got %v", errExtremeFutureTimestamp, err) } } // TestBuriedBadTransaction tries submitting a block with a bad transaction // that is buried under good transactions. func TestBuriedBadTransaction(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() pb := cst.cs.dbCurrentProcessedBlock() // Create a good transaction using the wallet.
txnValue := types.NewCurrency64(1200) txnBuilder := cst.wallet.StartTransaction() err = txnBuilder.FundSiacoins(txnValue) if err != nil { t.Fatal(err) } txnBuilder.AddSiacoinOutput(types.SiacoinOutput{Value: txnValue}) txnSet, err := txnBuilder.Sign(true) if err != nil { t.Fatal(err) } err = cst.tpool.AcceptTransactionSet(txnSet) if err != nil { t.Fatal(err) } // Create a bad transaction. badTxn := types.Transaction{ SiacoinInputs: []types.SiacoinInput{{}}, } txns := append(cst.tpool.TransactionList(), badTxn) // Create a block with a buried bad transaction. block := types.Block{ ParentID: pb.Block.ID(), Timestamp: types.CurrentTimestamp(), MinerPayouts: []types.SiacoinOutput{{Value: types.CalculateCoinbase(pb.Height + 1)}}, Transactions: txns, } block, _ = cst.miner.SolveBlock(block, pb.ChildTarget) err = cst.cs.AcceptBlock(block) if err == nil { t.Error("buried transaction didn't cause an error") } } // TestInconsistentCheck puts the consensus set into an inconsistent state and // makes sure that the sanity checks are triggering panics. func TestInconsistentCheck(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } // Corrupt the consensus set by adding a new siafund output. sfo := types.SiafundOutput{ Value: types.NewCurrency64(1), } cst.cs.dbAddSiafundOutput(types.SiafundOutputID{}, sfo) // Catch a panic that should be caused by the inconsistency check after a // block is mined. defer func() { r := recover() if r == nil { t.Fatalf("inconsistency panic not triggered by corrupted database") } }() cst.miner.AddBlock() } // COMPATv0.4.0 // // This test checks that the hardfork scheduled for block 21,000 rolls through // smoothly. func TestTaxHardfork(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Create a file contract with a payout that is put into the blockchain // before the hardfork block but expires after the hardfork block. payout := types.NewCurrency64(400e6) outputSize := types.PostTax(cst.cs.dbBlockHeight(), payout) fc := types.FileContract{ WindowStart: cst.cs.dbBlockHeight() + 12, WindowEnd: cst.cs.dbBlockHeight() + 14, Payout: payout, ValidProofOutputs: []types.SiacoinOutput{{Value: outputSize}}, MissedProofOutputs: []types.SiacoinOutput{{Value: outputSize}}, UnlockHash: types.UnlockConditions{}.UnlockHash(), // The empty UC is anyone-can-spend } // Create and fund a transaction with a file contract. txnBuilder := cst.wallet.StartTransaction() err = txnBuilder.FundSiacoins(payout) if err != nil { t.Fatal(err) } fcIndex := txnBuilder.AddFileContract(fc) txnSet, err := txnBuilder.Sign(true) if err != nil { t.Fatal(err) } err = cst.tpool.AcceptTransactionSet(txnSet) if err != nil { t.Fatal(err) } _, err = cst.miner.AddBlock() if err != nil { t.Fatal(err) } // Check that the siafund pool was increased by the faulty float amount. siafundPool := cst.cs.dbGetSiafundPool() if !siafundPool.Equals64(15590e3) { t.Fatal("siafund pool was not increased correctly") } // Mine blocks until the hardfork is reached. for i := 0; i < 10; i++ { _, err = cst.miner.AddBlock() if err != nil { t.Fatal(err) } } // Submit a file contract revision and check that the payouts are able to // be the same.
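// (The 15590e3 figure above is the 'faulty float amount' the test refers to: // the pre-hardfork tax computation slightly undershoots an exact 3.9% of the // 400e6 payout, which would be 15600e3. The revision below reuses the // original payouts and must still validate once the hardfork's corrected // computation is in effect.)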
fcid := txnSet[len(txnSet)-1].FileContractID(fcIndex) fcr := types.FileContractRevision{ ParentID: fcid, UnlockConditions: types.UnlockConditions{}, NewRevisionNumber: 1, NewFileSize: 1, NewWindowStart: cst.cs.dbBlockHeight() + 2, NewWindowEnd: cst.cs.dbBlockHeight() + 4, NewValidProofOutputs: fc.ValidProofOutputs, NewMissedProofOutputs: fc.MissedProofOutputs, } txnBuilder = cst.wallet.StartTransaction() txnBuilder.AddFileContractRevision(fcr) txnSet, err = txnBuilder.Sign(true) if err != nil { t.Fatal(err) } err = cst.tpool.AcceptTransactionSet(txnSet) if err != nil { t.Fatal(err) } _, err = cst.miner.AddBlock() if err != nil { t.Fatal(err) } // Mine blocks until the revision goes through, such that the sanity checks // can be run. for i := 0; i < 6; i++ { _, err = cst.miner.AddBlock() if err != nil { t.Fatal(err) } } // Check that the siafund pool did not change after the submitted revision. siafundPool = cst.cs.dbGetSiafundPool() if !siafundPool.Equals64(15590e3) { t.Fatal("siafund pool was not increased correctly") } } // mockGatewayDoesBroadcast implements modules.Gateway to mock the Broadcast // method. type mockGatewayDoesBroadcast struct { modules.Gateway broadcastCalled chan struct{} } // Broadcast is a mock implementation of modules.Gateway.Broadcast that // sends a sentinel value down a channel to signal it's been called. func (g *mockGatewayDoesBroadcast) Broadcast(name string, obj interface{}, peers []modules.Peer) { g.Gateway.Broadcast(name, obj, peers) g.broadcastCalled <- struct{}{} } // TestAcceptBlockBroadcasts tests that AcceptBlock broadcasts valid blocks and // that managedAcceptBlocks does not. func TestAcceptBlockBroadcasts(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := blankConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() mg := &mockGatewayDoesBroadcast{ Gateway: cst.cs.gateway, broadcastCalled: make(chan struct{}), } cst.cs.gateway = mg // Test that Broadcast is called for valid blocks. b, _ := cst.miner.FindBlock() err = cst.cs.AcceptBlock(b) if err != nil { t.Fatal(err) } select { case <-mg.broadcastCalled: case <-time.After(10 * time.Millisecond): t.Error("expected AcceptBlock to broadcast a valid block") } // Test that Broadcast is not called for invalid blocks. err = cst.cs.AcceptBlock(types.Block{}) if err == nil { t.Fatal("expected AcceptBlock to error on an invalid block") } select { case <-mg.broadcastCalled: t.Error("AcceptBlock broadcasted an invalid block") case <-time.After(10 * time.Millisecond): } // Test that Broadcast is not called in managedAcceptBlocks. b, _ = cst.miner.FindBlock() _, err = cst.cs.managedAcceptBlocks([]types.Block{b}) if err != nil { t.Fatal(err) } select { case <-mg.broadcastCalled: t.Errorf("managedAcceptBlocks should not broadcast blocks") case <-time.After(10 * time.Millisecond): } } // blockCountingSubscriber records the ID of every consensus change it // receives, along with the total number of blocks applied and reverted across // those changes. type blockCountingSubscriber struct { changes []modules.ConsensusChangeID appliedBlocks int revertedBlocks int } // ProcessConsensusChange fills the subscription interface for the // blockCountingSubscriber.
func (bcs *blockCountingSubscriber) ProcessConsensusChange(cc modules.ConsensusChange) { bcs.changes = append(bcs.changes, cc.ID) bcs.revertedBlocks += len(cc.RevertedBlocks) bcs.appliedBlocks += len(cc.AppliedBlocks) } // TestChainedAcceptBlock creates a series of blocks, some of which are valid, // some invalid, and submits them to the consensus set, verifying that the // consensus set updates correctly each time. func TestChainedAcceptBlock(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // Create a tester to send blocks in a batch to the other tester. cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() cst2, err := blankConsensusSetTester(t.Name() + "2") if err != nil { t.Fatal(err) } defer cst2.Close() // Subscribe a blockCountingSubscriber to cst2. var bcs blockCountingSubscriber cst2.cs.ConsensusSetSubscribe(&bcs, modules.ConsensusChangeBeginning) if len(bcs.changes) != 1 || bcs.appliedBlocks != 1 || bcs.revertedBlocks != 0 { t.Error("consensus changes do not seem to be getting passed to subscribers correctly") } // Grab all of the blocks in cst, with the intention of giving them to cst2. var blocks []types.Block height := cst.cs.Height() for i := types.BlockHeight(0); i <= height; i++ { id, err := cst.cs.dbGetPath(i) if err != nil { t.Fatal(err) } pb, err := cst.cs.dbGetBlockMap(id) if err != nil { t.Fatal(err) } blocks = append(blocks, pb.Block) } // Create a jumbling of the blocks, so that the set is not in order. jumble := make([]types.Block, len(blocks)) jumble[0] = blocks[0] jumble[1] = blocks[2] jumble[2] = blocks[1] for i := 3; i < len(jumble); i++ { jumble[i] = blocks[i] } // Try to submit the blocks out-of-order, which would violate one of the // assumptions in managedAcceptBlocks. _, err = cst2.cs.managedAcceptBlocks(jumble) if err != errNonLinearChain { t.Fatal(err) } if cst2.cs.Height() != 0 { t.Fatal("blocks added even though the inputs were jumbled") } if len(bcs.changes) != 1 || bcs.appliedBlocks != 1 || bcs.revertedBlocks != 0 { t.Error("consensus changes do not seem to be getting passed to subscribers correctly") } // Tag an invalid block onto the end of blocks. block, err := cst.miner.AddBlock() if err != nil { t.Fatal(err) } // Add an invalid transaction to make the block invalid. badBlock := block badBlock.Transactions = append(badBlock.Transactions, types.Transaction{ SiacoinInputs: []types.SiacoinInput{{ ParentID: types.SiacoinOutputID{1}, }}, }) // Append the invalid block to the set of blocks. blocks = append(blocks, badBlock) // Submit the whole invalid set. Result should be that the valid ones get // added, and the invalid ones get dropped. _, err = cst2.cs.managedAcceptBlocks(blocks) if err == nil { t.Fatal(err) } if cst2.cs.Height() != cst.cs.Height()-1 { t.Log(cst2.cs.Height()) t.Log(cst.cs.Height()) t.Fatal("height is not correct, does not seem that the blocks were added") } if bcs.appliedBlocks != int(cst2.cs.Height()+1) || bcs.revertedBlocks != 0 { t.Error("consensus changes do not seem to be getting passed to subscribers correctly") } // Try submitting the good block. It should succeed because the other good // blocks should have been added. err = cst2.cs.AcceptBlock(block) if err != nil { t.Fatal(err) } if bcs.appliedBlocks != int(cst2.cs.Height()+1) || bcs.revertedBlocks != 0 { t.Error("consensus changes do not seem to be getting passed to subscribers correctly") } // Check that every change recorded in 'bcs' is also available in the // consensus set.
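// (Every change ID handed to a subscriber should remain retrievable from the // database; a missing entry would leave a module unable to resubscribe from // that recorded ID.)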
for _, change := range bcs.changes { err := cst2.cs.db.Update(func(tx *bolt.Tx) error { _, exists := getEntry(tx, change) if !exists { t.Error("an entry was provided that doesn't exist") } return nil }) if err != nil { t.Fatal(err) } } } Sia-1.3.0/modules/consensus/accept_txntypes_test.go000066400000000000000000001110401313565667000225360ustar00rootroot00000000000000package consensus import ( "testing" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) // testBlockSuite tests a wide variety of blocks. func (cst *consensusSetTester) testBlockSuite() { cst.testSimpleBlock() cst.testSpendSiacoinsBlock() cst.testValidStorageProofBlocks() cst.testMissedStorageProofBlocks() cst.testFileContractRevision() cst.testSpendSiafunds() } // testSimpleBlock mines a simple block (no transactions except those // automatically added by the miner) and adds it to the consensus set. func (cst *consensusSetTester) testSimpleBlock() { // Get the starting hash of the consensus set. initialChecksum := cst.cs.dbConsensusChecksum() initialHeight := cst.cs.dbBlockHeight() initialBlockID := cst.cs.dbCurrentBlockID() // Mine and submit a block. block, err := cst.miner.AddBlock() if err != nil { panic(err) } // Check that the consensus info functions changed as expected. resultingChecksum := cst.cs.dbConsensusChecksum() if initialChecksum == resultingChecksum { panic("checksum is unchanged after mining a block") } resultingHeight := cst.cs.dbBlockHeight() if resultingHeight != initialHeight+1 { panic("height of consensus set did not increase as expected") } currentPB := cst.cs.dbCurrentProcessedBlock() if currentPB.Block.ParentID != initialBlockID { panic("new processed block does not have correct information") } if currentPB.Block.ID() != block.ID() { panic("the state's current block is not reporting as the recently mined block.") } if currentPB.Height != initialHeight+1 { panic("the processed block is not reporting the correct height") } pathID, err := cst.cs.dbGetPath(currentPB.Height) if err != nil { panic(err) } if pathID != block.ID() { panic("current path does not point to the correct block") } // Revert the block that was just added to the consensus set and check for // parity with the original state of consensus. parent, err := cst.cs.dbGetBlockMap(currentPB.Block.ParentID) if err != nil { panic(err) } _, _, err = cst.cs.dbForkBlockchain(parent) if err != nil { panic(err) } if cst.cs.dbConsensusChecksum() != initialChecksum { panic("adding and reverting a block changed the consensus set") } // Re-add the block and check for parity with the first time it was added. // This test is useful because a different codepath is followed if the // diffs have already been generated. _, _, err = cst.cs.dbForkBlockchain(currentPB) if err != nil { panic(err) } if cst.cs.dbConsensusChecksum() != resultingChecksum { panic("adding, reverting, and re-adding a block was inconsistent with just adding the block") } } // TestIntegrationSimpleBlock creates a consensus set tester and uses it to // call testSimpleBlock. func TestIntegrationSimpleBlock(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() cst.testSimpleBlock() } // testSpendSiacoinsBlock mines a block with a transaction spending siacoins // and adds it to the consensus set. func (cst *consensusSetTester) testSpendSiacoinsBlock() { // Create a random destination address for the output in the transaction.
destAddr := randAddress() // Create a block containing a transaction with a valid siacoin output. txnValue := types.NewCurrency64(1200) txnBuilder := cst.wallet.StartTransaction() err := txnBuilder.FundSiacoins(txnValue) if err != nil { panic(err) } outputIndex := txnBuilder.AddSiacoinOutput(types.SiacoinOutput{Value: txnValue, UnlockHash: destAddr}) txnSet, err := txnBuilder.Sign(true) if err != nil { panic(err) } err = cst.tpool.AcceptTransactionSet(txnSet) if err != nil { panic(err) } // Mine and apply the block to the consensus set. _, err = cst.miner.AddBlock() if err != nil { panic(err) } // See that the destination output was created. outputID := txnSet[len(txnSet)-1].SiacoinOutputID(outputIndex) sco, err := cst.cs.dbGetSiacoinOutput(outputID) if err != nil { panic(err) } if !sco.Value.Equals(txnValue) { panic("output added with wrong value") } if sco.UnlockHash != destAddr { panic("output sent to the wrong address") } } // TestIntegrationSpendSiacoinsBlock creates a consensus set tester and uses it // to call testSpendSiacoinsBlock. func TestIntegrationSpendSiacoinsBlock(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() cst.testSpendSiacoinsBlock() } // testValidStorageProofBlocks adds a block with a file contract, and then // submits a storage proof for that file contract. func (cst *consensusSetTester) testValidStorageProofBlocks() { // COMPATv0.4.0 - Step the block height up past the hardfork amount. This // code stops nondeterministic failures, related to buggy old code, that // occur when producing storage proofs. for cst.cs.dbBlockHeight() <= 10 { _, err := cst.miner.AddBlock() if err != nil { panic(err) } } // Create a file (as a byte slice) that will be used for the file // contract. filesize := uint64(4e3) file := fastrand.Bytes(int(filesize)) merkleRoot := crypto.MerkleRoot(file) // Create a file contract that will be successful. validProofDest := randAddress() payout := types.NewCurrency64(400e6) fc := types.FileContract{ FileSize: filesize, FileMerkleRoot: merkleRoot, WindowStart: cst.cs.dbBlockHeight() + 1, WindowEnd: cst.cs.dbBlockHeight() + 2, Payout: payout, ValidProofOutputs: []types.SiacoinOutput{{ UnlockHash: validProofDest, Value: types.PostTax(cst.cs.dbBlockHeight(), payout), }}, MissedProofOutputs: []types.SiacoinOutput{{ UnlockHash: types.UnlockHash{}, Value: types.PostTax(cst.cs.dbBlockHeight(), payout), }}, } // Submit a transaction with the file contract. oldSiafundPool := cst.cs.dbGetSiafundPool() txnBuilder := cst.wallet.StartTransaction() err := txnBuilder.FundSiacoins(payout) if err != nil { panic(err) } fcIndex := txnBuilder.AddFileContract(fc) txnSet, err := txnBuilder.Sign(true) if err != nil { panic(err) } err = cst.tpool.AcceptTransactionSet(txnSet) if err != nil { panic(err) } _, err = cst.miner.AddBlock() if err != nil { panic(err) } // Check that the siafund pool was increased by the tax on the payout. siafundPool := cst.cs.dbGetSiafundPool() if !siafundPool.Equals(oldSiafundPool.Add(types.Tax(cst.cs.dbBlockHeight()-1, payout))) { panic("siafund pool was not increased correctly") } // Check that the file contract made it into the database. ti := len(txnSet) - 1 fcid := txnSet[ti].FileContractID(fcIndex) _, err = cst.cs.dbGetFileContract(fcid) if err != nil { panic(err) } // Create and submit a storage proof for the file contract.
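// (A storage proof reveals the randomly selected 64-byte segment of the file // along with the Merkle hash set needed to verify that segment against the // contract's FileMerkleRoot.)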
segmentIndex, err := cst.cs.StorageProofSegment(fcid) if err != nil { panic(err) } segment, hashSet := crypto.MerkleProof(file, segmentIndex) sp := types.StorageProof{ ParentID: fcid, HashSet: hashSet, } copy(sp.Segment[:], segment) txnBuilder = cst.wallet.StartTransaction() txnBuilder.AddStorageProof(sp) txnSet, err = txnBuilder.Sign(true) if err != nil { panic(err) } err = cst.tpool.AcceptTransactionSet(txnSet) if err != nil { panic(err) } _, err = cst.miner.AddBlock() if err != nil { panic(err) } // Check that the file contract has been removed. _, err = cst.cs.dbGetFileContract(fcid) if err != errNilItem { panic("file contract should not exist in the database") } // Check that the siafund pool has not changed. postProofPool := cst.cs.dbGetSiafundPool() if !postProofPool.Equals(siafundPool) { panic("siafund pool should not change after submitting a storage proof") } // Check that a delayed output was created for the valid proof. spoid := fcid.StorageProofOutputID(types.ProofValid, 0) dsco, err := cst.cs.dbGetDSCO(cst.cs.dbBlockHeight()+types.MaturityDelay, spoid) if err != nil { panic(err) } if dsco.UnlockHash != fc.ValidProofOutputs[0].UnlockHash { panic("wrong unlock hash in dsco") } if !dsco.Value.Equals(fc.ValidProofOutputs[0].Value) { panic("wrong sco value in dsco") } } // TestIntegrationValidStorageProofBlocks creates a consensus set tester and // uses it to call testValidStorageProofBlocks. func TestIntegrationValidStorageProofBlocks(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() cst.testValidStorageProofBlocks() } // testMissedStorageProofBlocks adds a block with a file contract, and then // fails to submit a storage proof before expiration. func (cst *consensusSetTester) testMissedStorageProofBlocks() { // Create a file contract whose storage proof window will be allowed to // expire. filesize := uint64(4e3) payout := types.NewCurrency64(400e6) missedProofDest := randAddress() fc := types.FileContract{ FileSize: filesize, FileMerkleRoot: crypto.Hash{}, WindowStart: cst.cs.dbBlockHeight() + 1, WindowEnd: cst.cs.dbBlockHeight() + 2, Payout: payout, ValidProofOutputs: []types.SiacoinOutput{{ UnlockHash: types.UnlockHash{}, Value: types.PostTax(cst.cs.dbBlockHeight(), payout), }}, MissedProofOutputs: []types.SiacoinOutput{{ UnlockHash: missedProofDest, Value: types.PostTax(cst.cs.dbBlockHeight(), payout), }}, } // Submit a transaction with the file contract. oldSiafundPool := cst.cs.dbGetSiafundPool() txnBuilder := cst.wallet.StartTransaction() err := txnBuilder.FundSiacoins(payout) if err != nil { panic(err) } fcIndex := txnBuilder.AddFileContract(fc) txnSet, err := txnBuilder.Sign(true) if err != nil { panic(err) } err = cst.tpool.AcceptTransactionSet(txnSet) if err != nil { panic(err) } _, err = cst.miner.AddBlock() if err != nil { panic(err) } // Check that the siafund pool was increased by the tax on the payout. siafundPool := cst.cs.dbGetSiafundPool() if !siafundPool.Equals(oldSiafundPool.Add(types.Tax(cst.cs.dbBlockHeight()-1, payout))) { panic("siafund pool was not increased correctly") } // Check that the file contract made it into the database. ti := len(txnSet) - 1 fcid := txnSet[ti].FileContractID(fcIndex) _, err = cst.cs.dbGetFileContract(fcid) if err != nil { panic(err) } // Mine a block to close the storage proof window. _, err = cst.miner.AddBlock() if err != nil { panic(err) } // Check that the file contract has been removed.
_, err = cst.cs.dbGetFileContract(fcid) if err != errNilItem { panic("file contract should not exist in the database") } // Check that the siafund pool has not changed. postProofPool := cst.cs.dbGetSiafundPool() if !postProofPool.Equals(siafundPool) { panic("siafund pool should not change after submitting a storage proof") } // Check that a delayed output was created for the missed proof. spoid := fcid.StorageProofOutputID(types.ProofMissed, 0) dsco, err := cst.cs.dbGetDSCO(cst.cs.dbBlockHeight()+types.MaturityDelay, spoid) if err != nil { panic(err) } if dsco.UnlockHash != fc.MissedProofOutputs[0].UnlockHash { panic("wrong unlock hash in dsco") } if !dsco.Value.Equals(fc.MissedProofOutputs[0].Value) { panic("wrong sco value in dsco") } } // TestIntegrationMissedStorageProofBlocks creates a consensus set tester and // uses it to call testMissedStorageProofBlocks. func TestIntegrationMissedStorageProofBlocks(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() cst.testMissedStorageProofBlocks() } // testFileContractRevision creates and revises a file contract on the // blockchain. func (cst *consensusSetTester) testFileContractRevision() { // COMPATv0.4.0 - Step the block height up past the hardfork amount. This // code stops nondeterministic failures, related to buggy old code, that // occur when producing storage proofs. for cst.cs.dbBlockHeight() <= 10 { _, err := cst.miner.AddBlock() if err != nil { panic(err) } } // Create a file (as a byte slice) that will be used for the file // contract. filesize := uint64(4e3) file := fastrand.Bytes(int(filesize)) merkleRoot := crypto.MerkleRoot(file) // Create a spendable unlock hash for the file contract. sk, pk := crypto.GenerateKeyPair() uc := types.UnlockConditions{ PublicKeys: []types.SiaPublicKey{{ Algorithm: types.SignatureEd25519, Key: pk[:], }}, SignaturesRequired: 1, } // Create a file contract that will be revised. validProofDest := randAddress() payout := types.NewCurrency64(400e6) fc := types.FileContract{ FileSize: filesize, FileMerkleRoot: crypto.Hash{}, WindowStart: cst.cs.dbBlockHeight() + 2, WindowEnd: cst.cs.dbBlockHeight() + 3, Payout: payout, ValidProofOutputs: []types.SiacoinOutput{{ UnlockHash: validProofDest, Value: types.PostTax(cst.cs.dbBlockHeight(), payout), }}, MissedProofOutputs: []types.SiacoinOutput{{ UnlockHash: types.UnlockHash{}, Value: types.PostTax(cst.cs.dbBlockHeight(), payout), }}, UnlockHash: uc.UnlockHash(), } // Submit a transaction with the file contract. txnBuilder := cst.wallet.StartTransaction() err := txnBuilder.FundSiacoins(payout) if err != nil { panic(err) } fcIndex := txnBuilder.AddFileContract(fc) txnSet, err := txnBuilder.Sign(true) if err != nil { panic(err) } err = cst.tpool.AcceptTransactionSet(txnSet) if err != nil { panic(err) } _, err = cst.miner.AddBlock() if err != nil { panic(err) } // Submit a revision for the file contract.
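// (To be accepted, the revision must be signed against the contract's // UnlockConditions -- hence the spendable unlock hash created above -- and // must carry a revision number higher than the contract's current one.)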
ti := len(txnSet) - 1 fcid := txnSet[ti].FileContractID(fcIndex) fcr := types.FileContractRevision{ ParentID: fcid, UnlockConditions: uc, NewRevisionNumber: 69292, NewFileSize: filesize, NewFileMerkleRoot: merkleRoot, NewWindowStart: cst.cs.dbBlockHeight() + 1, NewWindowEnd: cst.cs.dbBlockHeight() + 2, NewValidProofOutputs: fc.ValidProofOutputs, NewMissedProofOutputs: fc.MissedProofOutputs, NewUnlockHash: uc.UnlockHash(), } ts := types.TransactionSignature{ ParentID: crypto.Hash(fcid), CoveredFields: types.CoveredFields{WholeTransaction: true}, PublicKeyIndex: 0, } txn := types.Transaction{ FileContractRevisions: []types.FileContractRevision{fcr}, TransactionSignatures: []types.TransactionSignature{ts}, } encodedSig := crypto.SignHash(txn.SigHash(0), sk) txn.TransactionSignatures[0].Signature = encodedSig[:] err = cst.tpool.AcceptTransactionSet([]types.Transaction{txn}) if err != nil { panic(err) } _, err = cst.miner.AddBlock() if err != nil { panic(err) } // Create and submit a storage proof for the file contract. segmentIndex, err := cst.cs.StorageProofSegment(fcid) if err != nil { panic(err) } segment, hashSet := crypto.MerkleProof(file, segmentIndex) sp := types.StorageProof{ ParentID: fcid, HashSet: hashSet, } copy(sp.Segment[:], segment) txnBuilder = cst.wallet.StartTransaction() txnBuilder.AddStorageProof(sp) txnSet, err = txnBuilder.Sign(true) if err != nil { panic(err) } err = cst.tpool.AcceptTransactionSet(txnSet) if err != nil { panic(err) } _, err = cst.miner.AddBlock() if err != nil { panic(err) } // Check that the file contract has been removed. _, err = cst.cs.dbGetFileContract(fcid) if err != errNilItem { panic("file contract should not exist in the database") } } // TestIntegrationFileContractRevision creates a consensus set tester and uses // it to call testFileContractRevision. func TestIntegrationFileContractRevision(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() cst.testFileContractRevision() } // testSpendSiafunds spends siafunds on the blockchain. func (cst *consensusSetTester) testSpendSiafunds() { // Create a random destination address for the output in the transaction. destAddr := randAddress() // Create a block containing a transaction with a valid siafund output. txnValue := types.NewCurrency64(3) txnBuilder := cst.wallet.StartTransaction() err := txnBuilder.FundSiafunds(txnValue) if err != nil { panic(err) } outputIndex := txnBuilder.AddSiafundOutput(types.SiafundOutput{Value: txnValue, UnlockHash: destAddr}) txnSet, err := txnBuilder.Sign(true) if err != nil { panic(err) } err = cst.tpool.AcceptTransactionSet(txnSet) if err != nil { panic(err) } // Find the siafund inputs used in the txn set. var claimValues []types.Currency var claimIDs []types.SiacoinOutputID for _, txn := range txnSet { for _, sfi := range txn.SiafundInputs { sfo, err := cst.cs.dbGetSiafundOutput(sfi.ParentID) if err != nil { // It's not in the database because it's in an earlier // transaction: disregard it - testing the first layer of // dependencies is sufficient. continue } poolDiff := cst.cs.dbGetSiafundPool().Sub(sfo.ClaimStart) value := poolDiff.Div(types.SiafundCount).Mul(sfo.Value) claimValues = append(claimValues, value) claimIDs = append(claimIDs, sfi.ParentID.SiaClaimOutputID()) } } if len(claimValues) == 0 { panic("no siafund outputs created?") } // Mine and apply the block to the consensus set. 
_, err = cst.miner.AddBlock() if err != nil { panic(err) } // See that the destination output was created. outputID := txnSet[len(txnSet)-1].SiafundOutputID(outputIndex) sfo, err := cst.cs.dbGetSiafundOutput(outputID) if err != nil { panic(err) } if !sfo.Value.Equals(txnValue) { panic("output added with wrong value") } if sfo.UnlockHash != destAddr { panic("output sent to the wrong address") } if !sfo.ClaimStart.Equals(cst.cs.dbGetSiafundPool()) { panic("ClaimStart is not being set correctly") } // Verify that all expected claims were created and added to the set of // delayed siacoin outputs. for i, id := range claimIDs { dsco, err := cst.cs.dbGetDSCO(cst.cs.dbBlockHeight()+types.MaturityDelay, id) if err != nil { panic(err) } if !dsco.Value.Equals(claimValues[i]) { panic("expected a different claim value on the siaclaim") } } } // TestIntegrationSpendSiafunds creates a consensus set tester and uses it // to call testSpendSiafunds. func TestIntegrationSpendSiafunds(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() cst.testSpendSiafunds() } // testDelayedOutputMaturity adds blocks that result in many delayed outputs // maturing at the same time, verifying that bulk maturity is handled // correctly. // TestRegressionDelayedOutputMaturity creates a consensus set tester and uses // it to call testDelayedOutputMaturity. In the past, bolt's ForEach function // had been used incorrectly, resulting in the incorrect processing of bulk // delayed outputs. // testFileContractMaturity adds blocks that result in many file contracts // being closed at the same time. // TestRegressionFileContractMaturity creates a consensus set tester and uses // it to call testFileContractMaturity. In the past, bolt's ForEach function // had been used incorrectly, resulting in the incorrect processing of bulk // file contracts. /* // testPaymentChannelBlocks submits blocks to set up, use, and close a payment // channel. func (cst *consensusSetTester) testPaymentChannelBlocks() error { // The current method of doing payment channels is gimped because public // keys do not have timelocks. We will be hardforking to include timelocks // in public keys in 0.4.0, but in the meantime we need an alternate // method. // Gimped payment channels: 2-of-2 multisig where one key is controlled by // the funding entity, and one key is controlled by the receiving entity. An // address is created containing both keys, and then the funding entity // creates, but does not sign, a transaction sending coins to the channel // address. A second transaction is created that sends all the coins in the // funding output back to the funding entity. The receiving entity signs the // transaction with a timelocked signature. The funding entity will get the // refund after T blocks as long as the output is not double spent. The // funding entity then signs the first transaction and opens the channel. // // Creating the channel: // 1. Create a 2-of-2 unlock conditions, one key held by each entity. // 2. Funding entity creates, but does not sign, a transaction sending // money to the payment channel address. (txn A) // 3. Funding entity creates and signs a transaction spending the output // created in txn A that sends all the money back as a refund. (txn B) // 4. Receiving entity signs txn B with a timelocked signature, so that the // funding entity cannot get the refund for several days.
The funding entity // is given a fully signed and eventually-spendable txn B. // 5. The funding entity signs and broadcasts txn A. // // Using the channel: // Both the receiving entity and the funding entity keep a record of how // much has been sent down the unclosed channel, and watch the // blockchain for a channel closing transaction. To send more money down // the channel, the funding entity creates and signs a transaction sending // X+y coins to the receiving entity from the channel address. The // transaction is sent to the receiving entity, who will keep it and // potentially sign and broadcast it later. The funding entity will only // send money down the channel if 'work' or some other sort of event has // completed that indicates the receiving entity should get more money. // // Closing the channel: // The receiving entity will sign the transaction that pays them the most // money and then broadcast that transaction. This will spend the output // and close the channel, invalidating txn B and preventing any future // transactions from being made over the channel. The channel must be // closed before the timelock expires on the second signature in txn B, // otherwise the funding entity will be able to get a full refund. // // The funding entity should be waiting until either the receiving entity // closes the channel or the timelock expires. If the receiving entity // closes the channel, all is good. If not, then the funding entity can // close the channel and get a full refund. // Create a 2-of-2 unlock conditions, one key each for the sender and the // receiver in the payment channel. sk1, pk1, err := crypto.StdKeyGen.Generate() // Funding entity. if err != nil { return err } sk2, pk2, err := crypto.StdKeyGen.Generate() // Receiving entity. if err != nil { return err } uc := types.UnlockConditions{ PublicKeys: []types.SiaPublicKey{ { Algorithm: types.SignatureEd25519, Key: pk1[:], }, { Algorithm: types.SignatureEd25519, Key: pk2[:], }, }, SignaturesRequired: 2, } channelAddress := uc.UnlockHash() // Funding entity creates but does not sign a transaction that funds the // channel address. Because the wallet is not very flexible, the channel // txn needs to be fully custom. To get a custom txn, manually create an // address and then use the wallet to fund that address.
channelSize := types.NewCurrency64(10e3) channelFundingSK, channelFundingPK, err := crypto.StdKeyGen.Generate() if err != nil { return err } channelFundingUC := types.UnlockConditions{ PublicKeys: []types.SiaPublicKey{{ Algorithm: types.SignatureEd25519, Key: channelFundingPK[:], }}, SignaturesRequired: 1, } channelFundingAddr := channelFundingUC.UnlockHash() fundTxnBuilder := cst.wallet.StartTransaction() if err != nil { return err } err = fundTxnBuilder.FundSiacoins(channelSize) if err != nil { return err } scoFundIndex := fundTxnBuilder.AddSiacoinOutput(types.SiacoinOutput{Value: channelSize, UnlockHash: channelFundingAddr}) fundTxnSet, err := fundTxnBuilder.Sign(true) if err != nil { return err } fundOutputID := fundTxnSet[len(fundTxnSet)-1].SiacoinOutputID(int(scoFundIndex)) channelTxn := types.Transaction{ SiacoinInputs: []types.SiacoinInput{{ ParentID: fundOutputID, UnlockConditions: channelFundingUC, }}, SiacoinOutputs: []types.SiacoinOutput{{ Value: channelSize, UnlockHash: channelAddress, }}, TransactionSignatures: []types.TransactionSignature{{ ParentID: crypto.Hash(fundOutputID), PublicKeyIndex: 0, CoveredFields: types.CoveredFields{WholeTransaction: true}, }}, } // Funding entity creates and signs a transaction that spends the full // channel output. channelOutputID := channelTxn.SiacoinOutputID(0) refundUC, err := cst.wallet.NextAddress() refundAddr := refundUC.UnlockHash() if err != nil { return err } refundTxn := types.Transaction{ SiacoinInputs: []types.SiacoinInput{{ ParentID: channelOutputID, UnlockConditions: uc, }}, SiacoinOutputs: []types.SiacoinOutput{{ Value: channelSize, UnlockHash: refundAddr, }}, TransactionSignatures: []types.TransactionSignature{{ ParentID: crypto.Hash(channelOutputID), PublicKeyIndex: 0, CoveredFields: types.CoveredFields{WholeTransaction: true}, }}, } sigHash := refundTxn.SigHash(0) cryptoSig1, err := crypto.SignHash(sigHash, sk1) if err != nil { return err } refundTxn.TransactionSignatures[0].Signature = cryptoSig1[:] // Receiving entity signs the transaction that spends the full channel // output, but with a timelock. refundTxn.TransactionSignatures = append(refundTxn.TransactionSignatures, types.TransactionSignature{ ParentID: crypto.Hash(channelOutputID), PublicKeyIndex: 1, Timelock: cst.cs.dbBlockHeight() + 2, CoveredFields: types.CoveredFields{WholeTransaction: true}, }) sigHash = refundTxn.SigHash(1) cryptoSig2, err := crypto.SignHash(sigHash, sk2) if err != nil { return err } refundTxn.TransactionSignatures[1].Signature = cryptoSig2[:] // Funding entity will now sign and broadcast the funding transaction. sigHash = channelTxn.SigHash(0) cryptoSig0, err := crypto.SignHash(sigHash, channelFundingSK) if err != nil { return err } channelTxn.TransactionSignatures[0].Signature = cryptoSig0[:] err = cst.tpool.AcceptTransactionSet(append(fundTxnSet, channelTxn)) if err != nil { return err } // Put the txn in a block. _, err = cst.miner.AddBlock() if err != nil { return err } // Try to submit the refund transaction before the timelock has expired. err = cst.tpool.AcceptTransactionSet([]types.Transaction{refundTxn}) if err != types.ErrPrematureSignature { return err } // Create a transaction that has partially used the channel, and submit it // to the blockchain to close the channel. 
closeTxn := types.Transaction{ SiacoinInputs: []types.SiacoinInput{{ ParentID: channelOutputID, UnlockConditions: uc, }}, SiacoinOutputs: []types.SiacoinOutput{ { Value: channelSize.Sub(types.NewCurrency64(5)), UnlockHash: refundAddr, }, { Value: types.NewCurrency64(5), }, }, TransactionSignatures: []types.TransactionSignature{ { ParentID: crypto.Hash(channelOutputID), PublicKeyIndex: 0, CoveredFields: types.CoveredFields{WholeTransaction: true}, }, { ParentID: crypto.Hash(channelOutputID), PublicKeyIndex: 1, CoveredFields: types.CoveredFields{WholeTransaction: true}, }, }, } sigHash = closeTxn.SigHash(0) cryptoSig3, err := crypto.SignHash(sigHash, sk1) if err != nil { return err } closeTxn.TransactionSignatures[0].Signature = cryptoSig3[:] sigHash = closeTxn.SigHash(1) cryptoSig4, err := crypto.SignHash(sigHash, sk2) if err != nil { return err } closeTxn.TransactionSignatures[1].Signature = cryptoSig4[:] err = cst.tpool.AcceptTransactionSet([]types.Transaction{closeTxn}) if err != nil { return err } // Mine the block with the transaction. _, err = cst.miner.AddBlock() if err != nil { return err } closeRefundID := closeTxn.SiacoinOutputID(0) closePaymentID := closeTxn.SiacoinOutputID(1) exists := cst.cs.db.inSiacoinOutputs(closeRefundID) if !exists { return errors.New("close txn refund output doesn't exist") } exists = cst.cs.db.inSiacoinOutputs(closePaymentID) if !exists { return errors.New("close txn payment output doesn't exist") } // Create a payment channel where the receiving entity never responds to // the initial transaction. { // Funding entity creates but does not sign a transaction that funds the // channel address. Because the wallet is not very flexible, the channel // txn needs to be fully custom. To get a custom txn, manually create an // address and then use the wallet to fund that address. channelSize := types.NewCurrency64(10e3) channelFundingSK, channelFundingPK, err := crypto.StdKeyGen.Generate() if err != nil { return err } channelFundingUC := types.UnlockConditions{ PublicKeys: []types.SiaPublicKey{{ Algorithm: types.SignatureEd25519, Key: channelFundingPK[:], }}, SignaturesRequired: 1, } channelFundingAddr := channelFundingUC.UnlockHash() fundTxnBuilder := cst.wallet.StartTransaction() err = fundTxnBuilder.FundSiacoins(channelSize) if err != nil { return err } scoFundIndex := fundTxnBuilder.AddSiacoinOutput(types.SiacoinOutput{Value: channelSize, UnlockHash: channelFundingAddr}) fundTxnSet, err := fundTxnBuilder.Sign(true) if err != nil { return err } fundOutputID := fundTxnSet[len(fundTxnSet)-1].SiacoinOutputID(int(scoFundIndex)) channelTxn := types.Transaction{ SiacoinInputs: []types.SiacoinInput{{ ParentID: fundOutputID, UnlockConditions: channelFundingUC, }}, SiacoinOutputs: []types.SiacoinOutput{{ Value: channelSize, UnlockHash: channelAddress, }}, TransactionSignatures: []types.TransactionSignature{{ ParentID: crypto.Hash(fundOutputID), PublicKeyIndex: 0, CoveredFields: types.CoveredFields{WholeTransaction: true}, }}, } // Funding entity creates and signs a transaction that spends the full // channel output. 
channelOutputID := channelTxn.SiacoinOutputID(0) refundUC, err := cst.wallet.NextAddress() refundAddr := refundUC.UnlockHash() if err != nil { return err } refundTxn := types.Transaction{ SiacoinInputs: []types.SiacoinInput{{ ParentID: channelOutputID, UnlockConditions: uc, }}, SiacoinOutputs: []types.SiacoinOutput{{ Value: channelSize, UnlockHash: refundAddr, }}, TransactionSignatures: []types.TransactionSignature{{ ParentID: crypto.Hash(channelOutputID), PublicKeyIndex: 0, CoveredFields: types.CoveredFields{WholeTransaction: true}, }}, } sigHash := refundTxn.SigHash(0) cryptoSig1, err := crypto.SignHash(sigHash, sk1) if err != nil { return err } refundTxn.TransactionSignatures[0].Signature = cryptoSig1[:] // Receiving entity never communicates, so the funding entity must reclaim // the 'channelSize' coins that were intended to go to the channel. reclaimUC, err := cst.wallet.NextAddress() reclaimAddr := reclaimUC.UnlockHash() if err != nil { return err } reclaimTxn := types.Transaction{ SiacoinInputs: []types.SiacoinInput{{ ParentID: fundOutputID, UnlockConditions: channelFundingUC, }}, SiacoinOutputs: []types.SiacoinOutput{{ Value: channelSize, UnlockHash: reclaimAddr, }}, TransactionSignatures: []types.TransactionSignature{{ ParentID: crypto.Hash(fundOutputID), PublicKeyIndex: 0, CoveredFields: types.CoveredFields{WholeTransaction: true}, }}, } sigHash = reclaimTxn.SigHash(0) cryptoSig, err := crypto.SignHash(sigHash, channelFundingSK) if err != nil { return err } reclaimTxn.TransactionSignatures[0].Signature = cryptoSig[:] err = cst.tpool.AcceptTransactionSet(append(fundTxnSet, reclaimTxn)) if err != nil { return err } block, _ := cst.miner.FindBlock() err = cst.cs.AcceptBlock(block) if err != nil { return err } reclaimOutputID := reclaimTxn.SiacoinOutputID(0) exists := cst.cs.db.inSiacoinOutputs(reclaimOutputID) if !exists { return errors.New("failed to reclaim an output that belongs to the funding entity") } } // Create a channel and then open the channel, but close the channel using // the timelocked signature. { // Funding entity creates but does not sign a transaction that funds the // channel address. Because the wallet is not very flexible, the channel // txn needs to be fully custom. To get a custom txn, manually create an // address and then use the wallet to fund that address.
channelSize := types.NewCurrency64(10e3) channelFundingSK, channelFundingPK, err := crypto.StdKeyGen.Generate() if err != nil { return err } channelFundingUC := types.UnlockConditions{ PublicKeys: []types.SiaPublicKey{{ Algorithm: types.SignatureEd25519, Key: channelFundingPK[:], }}, SignaturesRequired: 1, } channelFundingAddr := channelFundingUC.UnlockHash() fundTxnBuilder := cst.wallet.StartTransaction() err = fundTxnBuilder.FundSiacoins(channelSize) if err != nil { return err } scoFundIndex := fundTxnBuilder.AddSiacoinOutput(types.SiacoinOutput{Value: channelSize, UnlockHash: channelFundingAddr}) fundTxnSet, err := fundTxnBuilder.Sign(true) if err != nil { return err } fundOutputID := fundTxnSet[len(fundTxnSet)-1].SiacoinOutputID(int(scoFundIndex)) channelTxn := types.Transaction{ SiacoinInputs: []types.SiacoinInput{{ ParentID: fundOutputID, UnlockConditions: channelFundingUC, }}, SiacoinOutputs: []types.SiacoinOutput{{ Value: channelSize, UnlockHash: channelAddress, }}, TransactionSignatures: []types.TransactionSignature{{ ParentID: crypto.Hash(fundOutputID), PublicKeyIndex: 0, CoveredFields: types.CoveredFields{WholeTransaction: true}, }}, } // Funding entity creates and signs a transaction that spends the full // channel output. channelOutputID := channelTxn.SiacoinOutputID(0) refundUC, err := cst.wallet.NextAddress() refundAddr := refundUC.UnlockHash() if err != nil { return err } refundTxn := types.Transaction{ SiacoinInputs: []types.SiacoinInput{{ ParentID: channelOutputID, UnlockConditions: uc, }}, SiacoinOutputs: []types.SiacoinOutput{{ Value: channelSize, UnlockHash: refundAddr, }}, TransactionSignatures: []types.TransactionSignature{{ ParentID: crypto.Hash(channelOutputID), PublicKeyIndex: 0, CoveredFields: types.CoveredFields{WholeTransaction: true}, }}, } sigHash := refundTxn.SigHash(0) cryptoSig1, err := crypto.SignHash(sigHash, sk1) if err != nil { return err } refundTxn.TransactionSignatures[0].Signature = cryptoSig1[:] // Receiving entity signs the transaction that spends the full channel // output, but with a timelock. refundTxn.TransactionSignatures = append(refundTxn.TransactionSignatures, types.TransactionSignature{ ParentID: crypto.Hash(channelOutputID), PublicKeyIndex: 1, Timelock: cst.cs.dbBlockHeight() + 2, CoveredFields: types.CoveredFields{WholeTransaction: true}, }) sigHash = refundTxn.SigHash(1) cryptoSig2, err := crypto.SignHash(sigHash, sk2) if err != nil { return err } refundTxn.TransactionSignatures[1].Signature = cryptoSig2[:] // Funding entity will now sign and broadcast the funding transaction. sigHash = channelTxn.SigHash(0) cryptoSig0, err := crypto.SignHash(sigHash, channelFundingSK) if err != nil { return err } channelTxn.TransactionSignatures[0].Signature = cryptoSig0[:] err = cst.tpool.AcceptTransactionSet(append(fundTxnSet, channelTxn)) if err != nil { return err } // Put the txn in a block. block, _ := cst.miner.FindBlock() err = cst.cs.AcceptBlock(block) if err != nil { return err } // Receiving entity never signs another transaction, so the funding // entity waits until the timelock is complete, and then submits the // refundTxn. 
for i := 0; i < 3; i++ { block, _ := cst.miner.FindBlock() err = cst.cs.AcceptBlock(block) if err != nil { return err } } err = cst.tpool.AcceptTransactionSet([]types.Transaction{refundTxn}) if err != nil { return err } block, _ = cst.miner.FindBlock() err = cst.cs.AcceptBlock(block) if err != nil { return err } refundOutputID := refundTxn.SiacoinOutputID(0) exists := cst.cs.db.inSiacoinOutputs(refundOutputID) if !exists { return errors.New("timelocked refund transaction did not get spent correctly") } } return nil } */ /* // TestPaymentChannelBlocks creates a consensus set tester and uses it to call // testPaymentChannelBlocks. func TestPaymentChannelBlocks(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() err = cst.testPaymentChannelBlocks() if err != nil { t.Fatal(err) } } */ Sia-1.3.0/modules/consensus/applytransaction.go000066400000000000000000000167241313565667000216630ustar00rootroot00000000000000package consensus // applytransaction.go handles applying a transaction to the consensus set. // There is an assumption that the transaction has already been verified. import ( "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) // applySiacoinInputs takes all of the siacoin inputs in a transaction and // applies them to the state, updating the diffs in the processed block. func applySiacoinInputs(tx *bolt.Tx, pb *processedBlock, t types.Transaction) { // Remove all siacoin inputs from the unspent siacoin outputs list. for _, sci := range t.SiacoinInputs { sco, err := getSiacoinOutput(tx, sci.ParentID) if build.DEBUG && err != nil { panic(err) } scod := modules.SiacoinOutputDiff{ Direction: modules.DiffRevert, ID: sci.ParentID, SiacoinOutput: sco, } pb.SiacoinOutputDiffs = append(pb.SiacoinOutputDiffs, scod) commitSiacoinOutputDiff(tx, scod, modules.DiffApply) } } // applySiacoinOutputs takes all of the siacoin outputs in a transaction and // applies them to the state, updating the diffs in the processed block. func applySiacoinOutputs(tx *bolt.Tx, pb *processedBlock, t types.Transaction) { // Add all siacoin outputs to the unspent siacoin outputs list. for i, sco := range t.SiacoinOutputs { scoid := t.SiacoinOutputID(uint64(i)) scod := modules.SiacoinOutputDiff{ Direction: modules.DiffApply, ID: scoid, SiacoinOutput: sco, } pb.SiacoinOutputDiffs = append(pb.SiacoinOutputDiffs, scod) commitSiacoinOutputDiff(tx, scod, modules.DiffApply) } } // applyFileContracts iterates through all of the file contracts in a // transaction and applies them to the state, updating the diffs in the processed // block. func applyFileContracts(tx *bolt.Tx, pb *processedBlock, t types.Transaction) { for i, fc := range t.FileContracts { fcid := t.FileContractID(uint64(i)) fcd := modules.FileContractDiff{ Direction: modules.DiffApply, ID: fcid, FileContract: fc, } pb.FileContractDiffs = append(pb.FileContractDiffs, fcd) commitFileContractDiff(tx, fcd, modules.DiffApply) // Get the portion of the contract that goes into the siafund pool and // add it to the siafund pool.
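// As a rough worked example (assuming the standard siafund portion of 3.9%, // rounded down to a multiple of types.SiafundCount): a 400e6 H payout would add // about 15.6e6 H to the pool here, with types.PostTax(height, payout) left over // for the contract's valid/missed proof outputs.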
sfp := getSiafundPool(tx) sfpd := modules.SiafundPoolDiff{ Direction: modules.DiffApply, Previous: sfp, Adjusted: sfp.Add(types.Tax(blockHeight(tx), fc.Payout)), } pb.SiafundPoolDiffs = append(pb.SiafundPoolDiffs, sfpd) commitSiafundPoolDiff(tx, sfpd, modules.DiffApply) } } // applyFileContractRevisions iterates through all of the file contract // revisions in a transaction and applies them to the state, updating the diffs // in the processed block. func applyFileContractRevisions(tx *bolt.Tx, pb *processedBlock, t types.Transaction) { for _, fcr := range t.FileContractRevisions { fc, err := getFileContract(tx, fcr.ParentID) if build.DEBUG && err != nil { panic(err) } // Add the diff to delete the old file contract. fcd := modules.FileContractDiff{ Direction: modules.DiffRevert, ID: fcr.ParentID, FileContract: fc, } pb.FileContractDiffs = append(pb.FileContractDiffs, fcd) commitFileContractDiff(tx, fcd, modules.DiffApply) // Add the diff to add the revised file contract. newFC := types.FileContract{ FileSize: fcr.NewFileSize, FileMerkleRoot: fcr.NewFileMerkleRoot, WindowStart: fcr.NewWindowStart, WindowEnd: fcr.NewWindowEnd, Payout: fc.Payout, ValidProofOutputs: fcr.NewValidProofOutputs, MissedProofOutputs: fcr.NewMissedProofOutputs, UnlockHash: fcr.NewUnlockHash, RevisionNumber: fcr.NewRevisionNumber, } fcd = modules.FileContractDiff{ Direction: modules.DiffApply, ID: fcr.ParentID, FileContract: newFC, } pb.FileContractDiffs = append(pb.FileContractDiffs, fcd) commitFileContractDiff(tx, fcd, modules.DiffApply) } } // applyStorageProofs iterates through all of the storage proofs in a // transaction and applies them to the state, updating the diffs in the processed // block. func applyStorageProofs(tx *bolt.Tx, pb *processedBlock, t types.Transaction) { for _, sp := range t.StorageProofs { fc, err := getFileContract(tx, sp.ParentID) if build.DEBUG && err != nil { panic(err) } // Add all of the outputs in the ValidProofOutputs of the contract. for i, vpo := range fc.ValidProofOutputs { spoid := sp.ParentID.StorageProofOutputID(types.ProofValid, uint64(i)) dscod := modules.DelayedSiacoinOutputDiff{ Direction: modules.DiffApply, ID: spoid, SiacoinOutput: vpo, MaturityHeight: pb.Height + types.MaturityDelay, } pb.DelayedSiacoinOutputDiffs = append(pb.DelayedSiacoinOutputDiffs, dscod) commitDelayedSiacoinOutputDiff(tx, dscod, modules.DiffApply) } fcd := modules.FileContractDiff{ Direction: modules.DiffRevert, ID: sp.ParentID, FileContract: fc, } pb.FileContractDiffs = append(pb.FileContractDiffs, fcd) commitFileContractDiff(tx, fcd, modules.DiffApply) } } // applySiafundInputs takes all of the siafund inputs in a transaction and // applies them to the state, updating the diffs in the processed block. func applySiafundInputs(tx *bolt.Tx, pb *processedBlock, t types.Transaction) { for _, sfi := range t.SiafundInputs { // Calculate the volume of siacoins to put in the claim output. sfo, err := getSiafundOutput(tx, sfi.ParentID) if build.DEBUG && err != nil { panic(err) } claimPortion := getSiafundPool(tx).Sub(sfo.ClaimStart).Div(types.SiafundCount).Mul(sfo.Value) // Add the claim output to the delayed set of outputs.
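// The claim is pro rata: the pool growth since ClaimStart divided by // types.SiafundCount is the per-fund share of fees, so claimPortion above is // this owner's cut of everything the pool accrued while the output was held.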
sco := types.SiacoinOutput{ Value: claimPortion, UnlockHash: sfi.ClaimUnlockHash, } sfoid := sfi.ParentID.SiaClaimOutputID() dscod := modules.DelayedSiacoinOutputDiff{ Direction: modules.DiffApply, ID: sfoid, SiacoinOutput: sco, MaturityHeight: pb.Height + types.MaturityDelay, } pb.DelayedSiacoinOutputDiffs = append(pb.DelayedSiacoinOutputDiffs, dscod) commitDelayedSiacoinOutputDiff(tx, dscod, modules.DiffApply) // Create the siafund output diff and remove the output from the // consensus set. sfod := modules.SiafundOutputDiff{ Direction: modules.DiffRevert, ID: sfi.ParentID, SiafundOutput: sfo, } pb.SiafundOutputDiffs = append(pb.SiafundOutputDiffs, sfod) commitSiafundOutputDiff(tx, sfod, modules.DiffApply) } } // applySiafundOutputs applies the siafund outputs in a transaction to the // consensus set. func applySiafundOutputs(tx *bolt.Tx, pb *processedBlock, t types.Transaction) { for i, sfo := range t.SiafundOutputs { sfoid := t.SiafundOutputID(uint64(i)) sfo.ClaimStart = getSiafundPool(tx) sfod := modules.SiafundOutputDiff{ Direction: modules.DiffApply, ID: sfoid, SiafundOutput: sfo, } pb.SiafundOutputDiffs = append(pb.SiafundOutputDiffs, sfod) commitSiafundOutputDiff(tx, sfod, modules.DiffApply) } } // applyTransaction applies the contents of a transaction to the ConsensusSet. // This produces a set of diffs, which are stored in the blockNode containing // the transaction. No verification is done by this function. func applyTransaction(tx *bolt.Tx, pb *processedBlock, t types.Transaction) { applySiacoinInputs(tx, pb, t) applySiacoinOutputs(tx, pb, t) applyFileContracts(tx, pb, t) applyFileContractRevisions(tx, pb, t) applyStorageProofs(tx, pb, t) applySiafundInputs(tx, pb, t) applySiafundOutputs(tx, pb, t) } Sia-1.3.0/modules/consensus/applytransaction_test.go000066400000000000000000000562331313565667000227240ustar00rootroot00000000000000package consensus /* import ( "testing" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // TestApplySiacoinInputs probes the applySiacoinInputs method of the consensus // set. func TestApplySiacoinInputs(t *testing.T) { if testing.Short() { t.SkipNow() } // Create a consensus set and get it to 3 siacoin outputs. The consensus // set starts with 2 siacoin outputs, mining a block will add another. cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() b, _ := cst.miner.FindBlock() err = cst.cs.AcceptBlock(b) if err != nil { t.Fatal(err) } // Create a block node to use with application. pb := new(processedBlock) // Fetch the output id's of each siacoin output in the consensus set. var ids []types.SiacoinOutputID cst.cs.db.forEachSiacoinOutputs(func(id types.SiacoinOutputID, sco types.SiacoinOutput) { ids = append(ids, id) }) // Apply a transaction with a single siacoin input. txn := types.Transaction{ SiacoinInputs: []types.SiacoinInput{ {ParentID: ids[0]}, }, } cst.cs.applySiacoinInputs(pb, txn) exists := cst.cs.db.inSiacoinOutputs(ids[0]) if exists { t.Error("Failed to consume a siacoin output") } if cst.cs.db.lenSiacoinOutputs() != 2 { t.Error("siacoin outputs not correctly updated") } if len(pb.SiacoinOutputDiffs) != 1 { t.Error("block node was not updated for single transaction") } if pb.SiacoinOutputDiffs[0].Direction != modules.DiffRevert { t.Error("wrong diff direction applied when consuming a siacoin output") } if pb.SiacoinOutputDiffs[0].ID != ids[0] { t.Error("wrong id used when consuming a siacoin output") } // Apply a transaction with two siacoin inputs.
txn = types.Transaction{ SiacoinInputs: []types.SiacoinInput{ {ParentID: ids[1]}, {ParentID: ids[2]}, }, } cst.cs.applySiacoinInputs(pb, txn) if cst.cs.db.lenSiacoinOutputs() != 0 { t.Error("failed to consume all siacoin outputs in the consensus set") } if len(pb.SiacoinOutputDiffs) != 3 { t.Error("processed block was not updated for single transaction") } } // TestMisuseApplySiacoinInputs misuses applySiacoinInput and checks that a // panic was triggered. func TestMisuseApplySiacoinInputs(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a block node to use with application. pb := new(processedBlock) // Fetch the output id's of each siacoin output in the consensus set. var ids []types.SiacoinOutputID cst.cs.db.forEachSiacoinOutputs(func(id types.SiacoinOutputID, sco types.SiacoinOutput) { ids = append(ids, id) }) // Apply a transaction with a single siacoin input. txn := types.Transaction{ SiacoinInputs: []types.SiacoinInput{ {ParentID: ids[0]}, }, } cst.cs.applySiacoinInputs(pb, txn) // Trigger the panic that occurs when an output is applied incorrectly, and // perform a catch to read the error that is created. defer func() { r := recover() if r == nil { t.Error("expecting error after corrupting database") } }() cst.cs.applySiacoinInputs(pb, txn) } // TestApplySiacoinOutputs probes the applySiacoinOutput method of the // consensus set. func TestApplySiacoinOutputs(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a block node to use with application. pb := new(processedBlock) // Apply a transaction with a single siacoin output. txn := types.Transaction{ SiacoinOutputs: []types.SiacoinOutput{{}}, } cst.cs.applySiacoinOutputs(pb, txn) scoid := txn.SiacoinOutputID(0) exists := cst.cs.db.inSiacoinOutputs(scoid) if !exists { t.Error("Failed to create siacoin output") } if cst.cs.db.lenSiacoinOutputs() != 3 { // 3 because createConsensusSetTester has 2 initially. t.Error("siacoin outputs not correctly updated") } if len(pb.SiacoinOutputDiffs) != 1 { t.Error("block node was not updated for single element transaction") } if pb.SiacoinOutputDiffs[0].Direction != modules.DiffApply { t.Error("wrong diff direction applied when creating a siacoin output") } if pb.SiacoinOutputDiffs[0].ID != scoid { t.Error("wrong id used when creating a siacoin output") } // Apply a transaction with 2 siacoin outputs. txn = types.Transaction{ SiacoinOutputs: []types.SiacoinOutput{ {Value: types.NewCurrency64(1)}, {Value: types.NewCurrency64(2)}, }, } cst.cs.applySiacoinOutputs(pb, txn) scoid0 := txn.SiacoinOutputID(0) scoid1 := txn.SiacoinOutputID(1) exists = cst.cs.db.inSiacoinOutputs(scoid0) if !exists { t.Error("Failed to create siacoin output") } exists = cst.cs.db.inSiacoinOutputs(scoid1) if !exists { t.Error("Failed to create siacoin output") } if cst.cs.db.lenSiacoinOutputs() != 5 { // 5 because createConsensusSetTester has 2 initially. t.Error("siacoin outputs not correctly updated") } if len(pb.SiacoinOutputDiffs) != 3 { t.Error("block node was not updated correctly") } } // TestMisuseApplySiacoinOutputs misuses applySiacoinOutputs and checks that a // panic was triggered. 
func TestMisuseApplySiacoinOutputs(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a block node to use with application. pb := new(processedBlock) // Apply a transaction with a single siacoin output. txn := types.Transaction{ SiacoinOutputs: []types.SiacoinOutput{{}}, } cst.cs.applySiacoinOutputs(pb, txn) // Trigger the panic that occurs when an output is applied incorrectly, and // perform a catch to read the error that is created. defer func() { r := recover() if r == nil { t.Error("no panic occurred when misusing applySiacoinOutputs") } }() cst.cs.applySiacoinOutputs(pb, txn) } // TestApplyFileContracts probes the applyFileContracts method of the // consensus set. func TestApplyFileContracts(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a block node to use with application. pb := new(processedBlock) // Apply a transaction with a single file contract. txn := types.Transaction{ FileContracts: []types.FileContract{{}}, } cst.cs.applyFileContracts(pb, txn) fcid := txn.FileContractID(0) exists := cst.cs.db.inFileContracts(fcid) if !exists { t.Error("Failed to create file contract") } if cst.cs.db.lenFileContracts() != 1 { t.Error("file contracts not correctly updated") } if len(pb.FileContractDiffs) != 1 { t.Error("block node was not updated for single element transaction") } if pb.FileContractDiffs[0].Direction != modules.DiffApply { t.Error("wrong diff direction applied when creating a file contract") } if pb.FileContractDiffs[0].ID != fcid { t.Error("wrong id used when creating a file contract") } // Apply a transaction with 2 file contracts. txn = types.Transaction{ FileContracts: []types.FileContract{ {Payout: types.NewCurrency64(1)}, {Payout: types.NewCurrency64(300e3)}, }, } cst.cs.applyFileContracts(pb, txn) fcid0 := txn.FileContractID(0) fcid1 := txn.FileContractID(1) exists = cst.cs.db.inFileContracts(fcid0) if !exists { t.Error("Failed to create file contract") } exists = cst.cs.db.inFileContracts(fcid1) if !exists { t.Error("Failed to create file contract") } if cst.cs.db.lenFileContracts() != 3 { t.Error("file contracts not correctly updated") } if len(pb.FileContractDiffs) != 3 { t.Error("block node was not updated correctly") } if cst.cs.siafundPool.Cmp64(10e3) != 0 { t.Error("siafund pool did not update correctly upon creation of a file contract") } } // TestMisuseApplyFileContracts misuses applyFileContracts and checks that a // panic was triggered. func TestMisuseApplyFileContracts(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a block node to use with application. pb := new(processedBlock) // Apply a transaction with a single file contract. txn := types.Transaction{ FileContracts: []types.FileContract{{}}, } cst.cs.applyFileContracts(pb, txn) // Trigger the panic that occurs when an output is applied incorrectly, and // perform a catch to read the error that is created. defer func() { r := recover() if r == nil { t.Error("no panic occurred when misusing applyFileContracts") } }() cst.cs.applyFileContracts(pb, txn) } // TestApplyFileContractRevisions probes the applyFileContractRevisions method // of the consensus set.
func TestApplyFileContractRevisions(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a block node to use with application. pb := new(processedBlock) // Apply a transaction with two file contracts - that way there is // something to revise. txn := types.Transaction{ FileContracts: []types.FileContract{ {}, {Payout: types.NewCurrency64(1)}, }, } cst.cs.applyFileContracts(pb, txn) fcid0 := txn.FileContractID(0) fcid1 := txn.FileContractID(1) // Apply a single file contract revision. txn = types.Transaction{ FileContractRevisions: []types.FileContractRevision{ { ParentID: fcid0, NewFileSize: 1, }, }, } cst.cs.applyFileContractRevisions(pb, txn) exists := cst.cs.db.inFileContracts(fcid0) if !exists { t.Error("Revision killed a file contract") } fc := cst.cs.db.getFileContracts(fcid0) if fc.FileSize != 1 { t.Error("file contract filesize not properly updated") } if cst.cs.db.lenFileContracts() != 2 { t.Error("file contracts not correctly updated") } if len(pb.FileContractDiffs) != 4 { // 2 creating the initial contracts, 1 to remove the old, 1 to add the revision. t.Error("block node was not updated for single element transaction") } if pb.FileContractDiffs[2].Direction != modules.DiffRevert { t.Error("wrong diff direction applied when revising a file contract") } if pb.FileContractDiffs[3].Direction != modules.DiffApply { t.Error("wrong diff direction applied when revising a file contract") } if pb.FileContractDiffs[2].ID != fcid0 { t.Error("wrong id used when revising a file contract") } if pb.FileContractDiffs[3].ID != fcid0 { t.Error("wrong id used when revising a file contract") } // Apply a transaction with 2 file contract revisions. txn = types.Transaction{ FileContractRevisions: []types.FileContractRevision{ { ParentID: fcid0, NewFileSize: 2, }, { ParentID: fcid1, NewFileSize: 3, }, }, } cst.cs.applyFileContractRevisions(pb, txn) exists = cst.cs.db.inFileContracts(fcid0) if !exists { t.Error("Revision ate file contract") } fc0 := cst.cs.db.getFileContracts(fcid0) exists = cst.cs.db.inFileContracts(fcid1) if !exists { t.Error("Revision ate file contract") } fc1 := cst.cs.db.getFileContracts(fcid1) if fc0.FileSize != 2 { t.Error("Revision not correctly applied") } if fc1.FileSize != 3 { t.Error("Revision not correctly applied") } if cst.cs.db.lenFileContracts() != 2 { t.Error("file contracts not correctly updated") } if len(pb.FileContractDiffs) != 8 { t.Error("block node was not updated correctly") } } // TestMisuseApplyFileContractRevisions misuses applyFileContractRevisions and // checks that a panic was triggered. func TestMisuseApplyFileContractRevisions(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a block node to use with application. pb := new(processedBlock) // Trigger a panic from revising a nonexistent file contract. defer func() { r := recover() if r != errNilItem { t.Error("no panic occurred when misusing applyFileContractRevisions") } }() txn := types.Transaction{ FileContractRevisions: []types.FileContractRevision{{}}, } cst.cs.applyFileContractRevisions(pb, txn) } // TestApplyStorageProofs probes the applyStorageProofs method of the consensus // set.
func TestApplyStorageProofs(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a block node to use with application. pb := new(processedBlock) pb.Height = cst.cs.height() // Apply a transaction with three file contracts - so that there is a reason // to create a storage proof. txn := types.Transaction{ FileContracts: []types.FileContract{ { Payout: types.NewCurrency64(300e3), ValidProofOutputs: []types.SiacoinOutput{ {Value: types.NewCurrency64(290e3)}, }, }, {}, { Payout: types.NewCurrency64(600e3), ValidProofOutputs: []types.SiacoinOutput{ {Value: types.NewCurrency64(280e3)}, {Value: types.NewCurrency64(300e3)}, }, }, }, } cst.cs.applyFileContracts(pb, txn) fcid0 := txn.FileContractID(0) fcid1 := txn.FileContractID(1) fcid2 := txn.FileContractID(2) // Apply a single storage proof. txn = types.Transaction{ StorageProofs: []types.StorageProof{{ParentID: fcid0}}, } cst.cs.applyStorageProofs(pb, txn) exists := cst.cs.db.inFileContracts(fcid0) if exists { t.Error("Storage proof did not disable a file contract.") } if cst.cs.db.lenFileContracts() != 2 { t.Error("file contracts not correctly updated") } if len(pb.FileContractDiffs) != 4 { // 3 creating the initial contracts, 1 for the storage proof. t.Error("block node was not updated for single element transaction") } if pb.FileContractDiffs[3].Direction != modules.DiffRevert { t.Error("wrong diff direction applied when applying a storage proof") } if pb.FileContractDiffs[3].ID != fcid0 { t.Error("wrong id used when applying a storage proof") } spoid0 := fcid0.StorageProofOutputID(types.ProofValid, 0) exists = cst.cs.db.inDelayedSiacoinOutputsHeight(pb.Height+types.MaturityDelay, spoid0) if !exists { t.Error("storage proof output not created after applying a storage proof") } sco := cst.cs.db.getDelayedSiacoinOutputs(pb.Height+types.MaturityDelay, spoid0) if sco.Value.Cmp64(290e3) != 0 { t.Error("storage proof output was created with the wrong value") } // Apply a transaction with 2 storage proofs.
txn = types.Transaction{ StorageProofs: []types.StorageProof{ {ParentID: fcid1}, {ParentID: fcid2}, }, } cst.cs.applyStorageProofs(pb, txn) exists = cst.cs.db.inFileContracts(fcid1) if exists { t.Error("Storage proof failed to consume file contract.") } exists = cst.cs.db.inFileContracts(fcid2) if exists { t.Error("storage proof did not consume file contract") } if cst.cs.db.lenFileContracts() != 0 { t.Error("file contracts not correctly updated") } if len(pb.FileContractDiffs) != 6 { t.Error("block node was not updated correctly") } spoid1 := fcid1.StorageProofOutputID(types.ProofValid, 0) exists = cst.cs.db.inSiacoinOutputs(spoid1) if exists { t.Error("output created when file contract had no corresponding output") } spoid2 := fcid2.StorageProofOutputID(types.ProofValid, 0) exists = cst.cs.db.inDelayedSiacoinOutputsHeight(pb.Height+types.MaturityDelay, spoid2) if !exists { t.Error("no output created by first output of file contract") } sco = cst.cs.db.getDelayedSiacoinOutputs(pb.Height+types.MaturityDelay, spoid2) if sco.Value.Cmp64(280e3) != 0 { t.Error("first siacoin output created has wrong value") } spoid3 := fcid2.StorageProofOutputID(types.ProofValid, 1) exists = cst.cs.db.inDelayedSiacoinOutputsHeight(pb.Height+types.MaturityDelay, spoid3) if !exists { t.Error("second output not created for storage proof") } sco = cst.cs.db.getDelayedSiacoinOutputs(pb.Height+types.MaturityDelay, spoid3) if sco.Value.Cmp64(300e3) != 0 { t.Error("second siacoin output has wrong value") } if cst.cs.siafundPool.Cmp64(30e3) != 0 { t.Error("siafund pool not being added up correctly") } } // TestNonexistentStorageProof applies a storage proof which points to a // nonexistent parent. func TestNonexistentStorageProof(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a block node to use with application. pb := new(processedBlock) // Trigger a panic by applying a storage proof for a nonexistent file // contract. defer func() { r := recover() if r != errNilItem { t.Error("no panic occurred when applying a storage proof for a nonexistent file contract") } }() txn := types.Transaction{ StorageProofs: []types.StorageProof{{}}, } cst.cs.applyStorageProofs(pb, txn) } // TestDuplicateStorageProof applies a storage proof which has already been // applied. func TestDuplicateStorageProof(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a block node. pb := new(processedBlock) pb.Height = cst.cs.height() // Create a file contract for the storage proof to prove. txn0 := types.Transaction{ FileContracts: []types.FileContract{ { Payout: types.NewCurrency64(300e3), ValidProofOutputs: []types.SiacoinOutput{ {Value: types.NewCurrency64(290e3)}, }, }, }, } cst.cs.applyFileContracts(pb, txn0) fcid := txn0.FileContractID(0) // Apply a single storage proof. txn1 := types.Transaction{ StorageProofs: []types.StorageProof{{ParentID: fcid}}, } cst.cs.applyStorageProofs(pb, txn1) // Trigger a panic by applying the storage proof again. defer func() { r := recover() if r != ErrDuplicateValidProofOutput { t.Error("failed to trigger ErrDuplicateValidProofOutput:", r) } }() cst.cs.applyFileContracts(pb, txn0) // File contract was consumed by the first proof. cst.cs.applyStorageProofs(pb, txn1) } // TestApplySiafundInputs probes the applySiafundInputs method of the consensus // set.
func TestApplySiafundInputs(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a block node to use with application. pb := new(processedBlock) pb.Height = cst.cs.height() // Fetch the output id's of each siafund output in the consensus set. var ids []types.SiafundOutputID cst.cs.db.forEachSiafundOutputs(func(sfoid types.SiafundOutputID, sfo types.SiafundOutput) { ids = append(ids, sfoid) }) // Apply a transaction with a single siafund input. txn := types.Transaction{ SiafundInputs: []types.SiafundInput{ {ParentID: ids[0]}, }, } cst.cs.applySiafundInputs(pb, txn) exists := cst.cs.db.inSiafundOutputs(ids[0]) if exists { t.Error("Failed to consume a siafund output") } if cst.cs.db.lenSiafundOutputs() != 2 { t.Error("siafund outputs not correctly updated", cst.cs.db.lenSiafundOutputs()) } if len(pb.SiafundOutputDiffs) != 1 { t.Error("block node was not updated for single transaction") } if pb.SiafundOutputDiffs[0].Direction != modules.DiffRevert { t.Error("wrong diff direction applied when consuming a siafund output") } if pb.SiafundOutputDiffs[0].ID != ids[0] { t.Error("wrong id used when consuming a siafund output") } if cst.cs.db.lenDelayedSiacoinOutputsHeight(cst.cs.height()+types.MaturityDelay) != 2 { // 1 for a block subsidy, 1 for the siafund claim. t.Error("siafund claim was not created") } } // TestMisuseApplySiafundInputs misuses applySiafundInputs and checks that a // panic was triggered. func TestMisuseApplySiafundInputs(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a block node to use with application. pb := new(processedBlock) pb.Height = cst.cs.height() // Fetch the output id's of each siafund output in the consensus set. var ids []types.SiafundOutputID cst.cs.db.forEachSiafundOutputs(func(sfoid types.SiafundOutputID, sfo types.SiafundOutput) { ids = append(ids, sfoid) }) // Apply a transaction with a single siafund input. txn := types.Transaction{ SiafundInputs: []types.SiafundInput{ {ParentID: ids[0]}, }, } cst.cs.applySiafundInputs(pb, txn) // Trigger the panic that occurs when an output is applied incorrectly, and // perform a catch to read the error that is created. defer func() { r := recover() if r != ErrMisuseApplySiafundInput { t.Error("no panic occurred when misusing applySiafundInputs") t.Error(r) } }() cst.cs.applySiafundInputs(pb, txn) } // TestApplySiafundOutputs probes the applySiafundOutputs method of the // consensus set. func TestApplySiafundOutputs(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() cst.cs.siafundPool = types.NewCurrency64(101) // Create a block node to use with application. pb := new(processedBlock) // Apply a transaction with a single siafund output.
txn := types.Transaction{ SiafundOutputs: []types.SiafundOutput{{}}, } cst.cs.applySiafundOutputs(pb, txn) sfoid := txn.SiafundOutputID(0) exists := cst.cs.db.inSiafundOutputs(sfoid) if !exists { t.Error("Failed to create siafund output") } if cst.cs.db.lenSiafundOutputs() != 4 { t.Error("siafund outputs not correctly updated") } if len(pb.SiafundOutputDiffs) != 1 { t.Error("block node was not updated for single element transaction") } if pb.SiafundOutputDiffs[0].Direction != modules.DiffApply { t.Error("wrong diff direction applied when creating a siafund output") } if pb.SiafundOutputDiffs[0].ID != sfoid { t.Error("wrong id used when creating a siafund output") } if pb.SiafundOutputDiffs[0].SiafundOutput.ClaimStart.Cmp64(101) != 0 { t.Error("claim start set incorrectly when creating a siafund output") } // Apply a transaction with 2 siafund outputs. txn = types.Transaction{ SiafundOutputs: []types.SiafundOutput{ {Value: types.NewCurrency64(1)}, {Value: types.NewCurrency64(2)}, }, } cst.cs.applySiafundOutputs(pb, txn) sfoid0 := txn.SiafundOutputID(0) sfoid1 := txn.SiafundOutputID(1) exists = cst.cs.db.inSiafundOutputs(sfoid0) if !exists { t.Error("Failed to create siafund output") } exists = cst.cs.db.inSiafundOutputs(sfoid1) if !exists { t.Error("Failed to create siafund output") } if cst.cs.db.lenSiafundOutputs() != 6 { t.Error("siafund outputs not correctly updated") } if len(pb.SiafundOutputDiffs) != 3 { t.Error("block node was not updated for single element transaction") } } // TestMisuseApplySiafundOutputs misuses applySiafundOutputs and checks that a // panic was triggered. func TestMisuseApplySiafundOutputs(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a block node to use with application. pb := new(processedBlock) // Apply a transaction with a single siafund output. txn := types.Transaction{ SiafundOutputs: []types.SiafundOutput{{}}, } cst.cs.applySiafundOutputs(pb, txn) // Trigger the panic that occurs when an output is applied incorrectly, and // perform a catch to read the error that is created. defer func() { r := recover() if r != ErrMisuseApplySiafundOutput { t.Error("no panic occurred when misusing applySiafundOutputs") } }() cst.cs.applySiafundOutputs(pb, txn) } */ Sia-1.3.0/modules/consensus/block_rules.go000066400000000000000000000035411313565667000205710ustar00rootroot00000000000000package consensus import ( "sort" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/types" ) // blockRuleHelper assists with block validity checks by calculating values // on blocks that are relevant to validity rules. type blockRuleHelper interface { minimumValidChildTimestamp(dbBucket, *processedBlock) types.Timestamp } // stdBlockRuleHelper is the standard implementation of blockRuleHelper. type stdBlockRuleHelper struct{} // minimumValidChildTimestamp returns the earliest timestamp that a child node // can have while still being valid. See section 'Block Timestamps' in // Consensus.md. // // To boost performance, minimumValidChildTimestamp is passed a bucket that it // can use from inside of a boltdb transaction. func (rh stdBlockRuleHelper) minimumValidChildTimestamp(blockMap dbBucket, pb *processedBlock) types.Timestamp { // Get the previous MedianTimestampWindow timestamps.
windowTimes := make(types.TimestampSlice, types.MedianTimestampWindow) windowTimes[0] = pb.Block.Timestamp parent := pb.Block.ParentID for i := uint64(1); i < types.MedianTimestampWindow; i++ { // If the genesis block is 'parent', use the genesis block timestamp // for all remaining times. if parent == (types.BlockID{}) { windowTimes[i] = windowTimes[i-1] continue } // Get the next parent's bytes. Because the ordering is specific, the // parent does not need to be decoded entirely to get the desired // information. This provides a performance boost. The id of the next // parent lies at the first 32 bytes, and the timestamp of the block // lies at bytes 40-48. parentBytes := blockMap.Get(parent[:]) copy(parent[:], parentBytes[:32]) windowTimes[i] = types.Timestamp(encoding.DecUint64(parentBytes[40:48])) } sort.Sort(windowTimes) // Return the median of the sorted timestamps. return windowTimes[len(windowTimes)/2] } Sia-1.3.0/modules/consensus/block_validation.go000066400000000000000000000073411313565667000215730ustar00rootroot00000000000000package consensus import ( "bytes" "errors" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" ) var ( errBadMinerPayouts = errors.New("miner payout sum does not equal block subsidy") errEarlyTimestamp = errors.New("block timestamp is too early") errExtremeFutureTimestamp = errors.New("block timestamp too far in future, discarded") errFutureTimestamp = errors.New("block timestamp too far in future, but saved for later use") errLargeBlock = errors.New("block is too large to be accepted") ) // blockValidator validates a Block against a set of block validity rules. type blockValidator interface { // ValidateBlock validates a block against a minimum timestamp, a block // target, and a block height. ValidateBlock(types.Block, types.BlockID, types.Timestamp, types.Target, types.BlockHeight, *persist.Logger) error } // stdBlockValidator is the standard implementation of blockValidator. type stdBlockValidator struct { // clock is a Clock interface that indicates the current system time. clock types.Clock // marshaler encodes and decodes between objects and byte slices. marshaler marshaler } // NewBlockValidator creates a new stdBlockValidator with default settings. func NewBlockValidator() stdBlockValidator { return stdBlockValidator{ clock: types.StdClock{}, marshaler: stdMarshaler{}, } } // checkMinerPayouts compares a block's miner payouts to the block's subsidy and // returns true if they are equal. func checkMinerPayouts(b types.Block, height types.BlockHeight) bool { // Add up the payouts and check that all values are legal. var payoutSum types.Currency for _, payout := range b.MinerPayouts { if payout.Value.IsZero() { return false } payoutSum = payoutSum.Add(payout.Value) } return b.CalculateSubsidy(height).Equals(payoutSum) } // checkTarget returns true if the block's ID meets the given target. func checkTarget(b types.Block, id types.BlockID, target types.Target) bool { return bytes.Compare(target[:], id[:]) >= 0 } // ValidateBlock validates a block against a minimum timestamp, a block target, // and a block height. Returns nil if the block is valid and an appropriate // error otherwise. func (bv stdBlockValidator) ValidateBlock(b types.Block, id types.BlockID, minTimestamp types.Timestamp, target types.Target, height types.BlockHeight, log *persist.Logger) error { // Check that the timestamp is not too far in the past to be acceptable. 
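// minTimestamp is computed by the caller (see minimumValidChildTimestamp in // block_rules.go) as the median of the previous MedianTimestampWindow block // timestamps, which keeps miners from arbitrarily backdating blocks.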
if minTimestamp > b.Timestamp { return errEarlyTimestamp } // Check that the target of the new block is sufficient. if !checkTarget(b, id, target) { return modules.ErrBlockUnsolved } // Check that the block is below the size limit. blockSize := len(bv.marshaler.Marshal(b)) if uint64(blockSize) > types.BlockSizeLimit { return errLargeBlock } // Check if the block is in the extreme future. We make a distinction between // future and extreme future because there is an assumption that by the time // the extreme future arrives, this block will no longer be a part of the // longest fork because it will have been ignored by all of the miners. if b.Timestamp > bv.clock.Now()+types.ExtremeFutureThreshold { return errExtremeFutureTimestamp } // Verify that the miner payouts are valid. if !checkMinerPayouts(b, height) { return errBadMinerPayouts } // Check if the block is in the near future, but too far to be acceptable. // This is the last check because it's an expensive check, and not worth // performing if the payouts are incorrect. if b.Timestamp > bv.clock.Now()+types.FutureThreshold { return errFutureTimestamp } if log != nil { log.Debugf("validated block at height %v, block size: %vB", height, blockSize) } return nil } Sia-1.3.0/modules/consensus/block_validation_test.go000066400000000000000000000105641313565667000226330ustar00rootroot00000000000000package consensus import ( "testing" "github.com/NebulousLabs/Sia/types" ) // mockMarshaler is a mock implementation of the encoding.GenericMarshaler // interface that allows the client to pre-define the length of the marshaled // data. type mockMarshaler struct { marshalLength uint64 } // Marshal marshals an object into an empty byte slice of marshalLength. func (m mockMarshaler) Marshal(interface{}) []byte { return make([]byte, m.marshalLength) } // Unmarshal is not implemented. func (m mockMarshaler) Unmarshal([]byte, interface{}) error { panic("not implemented") } // mockClock is a mock implementation of the types.Clock interface that allows // the client to pre-define a return value for Now(). type mockClock struct { now types.Timestamp } // Now returns mockClock's pre-defined Timestamp. func (c mockClock) Now() types.Timestamp { return c.now } var validateBlockTests = []struct { now types.Timestamp minTimestamp types.Timestamp blockTimestamp types.Timestamp blockSize uint64 errWant error msg string }{ { minTimestamp: types.Timestamp(5), blockTimestamp: types.Timestamp(4), errWant: errEarlyTimestamp, msg: "ValidateBlock should reject blocks with timestamps that are too early", }, { blockSize: types.BlockSizeLimit + 1, errWant: errLargeBlock, msg: "ValidateBlock should reject excessively large blocks", }, { now: types.Timestamp(50), blockTimestamp: types.Timestamp(50) + types.ExtremeFutureThreshold + 1, errWant: errExtremeFutureTimestamp, msg: "ValidateBlock should reject blocks timestamped in the extreme future", }, } // TestUnitValidateBlock runs a series of unit tests for ValidateBlock. func TestUnitValidateBlock(t *testing.T) { // TODO(mtlynch): Populate all parameters to ValidateBlock so that everything // is valid except for the attribute that causes validation to fail. (i.e. // don't assume an ordering to the implementation of the validation function). 
for _, tt := range validateBlockTests { b := types.Block{ Timestamp: tt.blockTimestamp, } blockValidator := stdBlockValidator{ marshaler: mockMarshaler{ marshalLength: tt.blockSize, }, clock: mockClock{ now: tt.now, }, } err := blockValidator.ValidateBlock(b, b.ID(), tt.minTimestamp, types.RootDepth, 0, nil) if err != tt.errWant { t.Errorf("%s: got %v, want %v", tt.msg, err, tt.errWant) } } } // TestCheckMinerPayouts probes the checkMinerPayouts function. func TestCheckMinerPayouts(t *testing.T) { // All tests are done at height = 0. coinbase := types.CalculateCoinbase(0) // Create a block with a single valid payout. b := types.Block{ MinerPayouts: []types.SiacoinOutput{ {Value: coinbase}, }, } if !checkMinerPayouts(b, 0) { t.Error("payouts evaluated incorrectly when there is only one payout.") } // Try a block with an incorrect payout. b = types.Block{ MinerPayouts: []types.SiacoinOutput{ {Value: coinbase.Sub(types.NewCurrency64(1))}, }, } if checkMinerPayouts(b, 0) { t.Error("payouts evaluated incorrectly when there is a too-small payout") } // Try a block with 2 payouts. b = types.Block{ MinerPayouts: []types.SiacoinOutput{ {Value: coinbase.Sub(types.NewCurrency64(1))}, {Value: types.NewCurrency64(1)}, }, } if !checkMinerPayouts(b, 0) { t.Error("payouts evaluated incorrectly when there are 2 payouts") } // Try a block with 2 payouts that are too large. b = types.Block{ MinerPayouts: []types.SiacoinOutput{ {Value: coinbase}, {Value: coinbase}, }, } if checkMinerPayouts(b, 0) { t.Error("payouts evaluated incorrectly when there are two large payouts") } // Create a block with an empty payout. b = types.Block{ MinerPayouts: []types.SiacoinOutput{ {Value: coinbase}, {}, }, } if checkMinerPayouts(b, 0) { t.Error("payouts evaluated incorrectly when there is an empty payout.") } } // TestCheckTarget probes the checkTarget function. func TestCheckTarget(t *testing.T) { var b types.Block lowTarget := types.RootDepth highTarget := types.Target{} sameTarget := types.Target(b.ID()) if !checkTarget(b, b.ID(), lowTarget) { t.Error("CheckTarget failed for a low target") } if checkTarget(b, b.ID(), highTarget) { t.Error("CheckTarget passed for a high target") } if !checkTarget(b, b.ID(), sameTarget) { t.Error("CheckTarget failed for a same target") } } Sia-1.3.0/modules/consensus/changelog.go000066400000000000000000000112441313565667000202130ustar00rootroot00000000000000package consensus // changelog.go implements a persistent changelog in the consensus database // tracking all of the atomic changes to the consensus set. The primary use of // the changelog is for subscribers that have persistence - instead of // subscribing from the very beginning and receiving all changes from genesis // each time the daemon starts up, the subscribers can start from the most // recent change that they are familiar with. // // The changelog is set up as a singly linked list where each change points // forward to the next change. In bolt, the key is a hash of the changeEntry // and the value is a struct containing the changeEntry and the key of the next // changeEntry. The empty hash key leads to the 'changeTail', which contains // the id of the most recent changeEntry. // // Initialization only needs to worry about creating the blank change entry, // the genesis block will call 'append' later on during initialization.
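// As a rough usage sketch (assuming a bolt.Tx 'tx' and a starting id 'start' // known to exist in the log; 'handleUpdate' is a placeholder for whatever the // subscriber does with each atomic change), the log can be replayed with: // entry, exists := getEntry(tx, start); for exists { handleUpdate(entry.RevertedBlocks, entry.AppliedBlocks); entry, exists = entry.NextEntry(tx) }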
import ( "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) var ( // ChangeLog contains a list of atomic changes that have happened to the // consensus set so that subscribers can subscribe from the most recent // change they have seen. ChangeLog = []byte("ChangeLog") // ChangeLogTailID is a key that points to the id of the current changelog // tail. ChangeLogTailID = []byte("ChangeLogTailID") ) type ( // changeEntry records a single atomic change to the consensus set. changeEntry struct { RevertedBlocks []types.BlockID AppliedBlocks []types.BlockID } // changeNode contains a change entry and a pointer to the next change // entry, and is the object that gets stored in the database. changeNode struct { Entry changeEntry Next modules.ConsensusChangeID } ) // appendChangeLog adds a new change entry to the change log. func appendChangeLog(tx *bolt.Tx, ce changeEntry) error { // Insert the change entry. cl := tx.Bucket(ChangeLog) ceid := ce.ID() cn := changeNode{Entry: ce, Next: modules.ConsensusChangeID{}} err := cl.Put(ceid[:], encoding.Marshal(cn)) if err != nil { return err } // Update the tail node to point to the new change entry as the next entry. var tailID modules.ConsensusChangeID copy(tailID[:], cl.Get(ChangeLogTailID)) if tailID != (modules.ConsensusChangeID{}) { // Get the old tail node. var tailCN changeNode tailCNBytes := cl.Get(tailID[:]) err = encoding.Unmarshal(tailCNBytes, &tailCN) if err != nil { return err } // Point the 'next' of the old tail node to the new tail node and // insert. tailCN.Next = ceid err = cl.Put(tailID[:], encoding.Marshal(tailCN)) if err != nil { return err } } // Update the tail id. err = cl.Put(ChangeLogTailID, ceid[:]) if err != nil { return err } return nil } // getEntry returns the change entry with a given id, using a bool to indicate // existence. func getEntry(tx *bolt.Tx, id modules.ConsensusChangeID) (ce changeEntry, exists bool) { var cn changeNode cl := tx.Bucket(ChangeLog) changeNodeBytes := cl.Get(id[:]) if changeNodeBytes == nil { return changeEntry{}, false } err := encoding.Unmarshal(changeNodeBytes, &cn) if build.DEBUG && err != nil { panic(err) } return cn.Entry, true } // ID returns the id of a change entry. func (ce *changeEntry) ID() modules.ConsensusChangeID { return modules.ConsensusChangeID(crypto.HashObject(ce)) } // NextEntry returns the entry after the current entry. func (ce *changeEntry) NextEntry(tx *bolt.Tx) (nextEntry changeEntry, exists bool) { // Get the change node associated with the provided change entry. ceid := ce.ID() var cn changeNode cl := tx.Bucket(ChangeLog) changeNodeBytes := cl.Get(ceid[:]) err := encoding.Unmarshal(changeNodeBytes, &cn) if build.DEBUG && err != nil { panic(err) } return getEntry(tx, cn.Next) } // createChangeLog assumes that no change log exists and creates a new one. func (cs *ConsensusSet) createChangeLog(tx *bolt.Tx) error { // Create the changelog bucket. cl, err := tx.CreateBucket(ChangeLog) if err != nil { return err } // Add the genesis block as the first entry of the change log. ge := cs.genesisEntry() geid := ge.ID() cn := changeNode{ Entry: ge, Next: modules.ConsensusChangeID{}, } err = cl.Put(geid[:], encoding.Marshal(cn)) if err != nil { return err } err = cl.Put(ChangeLogTailID, geid[:]) if err != nil { return err } return nil } // genesisEntry returns the id of the genesis block log entry. 
func (cs *ConsensusSet) genesisEntry() changeEntry { return changeEntry{ AppliedBlocks: []types.BlockID{cs.blockRoot.Block.ID()}, } } Sia-1.3.0/modules/consensus/changelog_test.go000066400000000000000000000040501313565667000212470ustar00rootroot00000000000000package consensus import ( "testing" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // TestIntegrationChangeLog does a general test of the changelog by creating a // subscriber that subscribes partway into startup and checking that the // correct ordering of blocks are provided. func TestIntegrationChangeLog(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // Get a blank consensus set tester so that the mocked subscriber can join // immediately after genesis. cst, err := blankConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Add a mocked subscriber and check that it receives the correct number of // blocks. ms := newMockSubscriber() cst.cs.ConsensusSetSubscribe(&ms, modules.ConsensusChangeBeginning) if ms.updates[0].AppliedBlocks[0].ID() != cst.cs.blockRoot.Block.ID() { t.Fatal("subscription did not correctly receive the genesis block") } if len(ms.updates) != 1 { t.Fatal("subscription resulted in the wrong number of blocks being sent") } // Create a copy of the subscriber that will subscribe to the consensus at // the tail of the updates. tailSubscriber := ms.copySub() cst.cs.ConsensusSetSubscribe(&tailSubscriber, tailSubscriber.updates[len(tailSubscriber.updates)-1].ID) if len(tailSubscriber.updates) != 1 { t.Fatal("subscription resulted in the wrong number of blocks being sent") } // Create a copy of the subscriber that will join when it is not at 0, but it is behind. behindSubscriber := ms.copySub() cst.addSiafunds() cst.mineSiacoins() cst.cs.ConsensusSetSubscribe(&behindSubscriber, behindSubscriber.updates[len(behindSubscriber.updates)-1].ID) if types.BlockHeight(len(behindSubscriber.updates)) != cst.cs.dbBlockHeight()+1 { t.Fatal("subscription resulted in the wrong number of blocks being sent") } if len(ms.updates) != len(tailSubscriber.updates) { t.Error("subscribers have inconsistent update chains") } if len(ms.updates) != len(behindSubscriber.updates) { t.Error("subscribers have inconsistent update chains") } } Sia-1.3.0/modules/consensus/consensusdb.go000066400000000000000000000427111313565667000206150ustar00rootroot00000000000000package consensus // consensusdb.go contains all of the functions related to performing consensus // related actions on the database, including initializing the consensus // portions of the database. Many errors cause panics instead of being handled // gracefully, but only when the debug flag is set. The errors are silently // ignored otherwise, which is suboptimal. import ( "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) var ( prefixDSCO = []byte("dsco_") prefixFCEX = []byte("fcex_") // BlockHeight is a bucket that stores the current block height. // // Generally we would just look at BlockPath.Stats(), but there is an error // in boltdb that prevents the bucket stats from updating until a tx is // committed. Wasn't a problem until we started doing the entire block as // one tx. // // DEPRECATED - block.Stats() should be sufficient to determine the block // height, but currently stats are only computed after committing a // transaction, therefore cannot be assumed reliable. 
BlockHeight = []byte("BlockHeight") // BlockMap is a database bucket containing all of the processed blocks, // keyed by their id. This includes blocks that are not currently in the // consensus set, and blocks that may not have been fully validated yet. BlockMap = []byte("BlockMap") // BlockPath is a database bucket containing a mapping from the height of a // block to the id of the block at that height. BlockPath only includes // blocks in the current path. BlockPath = []byte("BlockPath") // BucketOak is the database bucket that contains all of the fields related // to the oak difficulty adjustment algorithm. The cumulative difficulty and // time values are stored for each block id, and then the key "OakInit" // contains the value "true" if the oak fields have been properly // initialized. BucketOak = []byte("Oak") // Consistency is a database bucket with a flag indicating whether // inconsistencies within the database have been detected. Consistency = []byte("Consistency") // SiacoinOutputs is a database bucket that contains all of the unspent // siacoin outputs. SiacoinOutputs = []byte("SiacoinOutputs") // FileContracts is a database bucket that contains all of the open file // contracts. FileContracts = []byte("FileContracts") // SiafundOutputs is a database bucket that contains all of the unspent // siafund outputs. SiafundOutputs = []byte("SiafundOutputs") // SiafundPool is a database bucket storing the current value of the // siafund pool. SiafundPool = []byte("SiafundPool") ) var ( // FieldOakInit is a field in BucketOak that gets set to "true" after the // oak initialization process has completed. FieldOakInit = []byte("OakInit") ) var ( // ValueOakInit is the value that the oak init field is set to if the oak // difficulty adjustment fields have been correctly initialized. ValueOakInit = []byte("true") ) // createConsensusDB initializes the consensus portions of the database. func (cs *ConsensusSet) createConsensusDB(tx *bolt.Tx) error { // Enumerate and create the database buckets. buckets := [][]byte{ BlockHeight, BlockMap, BlockPath, Consistency, SiacoinOutputs, FileContracts, SiafundOutputs, SiafundPool, } for _, bucket := range buckets { _, err := tx.CreateBucket(bucket) if err != nil { return err } } // Set the block height to -1, so the genesis block is at height 0. Because // BlockHeight is unsigned, the subtraction wraps to the maximum value; the // pushPath call below increments it back to 0 for the genesis block. blockHeight := tx.Bucket(BlockHeight) underflow := types.BlockHeight(0) err := blockHeight.Put(BlockHeight, encoding.Marshal(underflow-1)) if err != nil { return err } // Set the siafund pool to 0. setSiafundPool(tx, types.NewCurrency64(0)) // Update the siafund output diffs map for the genesis block on disk. This // needs to happen between the database being opened/initialized and the // consensus set hash being calculated. for _, sfod := range cs.blockRoot.SiafundOutputDiffs { commitSiafundOutputDiff(tx, sfod, modules.DiffApply) } // Add the miner payout from the genesis block to the delayed siacoin // outputs - unspendable, as the unlock hash is blank. createDSCOBucket(tx, types.MaturityDelay) addDSCO(tx, types.MaturityDelay, cs.blockRoot.Block.MinerPayoutID(0), types.SiacoinOutput{ Value: types.CalculateCoinbase(0), UnlockHash: types.UnlockHash{}, }) // Add the genesis block to the block structures - checksum must be taken // after pushing the genesis block into the path. pushPath(tx, cs.blockRoot.Block.ID()) if build.DEBUG { cs.blockRoot.ConsensusChecksum = consensusChecksum(tx) } addBlockMap(tx, &cs.blockRoot) return nil } // blockHeight returns the height of the blockchain.
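// Callers must already hold a bolt transaction; the View-wrapped form used by
// the test helpers looks like this (hedged sketch mirroring dbBlockHeight in
// consensusdb_helpers_test.go):
//
//	_ = cs.db.View(func(tx *bolt.Tx) error {
//		height := blockHeight(tx)
//		_ = height
//		return nil
//	})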
func blockHeight(tx *bolt.Tx) types.BlockHeight { var height types.BlockHeight bh := tx.Bucket(BlockHeight) err := encoding.Unmarshal(bh.Get(BlockHeight), &height) if build.DEBUG && err != nil { panic(err) } return height } // currentBlockID returns the id of the most recent block in the consensus set. func currentBlockID(tx *bolt.Tx) types.BlockID { id, err := getPath(tx, blockHeight(tx)) if build.DEBUG && err != nil { panic(err) } return id } // dbCurrentBlockID is a convenience function allowing currentBlockID to be // called without a bolt.Tx. func (cs *ConsensusSet) dbCurrentBlockID() (id types.BlockID) { dbErr := cs.db.View(func(tx *bolt.Tx) error { id = currentBlockID(tx) return nil }) if dbErr != nil { panic(dbErr) } return id } // currentProcessedBlock returns the most recent block in the consensus set. func currentProcessedBlock(tx *bolt.Tx) *processedBlock { pb, err := getBlockMap(tx, currentBlockID(tx)) if build.DEBUG && err != nil { panic(err) } return pb } // getBlockMap returns a processed block with the input id. func getBlockMap(tx *bolt.Tx, id types.BlockID) (*processedBlock, error) { // Look up the encoded block. pbBytes := tx.Bucket(BlockMap).Get(id[:]) if pbBytes == nil { return nil, errNilItem } // Decode the block - should never fail. var pb processedBlock err := encoding.Unmarshal(pbBytes, &pb) if build.DEBUG && err != nil { panic(err) } return &pb, nil } // addBlockMap adds a processed block to the block map. func addBlockMap(tx *bolt.Tx, pb *processedBlock) { id := pb.Block.ID() err := tx.Bucket(BlockMap).Put(id[:], encoding.Marshal(*pb)) if build.DEBUG && err != nil { panic(err) } } // getPath returns the block id at 'height' in the block path. func getPath(tx *bolt.Tx, height types.BlockHeight) (id types.BlockID, err error) { idBytes := tx.Bucket(BlockPath).Get(encoding.Marshal(height)) if idBytes == nil { return types.BlockID{}, errNilItem } err = encoding.Unmarshal(idBytes, &id) if build.DEBUG && err != nil { panic(err) } return id, nil } // pushPath adds a block to the BlockPath at current height + 1. func pushPath(tx *bolt.Tx, bid types.BlockID) { // Fetch and update the block height. bh := tx.Bucket(BlockHeight) heightBytes := bh.Get(BlockHeight) var oldHeight types.BlockHeight err := encoding.Unmarshal(heightBytes, &oldHeight) if build.DEBUG && err != nil { panic(err) } newHeightBytes := encoding.Marshal(oldHeight + 1) err = bh.Put(BlockHeight, newHeightBytes) if build.DEBUG && err != nil { panic(err) } // Add the block to the block path. bp := tx.Bucket(BlockPath) err = bp.Put(newHeightBytes, bid[:]) if build.DEBUG && err != nil { panic(err) } } // popPath removes a block from the "end" of the chain, i.e. the block // with the largest height. func popPath(tx *bolt.Tx) { // Fetch and update the block height. bh := tx.Bucket(BlockHeight) oldHeightBytes := bh.Get(BlockHeight) var oldHeight types.BlockHeight err := encoding.Unmarshal(oldHeightBytes, &oldHeight) if build.DEBUG && err != nil { panic(err) } newHeightBytes := encoding.Marshal(oldHeight - 1) err = bh.Put(BlockHeight, newHeightBytes) if build.DEBUG && err != nil { panic(err) } // Remove the block from the path - make sure to remove the block at // oldHeight. bp := tx.Bucket(BlockPath) err = bp.Delete(oldHeightBytes) if build.DEBUG && err != nil { panic(err) } } // isSiacoinOutput returns true if there is a siacoin output of that id in the // database. 
func isSiacoinOutput(tx *bolt.Tx, id types.SiacoinOutputID) bool { bucket := tx.Bucket(SiacoinOutputs) sco := bucket.Get(id[:]) return sco != nil } // getSiacoinOutput fetches a siacoin output from the database. An error is // returned if the siacoin output does not exist. func getSiacoinOutput(tx *bolt.Tx, id types.SiacoinOutputID) (types.SiacoinOutput, error) { scoBytes := tx.Bucket(SiacoinOutputs).Get(id[:]) if scoBytes == nil { return types.SiacoinOutput{}, errNilItem } var sco types.SiacoinOutput err := encoding.Unmarshal(scoBytes, &sco) if err != nil { return types.SiacoinOutput{}, err } return sco, nil } // addSiacoinOutput adds a siacoin output to the database. An error is returned // if the siacoin output is already in the database. func addSiacoinOutput(tx *bolt.Tx, id types.SiacoinOutputID, sco types.SiacoinOutput) { // While this is not supposed to be allowed, there's a bug in the consensus // code which means that earlier versions have accepted 0-value outputs // onto the blockchain. A hardfork to remove 0-value outputs will fix this; // that hardfork is planned but has not yet occurred. /* if build.DEBUG && sco.Value.IsZero() { panic("discovered a zero value siacoin output") } */ siacoinOutputs := tx.Bucket(SiacoinOutputs) // Sanity check - should not be adding an item that exists. if build.DEBUG && siacoinOutputs.Get(id[:]) != nil { panic("repeat siacoin output") } err := siacoinOutputs.Put(id[:], encoding.Marshal(sco)) if build.DEBUG && err != nil { panic(err) } } // removeSiacoinOutput removes a siacoin output from the database. An error is // returned if the siacoin output is not in the database prior to removal. func removeSiacoinOutput(tx *bolt.Tx, id types.SiacoinOutputID) { scoBucket := tx.Bucket(SiacoinOutputs) // Sanity check - should not be removing an item that is not in the db. if build.DEBUG && scoBucket.Get(id[:]) == nil { panic("nil siacoin output") } err := scoBucket.Delete(id[:]) if build.DEBUG && err != nil { panic(err) } } // getFileContract fetches a file contract from the database, returning an // error if it is not there. func getFileContract(tx *bolt.Tx, id types.FileContractID) (fc types.FileContract, err error) { fcBytes := tx.Bucket(FileContracts).Get(id[:]) if fcBytes == nil { return types.FileContract{}, errNilItem } err = encoding.Unmarshal(fcBytes, &fc) if err != nil { return types.FileContract{}, err } return fc, nil } // addFileContract adds a file contract to the database. An error is returned // if the file contract is already in the database. func addFileContract(tx *bolt.Tx, id types.FileContractID, fc types.FileContract) { // Add the file contract to the database. fcBucket := tx.Bucket(FileContracts) // Sanity check - should not be adding a zero-payout file contract. if build.DEBUG && fc.Payout.IsZero() { panic("adding zero-payout file contract") } // Sanity check - should not be adding a file contract already in the db. if build.DEBUG && fcBucket.Get(id[:]) != nil { panic("repeat file contract") } err := fcBucket.Put(id[:], encoding.Marshal(fc)) if build.DEBUG && err != nil { panic(err) } // Add an entry for when the file contract expires. expirationBucketID := append(prefixFCEX, encoding.Marshal(fc.WindowEnd)...) expirationBucket, err := tx.CreateBucketIfNotExists(expirationBucketID) if build.DEBUG && err != nil { panic(err) } err = expirationBucket.Put(id[:], []byte{}) if build.DEBUG && err != nil { panic(err) } } // removeFileContract removes a file contract from the database.
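// Besides the contract entry itself, it must delete the expiration entry that
// addFileContract created under the bucket id prefixFCEX + marshaled
// WindowEnd; the implementation below recovers that id directly from the raw
// contract bytes instead of unmarshalling the whole contract.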
func removeFileContract(tx *bolt.Tx, id types.FileContractID) { // Delete the file contract entry. fcBucket := tx.Bucket(FileContracts) fcBytes := fcBucket.Get(id[:]) // Sanity check - should not be removing a file contract not in the db. if build.DEBUG && fcBytes == nil { panic("nil file contract") } err := fcBucket.Delete(id[:]) if build.DEBUG && err != nil { panic(err) } // Delete the entry for the file contract's expiration. The portion of // 'fcBytes' used to determine the expiration bucket id is the // byte-representation of the file contract window end, which always // appears at bytes 48-56. expirationBucketID := append(prefixFCEX, fcBytes[48:56]...) expirationBucket := tx.Bucket(expirationBucketID) expirationBytes := expirationBucket.Get(id[:]) if expirationBytes == nil { panic(errNilItem) } err = expirationBucket.Delete(id[:]) if build.DEBUG && err != nil { panic(err) } } // The address of the devs. var devAddr = types.UnlockHash{243, 113, 199, 11, 206, 158, 184, 151, 156, 213, 9, 159, 89, 158, 196, 228, 252, 177, 78, 10, 252, 243, 31, 151, 145, 224, 62, 100, 150, 164, 192, 179} // getSiafundOutput fetches a siafund output from the database. An error is // returned if the siafund output does not exist. func getSiafundOutput(tx *bolt.Tx, id types.SiafundOutputID) (types.SiafundOutput, error) { sfoBytes := tx.Bucket(SiafundOutputs).Get(id[:]) if sfoBytes == nil { return types.SiafundOutput{}, errNilItem } var sfo types.SiafundOutput err := encoding.Unmarshal(sfoBytes, &sfo) if err != nil { return types.SiafundOutput{}, err } gsa := types.GenesisSiafundAllocation if sfo.UnlockHash == gsa[len(gsa)-1].UnlockHash && blockHeight(tx) > 10e3 { sfo.UnlockHash = devAddr } return sfo, nil } // addSiafundOutput adds a siafund output to the database. An error is returned // if the siafund output is already in the database. func addSiafundOutput(tx *bolt.Tx, id types.SiafundOutputID, sfo types.SiafundOutput) { siafundOutputs := tx.Bucket(SiafundOutputs) // Sanity check - should not be adding a siafund output with a value of // zero. if build.DEBUG && sfo.Value.IsZero() { panic("zero value siafund being added") } // Sanity check - should not be adding an item already in the db. if build.DEBUG && siafundOutputs.Get(id[:]) != nil { panic("repeat siafund output") } err := siafundOutputs.Put(id[:], encoding.Marshal(sfo)) if build.DEBUG && err != nil { panic(err) } } // removeSiafundOutput removes a siafund output from the database. An error is // returned if the siafund output is not in the database prior to removal. func removeSiafundOutput(tx *bolt.Tx, id types.SiafundOutputID) { sfoBucket := tx.Bucket(SiafundOutputs) if build.DEBUG && sfoBucket.Get(id[:]) == nil { panic("nil siafund output") } err := sfoBucket.Delete(id[:]) if build.DEBUG && err != nil { panic(err) } } // getSiafundPool returns the current value of the siafund pool. No error is // returned as the siafund pool should always be available. func getSiafundPool(tx *bolt.Tx) (pool types.Currency) { bucket := tx.Bucket(SiafundPool) poolBytes := bucket.Get(SiafundPool) // An error should only be returned if the object stored in the siafund // pool bucket is either unavailable or otherwise malformed. As this is a // developer error, a panic is appropriate. 
err := encoding.Unmarshal(poolBytes, &pool) if build.DEBUG && err != nil { panic(err) } return pool } // setSiafundPool updates the saved siafund pool on disk. func setSiafundPool(tx *bolt.Tx, c types.Currency) { err := tx.Bucket(SiafundPool).Put(SiafundPool, encoding.Marshal(c)) if build.DEBUG && err != nil { panic(err) } } // addDSCO adds a delayed siacoin output to the consensus set. func addDSCO(tx *bolt.Tx, bh types.BlockHeight, id types.SiacoinOutputID, sco types.SiacoinOutput) { // Sanity check - dsco should never have a value of zero. // An error in the consensus code means sometimes there are 0-value dscos // in the blockchain. A hardfork will fix this. /* if build.DEBUG && sco.Value.IsZero() { panic("zero-value dsco being added") } */ // Sanity check - output should not already be in the full set of outputs. if build.DEBUG && tx.Bucket(SiacoinOutputs).Get(id[:]) != nil { panic("dsco already in output set") } dscoBucketID := append(prefixDSCO, encoding.EncUint64(uint64(bh))...) dscoBucket := tx.Bucket(dscoBucketID) // Sanity check - should not be adding an item already in the db. if build.DEBUG && dscoBucket.Get(id[:]) != nil { panic(errRepeatInsert) } err := dscoBucket.Put(id[:], encoding.Marshal(sco)) if build.DEBUG && err != nil { panic(err) } } // removeDSCO removes a delayed siacoin output from the consensus set. func removeDSCO(tx *bolt.Tx, bh types.BlockHeight, id types.SiacoinOutputID) { bucketID := append(prefixDSCO, encoding.Marshal(bh)...) // Sanity check - should not remove an item not in the db. dscoBucket := tx.Bucket(bucketID) if build.DEBUG && dscoBucket.Get(id[:]) == nil { panic("nil dsco") } err := dscoBucket.Delete(id[:]) if build.DEBUG && err != nil { panic(err) } } // createDSCOBucket creates a bucket for the delayed siacoin outputs at the // input height. func createDSCOBucket(tx *bolt.Tx, bh types.BlockHeight) { bucketID := append(prefixDSCO, encoding.Marshal(bh)...) _, err := tx.CreateBucket(bucketID) if build.DEBUG && err != nil { panic(err) } } // deleteDSCOBucket deletes the bucket that held a set of delayed siacoin // outputs. func deleteDSCOBucket(tx *bolt.Tx, bh types.BlockHeight) { // Delete the bucket. bucketID := append(prefixDSCO, encoding.Marshal(bh)...) bucket := tx.Bucket(bucketID) if build.DEBUG && bucket == nil { panic(errNilBucket) } // TODO: Check that the bucket is empty. Using Stats() does not work at the // moment, as there is an error in the boltdb code. err := tx.DeleteBucket(bucketID) if build.DEBUG && err != nil { panic(err) } } Sia-1.3.0/modules/consensus/consensusdb_helpers_test.go000066400000000000000000000152441313565667000233770ustar00rootroot00000000000000package consensus // consensusdb_helpers_test.go contains a bunch of legacy functions to preserve // compatibility with the test suite. import ( "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) // dbBlockHeight is a convenience function allowing blockHeight to be called // without a bolt.Tx. func (cs *ConsensusSet) dbBlockHeight() (bh types.BlockHeight) { dbErr := cs.db.View(func(tx *bolt.Tx) error { bh = blockHeight(tx) return nil }) if dbErr != nil { panic(dbErr) } return bh } // dbCurrentProcessedBlock is a convenience function allowing // currentProcessedBlock to be called without a bolt.Tx.
func (cs *ConsensusSet) dbCurrentProcessedBlock() (pb *processedBlock) { dbErr := cs.db.View(func(tx *bolt.Tx) error { pb = currentProcessedBlock(tx) return nil }) if dbErr != nil { panic(dbErr) } return pb } // dbGetPath is a convenience function allowing getPath to be called without a // bolt.Tx. func (cs *ConsensusSet) dbGetPath(bh types.BlockHeight) (id types.BlockID, err error) { dbErr := cs.db.View(func(tx *bolt.Tx) error { id, err = getPath(tx, bh) return nil }) if dbErr != nil { panic(dbErr) } return id, err } // dbPushPath is a convenience function allowing pushPath to be called without a // bolt.Tx. func (cs *ConsensusSet) dbPushPath(bid types.BlockID) { dbErr := cs.db.Update(func(tx *bolt.Tx) error { pushPath(tx, bid) return nil }) if dbErr != nil { panic(dbErr) } } // dbGetBlockMap is a convenience function allowing getBlockMap to be called // without a bolt.Tx. func (cs *ConsensusSet) dbGetBlockMap(id types.BlockID) (pb *processedBlock, err error) { dbErr := cs.db.View(func(tx *bolt.Tx) error { pb, err = getBlockMap(tx, id) return nil }) if dbErr != nil { panic(dbErr) } return pb, err } // dbGetSiacoinOutput is a convenience function allowing getSiacoinOutput to be // called without a bolt.Tx. func (cs *ConsensusSet) dbGetSiacoinOutput(id types.SiacoinOutputID) (sco types.SiacoinOutput, err error) { dbErr := cs.db.View(func(tx *bolt.Tx) error { sco, err = getSiacoinOutput(tx, id) return nil }) if dbErr != nil { panic(dbErr) } return sco, err } // getArbSiacoinOutput is a convenience function fetching a single random // siacoin output from the database. func (cs *ConsensusSet) getArbSiacoinOutput() (scoid types.SiacoinOutputID, sco types.SiacoinOutput, err error) { dbErr := cs.db.View(func(tx *bolt.Tx) error { cursor := tx.Bucket(SiacoinOutputs).Cursor() scoidBytes, scoBytes := cursor.First() copy(scoid[:], scoidBytes) return encoding.Unmarshal(scoBytes, &sco) }) if dbErr != nil { panic(dbErr) } if err != nil { return types.SiacoinOutputID{}, types.SiacoinOutput{}, err } return scoid, sco, nil } // dbGetFileContract is a convenience function allowing getFileContract to be // called without a bolt.Tx. func (cs *ConsensusSet) dbGetFileContract(id types.FileContractID) (fc types.FileContract, err error) { dbErr := cs.db.View(func(tx *bolt.Tx) error { fc, err = getFileContract(tx, id) return nil }) if dbErr != nil { panic(dbErr) } return fc, err } // dbAddFileContract is a convenience function allowing addFileContract to be // called without a bolt.Tx. func (cs *ConsensusSet) dbAddFileContract(id types.FileContractID, fc types.FileContract) { dbErr := cs.db.Update(func(tx *bolt.Tx) error { addFileContract(tx, id, fc) return nil }) if dbErr != nil { panic(dbErr) } } // dbRemoveFileContract is a convenience function allowing removeFileContract // to be called without a bolt.Tx. func (cs *ConsensusSet) dbRemoveFileContract(id types.FileContractID) { dbErr := cs.db.Update(func(tx *bolt.Tx) error { removeFileContract(tx, id) return nil }) if dbErr != nil { panic(dbErr) } } // dbGetSiafundOutput is a convenience function allowing getSiafundOutput to be // called without a bolt.Tx. func (cs *ConsensusSet) dbGetSiafundOutput(id types.SiafundOutputID) (sfo types.SiafundOutput, err error) { dbErr := cs.db.View(func(tx *bolt.Tx) error { sfo, err = getSiafundOutput(tx, id) return nil }) if dbErr != nil { panic(dbErr) } return sfo, err } // dbAddSiafundOutput is a convenience function allowing addSiafundOutput to be // called without a bolt.Tx. 
func (cs *ConsensusSet) dbAddSiafundOutput(id types.SiafundOutputID, sfo types.SiafundOutput) { dbErr := cs.db.Update(func(tx *bolt.Tx) error { addSiafundOutput(tx, id, sfo) return nil }) if dbErr != nil { panic(dbErr) } } // dbGetSiafundPool is a convenience function allowing getSiafundPool to be // called without a bolt.Tx. func (cs *ConsensusSet) dbGetSiafundPool() (siafundPool types.Currency) { dbErr := cs.db.View(func(tx *bolt.Tx) error { siafundPool = getSiafundPool(tx) return nil }) if dbErr != nil { panic(dbErr) } return siafundPool } // dbGetDSCO is a convenience function allowing a delayed siacoin output to be // fetched without a bolt.Tx. An error is returned if the delayed output is not // found at the maturity height indicated by the input. func (cs *ConsensusSet) dbGetDSCO(height types.BlockHeight, id types.SiacoinOutputID) (dsco types.SiacoinOutput, err error) { dbErr := cs.db.View(func(tx *bolt.Tx) error { dscoBucketID := append(prefixDSCO, encoding.Marshal(height)...) dscoBucket := tx.Bucket(dscoBucketID) if dscoBucket == nil { err = errNilItem return nil } dscoBytes := dscoBucket.Get(id[:]) if dscoBytes == nil { err = errNilItem return nil } err = encoding.Unmarshal(dscoBytes, &dsco) if err != nil { panic(err) } return nil }) if dbErr != nil { panic(dbErr) } return dsco, err } // dbStorageProofSegment is a convenience function allowing // 'storageProofSegment' to be called during testing without a tx. func (cs *ConsensusSet) dbStorageProofSegment(fcid types.FileContractID) (index uint64, err error) { dbErr := cs.db.View(func(tx *bolt.Tx) error { index, err = storageProofSegment(tx, fcid) return nil }) if dbErr != nil { panic(dbErr) } return index, err } // dbValidStorageProofs is a convenience function allowing 'validStorageProofs' // to be called during testing without a tx. func (cs *ConsensusSet) dbValidStorageProofs(t types.Transaction) (err error) { dbErr := cs.db.View(func(tx *bolt.Tx) error { err = validStorageProofs(tx, t) return nil }) if dbErr != nil { panic(dbErr) } return err } // dbValidFileContractRevisions is a convenience function allowing // 'validFileContractRevisions' to be called during testing without a tx. func (cs *ConsensusSet) dbValidFileContractRevisions(t types.Transaction) (err error) { dbErr := cs.db.View(func(tx *bolt.Tx) error { err = validFileContractRevisions(tx, t) return nil }) if dbErr != nil { panic(dbErr) } return err } Sia-1.3.0/modules/consensus/consensusset.go000066400000000000000000000251171313565667000210240ustar00rootroot00000000000000package consensus // All changes to the consensus set are made via diffs, specifically by calling // a commitDiff function. This means that future modifications (such as // replacing in-memory versions of the utxo set with on-disk versions of the // utxo set) should be relatively easy to verify for correctness. Modifying the // commitDiff functions will be sufficient. import ( "errors" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/sync" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" "github.com/NebulousLabs/demotemutex" ) var ( errNilGateway = errors.New("cannot have a nil gateway as input") ) // marshaler marshals objects into byte slices and unmarshals byte // slices into objects.
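// Hiding encoding behind this interface lets tests substitute lightweight
// fakes: mockMarshaler in block_validation_test.go, for example, returns a
// slice of a preset length so the block-size check can be exercised without
// constructing a genuinely oversized block.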
type marshaler interface { Marshal(interface{}) []byte Unmarshal([]byte, interface{}) error } type stdMarshaler struct{} func (stdMarshaler) Marshal(v interface{}) []byte { return encoding.Marshal(v) } func (stdMarshaler) Unmarshal(b []byte, v interface{}) error { return encoding.Unmarshal(b, v) } // The ConsensusSet is the object responsible for tracking the current status // of the blockchain. Broadly speaking, it is responsible for maintaining // consensus. It accepts blocks and constructs a blockchain, forking when // necessary. type ConsensusSet struct { // The gateway manages peer connections and keeps the consensus set // synchronized to the rest of the network. gateway modules.Gateway // The block root contains the genesis block. blockRoot processedBlock // Subscribers to the consensus set will receive a changelog every time // there is an update to the consensus set. At initialization, they receive // all changes that they are missing. // // Memory: A consensus set typically has fewer than 10 subscribers, and // subscription typically happens entirely at startup. This slice is // unlikely to grow beyond 1kb, and cannot be manipulated by an attacker as // the function of adding a subscriber should not be exposed. subscribers []modules.ConsensusSetSubscriber // dosBlocks are blocks that are invalid, but the invalidity is only // discoverable during an expensive step of validation. These blocks are // recorded to eliminate a DoS vector where an expensive-to-validate block // is submitted to the consensus set repeatedly. // // TODO: dosBlocks needs to be moved into the database, and if there's some // reason it can't be in THE database, it should be in a separate database. // dosBlocks is an unbounded map that an attacker can manipulate, though // iirc manipulations are expensive, to the tune of creating a blockchain // PoW per DoS block (though the attacker could conceivably build off of // the genesis block, meaning the PoW is not very expensive). dosBlocks map[types.BlockID]struct{} // checkingConsistency is a bool indicating whether or not a consistency // check is in progress. The consistency check logic can call itself, // resulting in infinite loops. This bool prevents that while still // allowing for full granularity consistency checks. Previously, // consistency checks were only performed after a full reorg, but now they // are performed after every block. checkingConsistency bool // synced is true if initial blockchain download has finished. It indicates // whether the consensus set is synced with the network. synced bool // Interfaces to abstract the dependencies of the ConsensusSet. marshaler marshaler blockRuleHelper blockRuleHelper blockValidator blockValidator // Utilities db *persist.BoltDatabase log *persist.Logger mu demotemutex.DemoteMutex persistDir string tg sync.ThreadGroup } // New returns a new ConsensusSet, containing at least the genesis block. If // there is an existing block database present in the persist directory, it // will be loaded. func New(gateway modules.Gateway, bootstrap bool, persistDir string) (*ConsensusSet, error) { // Check for nil dependencies. if gateway == nil { return nil, errNilGateway } // Create the ConsensusSet object.
cs := &ConsensusSet{ gateway: gateway, blockRoot: processedBlock{ Block: types.GenesisBlock, ChildTarget: types.RootTarget, Depth: types.RootDepth, DiffsGenerated: true, }, dosBlocks: make(map[types.BlockID]struct{}), marshaler: stdMarshaler{}, blockRuleHelper: stdBlockRuleHelper{}, blockValidator: NewBlockValidator(), persistDir: persistDir, } // Create the diffs for the genesis siafund outputs. for i, siafundOutput := range types.GenesisBlock.Transactions[0].SiafundOutputs { sfid := types.GenesisBlock.Transactions[0].SiafundOutputID(uint64(i)) sfod := modules.SiafundOutputDiff{ Direction: modules.DiffApply, ID: sfid, SiafundOutput: siafundOutput, } cs.blockRoot.SiafundOutputDiffs = append(cs.blockRoot.SiafundOutputDiffs, sfod) } // Initialize the consensus persistence structures. err := cs.initPersist() if err != nil { return nil, err } go func() { // Sync with the network. Don't sync if we are testing because // typically we don't have any mock peers to synchronize with in // testing. if bootstrap { // We are in a virgin goroutine right now, so calling the threaded // function without a goroutine is okay. err = cs.threadedInitialBlockchainDownload() if err != nil { return } } // threadedInitialBlockchainDownload will release the threadgroup 'Add' // it was holding, so another needs to be grabbed to finish off this // goroutine. err = cs.tg.Add() if err != nil { return } defer cs.tg.Done() // Register RPCs gateway.RegisterRPC("SendBlocks", cs.rpcSendBlocks) gateway.RegisterRPC("RelayHeader", cs.threadedRPCRelayHeader) gateway.RegisterRPC("SendBlk", cs.rpcSendBlk) gateway.RegisterConnectCall("SendBlocks", cs.threadedReceiveBlocks) cs.tg.OnStop(func() { cs.gateway.UnregisterRPC("SendBlocks") cs.gateway.UnregisterRPC("RelayHeader") cs.gateway.UnregisterRPC("SendBlk") cs.gateway.UnregisterConnectCall("SendBlocks") }) // Mark that we are synced with the network. cs.mu.Lock() cs.synced = true cs.mu.Unlock() }() return cs, nil } // BlockAtHeight returns the block at a given height. func (cs *ConsensusSet) BlockAtHeight(height types.BlockHeight) (block types.Block, exists bool) { _ = cs.db.View(func(tx *bolt.Tx) error { id, err := getPath(tx, height) if err != nil { return err } pb, err := getBlockMap(tx, id) if err != nil { return err } block = pb.Block exists = true return nil }) return block, exists } // ChildTarget returns the target for the child of a block. func (cs *ConsensusSet) ChildTarget(id types.BlockID) (target types.Target, exists bool) { // A call to a closed database can cause undefined behavior. err := cs.tg.Add() if err != nil { return types.Target{}, false } defer cs.tg.Done() _ = cs.db.View(func(tx *bolt.Tx) error { pb, err := getBlockMap(tx, id) if err != nil { return err } target = pb.ChildTarget exists = true return nil }) return target, exists } // Close safely closes the block database. func (cs *ConsensusSet) Close() error { return cs.tg.Stop() } // managedCurrentBlock returns the latest block in the heaviest known blockchain. func (cs *ConsensusSet) managedCurrentBlock() (block types.Block) { cs.mu.RLock() defer cs.mu.RUnlock() _ = cs.db.View(func(tx *bolt.Tx) error { pb := currentProcessedBlock(tx) block = pb.Block return nil }) return block } // CurrentBlock returns the latest block in the heaviest known blockchain. func (cs *ConsensusSet) CurrentBlock() (block types.Block) { // A call to a closed database can cause undefined behavior. 
err := cs.tg.Add() if err != nil { return types.Block{} } defer cs.tg.Done() // Block until a lock can be grabbed on the consensus set, indicating that // all modules have received the most recent block. The lock is held so that // there are no race conditions when trying to synchronize nodes. cs.mu.Lock() defer cs.mu.Unlock() _ = cs.db.View(func(tx *bolt.Tx) error { pb := currentProcessedBlock(tx) block = pb.Block return nil }) return block } // Flush will block until the consensus set has finished all in-progress // routines. func (cs *ConsensusSet) Flush() error { return cs.tg.Flush() } // Height returns the height of the consensus set. func (cs *ConsensusSet) Height() (height types.BlockHeight) { // A call to a closed database can cause undefined behavior. err := cs.tg.Add() if err != nil { return 0 } defer cs.tg.Done() // Block until a lock can be grabbed on the consensus set, indicating that // all modules have received the most recent block. The lock is held so that // there are no race conditions when trying to synchronize nodes. cs.mu.Lock() defer cs.mu.Unlock() _ = cs.db.View(func(tx *bolt.Tx) error { height = blockHeight(tx) return nil }) return height } // InCurrentPath returns true if the block presented is in the current path, // false otherwise. func (cs *ConsensusSet) InCurrentPath(id types.BlockID) (inPath bool) { // A call to a closed database can cause undefined behavior. err := cs.tg.Add() if err != nil { return false } defer cs.tg.Done() _ = cs.db.View(func(tx *bolt.Tx) error { pb, err := getBlockMap(tx, id) if err != nil { inPath = false return nil } pathID, err := getPath(tx, pb.Height) if err != nil { inPath = false return nil } inPath = pathID == id return nil }) return inPath } // MinimumValidChildTimestamp returns the earliest timestamp that the next block // can have in order for it to be considered valid. func (cs *ConsensusSet) MinimumValidChildTimestamp(id types.BlockID) (timestamp types.Timestamp, exists bool) { // A call to a closed database can cause undefined behavior. err := cs.tg.Add() if err != nil { return 0, false } defer cs.tg.Done() // Error is not checked because it does not matter. _ = cs.db.View(func(tx *bolt.Tx) error { pb, err := getBlockMap(tx, id) if err != nil { return err } timestamp = cs.blockRuleHelper.minimumValidChildTimestamp(tx.Bucket(BlockMap), pb) exists = true return nil }) return timestamp, exists } // StorageProofSegment returns the segment to be used in the storage proof for // a given file contract. func (cs *ConsensusSet) StorageProofSegment(fcid types.FileContractID) (index uint64, err error) { // A call to a closed database can cause undefined behavior. err = cs.tg.Add() if err != nil { return 0, err } defer cs.tg.Done() _ = cs.db.View(func(tx *bolt.Tx) error { index, err = storageProofSegment(tx, fcid) return nil }) return index, err } Sia-1.3.0/modules/consensus/consensusset_bench_test.go000066400000000000000000000011041313565667000232100ustar00rootroot00000000000000package consensus import ( "strconv" "testing" ) // BenchmarkCreateServerTester benchmarks creating a server tester from // scratch. The consensus package creates over 60 server testers (and // counting), and optimizations to the server tester creation process are // likely to generalize to the project as a whole. 
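//
// A hedged usage note: the benchmark can be run on its own with the standard
// Go tooling, e.g.
//
//	go test -run=NONE -bench=BenchmarkCreateServerTester ./modules/consensus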
// // i7-4770, 1d60d69: 22.883 ms / op func BenchmarkCreateServerTester(b *testing.B) { for i := 0; i < b.N; i++ { cst, err := createConsensusSetTester(b.Name() + strconv.Itoa(b.N) + strconv.Itoa(i)) if err != nil { b.Fatal(err) } cst.Close() } } Sia-1.3.0/modules/consensus/consensusset_test.go000066400000000000000000000117641313565667000220660ustar00rootroot00000000000000package consensus import ( "path/filepath" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/gateway" "github.com/NebulousLabs/Sia/modules/miner" "github.com/NebulousLabs/Sia/modules/transactionpool" "github.com/NebulousLabs/Sia/modules/wallet" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) // A consensusSetTester is the helper object for consensus set testing, // including helper modules and methods for controlling synchronization between // the tester and the modules. type consensusSetTester struct { gateway modules.Gateway miner modules.TestMiner tpool modules.TransactionPool wallet modules.Wallet walletKey crypto.TwofishKey cs *ConsensusSet persistDir string } // randAddress returns a random address that is not spendable. func randAddress() (uh types.UnlockHash) { fastrand.Read(uh[:]) return } // addSiafunds makes a transaction that moves some testing genesis siafunds // into the wallet. func (cst *consensusSetTester) addSiafunds() { // Get an address to receive the siafunds. uc, err := cst.wallet.NextAddress() if err != nil { panic(err) } // Create the transaction that sends the anyone-can-spend siafund output to // the wallet address (output only available during testing). txn := types.Transaction{ SiafundInputs: []types.SiafundInput{{ ParentID: cst.cs.blockRoot.Block.Transactions[0].SiafundOutputID(2), UnlockConditions: types.UnlockConditions{}, }}, SiafundOutputs: []types.SiafundOutput{{ Value: types.NewCurrency64(1e3), UnlockHash: uc.UnlockHash(), }}, } // Mine the transaction into the blockchain. err = cst.tpool.AcceptTransactionSet([]types.Transaction{txn}) if err != nil { panic(err) } _, err = cst.miner.AddBlock() if err != nil { panic(err) } // Check that the siafunds made it to the wallet. _, siafundBalance, _ := cst.wallet.ConfirmedBalance() if !siafundBalance.Equals64(1e3) { panic("wallet does not have the siafunds") } } // mineSiacoins mines blocks until there are siacoins in the wallet. func (cst *consensusSetTester) mineSiacoins() { for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ { b, _ := cst.miner.FindBlock() err := cst.cs.AcceptBlock(b) if err != nil { panic(err) } } } // blankConsensusSetTester creates a consensusSetTester that has only the // genesis block. func blankConsensusSetTester(name string) (*consensusSetTester, error) { testdir := build.TempDir(modules.ConsensusDir, name) // Create modules.
g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { return nil, err } cs, err := New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { return nil, err } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { return nil, err } w, err := wallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { return nil, err } key := crypto.GenerateTwofishKey() _, err = w.Encrypt(key) if err != nil { return nil, err } err = w.Unlock(key) if err != nil { return nil, err } m, err := miner.New(cs, tp, w, filepath.Join(testdir, modules.MinerDir)) if err != nil { return nil, err } // Assemble all objects into a consensusSetTester. cst := &consensusSetTester{ gateway: g, miner: m, tpool: tp, wallet: w, walletKey: key, cs: cs, persistDir: testdir, } return cst, nil } // createConsensusSetTester creates a consensusSetTester that's ready for use, // including siacoins and siafunds available in the wallet. func createConsensusSetTester(name string) (*consensusSetTester, error) { cst, err := blankConsensusSetTester(name) if err != nil { return nil, err } cst.addSiafunds() cst.mineSiacoins() return cst, nil } // Close safely closes the consensus set tester. Because there's not a good way // to errcheck when deferring a close, a panic is called in the event of an // error. func (cst *consensusSetTester) Close() error { errs := []error{ cst.cs.Close(), cst.gateway.Close(), cst.miner.Close(), } if err := build.JoinErrors(errs, "; "); err != nil { panic(err) } return nil } // TestNilInputs tries to create new consensus set modules using nil inputs. func TestNilInputs(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() testdir := build.TempDir(modules.ConsensusDir, t.Name()) _, err := New(nil, false, testdir) if err != errNilGateway { t.Fatal(err) } } // TestDatabaseClosing tries to close a consensus set. func TestDatabaseClosing(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() testdir := build.TempDir(modules.ConsensusDir, t.Name()) // Create the gateway. g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { t.Fatal(err) } cs, err := New(g, false, testdir) if err != nil { t.Fatal(err) } err = cs.Close() if err != nil { t.Error(err) } } Sia-1.3.0/modules/consensus/consistency.go000066400000000000000000000237641313565667000206330ustar00rootroot00000000000000package consensus import ( "bytes" "errors" "fmt" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" "github.com/NebulousLabs/bolt" ) // manageErr handles an error detected by the consistency checks. func manageErr(tx *bolt.Tx, err error) { markInconsistency(tx) if build.DEBUG { panic(err) } else { fmt.Println(err) } } // consensusChecksum grabs a checksum of the consensus set by pushing all of // the elements in sorted order into a merkle tree and taking the root. All // consensus sets with the same current block should have identical consensus // checksums. func consensusChecksum(tx *bolt.Tx) crypto.Hash { // Create a checksum tree. tree := crypto.NewTree() // For all of the constant buckets, push every key and every value. Buckets // are sorted in byte-order, therefore this operation is deterministic.
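// Because both the bucket list below and bolt's within-bucket key ordering
// are fixed, two nodes at the same block push identical leaf sequences and
// therefore compute identical Merkle roots.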
consensusSetBuckets := []*bolt.Bucket{ tx.Bucket(BlockPath), tx.Bucket(SiacoinOutputs), tx.Bucket(FileContracts), tx.Bucket(SiafundOutputs), tx.Bucket(SiafundPool), } for i := range consensusSetBuckets { err := consensusSetBuckets[i].ForEach(func(k, v []byte) error { tree.Push(k) tree.Push(v) return nil }) if err != nil { manageErr(tx, err) } } // Iterate through all the buckets looking for buckets prefixed with // prefixDSCO or prefixFCEX. Buckets are presented in byte-sorted order by // name. err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { // If the bucket is not a delayed siacoin output bucket or a file // contract expiration bucket, skip. if !bytes.HasPrefix(name, prefixDSCO) && !bytes.HasPrefix(name, prefixFCEX) { return nil } // The bucket is a prefixed bucket - add all elements to the tree. return b.ForEach(func(k, v []byte) error { tree.Push(k) tree.Push(v) return nil }) }) if err != nil { manageErr(tx, err) } return tree.Root() } // checkSiacoinCount checks that the number of siacoins countable within the // consensus set equal the expected number of siacoins for the block height. func checkSiacoinCount(tx *bolt.Tx) { // Iterate through all the buckets looking for the delayed siacoin output // buckets, and check that they are for the correct heights. var dscoSiacoins types.Currency err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { // Check if the bucket is a delayed siacoin output bucket. if !bytes.HasPrefix(name, prefixDSCO) { return nil } // Sum up the delayed outputs in this bucket. err := b.ForEach(func(_, delayedOutput []byte) error { var sco types.SiacoinOutput err := encoding.Unmarshal(delayedOutput, &sco) if err != nil { manageErr(tx, err) } dscoSiacoins = dscoSiacoins.Add(sco.Value) return nil }) if err != nil { return err } return nil }) if err != nil { manageErr(tx, err) } // Add all of the siacoin outputs. var scoSiacoins types.Currency err = tx.Bucket(SiacoinOutputs).ForEach(func(_, scoBytes []byte) error { var sco types.SiacoinOutput err := encoding.Unmarshal(scoBytes, &sco) if err != nil { manageErr(tx, err) } scoSiacoins = scoSiacoins.Add(sco.Value) return nil }) if err != nil { manageErr(tx, err) } // Add all of the payouts from file contracts. var fcSiacoins types.Currency err = tx.Bucket(FileContracts).ForEach(func(_, fcBytes []byte) error { var fc types.FileContract err := encoding.Unmarshal(fcBytes, &fc) if err != nil { manageErr(tx, err) } var fcCoins types.Currency for _, output := range fc.ValidProofOutputs { fcCoins = fcCoins.Add(output.Value) } fcSiacoins = fcSiacoins.Add(fcCoins) return nil }) if err != nil { manageErr(tx, err) } // Add all of the siafund claims. 
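// Each output's claim is its pro-rata share of the pool's growth since the
// output was created: claim = (siafundPool - ClaimStart) * value / SiafundCount.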
var claimSiacoins types.Currency err = tx.Bucket(SiafundOutputs).ForEach(func(_, sfoBytes []byte) error { var sfo types.SiafundOutput err := encoding.Unmarshal(sfoBytes, &sfo) if err != nil { manageErr(tx, err) } coinsPerFund := getSiafundPool(tx).Sub(sfo.ClaimStart) claimCoins := coinsPerFund.Mul(sfo.Value).Div(types.SiafundCount) claimSiacoins = claimSiacoins.Add(claimCoins) return nil }) if err != nil { manageErr(tx, err) } expectedSiacoins := types.CalculateNumSiacoins(blockHeight(tx)) totalSiacoins := dscoSiacoins.Add(scoSiacoins).Add(fcSiacoins).Add(claimSiacoins) if !totalSiacoins.Equals(expectedSiacoins) { diagnostics := fmt.Sprintf("Wrong number of siacoins\nDsco: %v\nSco: %v\nFc: %v\nClaim: %v\n", dscoSiacoins, scoSiacoins, fcSiacoins, claimSiacoins) if totalSiacoins.Cmp(expectedSiacoins) < 0 { diagnostics += fmt.Sprintf("total: %v\nexpected: %v\n expected is bigger: %v", totalSiacoins, expectedSiacoins, expectedSiacoins.Sub(totalSiacoins)) } else { diagnostics += fmt.Sprintf("total: %v\nexpected: %v\n expected is bigger: %v", totalSiacoins, expectedSiacoins, totalSiacoins.Sub(expectedSiacoins)) } manageErr(tx, errors.New(diagnostics)) } } // checkSiafundCount checks that the number of siafunds countable within the // consensus set equal the expected number of siafunds for the block height. func checkSiafundCount(tx *bolt.Tx) { var total types.Currency err := tx.Bucket(SiafundOutputs).ForEach(func(_, siafundOutputBytes []byte) error { var sfo types.SiafundOutput err := encoding.Unmarshal(siafundOutputBytes, &sfo) if err != nil { manageErr(tx, err) } total = total.Add(sfo.Value) return nil }) if err != nil { manageErr(tx, err) } if !total.Equals(types.SiafundCount) { manageErr(tx, errors.New("wrong number of siafunds in the consensus set")) } } // checkDSCOs scans the sets of delayed siacoin outputs and checks for // consistency. func checkDSCOs(tx *bolt.Tx) { // Create a map to track which delayed siacoin output maps exist, and // another map to track which ids have appeared in the dsco set. dscoTracker := make(map[types.BlockHeight]struct{}) idMap := make(map[types.SiacoinOutputID]struct{}) // Iterate through all the buckets looking for the delayed siacoin output // buckets, and check that they are for the correct heights. err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { // If the bucket is not a delayed siacoin output bucket or a file // contract expiration bucket, skip. if !bytes.HasPrefix(name, prefixDSCO) { return nil } // Add the bucket to the dscoTracker. var height types.BlockHeight err := encoding.Unmarshal(name[len(prefixDSCO):], &height) if err != nil { manageErr(tx, err) } _, exists := dscoTracker[height] if exists { return errors.New("repeat dsco map") } dscoTracker[height] = struct{}{} var total types.Currency err = b.ForEach(func(idBytes, delayedOutput []byte) error { // Check that the output id has not appeared in another dsco. var id types.SiacoinOutputID copy(id[:], idBytes) _, exists := idMap[id] if exists { return errors.New("repeat delayed siacoin output") } idMap[id] = struct{}{} // Sum the funds in the bucket. var sco types.SiacoinOutput err := encoding.Unmarshal(delayedOutput, &sco) if err != nil { manageErr(tx, err) } total = total.Add(sco.Value) return nil }) if err != nil { return err } // Check that the minimum value has been achieved - the coinbase from // an earlier block is guaranteed to be in the bucket. 
minimumValue := types.CalculateCoinbase(height - types.MaturityDelay) if total.Cmp(minimumValue) < 0 { return errors.New("total number of coins in the delayed output bucket is incorrect") } return nil }) if err != nil { manageErr(tx, err) } // Check that all of the correct heights are represented. currentHeight := blockHeight(tx) expectedBuckets := 0 for i := currentHeight + 1; i <= currentHeight+types.MaturityDelay; i++ { if i < types.MaturityDelay { continue } _, exists := dscoTracker[i] if !exists { manageErr(tx, errors.New("missing a dsco bucket")) } expectedBuckets++ } if len(dscoTracker) != expectedBuckets { manageErr(tx, errors.New("too many dsco buckets")) } } // checkRevertApply reverts the most recent block, checking to see that the // consensus set hash matches the hash obtained for the previous block. Then it // applies the block again and checks that the consensus set hash matches the // original consensus set hash. func (cs *ConsensusSet) checkRevertApply(tx *bolt.Tx) { current := currentProcessedBlock(tx) // Don't perform the check if this block is the genesis block. if current.Block.ID() == cs.blockRoot.Block.ID() { return } parent, err := getBlockMap(tx, current.Block.ParentID) if err != nil { manageErr(tx, err) } if current.Height != parent.Height+1 { manageErr(tx, errors.New("parent structure of a block is incorrect")) } _, _, err = cs.forkBlockchain(tx, parent) if err != nil { manageErr(tx, err) } if consensusChecksum(tx) != parent.ConsensusChecksum { manageErr(tx, errors.New("consensus checksum mismatch after reverting")) } _, _, err = cs.forkBlockchain(tx, current) if err != nil { manageErr(tx, err) } if consensusChecksum(tx) != current.ConsensusChecksum { manageErr(tx, errors.New("consensus checksum mismatch after re-applying")) } } // checkConsistency runs a series of checks to make sure that the consensus set // is consistent with some rules that should always be true. func (cs *ConsensusSet) checkConsistency(tx *bolt.Tx) { if cs.checkingConsistency { return } cs.checkingConsistency = true checkDSCOs(tx) checkSiacoinCount(tx) checkSiafundCount(tx) if build.DEBUG { cs.checkRevertApply(tx) } cs.checkingConsistency = false } // maybeCheckConsistency runs a consistency check with a small probability. // Useful for detecting database corruption in production without needing to go // through the extremely slow process of running a consistency check every // block. func (cs *ConsensusSet) maybeCheckConsistency(tx *bolt.Tx) { if fastrand.Intn(1000) == 0 { cs.checkConsistency(tx) } } // TODO: Check that every file contract has an expiration too, and that the // number of file contracts + the number of expirations is equal. Sia-1.3.0/modules/consensus/consistency_helpers_test.go000066400000000000000000000006451313565667000234110ustar00rootroot00000000000000package consensus import ( "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/bolt" ) // dbConsensusChecksum is a convenience function to call consensusChecksum // without a bolt.Tx. func (cs *ConsensusSet) dbConsensusChecksum() (checksum crypto.Hash) { err := cs.db.Update(func(tx *bolt.Tx) error { checksum = consensusChecksum(tx) return nil }) if err != nil { panic(err) } return checksum } Sia-1.3.0/modules/consensus/database.go000066400000000000000000000074141313565667000200340ustar00rootroot00000000000000package consensus // database.go contains functions to initialize the database and report // inconsistencies. All of the database-specific logic belongs here. 
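//
// A hedged sketch of the expected startup order (presumably wired together by
// initPersist, which is defined elsewhere):
//
//	err := cs.openDB(dbFilename) // dbFilename is a hypothetical path
//	if err == nil {
//		err = cs.db.Update(cs.initDB)
//	}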
import ( "errors" "fmt" "os" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/bolt" ) var ( errRepeatInsert = errors.New("attempting to add an already existing item to the consensus set") errNilBucket = errors.New("using a bucket that does not exist") errNilItem = errors.New("requested item does not exist") errDBInconsistent = errors.New("database guard indicates inconsistency within database") errNonEmptyBucket = errors.New("cannot remove a map with objects still in it") dbMetadata = persist.Metadata{ Header: "Consensus Set Database", Version: "0.5.0", } ) type ( // dbBucket represents a collection of key/value pairs inside the database. dbBucket interface { Get(key []byte) []byte } // dbTx represents a read-only transaction on the database that can be used // for retrieving values. dbTx interface { Bucket(name []byte) dbBucket } // boltTxWrapper wraps a bolt.Tx so that it matches the dbTx interface. The // wrap is necessary because bolt.Tx.Bucket() returns a fixed type // (bolt.Bucket), but we want it to return an interface (dbBucket). boltTxWrapper struct { tx *bolt.Tx } ) // Bucket returns the dbBucket associated with the given bucket name. func (b boltTxWrapper) Bucket(name []byte) dbBucket { return b.tx.Bucket(name) } // replaceDatabase backs up the existing database and creates a new one. func (cs *ConsensusSet) replaceDatabase(filename string) error { // Rename the existing database and create a new one. fmt.Println("Outdated consensus database... backing up and replacing") err := os.Rename(filename, filename+".bck") if err != nil { return errors.New("error while backing up consensus database: " + err.Error()) } // Try again to create a new database, this time without checking for an // outdated database error. cs.db, err = persist.OpenDatabase(dbMetadata, filename) if err != nil { return errors.New("error opening consensus database: " + err.Error()) } return nil } // openDB loads the set database and populates it with the necessary buckets func (cs *ConsensusSet) openDB(filename string) (err error) { cs.db, err = persist.OpenDatabase(dbMetadata, filename) if err == persist.ErrBadVersion { return cs.replaceDatabase(filename) } if err != nil { return errors.New("error opening consensus database: " + err.Error()) } return nil } // initDB is run if there is no existing consensus database, creating a // database with all the required buckets and sane initial values. func (cs *ConsensusSet) initDB(tx *bolt.Tx) error { // If the database has already been initialized, there is nothing to do. // Initialization can be detected by looking for the presense of the siafund // pool bucket. (legacy design chioce - ultimately probably not the best way // ot tell). if tx.Bucket(SiafundPool) != nil { return nil } // Create the compononents of the database. err := cs.createConsensusDB(tx) if err != nil { return err } err = cs.createChangeLog(tx) if err != nil { return err } // Place a 'false' in the consistency bucket to indicate that no // inconsistencies have been found. err = tx.Bucket(Consistency).Put(Consistency, encoding.Marshal(false)) if err != nil { return err } return nil } // markInconsistency flags the database to indicate that inconsistency has been // detected. func markInconsistency(tx *bolt.Tx) { // Place a 'true' in the consistency bucket to indicate that // inconsistencies have been found. 
err := tx.Bucket(Consistency).Put(Consistency, encoding.Marshal(true)) if build.DEBUG && err != nil { panic(err) } } Sia-1.3.0/modules/consensus/difficulty.go000066400000000000000000000231411313565667000204250ustar00rootroot00000000000000package consensus import ( "bytes" "encoding/binary" "math/big" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" "github.com/NebulousLabs/errors" ) // Errors returned by this file. var ( // errOakHardforkIncompatibility is the error returned if Oak initialization // cannot begin because the consensus database was not upgraded before the // hardfork height. errOakHardforkIncompatibility = errors.New("difficulty adjustment hardfork incompatibility detected") ) // difficulty.go defines the Oak difficulty adjustment algorithm. Past the // hardfork trigger height, it is the algorithm that Sia uses to adjust the // difficulty. // // A running tally is maintained which keeps the total difficulty and total time // passed across all blocks. The total difficulty can be divided by the total // time to get a hashrate. The total is multiplied by 0.995 each block, to keep // exponential preference on recent blocks with a half life of about 24 hours. // This estimated hashrate is assumed to closely match the actual hashrate on // the network. // // There is a target block time. If the difficulty increases or decreases, the // total amount of time that has passed will be more or less than the target // amount of time passed for the current height. To counteract this, the target // block time for each block is adjusted based on how far away from the desired // total time passed the current total time passed is. If the total time passed // is too low, blocks are targeted to be slightly longer, which helps to correct // the network. And if the total time passed is too high, blocks are targeted to // be slightly shorter, to help correct the network. // // High variance in block times means that the corrective action should not be // very strong if the total time passed has only missed the target time passed // by a few hours. But if the total time passed is significantly off, the block // time corrections should be much stronger. The square of the total deviation // is used to figure out what the adjustment should be. At 10,000 seconds // variance (about 3 hours), blocks will be adjusted by 10 seconds each. At // 20,000 seconds, blocks will be adjusted by 40 seconds each, a 4x adjustment // for 2x the error. And at 40,000 seconds, blocks will be adjusted by 160 // seconds each, and so on. // // The total amount of blocktime adjustment is capped to 1/3 and 3x the target // blocktime, to prevent too much disruption on the network. If blocks are // actually coming out 3x as fast as intended, there will be a (temporary) // significant increase in the amount of strain on nodes to process blocks. And // at 1/3 the target blocktime, the total blockchain throughput will decrease // dramatically. // // Finally, one extra cap is applied to the difficulty adjustment - the // difficulty of finding a block is not allowed to change more than 0.4% every // block. This maps to a total possible difficulty change of 55x across 1008 // blocks. This clamp helps to prevent wild swings when the hashrate increases // or decreases rapidly on the network, and it also limits the amount of damage // that a malicious attacker can do if performing a difficulty raising attack. // childTargetOak sets the child target based on the total time delta and total // hashrate of the parent block.
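As a worked example of // the shift formula implemented below: a 10,000 second deviation squares to // 1e8, and 1e8 / 10e6 yields a 10 second shift per block, while a 20,000 second // deviation yields a 40 second shift, matching the quadratic response described // in the file comment above.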
The deltas are known for the child block, // however we do not use the child block deltas because that would allow the // child block to influence the target of the following block, which makes abuse // easier in selfish mining scenarios. func (cs *ConsensusSet) childTargetOak(parentTotalTime int64, parentTotalTarget, currentTarget types.Target, parentHeight types.BlockHeight) types.Target { // Determine the delta of the current total time vs. the desired total time. expectedTime := types.BlockFrequency * parentHeight delta := int64(expectedTime) - parentTotalTime // Convert the delta into a target block time. square := delta * delta if delta < 0 { // If the delta is negative, restore the negative value. square *= -1 } shift := square / 10e6 // 10e3 second delta leads to 10 second shift. targetBlockTime := int64(types.BlockFrequency) + shift // Clamp the block time to 1/3 and 3x the target block time. if targetBlockTime < int64(types.BlockFrequency)/3 { targetBlockTime = int64(types.BlockFrequency) / 3 } if targetBlockTime > int64(types.BlockFrequency)*3 { targetBlockTime = int64(types.BlockFrequency) * 3 } // Determine the hashrate using the total time and total target. Set a // minimum total time of 1 to prevent divide by zero and underflows. if parentTotalTime < 1 { parentTotalTime = 1 } visibleHashrate := parentTotalTarget.Difficulty().Div64(uint64(parentTotalTime)) // Hashes per second. if visibleHashrate.IsZero() { visibleHashrate = visibleHashrate.Add(types.NewCurrency64(1)) } // Determine the new target by multiplying the visible hashrate by the // target block time. Clamp it to a 0.4% difficulty adjustment. maxNewTarget := currentTarget.MulDifficulty(types.OakMaxRise) minNewTarget := currentTarget.MulDifficulty(types.OakMaxDrop) newTarget := types.RatToTarget(new(big.Rat).SetFrac(types.RootDepth.Int(), visibleHashrate.Mul64(uint64(targetBlockTime)).Big())) if newTarget.Cmp(maxNewTarget) < 0 { newTarget = maxNewTarget } if newTarget.Cmp(minNewTarget) > 0 { newTarget = minNewTarget } return newTarget } // getBlockTotals returns the block totals that get stored by // storeBlockTotals. func (cs *ConsensusSet) getBlockTotals(tx *bolt.Tx, id types.BlockID) (totalTime int64, totalTarget types.Target) { totalsBytes := tx.Bucket(BucketOak).Get(id[:]) totalTime = int64(binary.LittleEndian.Uint64(totalsBytes[:8])) copy(totalTarget[:], totalsBytes[8:]) return } // storeBlockTotals computes the new total time and total target for the current // block and stores those new totals in the database. It also returns the new // totals. func (cs *ConsensusSet) storeBlockTotals(tx *bolt.Tx, currentHeight types.BlockHeight, currentBlockID types.BlockID, prevTotalTime int64, parentTimestamp, currentTimestamp types.Timestamp, prevTotalTarget, targetOfCurrentBlock types.Target) (newTotalTime int64, newTotalTarget types.Target, err error) { // Reset the prevTotalTime to a delta of zero just before the hardfork. if currentHeight == types.OakHardforkBlock-1 { prevTotalTime = int64(types.BlockFrequency * currentHeight) } // For each value, first multiply by the decay, and then add in the new // delta. newTotalTime = (prevTotalTime * types.OakDecayNum / types.OakDecayDenom) + (int64(currentTimestamp) - int64(parentTimestamp)) newTotalTarget = prevTotalTarget.MulDifficulty(big.NewRat(types.OakDecayNum, types.OakDecayDenom)).AddDifficulties(targetOfCurrentBlock) // Store the new total time and total target in the database at the // appropriate id.
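// The value written below is exactly 40 bytes: an 8 byte little-endian // encoding of the total time followed by the 32 byte total target, the same // layout that getBlockTotals decodes above.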
bytes := make([]byte, 40) binary.LittleEndian.PutUint64(bytes[:8], uint64(newTotalTime)) copy(bytes[8:], newTotalTarget[:]) err = tx.Bucket(BucketOak).Put(currentBlockID[:], bytes) if err != nil { return 0, types.Target{}, errors.Extend(errors.New("unable to store total time values"), err) } return newTotalTime, newTotalTarget, nil } // initOak will initialize all of the oak difficulty adjustment related fields. // This is separate from the initialization process for compatibility reasons - // some databases will not have these fields at start, so it must be checked. // // After oak initialization is complete, a specific field in the oak bucket is // marked so that oak initialization can be skipped in the future. func (cs *ConsensusSet) initOak(tx *bolt.Tx) error { // Prep the oak bucket. bucketOak, err := tx.CreateBucketIfNotExists(BucketOak) if err != nil { return errors.Extend(errors.New("unable to create oak bucket"), err) } // Check whether the init field is set. if bytes.Equal(bucketOak.Get(FieldOakInit), ValueOakInit) { // The oak fields have been initialized, nothing to do. return nil } // If the current height is greater than the hardfork trigger height, return // an error and refuse to initialize. height := blockHeight(tx) if height > types.OakHardforkBlock { return errOakHardforkIncompatibility } // Store base values for the genesis block. totalTime, totalTarget, err := cs.storeBlockTotals(tx, 0, types.GenesisID, 0, types.GenesisTimestamp, types.GenesisTimestamp, types.RootDepth, types.RootTarget) if err != nil { return errors.Extend(errors.New("unable to store genesis block totals"), err) } // The Oak fields have not been initialized, scan through the consensus set // and set the fields for each block. parentTimestamp := types.GenesisTimestamp parentChildTarget := types.RootTarget for i := types.BlockHeight(1); i <= height; i++ { // Skip Genesis block // Fetch the processed block for the current block. id, err := getPath(tx, i) if err != nil { return errors.Extend(errors.New("unable to find block at height"), err) } pb, err := getBlockMap(tx, id) if err != nil { return errors.Extend(errors.New("unable to find block from id"), err) } // Calculate and store the new block totals. totalTime, totalTarget, err = cs.storeBlockTotals(tx, i, id, totalTime, parentTimestamp, pb.Block.Timestamp, totalTarget, parentChildTarget) if err != nil { return errors.Extend(errors.New("unable to store updated block totals"), err) } // Update the previous values. parentTimestamp = pb.Block.Timestamp parentChildTarget = pb.ChildTarget } // Tag the initialization field in the oak bucket, indicating that // initialization has completed. err = bucketOak.Put(FieldOakInit, ValueOakInit) if err != nil { return errors.Extend(errors.New("unable to put oak init confirmation into oak bucket"), err) } return nil } Sia-1.3.0/modules/consensus/difficulty_test.go000066400000000000000000000254731313565667000214740ustar00rootroot00000000000000package consensus import ( "bytes" "math/big" "testing" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) // TestChildTargetOak checks the childTargetOak function, especially for edge // cases like overflows and underflows. func TestChildTargetOak(t *testing.T) { // NOTE: Test must not be run in parallel. if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() cs := cst.cs // NOTE: Test must not be run in parallel.
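// The parallelism restriction exists because the assignments below mutate // package-level variables in the types package; any test running concurrently // would observe the modified constants.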
// // Set the constants to match the real-network constants, and then make sure // they are reset at the end of the test. oldFreq := types.BlockFrequency oldMaxRise := types.OakMaxRise oldMaxDrop := types.OakMaxDrop oldRootTarget := types.RootTarget types.BlockFrequency = 600 types.OakMaxRise = big.NewRat(1004, 1e3) types.OakMaxDrop = big.NewRat(1e3, 1004) types.RootTarget = types.Target{0, 0, 0, 1} defer func() { types.BlockFrequency = oldFreq types.OakMaxRise = oldMaxRise types.OakMaxDrop = oldMaxDrop types.RootTarget = oldRootTarget }() // Start with some values that are normal, resulting in no change in target. parentHeight := types.BlockHeight(100) parentTotalTime := int64(types.BlockFrequency * parentHeight) parentTotalTarget := types.RootTarget.MulDifficulty(big.NewRat(int64(parentHeight), 1)) parentTarget := types.RootTarget // newTarget should match the root target, as the hashrate and blocktime all // match the existing target - there should be no reason for adjustment. newTarget := cs.childTargetOak(parentTotalTime, parentTotalTarget, parentTarget, parentHeight) // New target should be barely moving. Some imprecision may cause slight // adjustments, but the total difference should be less than 0.01%. maxNewTarget := parentTarget.MulDifficulty(big.NewRat(10e3, 10001)) minNewTarget := parentTarget.MulDifficulty(big.NewRat(10001, 10e3)) if newTarget.Cmp(maxNewTarget) > 0 { t.Error("The target shifted too much for a constant hashrate") } if newTarget.Cmp(minNewTarget) < 0 { t.Error("The target shifted too much for a constant hashrate") } // Probe the target clamps and the block deltas by providing a correct // hashrate, but a parent total time that is very far in the future, which // means that blocks have been taking too long - this means that the target // block time should be decreased, the difficulty should go down (and target // up). parentHeight = types.BlockHeight(100) parentTotalTime = int64(types.BlockFrequency*parentHeight) + 500e6 // very large delta used to probe extremes parentTotalTarget = types.RootTarget.MulDifficulty(big.NewRat(int64(parentHeight), 1)) parentTarget = types.RootTarget // newTarget should be higher, representing reduced difficulty. It should be // as high as the adjustment clamp allows it to move. newTarget = cs.childTargetOak(parentTotalTime, parentTotalTarget, parentTarget, parentHeight) expectedTarget := parentTarget.MulDifficulty(types.OakMaxDrop) if newTarget.Cmp(expectedTarget) != 0 { t.Log(parentTarget) t.Log(expectedTarget) t.Log(newTarget) t.Error("target was not adjusted correctly when the block delta was put to an extreme") } // Check that the difficulty decreased from the parent. if newTarget.Difficulty().Cmp(parentTarget.Difficulty()) >= 0 { t.Log(newTarget.Difficulty()) t.Log(expectedTarget.Difficulty()) t.Error("difficulty has risen when we need the block time to be shorter") } // Use the same values as the previous check, but set the parent target so // it's within range (but above) the adjustment, so the clamp is not // triggered. parentHeight = types.BlockHeight(100) parentTotalTime = int64(types.BlockFrequency*parentHeight) + 500e6 parentTotalTarget = types.RootTarget.MulDifficulty(big.NewRat(int64(parentHeight), 1)) parentTarget = types.Target{0, 0, 97, 120} // New target should be higher, but the adjustment clamp should not have // kicked in. 
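// For scale, OakMaxRise is 1004/1000, a 0.4% per-block adjustment; compounded // over 1008 blocks this is roughly 1.004^1008 ≈ 55x, the figure quoted in the // difficulty.go file comment.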
newTarget = cs.childTargetOak(parentTotalTime, parentTotalTarget, parentTarget, parentHeight) minNewTarget = parentTarget.MulDifficulty(types.OakMaxDrop) // Check that the difficulty of the new target decreased. if parentTarget.Difficulty().Cmp(newTarget.Difficulty()) <= 0 { t.Error("Difficulty did not decrease") } // Check that the difficulty decreased by less than the clamped amount. if minNewTarget.Difficulty().Cmp(newTarget.Difficulty()) >= 0 { t.Error("Difficulty decreased by too much - clamp should not be in effect for these values") } // A repeat of the second test, except that blocks are coming out too fast // instead of too slow, meaning we should see an increased difficulty and a // slower block time. parentHeight = types.BlockHeight(10e3) parentTotalTime = int64(100) parentTotalTarget = types.RootTarget.MulDifficulty(big.NewRat(int64(parentHeight), 1)) parentTarget = types.RootTarget // newTarget should be lower, representing increased difficulty. It should // be as low as the adjustment clamp allows it to move. newTarget = cs.childTargetOak(parentTotalTime, parentTotalTarget, parentTarget, parentHeight) expectedTarget = parentTarget.MulDifficulty(types.OakMaxRise) if newTarget.Cmp(expectedTarget) != 0 { t.Log(parentTarget) t.Log(expectedTarget) t.Log(newTarget) t.Error("target was not adjusted correctly when the block delta was put to an extreme") } // Check that the difficulty increased from the parent. if newTarget.Difficulty().Cmp(parentTarget.Difficulty()) <= 0 { t.Log(newTarget.Difficulty()) t.Log(expectedTarget.Difficulty()) t.Error("difficulty has dropped when we need the block time to be longer") } // Use the same values as the previous check, but set the parent target so // it's within range (but below) the adjustment, so the clamp is not // triggered. parentHeight = types.BlockHeight(10e3) parentTotalTime = int64(100) parentTotalTarget = types.RootTarget.MulDifficulty(big.NewRat(int64(parentHeight), 1)) parentTarget = types.Target{0, 0, 0, 0, 0, 0, 93, 70} // New target should be lower, but the adjustment clamp should not have // kicked in. newTarget = cs.childTargetOak(parentTotalTime, parentTotalTarget, parentTarget, parentHeight) minNewTarget = parentTarget.MulDifficulty(types.OakMaxRise) // Check that the difficulty of the new target increased. if parentTarget.Difficulty().Cmp(newTarget.Difficulty()) >= 0 { t.Error("Difficulty did not increase") } // Check that the difficulty increased by less than the clamped amount. if minNewTarget.Difficulty().Cmp(newTarget.Difficulty()) <= 0 { t.Error("Difficulty increased by too much - clamp should not be in effect for these values") } } // TestStoreBlockTotals checks features of the storeBlockTotals and // getBlockTotals code. func TestStoreBlockTotals(t *testing.T) { // NOTE: Test must not be run in parallel. if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() cs := cst.cs // NOTE: Test must not be run in parallel. // // Set the constants to match the real-network constants, and then make sure // they are reset at the end of the test.
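// With a decay of 995/1000 per block, the running totals converge toward a // geometric series sum of 1/(1 - 0.995) = 200 times the per-block // contribution, which is why the assertions further down bracket the converged // totals between 199x and 205x.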
oldFreq := types.BlockFrequency oldDecayNum := types.OakDecayNum oldDecayDenom := types.OakDecayDenom oldMaxRise := types.OakMaxRise oldMaxDrop := types.OakMaxDrop oldRootTarget := types.RootTarget types.BlockFrequency = 600 types.OakDecayNum = 995 types.OakDecayDenom = 1e3 types.OakMaxRise = big.NewRat(1004, 1e3) types.OakMaxDrop = big.NewRat(1e3, 1004) types.RootTarget = types.Target{0, 0, 0, 1} defer func() { types.BlockFrequency = oldFreq types.OakDecayNum = oldDecayNum types.OakDecayDenom = oldDecayDenom types.OakMaxRise = oldMaxRise types.OakMaxDrop = oldMaxDrop types.RootTarget = oldRootTarget }() // Check that as totals get stored over and over, the values getting // returned follow a decay. While storing repeatedly, check that the // getBlockTotals values match the values that were stored. err = cs.db.Update(func(tx *bolt.Tx) error { totalTime := int64(0) totalTarget := types.RootDepth var id types.BlockID parentTimestamp := types.Timestamp(0) currentTimestamp := types.Timestamp(0) currentTarget := types.RootTarget for i := types.BlockHeight(0); i < 8000; i++ { id[i/256] = byte(i % 256) parentTimestamp = currentTimestamp currentTimestamp += types.Timestamp(types.BlockFrequency) totalTime, totalTarget, err = cs.storeBlockTotals(tx, i, id, totalTime, parentTimestamp, currentTimestamp, totalTarget, currentTarget) if err != nil { return err } // Check that the fetched values match the stored values. getTime, getTarg := cs.getBlockTotals(tx, id) if getTime != totalTime || getTarg != totalTarget { t.Error("fetch failed - retrieving time and target did not work") } } // Do a final iteration, but keep the old totals. After 8000 iterations, // the totals should no longer be changing, yet they should be hundreds // of times larger than the original values. id[8001/256] = byte(8001 % 256) parentTimestamp = currentTimestamp currentTimestamp += types.Timestamp(types.BlockFrequency) newTotalTime, newTotalTarget, err := cs.storeBlockTotals(tx, 8001, id, totalTime, parentTimestamp, currentTimestamp, totalTarget, currentTarget) if err != nil { return err } if newTotalTime != totalTime || newTotalTarget.Difficulty().Cmp(totalTarget.Difficulty()) != 0 { t.Log(newTotalTime) t.Log(totalTime) t.Log(newTotalTarget) t.Log(totalTarget) t.Error("Total time and target did not seem to converge to a result") } if newTotalTime < int64(types.BlockFrequency)*199 { t.Error("decay seems to be happening too rapidly") } if newTotalTime > int64(types.BlockFrequency)*205 { t.Error("decay seems to be happening too slowly") } if newTotalTarget.Difficulty().Cmp(types.RootTarget.Difficulty().Mul64(199)) < 0 { t.Error("decay seems to be happening too rapidly") } if newTotalTarget.Difficulty().Cmp(types.RootTarget.Difficulty().Mul64(205)) > 0 { t.Error("decay seems to be happening too slowly") } return nil }) if err != nil { t.Fatal(err) } } // TestHardforkMechanic mines blocks until the oak hardfork kicks in, // verifying that nothing unusual happens, and that the difficulty adjustments // begin happening every block. func TestHardforkMechanic(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Mine blocks until the oak hardfork height, tracking the current target at // each height.
var prevTarg types.Target for i := types.BlockHeight(0); i < types.OakHardforkBlock*2; i++ { b, err := cst.miner.AddBlock() if err != nil { t.Fatal(err) } targ, _ := cst.cs.ChildTarget(b.ID()) if i > types.OakHardforkBlock && bytes.Compare(targ[:], prevTarg[:]) >= 0 { t.Error("target is not adjusting down during mining every block") } prevTarg = targ } } Sia-1.3.0/modules/consensus/diffs.go000066400000000000000000000224001313565667000173530ustar00rootroot00000000000000package consensus import ( "errors" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) var ( errApplySiafundPoolDiffMismatch = errors.New("committing a siafund pool diff with an invalid 'previous' field") errDiffsNotGenerated = errors.New("applying diff set before generating diffs") errInvalidSuccessor = errors.New("generating diffs for a block that's an invalid successor to the current block") errNegativePoolAdjustment = errors.New("committing a siafund pool diff with a negative adjustment") errNonApplySiafundPoolDiff = errors.New("committing a siafund pool diff that doesn't have the 'apply' direction") errRevertSiafundPoolDiffMismatch = errors.New("committing a siafund pool diff with an invalid 'adjusted' field") errWrongAppliedDiffSet = errors.New("applying a diff set that isn't the current block") errWrongRevertDiffSet = errors.New("reverting a diff set that isn't the current block") ) // commitDiffSetSanity performs a series of sanity checks before committing a // diff set. func commitDiffSetSanity(tx *bolt.Tx, pb *processedBlock, dir modules.DiffDirection) { // This function consists purely of sanity checks. if !build.DEBUG { return } // Diffs should have already been generated for this node. if !pb.DiffsGenerated { panic(errDiffsNotGenerated) } // Current node must be the input node's parent if applying, and // current node must be the input node if reverting. if dir == modules.DiffApply { parent, err := getBlockMap(tx, pb.Block.ParentID) if build.DEBUG && err != nil { panic(err) } if parent.Block.ID() != currentBlockID(tx) { panic(errWrongAppliedDiffSet) } } else { if pb.Block.ID() != currentBlockID(tx) { panic(errWrongRevertDiffSet) } } } // commitSiacoinOutputDiff applies or reverts a SiacoinOutputDiff. func commitSiacoinOutputDiff(tx *bolt.Tx, scod modules.SiacoinOutputDiff, dir modules.DiffDirection) { if scod.Direction == dir { addSiacoinOutput(tx, scod.ID, scod.SiacoinOutput) } else { removeSiacoinOutput(tx, scod.ID) } } // commitFileContractDiff applies or reverts a FileContractDiff. func commitFileContractDiff(tx *bolt.Tx, fcd modules.FileContractDiff, dir modules.DiffDirection) { if fcd.Direction == dir { addFileContract(tx, fcd.ID, fcd.FileContract) } else { removeFileContract(tx, fcd.ID) } } // commitSiafundOutputDiff applies or reverts a Siafund output diff. func commitSiafundOutputDiff(tx *bolt.Tx, sfod modules.SiafundOutputDiff, dir modules.DiffDirection) { if sfod.Direction == dir { addSiafundOutput(tx, sfod.ID, sfod.SiafundOutput) } else { removeSiafundOutput(tx, sfod.ID) } } // commitDelayedSiacoinOutputDiff applies or reverts a delayedSiacoinOutputDiff.
func commitDelayedSiacoinOutputDiff(tx *bolt.Tx, dscod modules.DelayedSiacoinOutputDiff, dir modules.DiffDirection) { if dscod.Direction == dir { addDSCO(tx, dscod.MaturityHeight, dscod.ID, dscod.SiacoinOutput) } else { removeDSCO(tx, dscod.MaturityHeight, dscod.ID) } } // commitSiafundPoolDiff applies or reverts a SiafundPoolDiff. func commitSiafundPoolDiff(tx *bolt.Tx, sfpd modules.SiafundPoolDiff, dir modules.DiffDirection) { // Sanity check - siafund pool should only ever increase. if build.DEBUG { if sfpd.Adjusted.Cmp(sfpd.Previous) < 0 { panic(errNegativePoolAdjustment) } if sfpd.Direction != modules.DiffApply { panic(errNonApplySiafundPoolDiff) } } if dir == modules.DiffApply { // Sanity check - sfpd.Previous should equal the current siafund pool. if build.DEBUG && !getSiafundPool(tx).Equals(sfpd.Previous) { panic(errApplySiafundPoolDiffMismatch) } setSiafundPool(tx, sfpd.Adjusted) } else { // Sanity check - sfpd.Adjusted should equal the current siafund pool. if build.DEBUG && !getSiafundPool(tx).Equals(sfpd.Adjusted) { panic(errRevertSiafundPoolDiffMismatch) } setSiafundPool(tx, sfpd.Previous) } } // createUpcomingDelayedOutputMaps creates the delayed siacoin output maps that // will be used when applying delayed siacoin outputs in the diff set. func createUpcomingDelayedOutputMaps(tx *bolt.Tx, pb *processedBlock, dir modules.DiffDirection) { if dir == modules.DiffApply { createDSCOBucket(tx, pb.Height+types.MaturityDelay) } else if pb.Height >= types.MaturityDelay { createDSCOBucket(tx, pb.Height) } } // commitNodeDiffs commits all of the diffs in a block node. func commitNodeDiffs(tx *bolt.Tx, pb *processedBlock, dir modules.DiffDirection) { if dir == modules.DiffApply { for _, scod := range pb.SiacoinOutputDiffs { commitSiacoinOutputDiff(tx, scod, dir) } for _, fcd := range pb.FileContractDiffs { commitFileContractDiff(tx, fcd, dir) } for _, sfod := range pb.SiafundOutputDiffs { commitSiafundOutputDiff(tx, sfod, dir) } for _, dscod := range pb.DelayedSiacoinOutputDiffs { commitDelayedSiacoinOutputDiff(tx, dscod, dir) } for _, sfpd := range pb.SiafundPoolDiffs { commitSiafundPoolDiff(tx, sfpd, dir) } } else { for i := len(pb.SiacoinOutputDiffs) - 1; i >= 0; i-- { commitSiacoinOutputDiff(tx, pb.SiacoinOutputDiffs[i], dir) } for i := len(pb.FileContractDiffs) - 1; i >= 0; i-- { commitFileContractDiff(tx, pb.FileContractDiffs[i], dir) } for i := len(pb.SiafundOutputDiffs) - 1; i >= 0; i-- { commitSiafundOutputDiff(tx, pb.SiafundOutputDiffs[i], dir) } for i := len(pb.DelayedSiacoinOutputDiffs) - 1; i >= 0; i-- { commitDelayedSiacoinOutputDiff(tx, pb.DelayedSiacoinOutputDiffs[i], dir) } for i := len(pb.SiafundPoolDiffs) - 1; i >= 0; i-- { commitSiafundPoolDiff(tx, pb.SiafundPoolDiffs[i], dir) } } } // deleteObsoleteDelayedOutputMaps deletes the delayed siacoin output maps that // are no longer in use. func deleteObsoleteDelayedOutputMaps(tx *bolt.Tx, pb *processedBlock, dir modules.DiffDirection) { // There are no outputs that mature in the first MaturityDelay blocks. if dir == modules.DiffApply && pb.Height >= types.MaturityDelay { deleteDSCOBucket(tx, pb.Height) } else if dir == modules.DiffRevert { deleteDSCOBucket(tx, pb.Height+types.MaturityDelay) } } // updateCurrentPath updates the current path after applying or reverting a // diff set. func updateCurrentPath(tx *bolt.Tx, pb *processedBlock, dir modules.DiffDirection) { // Update the current path. if dir == modules.DiffApply { pushPath(tx, pb.Block.ID()) } else { popPath(tx) } } // commitDiffSet applies or reverts the diffs in a blockNode.
func commitDiffSet(tx *bolt.Tx, pb *processedBlock, dir modules.DiffDirection) { // Sanity checks - there are a few so they were moved to another function. if build.DEBUG { commitDiffSetSanity(tx, pb, dir) } createUpcomingDelayedOutputMaps(tx, pb, dir) commitNodeDiffs(tx, pb, dir) deleteObsoleteDelayedOutputMaps(tx, pb, dir) updateCurrentPath(tx, pb, dir) } // generateAndApplyDiff will verify the block and then integrate it into the // consensus state. These two actions must happen at the same time because // transactions are allowed to depend on each other. We can't be sure that a // transaction is valid unless we have applied all of the previous transactions // in the block, which means we need to apply while we verify. func generateAndApplyDiff(tx *bolt.Tx, pb *processedBlock) error { // Sanity check - the block being applied should have the current block as // a parent. if build.DEBUG && pb.Block.ParentID != currentBlockID(tx) { panic(errInvalidSuccessor) } // Create the bucket to hold all of the delayed siacoin outputs created by // transactions this block. Needs to happen before any transactions are // applied. createDSCOBucket(tx, pb.Height+types.MaturityDelay) // Validate and apply each transaction in the block. They cannot be // validated all at once because some transactions may not be valid until // previous transactions have been applied. for _, txn := range pb.Block.Transactions { err := validTransaction(tx, txn) if err != nil { return err } applyTransaction(tx, pb, txn) } // After all of the transactions have been applied, 'maintenance' is // applied on the block. This includes adding any outputs that have reached // maturity, applying any contracts with missed storage proofs, and adding // the miner payouts to the list of delayed outputs. applyMaintenance(tx, pb) // DiffsGenerated is only set to true after the block has been fully // validated and integrated. This is required to prevent later blocks from // being accepted on top of an invalid block - if the consensus set ever // forks over an invalid block, 'DiffsGenerated' will be set to 'false', // requiring validation to occur again. When 'DiffsGenerated' is set to // true, validation is skipped, so the flag should only be set to // true on fully validated blocks. pb.DiffsGenerated = true // Add the block to the current path and block map. bid := pb.Block.ID() blockMap := tx.Bucket(BlockMap) updateCurrentPath(tx, pb, modules.DiffApply) // Sanity check preparation - set the consensus hash at this height so that // during reverting a check can be performed to assure consistency when // adding and removing blocks. Must happen after the block is added to the // path. if build.DEBUG { pb.ConsensusChecksum = consensusChecksum(tx) } return blockMap.Put(bid[:], encoding.Marshal(*pb)) } Sia-1.3.0/modules/consensus/diffs_test.go000066400000000000000000000474571313565667000204330ustar00rootroot00000000000000package consensus import ( "testing" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) // TestCommitDelayedSiacoinOutputDiffBadMaturity commits a delayed siacoin // output that has a bad maturity height and triggers a panic. func TestCommitDelayedSiacoinOutputDiffBadMaturity(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Trigger an inconsistency check.
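// The deferred recover below is the standard pattern in this package for // asserting that a debug-mode sanity check fires: the test deliberately // corrupts state and then fails if no panic is observed.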
defer func() { r := recover() if r == nil { t.Error("expecting error after corrupting database") } }() // Commit a delayed siacoin output with maturity height = cs.height()-1, a // height that has already passed. maturityHeight := cst.cs.dbBlockHeight() - 1 id := types.SiacoinOutputID{'1'} dsco := types.SiacoinOutput{Value: types.NewCurrency64(1)} dscod := modules.DelayedSiacoinOutputDiff{ Direction: modules.DiffApply, ID: id, SiacoinOutput: dsco, MaturityHeight: maturityHeight, } _ = cst.cs.db.Update(func(tx *bolt.Tx) error { commitDelayedSiacoinOutputDiff(tx, dscod, modules.DiffApply) return nil }) } // TestCommitNodeDiffs probes the commitNodeDiffs method of the consensus set. /* func TestCommitNodeDiffs(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() pb := cst.cs.dbCurrentProcessedBlock() _ = cst.cs.db.Update(func(tx *bolt.Tx) error { commitDiffSet(tx, pb, modules.DiffRevert) // pull the block node out of the consensus set. return nil }) // For diffs that can be destroyed in the same block they are created, // create diffs that do just that. This has in the past caused issues upon // rewinding. scoid := types.SiacoinOutputID{'1'} scod0 := modules.SiacoinOutputDiff{ Direction: modules.DiffApply, ID: scoid, } scod1 := modules.SiacoinOutputDiff{ Direction: modules.DiffRevert, ID: scoid, } fcid := types.FileContractID{'2'} fcd0 := modules.FileContractDiff{ Direction: modules.DiffApply, ID: fcid, } fcd1 := modules.FileContractDiff{ Direction: modules.DiffRevert, ID: fcid, } sfoid := types.SiafundOutputID{'3'} sfod0 := modules.SiafundOutputDiff{ Direction: modules.DiffApply, ID: sfoid, } sfod1 := modules.SiafundOutputDiff{ Direction: modules.DiffRevert, ID: sfoid, } dscoid := types.SiacoinOutputID{'4'} dscod := modules.DelayedSiacoinOutputDiff{ Direction: modules.DiffApply, ID: dscoid, MaturityHeight: cst.cs.dbBlockHeight() + types.MaturityDelay, } var siafundPool types.Currency err = cst.cs.db.Update(func(tx *bolt.Tx) error { siafundPool = getSiafundPool(tx) return nil }) if err != nil { panic(err) } sfpd := modules.SiafundPoolDiff{ Direction: modules.DiffApply, Previous: siafundPool, Adjusted: siafundPool.Add(types.NewCurrency64(1)), } pb.SiacoinOutputDiffs = append(pb.SiacoinOutputDiffs, scod0) pb.SiacoinOutputDiffs = append(pb.SiacoinOutputDiffs, scod1) pb.FileContractDiffs = append(pb.FileContractDiffs, fcd0) pb.FileContractDiffs = append(pb.FileContractDiffs, fcd1) pb.SiafundOutputDiffs = append(pb.SiafundOutputDiffs, sfod0) pb.SiafundOutputDiffs = append(pb.SiafundOutputDiffs, sfod1) pb.DelayedSiacoinOutputDiffs = append(pb.DelayedSiacoinOutputDiffs, dscod) pb.SiafundPoolDiffs = append(pb.SiafundPoolDiffs, sfpd) _ = cst.cs.db.Update(func(tx *bolt.Tx) error { createUpcomingDelayedOutputMaps(tx, pb, modules.DiffApply) return nil }) _ = cst.cs.db.Update(func(tx *bolt.Tx) error { commitNodeDiffs(tx, pb, modules.DiffApply) return nil }) exists := cst.cs.db.inSiacoinOutputs(scoid) if exists { t.Error("intradependent outputs not treated correctly") } exists = cst.cs.db.inFileContracts(fcid) if exists { t.Error("intradependent outputs not treated correctly") } exists = cst.cs.db.inSiafundOutputs(sfoid) if exists { t.Error("intradependent outputs not treated correctly") } _ = cst.cs.db.Update(func(tx *bolt.Tx) error { commitNodeDiffs(tx, pb, modules.DiffRevert) return nil }) exists = cst.cs.db.inSiacoinOutputs(scoid) if exists { t.Error("intradependent outputs not treated correctly") } exists = cst.cs.db.inFileContracts(fcid) if
exists { t.Error("intradependent outputs not treated correctly") } exists = cst.cs.db.inSiafundOutputs(sfoid) if exists { t.Error("intradependent outputs not treated correctly") } } */ /* // TestSiacoinOutputDiff applies and reverts a siacoin output diff, then // triggers an inconsistency panic. func TestCommitSiacoinOutputDiff(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Commit a siacoin output diff. initialScosLen := cst.cs.db.lenSiacoinOutputs() id := types.SiacoinOutputID{'1'} sco := types.SiacoinOutput{Value: types.NewCurrency64(1)} scod := modules.SiacoinOutputDiff{ Direction: modules.DiffApply, ID: id, SiacoinOutput: sco, } cst.cs.commitSiacoinOutputDiff(scod, modules.DiffApply) if cst.cs.db.lenSiacoinOutputs() != initialScosLen+1 { t.Error("siacoin output diff set did not increase in size") } if cst.cs.db.getSiacoinOutputs(id).Value.Cmp(sco.Value) != 0 { t.Error("wrong siacoin output value after committing a diff") } // Rewind the diff. cst.cs.commitSiacoinOutputDiff(scod, modules.DiffRevert) if cst.cs.db.lenSiacoinOutputs() != initialScosLen { t.Error("siacoin output diff set did not increase in size") } exists := cst.cs.db.inSiacoinOutputs(id) if exists { t.Error("siacoin output was not reverted") } // Restore the diff and then apply the inverse diff. cst.cs.commitSiacoinOutputDiff(scod, modules.DiffApply) scod.Direction = modules.DiffRevert cst.cs.commitSiacoinOutputDiff(scod, modules.DiffApply) if cst.cs.db.lenSiacoinOutputs() != initialScosLen { t.Error("siacoin output diff set did not increase in size") } exists = cst.cs.db.inSiacoinOutputs(id) if exists { t.Error("siacoin output was not reverted") } // Revert the inverse diff. cst.cs.commitSiacoinOutputDiff(scod, modules.DiffRevert) if cst.cs.db.lenSiacoinOutputs() != initialScosLen+1 { t.Error("siacoin output diff set did not increase in size") } if cst.cs.db.getSiacoinOutputs(id).Value.Cmp(sco.Value) != 0 { t.Error("wrong siacoin output value after committing a diff") } // Trigger an inconsistency check. defer func() { r := recover() if r != errBadCommitSiacoinOutputDiff { t.Error("expecting errBadCommitSiacoinOutputDiff, got", r) } }() // Try reverting a revert diff that was already reverted. (add an object // that already exists) cst.cs.commitSiacoinOutputDiff(scod, modules.DiffRevert) } */ /* // TestCommitFileContracttDiff applies and reverts a file contract diff, then // triggers an inconsistency panic. func TestCommitFileContractDiff(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } // Commit a file contract diff. initialFcsLen := cst.cs.db.lenFileContracts() id := types.FileContractID{'1'} fc := types.FileContract{Payout: types.NewCurrency64(1)} fcd := modules.FileContractDiff{ Direction: modules.DiffApply, ID: id, FileContract: fc, } cst.cs.commitFileContractDiff(fcd, modules.DiffApply) if cst.cs.db.lenFileContracts() != initialFcsLen+1 { t.Error("siacoin output diff set did not increase in size") } if cst.cs.db.getFileContracts(id).Payout.Cmp(fc.Payout) != 0 { t.Error("wrong siacoin output value after committing a diff") } // Rewind the diff. 
cst.cs.commitFileContractDiff(fcd, modules.DiffRevert) if cst.cs.db.lenFileContracts() != initialFcsLen { t.Error("siacoin output diff set did not increase in size") } exists := cst.cs.db.inFileContracts(id) if exists { t.Error("siacoin output was not reverted") } // Restore the diff and then apply the inverse diff. cst.cs.commitFileContractDiff(fcd, modules.DiffApply) fcd.Direction = modules.DiffRevert cst.cs.commitFileContractDiff(fcd, modules.DiffApply) if cst.cs.db.lenFileContracts() != initialFcsLen { t.Error("siacoin output diff set did not increase in size") } exists = cst.cs.db.inFileContracts(id) if exists { t.Error("siacoin output was not reverted") } // Revert the inverse diff. cst.cs.commitFileContractDiff(fcd, modules.DiffRevert) if cst.cs.db.lenFileContracts() != initialFcsLen+1 { t.Error("siacoin output diff set did not increase in size") } if cst.cs.db.getFileContracts(id).Payout.Cmp(fc.Payout) != 0 { t.Error("wrong siacoin output value after committing a diff") } // Trigger an inconsistency check. defer func() { r := recover() if r != errBadCommitFileContractDiff { t.Error("expecting errBadCommitFileContractDiff, got", r) } }() // Try reverting an apply diff that was already reverted. (remove an object // that was already removed) fcd.Direction = modules.DiffApply // Object currently exists, but make the direction 'apply'. cst.cs.commitFileContractDiff(fcd, modules.DiffRevert) // revert the application. cst.cs.commitFileContractDiff(fcd, modules.DiffRevert) // revert the application again, in error. } */ // TestSiafundOutputDiff applies and reverts a siafund output diff, then // triggers an inconsistency panic. /* func TestCommitSiafundOutputDiff(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } // Commit a siafund output diff. initialScosLen := cst.cs.db.lenSiafundOutputs() id := types.SiafundOutputID{'1'} sfo := types.SiafundOutput{Value: types.NewCurrency64(1)} sfod := modules.SiafundOutputDiff{ Direction: modules.DiffApply, ID: id, SiafundOutput: sfo, } cst.cs.commitSiafundOutputDiff(sfod, modules.DiffApply) if cst.cs.db.lenSiafundOutputs() != initialScosLen+1 { t.Error("siafund output diff set did not increase in size") } sfo1 := cst.cs.db.getSiafundOutputs(id) if sfo1.Value.Cmp(sfo.Value) != 0 { t.Error("wrong siafund output value after committing a diff") } // Rewind the diff. cst.cs.commitSiafundOutputDiff(sfod, modules.DiffRevert) if cst.cs.db.lenSiafundOutputs() != initialScosLen { t.Error("siafund output diff set did not increase in size") } exists := cst.cs.db.inSiafundOutputs(id) if exists { t.Error("siafund output was not reverted") } // Restore the diff and then apply the inverse diff. cst.cs.commitSiafundOutputDiff(sfod, modules.DiffApply) sfod.Direction = modules.DiffRevert cst.cs.commitSiafundOutputDiff(sfod, modules.DiffApply) if cst.cs.db.lenSiafundOutputs() != initialScosLen { t.Error("siafund output diff set did not increase in size") } exists = cst.cs.db.inSiafundOutputs(id) if exists { t.Error("siafund output was not reverted") } // Revert the inverse diff. cst.cs.commitSiafundOutputDiff(sfod, modules.DiffRevert) if cst.cs.db.lenSiafundOutputs() != initialScosLen+1 { t.Error("siafund output diff set did not increase in size") } sfo2 := cst.cs.db.getSiafundOutputs(id) if sfo2.Value.Cmp(sfo.Value) != 0 { t.Error("wrong siafund output value after committing a diff") } // Trigger an inconsistency check. 
defer func() { r := recover() if r != errBadCommitSiafundOutputDiff { t.Error("expecting errBadCommitSiafundOutputDiff, got", r) } }() // Try applying a revert diff that was already applied. (remove an object // that was already removed) cst.cs.commitSiafundOutputDiff(sfod, modules.DiffApply) // Remove the object. cst.cs.commitSiafundOutputDiff(sfod, modules.DiffApply) // Remove the object again. } */ // TestCommitDelayedSiacoinOutputDiff probes the commitDelayedSiacoinOutputDiff // method of the consensus set. /* func TestCommitDelayedSiacoinOutputDiff(t *testing.T) { t.Skip("test isn't working, but checks the wrong code anyway") if testing.Short() { t.Skip() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } // Commit a delayed siacoin output with maturity height = cs.height()+1 maturityHeight := cst.cs.height() + 1 initialDscosLen := cst.cs.db.lenDelayedSiacoinOutputsHeight(maturityHeight) id := types.SiacoinOutputID{'1'} dsco := types.SiacoinOutput{Value: types.NewCurrency64(1)} dscod := modules.DelayedSiacoinOutputDiff{ Direction: modules.DiffApply, ID: id, SiacoinOutput: dsco, MaturityHeight: maturityHeight, } cst.cs.commitDelayedSiacoinOutputDiff(dscod, modules.DiffApply) if cst.cs.db.lenDelayedSiacoinOutputsHeight(maturityHeight) != initialDscosLen+1 { t.Fatal("delayed output diff set did not increase in size") } if cst.cs.db.getDelayedSiacoinOutputs(maturityHeight, id).Value.Cmp(dsco.Value) != 0 { t.Error("wrong delayed siacoin output value after committing a diff") } // Rewind the diff. cst.cs.commitDelayedSiacoinOutputDiff(dscod, modules.DiffRevert) if cst.cs.db.lenDelayedSiacoinOutputsHeight(maturityHeight) != initialDscosLen { t.Error("siacoin output diff set did not increase in size") } exists := cst.cs.db.inDelayedSiacoinOutputsHeight(maturityHeight, id) if exists { t.Error("siacoin output was not reverted") } // Restore the diff and then apply the inverse diff. cst.cs.commitDelayedSiacoinOutputDiff(dscod, modules.DiffApply) dscod.Direction = modules.DiffRevert cst.cs.commitDelayedSiacoinOutputDiff(dscod, modules.DiffApply) if cst.cs.db.lenDelayedSiacoinOutputsHeight(maturityHeight) != initialDscosLen { t.Error("siacoin output diff set did not increase in size") } exists = cst.cs.db.inDelayedSiacoinOutputsHeight(maturityHeight, id) if exists { t.Error("siacoin output was not reverted") } // Revert the inverse diff. cst.cs.commitDelayedSiacoinOutputDiff(dscod, modules.DiffRevert) if cst.cs.db.lenDelayedSiacoinOutputsHeight(maturityHeight) != initialDscosLen+1 { t.Error("siacoin output diff set did not increase in size") } if cst.cs.db.getDelayedSiacoinOutputs(maturityHeight, id).Value.Cmp(dsco.Value) != 0 { t.Error("wrong siacoin output value after committing a diff") } // Trigger an inconsistency check. defer func() { r := recover() if r != errBadCommitDelayedSiacoinOutputDiff { t.Error("expecting errBadCommitDelayedSiacoinOutputDiff, got", r) } }() // Try applying an apply diff that was already applied. (add an object // that already exists) dscod.Direction = modules.DiffApply // set the direction to apply cst.cs.commitDelayedSiacoinOutputDiff(dscod, modules.DiffApply) // apply an already existing delayed output. } */ /* // TestCommitSiafundPoolDiff probes the commitSiafundPoolDiff method of the // consensus set. func TestCommitSiafundPoolDiff(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } // Apply two siafund pool diffs, and then a diff with 0 change. 
Then revert // them all. initial := cst.cs.siafundPool adjusted1 := initial.Add(types.NewCurrency64(200)) adjusted2 := adjusted1.Add(types.NewCurrency64(500)) adjusted3 := adjusted2.Add(types.NewCurrency64(0)) sfpd1 := modules.SiafundPoolDiff{ Direction: modules.DiffApply, Previous: initial, Adjusted: adjusted1, } sfpd2 := modules.SiafundPoolDiff{ Direction: modules.DiffApply, Previous: adjusted1, Adjusted: adjusted2, } sfpd3 := modules.SiafundPoolDiff{ Direction: modules.DiffApply, Previous: adjusted2, Adjusted: adjusted3, } cst.cs.commitSiafundPoolDiff(sfpd1, modules.DiffApply) if cst.cs.siafundPool.Cmp(adjusted1) != 0 { t.Error("siafund pool was not adjusted correctly") } cst.cs.commitSiafundPoolDiff(sfpd2, modules.DiffApply) if cst.cs.siafundPool.Cmp(adjusted2) != 0 { t.Error("second siafund pool adjustment was flawed") } cst.cs.commitSiafundPoolDiff(sfpd3, modules.DiffApply) if cst.cs.siafundPool.Cmp(adjusted3) != 0 { t.Error("second siafund pool adjustment was flawed") } cst.cs.commitSiafundPoolDiff(sfpd3, modules.DiffRevert) if cst.cs.siafundPool.Cmp(adjusted2) != 0 { t.Error("reverting second adjustment was flawed") } cst.cs.commitSiafundPoolDiff(sfpd2, modules.DiffRevert) if cst.cs.siafundPool.Cmp(adjusted1) != 0 { t.Error("reverting second adjustment was flawed") } cst.cs.commitSiafundPoolDiff(sfpd1, modules.DiffRevert) if cst.cs.siafundPool.Cmp(initial) != 0 { t.Error("reverting first adjustment was flawed") } // Do a chaining set of panics. First apply a negative pool adjustment, // then revert the pool diffs in the wrong order, than apply the pool diffs // in the wrong order. defer func() { r := recover() if r != errApplySiafundPoolDiffMismatch { t.Error("expecting errApplySiafundPoolDiffMismatch, got", r) } }() defer func() { r := recover() if r != errRevertSiafundPoolDiffMismatch { t.Error("expecting errRevertSiafundPoolDiffMismatch, got", r) } cst.cs.commitSiafundPoolDiff(sfpd1, modules.DiffApply) }() defer func() { r := recover() if r != errNonApplySiafundPoolDiff { t.Error(r) } cst.cs.commitSiafundPoolDiff(sfpd1, modules.DiffRevert) }() defer func() { r := recover() if r != errNegativePoolAdjustment { t.Error("expecting errNegativePoolAdjustment, got", r) } sfpd2.Direction = modules.DiffRevert cst.cs.commitSiafundPoolDiff(sfpd2, modules.DiffApply) }() cst.cs.commitSiafundPoolDiff(sfpd1, modules.DiffApply) cst.cs.commitSiafundPoolDiff(sfpd2, modules.DiffApply) negativeAdjustment := adjusted2.Sub(types.NewCurrency64(100)) negativeSfpd := modules.SiafundPoolDiff{ Previous: adjusted3, Adjusted: negativeAdjustment, } cst.cs.commitSiafundPoolDiff(negativeSfpd, modules.DiffApply) } */ /* // TestDeleteObsoleteDelayedOutputMapsSanity probes the sanity checks of the // deleteObsoleteDelayedOutputMaps method of the consensus set. func TestDeleteObsoleteDelayedOutputMapsSanity(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } pb := cst.cs.currentProcessedBlock() err = cst.cs.db.Update(func(tx *bolt.Tx) error { return commitDiffSet(tx, pb, modules.DiffRevert) }) if err != nil { t.Fatal(err) } defer func() { r := recover() if r == nil { t.Error("expecting an error after corrupting the database") } }() defer func() { r := recover() if r == nil { t.Error("expecting an error after corrupting the database") } // Trigger a panic by deleting a map with outputs in it during revert. 
err = cst.cs.db.Update(func(tx *bolt.Tx) error { return createUpcomingDelayedOutputMaps(tx, pb, modules.DiffApply) }) if err != nil { t.Fatal(err) } err = cst.cs.db.Update(func(tx *bolt.Tx) error { return commitNodeDiffs(tx, pb, modules.DiffApply) }) if err != nil { t.Fatal(err) } err = cst.cs.db.Update(func(tx *bolt.Tx) error { return deleteObsoleteDelayedOutputMaps(tx, pb, modules.DiffRevert) }) if err != nil { t.Fatal(err) } }() // Trigger a panic by deleting a map with outputs in it during apply. err = cst.cs.db.Update(func(tx *bolt.Tx) error { return deleteObsoleteDelayedOutputMaps(tx, pb, modules.DiffApply) }) if err != nil { t.Fatal(err) } } */ /* // TestGenerateAndApplyDiffSanity triggers the sanity checks in the // generateAndApplyDiff method of the consensus set. func TestGenerateAndApplyDiffSanity(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } pb := cst.cs.currentProcessedBlock() cst.cs.commitDiffSet(pb, modules.DiffRevert) defer func() { r := recover() if r != errRegenerateDiffs { t.Error("expected errRegenerateDiffs, got", r) } }() defer func() { r := recover() if r != errInvalidSuccessor { t.Error("expected errInvalidSuccessor, got", r) } // Trigger errRegenerteDiffs _ = cst.cs.generateAndApplyDiff(pb) }() // Trigger errInvalidSuccessor parent := cst.cs.db.getBlockMap(pb.Parent) parent.DiffsGenerated = false _ = cst.cs.generateAndApplyDiff(parent) } */ Sia-1.3.0/modules/consensus/fork.go000066400000000000000000000076271313565667000172370ustar00rootroot00000000000000package consensus import ( "errors" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/bolt" ) var ( errExternalRevert = errors.New("cannot revert to block outside of current path") ) // backtrackToCurrentPath traces backwards from 'pb' until it reaches a block // in the ConsensusSet's current path (the "common parent"). It returns the // (inclusive) set of blocks between the common parent and 'pb', starting from // the former. func backtrackToCurrentPath(tx *bolt.Tx, pb *processedBlock) []*processedBlock { path := []*processedBlock{pb} for { // Error is not checked in production code - an error can only indicate // that pb.Height > blockHeight(tx). currentPathID, err := getPath(tx, pb.Height) if currentPathID == pb.Block.ID() { break } // Sanity check - an error should only indicate that pb.Height > // blockHeight(tx). if build.DEBUG && err != nil && pb.Height <= blockHeight(tx) { panic(err) } // Prepend the next block to the list of blocks leading from the // current path to the input block. pb, err = getBlockMap(tx, pb.Block.ParentID) if build.DEBUG && err != nil { panic(err) } path = append([]*processedBlock{pb}, path...) } return path } // revertToBlock will revert blocks from the ConsensusSet's current path until // 'pb' is the current block. Blocks are returned in the order that they were // reverted. 'pb' is not reverted. func (cs *ConsensusSet) revertToBlock(tx *bolt.Tx, pb *processedBlock) (revertedBlocks []*processedBlock) { // Sanity check - make sure that pb is in the current path. currentPathID, err := getPath(tx, pb.Height) if build.DEBUG && (err != nil || currentPathID != pb.Block.ID()) { panic(errExternalRevert) } // Rewind blocks until 'pb' is the current block. 
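// Each iteration of the loop below reverts exactly one block, so the loop // body runs once for every block between the current tip and 'pb'.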
for currentBlockID(tx) != pb.Block.ID() { block := currentProcessedBlock(tx) commitDiffSet(tx, block, modules.DiffRevert) revertedBlocks = append(revertedBlocks, block) // Sanity check - after removing a block, check that the consensus set // has maintained consistency. if build.Release == "testing" { cs.checkConsistency(tx) } else { cs.maybeCheckConsistency(tx) } } return revertedBlocks } // applyUntilBlock will successively apply the blocks between the consensus // set's current path and 'pb'. func (cs *ConsensusSet) applyUntilBlock(tx *bolt.Tx, pb *processedBlock) (appliedBlocks []*processedBlock, err error) { // Backtrack to the common parent of 'pb' and current path and then apply the new blocks. newPath := backtrackToCurrentPath(tx, pb) for _, block := range newPath[1:] { // If the diffs for this block have already been generated, apply diffs // directly instead of generating them. This is much faster. if block.DiffsGenerated { commitDiffSet(tx, block, modules.DiffApply) } else { err := generateAndApplyDiff(tx, block) if err != nil { // Mark the block as invalid. cs.dosBlocks[block.Block.ID()] = struct{}{} return nil, err } } appliedBlocks = append(appliedBlocks, block) // Sanity check - after applying a block, check that the consensus set // has maintained consistency. if build.Release == "testing" { cs.checkConsistency(tx) } else { cs.maybeCheckConsistency(tx) } } return appliedBlocks, nil } // forkBlockchain will move the consensus set onto the 'newBlock' fork. An // error will be returned if any of the blocks applied in the transition are // found to be invalid. forkBlockchain is atomic; the ConsensusSet is only // updated if the function returns nil. func (cs *ConsensusSet) forkBlockchain(tx *bolt.Tx, newBlock *processedBlock) (revertedBlocks, appliedBlocks []*processedBlock, err error) { commonParent := backtrackToCurrentPath(tx, newBlock)[0] revertedBlocks = cs.revertToBlock(tx, commonParent) appliedBlocks, err = cs.applyUntilBlock(tx, newBlock) if err != nil { return nil, nil, err } return revertedBlocks, appliedBlocks, nil } Sia-1.3.0/modules/consensus/fork_helpers_test.go000066400000000000000000000021161313565667000220040ustar00rootroot00000000000000package consensus import ( "github.com/NebulousLabs/bolt" ) // dbBacktrackToCurrentPath is a convenience function to call // backtrackToCurrentPath without a bolt.Tx. func (cs *ConsensusSet) dbBacktrackToCurrentPath(pb *processedBlock) (pbs []*processedBlock) { _ = cs.db.Update(func(tx *bolt.Tx) error { pbs = backtrackToCurrentPath(tx, pb) return nil }) return pbs } // dbRevertToNode is a convenience function to call revertToBlock without a // bolt.Tx. func (cs *ConsensusSet) dbRevertToNode(pb *processedBlock) (pbs []*processedBlock) { _ = cs.db.Update(func(tx *bolt.Tx) error { pbs = cs.revertToBlock(tx, pb) return nil }) return pbs } // dbForkBlockchain is a convenience function to call forkBlockchain without a // bolt.Tx.
func (cs *ConsensusSet) dbForkBlockchain(pb *processedBlock) (revertedBlocks, appliedBlocks []*processedBlock, err error) { updateErr := cs.db.Update(func(tx *bolt.Tx) error { revertedBlocks, appliedBlocks, err = cs.forkBlockchain(tx, pb) return nil }) if updateErr != nil { panic(updateErr) } return revertedBlocks, appliedBlocks, err } Sia-1.3.0/modules/consensus/fork_test.go000066400000000000000000000051511313565667000202640ustar00rootroot00000000000000package consensus import ( "testing" "github.com/NebulousLabs/Sia/modules" ) // TestBacktrackToCurrentPath probes the backtrackToCurrentPath method of the // consensus set. func TestBacktrackToCurrentPath(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() pb := cst.cs.dbCurrentProcessedBlock() // Backtrack from the current node to the blockchain. nodes := cst.cs.dbBacktrackToCurrentPath(pb) if len(nodes) != 1 { t.Fatal("backtracking to the current node gave incorrect result") } if nodes[0].Block.ID() != pb.Block.ID() { t.Error("backtrack returned the wrong node") } // Backtrack from a node that has diverted from the current blockchain. child0, _ := cst.miner.FindBlock() child1, _ := cst.miner.FindBlock() // This block is not on the current path. err = cst.cs.AcceptBlock(child0) if err != nil { t.Fatal(err) } err = cst.cs.AcceptBlock(child1) if err != modules.ErrNonExtendingBlock { t.Fatal(err) } pb, err = cst.cs.dbGetBlockMap(child1.ID()) if err != nil { t.Fatal(err) } nodes = cst.cs.dbBacktrackToCurrentPath(pb) if len(nodes) != 2 { t.Error("backtracking grabbed wrong number of nodes") } parent, err := cst.cs.dbGetBlockMap(pb.Block.ParentID) if err != nil { t.Fatal(err) } if nodes[0].Block.ID() != parent.Block.ID() { t.Error("grabbed the wrong block as the common block") } if nodes[1].Block.ID() != pb.Block.ID() { t.Error("backtracked from the wrong node") } } // TestRevertToNode probes the revertToBlock method of the consensus set. func TestRevertToNode(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() pb := cst.cs.dbCurrentProcessedBlock() // Revert to a grandparent and verify the returned array is correct. parent, err := cst.cs.dbGetBlockMap(pb.Block.ParentID) if err != nil { t.Fatal(err) } grandParent, err := cst.cs.dbGetBlockMap(parent.Block.ParentID) if err != nil { t.Fatal(err) } revertedNodes := cst.cs.dbRevertToNode(grandParent) if len(revertedNodes) != 2 { t.Error("wrong number of nodes reverted") } if revertedNodes[0].Block.ID() != pb.Block.ID() { t.Error("wrong composition of reverted nodes") } if revertedNodes[1].Block.ID() != parent.Block.ID() { t.Error("wrong composition of reverted nodes") } // Trigger a panic by trying to revert to a node outside of the current // path.
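// pb was the tip before the reverts above and is no longer on the current // path, so revertToBlock's sanity check should panic with errExternalRevert, // which the recover below asserts.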
defer func() { r := recover() if r != errExternalRevert { t.Error(r) } }() cst.cs.dbRevertToNode(pb) } Sia-1.3.0/modules/consensus/maintenance.go000066400000000000000000000145311313565667000205500ustar00rootroot00000000000000package consensus import ( "errors" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) var ( errMissingFileContract = errors.New("storage proof submitted for nonexistent file contract") errOutputAlreadyMature = errors.New("delayed siacoin output is already in the matured outputs set") errPayoutsAlreadyPaid = errors.New("payouts are already in the consensus set") errStorageProofTiming = errors.New("missed proof triggered for file contract that is not expiring") ) // applyMinerPayouts adds a block's miner payouts to the consensus set as // delayed siacoin outputs. func applyMinerPayouts(tx *bolt.Tx, pb *processedBlock) { for i := range pb.Block.MinerPayouts { mpid := pb.Block.MinerPayoutID(uint64(i)) dscod := modules.DelayedSiacoinOutputDiff{ Direction: modules.DiffApply, ID: mpid, SiacoinOutput: pb.Block.MinerPayouts[i], MaturityHeight: pb.Height + types.MaturityDelay, } pb.DelayedSiacoinOutputDiffs = append(pb.DelayedSiacoinOutputDiffs, dscod) commitDelayedSiacoinOutputDiff(tx, dscod, modules.DiffApply) } } // applyMaturedSiacoinOutputs goes through the list of siacoin outputs that // have matured and adds them to the consensus set. This also updates the block // node diff set. func applyMaturedSiacoinOutputs(tx *bolt.Tx, pb *processedBlock) { // Skip this step if the blockchain is not old enough to have maturing // outputs. if pb.Height < types.MaturityDelay { return } // Iterate through the list of delayed siacoin outputs. Sometimes boltdb // has trouble if you delete elements in a bucket while iterating through // the bucket (and sometimes not - nondeterministic), so all of the // elements are collected into an array and then deleted after the bucket // scan is complete. bucketID := append(prefixDSCO, encoding.Marshal(pb.Height)...) var scods []modules.SiacoinOutputDiff var dscods []modules.DelayedSiacoinOutputDiff dbErr := tx.Bucket(bucketID).ForEach(func(idBytes, scoBytes []byte) error { // Decode the key-value pair into an id and a siacoin output. var id types.SiacoinOutputID var sco types.SiacoinOutput copy(id[:], idBytes) encErr := encoding.Unmarshal(scoBytes, &sco) if build.DEBUG && encErr != nil { panic(encErr) } // Sanity check - the output should not already be in siacoinOutputs. if build.DEBUG && isSiacoinOutput(tx, id) { panic(errOutputAlreadyMature) } // Add the output to the ConsensusSet and record the diff in the // blockNode. scod := modules.SiacoinOutputDiff{ Direction: modules.DiffApply, ID: id, SiacoinOutput: sco, } scods = append(scods, scod) // Create the dscod and add it to the list of dscods that should be // deleted.
dscod := modules.DelayedSiacoinOutputDiff{ Direction: modules.DiffRevert, ID: id, SiacoinOutput: sco, MaturityHeight: pb.Height, } dscods = append(dscods, dscod) return nil }) if build.DEBUG && dbErr != nil { panic(dbErr) } for _, scod := range scods { pb.SiacoinOutputDiffs = append(pb.SiacoinOutputDiffs, scod) commitSiacoinOutputDiff(tx, scod, modules.DiffApply) } for _, dscod := range dscods { pb.DelayedSiacoinOutputDiffs = append(pb.DelayedSiacoinOutputDiffs, dscod) commitDelayedSiacoinOutputDiff(tx, dscod, modules.DiffApply) } deleteDSCOBucket(tx, pb.Height) } // applyMissedStorageProof adds the outputs and diffs that result from a file // contract expiring. func applyMissedStorageProof(tx *bolt.Tx, pb *processedBlock, fcid types.FileContractID) (dscods []modules.DelayedSiacoinOutputDiff, fcd modules.FileContractDiff) { // Sanity checks. fc, err := getFileContract(tx, fcid) if build.DEBUG && err != nil { panic(err) } if build.DEBUG { // Check that the file contract in question expires at pb.Height. if fc.WindowEnd != pb.Height { panic(errStorageProofTiming) } } // Add all of the outputs in the missed proof outputs to the consensus set. for i, mpo := range fc.MissedProofOutputs { // Sanity check - output should not already exist. spoid := fcid.StorageProofOutputID(types.ProofMissed, uint64(i)) if build.DEBUG && isSiacoinOutput(tx, spoid) { panic(errPayoutsAlreadyPaid) } // Don't add the output if the value is zero. dscod := modules.DelayedSiacoinOutputDiff{ Direction: modules.DiffApply, ID: spoid, SiacoinOutput: mpo, MaturityHeight: pb.Height + types.MaturityDelay, } dscods = append(dscods, dscod) } // Remove the file contract from the consensus set and record the diff in // the blockNode. fcd = modules.FileContractDiff{ Direction: modules.DiffRevert, ID: fcid, FileContract: fc, } return dscods, fcd } // applyFileContractMaintenance looks for all of the file contracts that have // expired without an appropriate storage proof, and calls 'applyMissedProof' // for the file contract. func applyFileContractMaintenance(tx *bolt.Tx, pb *processedBlock) { // Get the bucket pointing to all of the expiring file contracts. fceBucketID := append(prefixFCEX, encoding.Marshal(pb.Height)...) fceBucket := tx.Bucket(fceBucketID) // Finish if there are no expiring file contracts. if fceBucket == nil { return } var dscods []modules.DelayedSiacoinOutputDiff var fcds []modules.FileContractDiff err := fceBucket.ForEach(func(keyBytes, valBytes []byte) error { var id types.FileContractID copy(id[:], keyBytes) amspDSCODS, fcd := applyMissedStorageProof(tx, pb, id) fcds = append(fcds, fcd) dscods = append(dscods, amspDSCODS...) return nil }) if build.DEBUG && err != nil { panic(err) } for _, dscod := range dscods { pb.DelayedSiacoinOutputDiffs = append(pb.DelayedSiacoinOutputDiffs, dscod) commitDelayedSiacoinOutputDiff(tx, dscod, modules.DiffApply) } for _, fcd := range fcds { pb.FileContractDiffs = append(pb.FileContractDiffs, fcd) commitFileContractDiff(tx, fcd, modules.DiffApply) } err = tx.DeleteBucket(fceBucketID) if build.DEBUG && err != nil { panic(err) } } // applyMaintenance applies block-level alterations to the consensus set. // Maintenance is applied after all of the transactions for the block have been // applied. 
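// // Editor's sketch (not in the original source; applyTransaction is assumed // to be the per-transaction helper defined elsewhere in this package): a // hypothetical block-apply path runs the per-transaction step for every // transaction and only then runs maintenance: // // for _, txn := range pb.Block.Transactions { applyTransaction(tx, pb, txn) } // applyMaintenance(tx, pb)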
func applyMaintenance(tx *bolt.Tx, pb *processedBlock) { applyMinerPayouts(tx, pb) applyMaturedSiacoinOutputs(tx, pb) applyFileContractMaintenance(tx, pb) } Sia-1.3.0/modules/consensus/maintenance_test.go000066400000000000000000000213211313565667000216020ustar00rootroot00000000000000package consensus /* import ( "testing" "github.com/NebulousLabs/bolt" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // TestApplyMinerPayouts probes the applyMinerPayouts method of the consensus // set. func TestApplyMinerPayouts(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a block node with a single miner payout. pb := new(processedBlock) pb.Height = cst.cs.dbBlockHeight() pb.Block.Timestamp = 2 // MinerPayout id is determined by block id + index; add uniqueness to the block id. pb.Block.MinerPayouts = append(pb.Block.MinerPayouts, types.SiacoinOutput{Value: types.NewCurrency64(12)}) mpid0 := pb.Block.MinerPayoutID(0) // Apply the single miner payout. _ = cst.cs.db.Update(func(tx *bolt.Tx) error { applyMinerPayouts(tx, pb) return nil }) exists := cst.cs.db.inDelayedSiacoinOutputsHeight(cst.cs.dbBlockHeight()+types.MaturityDelay, mpid0) if !exists { t.Error("miner payout was not created in the delayed outputs set") } dsco, err := cst.cs.dbGetDSCO(cst.cs.dbBlockHeight()+types.MaturityDelay, mpid0) if err != nil { t.Fatal(err) } if dsco.Value.Cmp64(12) != 0 { t.Error("miner payout created with wrong currency value") } exists = cst.cs.db.inSiacoinOutputs(mpid0) if exists { t.Error("miner payout was added to the siacoin output set") } if cst.cs.db.lenDelayedSiacoinOutputsHeight(cst.cs.dbBlockHeight()+types.MaturityDelay) != 2 { // 1 for consensus set creation, 1 for the output that just got added. t.Error("wrong number of delayed siacoin outputs in consensus set") } if len(pb.DelayedSiacoinOutputDiffs) != 1 { t.Fatal("block node did not get the delayed siacoin output diff") } if pb.DelayedSiacoinOutputDiffs[0].Direction != modules.DiffApply { t.Error("delayed siacoin output diff has the wrong direction") } if pb.DelayedSiacoinOutputDiffs[0].ID != mpid0 { t.Error("delayed siacoin output diff has wrong id") } // Apply a processed block with two miner payouts. pb2 := new(processedBlock) pb2.Height = cst.cs.dbBlockHeight() pb2.Block.Timestamp = 5 // MinerPayout id is determined by block id + index; add uniqueness to the block id. pb2.Block.MinerPayouts = []types.SiacoinOutput{ {Value: types.NewCurrency64(5)}, {Value: types.NewCurrency64(10)}, } mpid1 := pb2.Block.MinerPayoutID(0) mpid2 := pb2.Block.MinerPayoutID(1) _ = cst.cs.db.Update(func(tx *bolt.Tx) error { applyMinerPayouts(tx, pb2) return nil }) exists = cst.cs.db.inDelayedSiacoinOutputsHeight(cst.cs.dbBlockHeight()+types.MaturityDelay, mpid1) if !exists { t.Error("delayed siacoin output was not created") } exists = cst.cs.db.inDelayedSiacoinOutputsHeight(cst.cs.dbBlockHeight()+types.MaturityDelay, mpid2) if !exists { t.Error("delayed siacoin output was not created") } if len(pb2.DelayedSiacoinOutputDiffs) != 2 { t.Error("block node should have 2 delayed outputs") } // Trigger a panic where the miner payouts have already been applied. 
defer func() { r := recover() if r == nil { t.Error("expecting error after corrupting database") } }() defer func() { r := recover() if r == nil { t.Error("expecting error after corrupting database") } cst.cs.db.rmDelayedSiacoinOutputsHeight(pb.Height+types.MaturityDelay, mpid0) cst.cs.db.addSiacoinOutputs(mpid0, types.SiacoinOutput{}) _ = cst.cs.db.Update(func(tx *bolt.Tx) error { applyMinerPayouts(tx, pb) return nil }) }() _ = cst.cs.db.Update(func(tx *bolt.Tx) error { applyMinerPayouts(tx, pb) return nil }) } // TestApplyMaturedSiacoinOutputs probes the applyMaturedSiacoinOutputs method // of the consensus set. func TestApplyMaturedSiacoinOutputs(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() pb := cst.cs.dbCurrentProcessedBlock() // Trigger the sanity check concerning already-matured outputs. defer func() { r := recover() if r != errOutputAlreadyMature { t.Error(r) } }() cst.cs.db.addSiacoinOutputs(types.SiacoinOutputID{}, types.SiacoinOutput{}) _ = cst.cs.db.Update(func(tx *bolt.Tx) error { createDSCOBucket(tx, pb.Height) return nil }) cst.cs.db.addDelayedSiacoinOutputsHeight(pb.Height, types.SiacoinOutputID{}, types.SiacoinOutput{}) _ = cst.cs.db.Update(func(tx *bolt.Tx) error { applyMaturedSiacoinOutputs(tx, pb) return nil }) } // TestApplyMissedStorageProof probes the applyMissedStorageProof method of the // consensus set. func TestApplyMissedStorageProof(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a block node. pb := new(processedBlock) pb.Height = cst.cs.height() // Create a file contract that's expiring and has 1 missed proof output. expiringFC := types.FileContract{ Payout: types.NewCurrency64(300e3), WindowEnd: pb.Height, MissedProofOutputs: []types.SiacoinOutput{{Value: types.NewCurrency64(290e3)}}, } // Assign the contract a 0-id. cst.cs.db.addFileContracts(types.FileContractID{}, expiringFC) cst.cs.db.addFCExpirations(pb.Height) cst.cs.db.addFCExpirationsHeight(pb.Height, types.FileContractID{}) cst.cs.applyMissedStorageProof(pb, types.FileContractID{}) exists := cst.cs.db.inFileContracts(types.FileContractID{}) if exists { t.Error("file contract was not consumed in missed storage proof") } spoid := types.FileContractID{}.StorageProofOutputID(types.ProofMissed, 0) exists = cst.cs.db.inDelayedSiacoinOutputsHeight(pb.Height+types.MaturityDelay, spoid) if !exists { t.Error("missed proof output was never created") } exists = cst.cs.db.inSiacoinOutputs(spoid) if exists { t.Error("storage proof output made it into the siacoin output set") } exists = cst.cs.db.inFileContracts(types.FileContractID{}) if exists { t.Error("file contract remains after expiration") } // Trigger the debug panics for file contracts that do not exist. defer func() { r := recover() if r != errNilItem { t.Error(r) } }() defer func() { r := recover() if r != errNilItem { t.Error(r) } // Trigger errMissingFileContract cst.cs.applyMissedStorageProof(pb, types.FileContractID(spoid)) }() defer func() { r := recover() if r != errNilItem { t.Error(r) } // Trigger errStorageProofTiming expiringFC.WindowEnd = 0 cst.cs.applyMissedStorageProof(pb, types.FileContractID{}) }() defer func() { r := recover() if r != errNilItem { t.Error(r) } // Trigger errPayoutsAlreadyPaid from siacoin outputs.
cst.cs.db.rmDelayedSiacoinOutputsHeight(pb.Height+types.MaturityDelay, spoid) cst.cs.db.addSiacoinOutputs(spoid, types.SiacoinOutput{}) cst.cs.applyMissedStorageProof(pb, types.FileContractID{}) }() // Trigger errPayoutsAlreadyPaid from delayed outputs. cst.cs.db.rmFileContracts(types.FileContractID{}) cst.cs.db.addFileContracts(types.FileContractID{}, expiringFC) cst.cs.db.addDelayedSiacoinOutputsHeight(pb.Height+types.MaturityDelay, spoid, types.SiacoinOutput{}) cst.cs.applyMissedStorageProof(pb, types.FileContractID{}) } */ // TestApplyFileContractMaintenance probes the applyFileContractMaintenance // method of the consensus set. /* func TestApplyFileContractMaintenance(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a block node. pb := new(processedBlock) pb.Height = cst.cs.height() // Create a file contract that's expiring and has 1 missed proof output. expiringFC := types.FileContract{ Payout: types.NewCurrency64(300e3), WindowEnd: pb.Height, MissedProofOutputs: []types.SiacoinOutput{{Value: types.NewCurrency64(290e3)}}, } // Assign the contract a 0-id. cst.cs.db.addFileContracts(types.FileContractID{}, expiringFC) cst.cs.db.addFCExpirations(pb.Height) cst.cs.db.addFCExpirationsHeight(pb.Height, types.FileContractID{}) err = cst.cs.db.Update(func(tx *bolt.Tx) error { applyFileContractMaintenance(tx, pb) return nil }) if err != nil { t.Fatal(err) } exists := cst.cs.db.inFileContracts(types.FileContractID{}) if exists { t.Error("file contract was not consumed in missed storage proof") } spoid := types.FileContractID{}.StorageProofOutputID(types.ProofMissed, 0) exists = cst.cs.db.inDelayedSiacoinOutputsHeight(pb.Height+types.MaturityDelay, spoid) if !exists { t.Error("missed proof output was never created") } exists = cst.cs.db.inSiacoinOutputs(spoid) if exists { t.Error("storage proof output made it into the siacoin output set") } exists = cst.cs.db.inFileContracts(types.FileContractID{}) if exists { t.Error("file contract remains after expiration") } } */ Sia-1.3.0/modules/consensus/persist.go000066400000000000000000000053221313565667000177550ustar00rootroot00000000000000package consensus import ( "errors" "fmt" "os" "path/filepath" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/bolt" ) const ( // DatabaseFilename contains the filename of the database that will be used // when managing consensus. DatabaseFilename = modules.ConsensusDir + ".db" logFile = modules.ConsensusDir + ".log" ) // loadDB pulls all the blocks that have been saved to disk into memory, using // them to fill out the ConsensusSet. func (cs *ConsensusSet) loadDB() error { // Open the database - a new bolt database will be created if none exists. err := cs.openDB(filepath.Join(cs.persistDir, DatabaseFilename)) if err != nil { return err } // Walk through initialization for Sia. return cs.db.Update(func(tx *bolt.Tx) error { // Check if the database has been initialized. err = cs.initDB(tx) if err != nil { return err } // Check the initialization of the oak difficulty adjustment fields, and // create them if they do not exist. This is separate from 'initDB' // because older consensus databases will have completed the 'initDB' // process but will not have the oak difficulty adjustment fields, so a // scan will be needed to add and update them. 
err = cs.initOak(tx) if err != nil { return err } // Check that the genesis block is correct - typically only incorrect // in the event of developer binaries vs. release binaries. genesisID, err := getPath(tx, 0) if build.DEBUG && err != nil { panic(err) } if genesisID != cs.blockRoot.Block.ID() { return errors.New("Blockchain has wrong genesis block, exiting.") } return nil }) } // initPersist initializes the persistence structures of the consensus set, in // particular loading the database and preparing to manage subscribers. func (cs *ConsensusSet) initPersist() error { // Create the consensus directory. err := os.MkdirAll(cs.persistDir, 0700) if err != nil { return err } // Initialize the logger. cs.log, err = persist.NewFileLogger(filepath.Join(cs.persistDir, logFile)) if err != nil { return err } // Set up closing the logger. cs.tg.AfterStop(func() { err := cs.log.Close() if err != nil { // State of the logger is unknown, a println will suffice. fmt.Println("Error shutting down consensus set logger:", err) } }) // Try to load an existing database from disk - a new one will be created // if one does not exist. err = cs.loadDB() if err != nil { return err } // Set up the closing of the database. cs.tg.AfterStop(func() { err := cs.db.Close() if err != nil { cs.log.Println("ERROR: Unable to close consensus set database at shutdown:", err) } }) return nil } Sia-1.3.0/modules/consensus/persist_test.go000066400000000000000000000021131313565667000210070ustar00rootroot00000000000000package consensus import ( "path/filepath" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/gateway" ) // TestSaveLoad populates a blockchain, saves it, loads it, and checks // the consensus set hash before and after. func TestSaveLoad(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() cst.testBlockSuite() oldHash := cst.cs.dbConsensusChecksum() cst.cs.Close() // Reassigning this will lose subscribers and such, but we // just want to call load and get a hash. g, err := gateway.New("localhost:0", false, build.TempDir(modules.ConsensusDir, t.Name(), modules.GatewayDir)) if err != nil { t.Fatal(err) } d := filepath.Join(build.SiaTestingDir, modules.ConsensusDir, t.Name(), modules.ConsensusDir) cst.cs, err = New(g, false, d) if err != nil { t.Fatal(err) } newHash := cst.cs.dbConsensusChecksum() if oldHash != newHash { t.Fatal("consensus set hash changed after load") } } Sia-1.3.0/modules/consensus/processedblock.go000066400000000000000000000140271313565667000212700ustar00rootroot00000000000000package consensus import ( "math/big" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) // SurpassThreshold is a percentage that dictates how much heavier a competing // chain has to be before the node will switch to mining on that chain. This is // not a consensus rule. This percentage is only applied to the most recent // block, not the entire chain; see blockNode.heavierThan. // // If no threshold were in place, it would be possible to manipulate a block's // timestamp to produce a sufficiently heavier block.
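// // Illustrative arithmetic (editor's note): with SurpassThreshold = 20/100, a // competing fork only displaces the current chain when the depth of its tip // exceeds the current depth by at least 20% of the difficulty implied by the // current tip's child target; see heavierThan below.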
var SurpassThreshold = big.NewRat(20, 100) // processedBlock is a copy/rename of blockNode, with the pointers to // other blockNodes replaced with block IDs, and all the fields // exported, so that a block node can be marshalled type processedBlock struct { Block types.Block Height types.BlockHeight Depth types.Target ChildTarget types.Target DiffsGenerated bool SiacoinOutputDiffs []modules.SiacoinOutputDiff FileContractDiffs []modules.FileContractDiff SiafundOutputDiffs []modules.SiafundOutputDiff DelayedSiacoinOutputDiffs []modules.DelayedSiacoinOutputDiff SiafundPoolDiffs []modules.SiafundPoolDiff ConsensusChecksum crypto.Hash } // heavierThan returns true if the blockNode is sufficiently heavier than // 'cmp'. 'cmp' is expected to be the current block node. "Sufficient" means // that the weight of 'pb' exceeds the weight of 'cmp' by: // (the target of 'cmp' * 'Surpass Threshold') func (pb *processedBlock) heavierThan(cmp *processedBlock) bool { requirement := cmp.Depth.AddDifficulties(cmp.ChildTarget.MulDifficulty(SurpassThreshold)) return requirement.Cmp(pb.Depth) > 0 // Inverted, because the smaller target is actually heavier. } // childDepth returns the depth of a blockNode's child nodes. The depth is the // "sum" of the current depth and current difficulty. See target.Add for more // detailed information. func (pb *processedBlock) childDepth() types.Target { return pb.Depth.AddDifficulties(pb.ChildTarget) } // targetAdjustmentBase returns the magnitude that the target should be // adjusted by before a clamp is applied. func (cs *ConsensusSet) targetAdjustmentBase(blockMap *bolt.Bucket, pb *processedBlock) *big.Rat { // Grab the block that was generated 'TargetWindow' blocks prior to the // parent. If there are not 'TargetWindow' blocks yet, stop at the genesis // block. var windowSize types.BlockHeight parent := pb.Block.ParentID current := pb.Block.ID() for windowSize = 0; windowSize < types.TargetWindow && parent != (types.BlockID{}); windowSize++ { current = parent copy(parent[:], blockMap.Get(parent[:])[:32]) } timestamp := types.Timestamp(encoding.DecUint64(blockMap.Get(current[:])[40:48])) // The target of a child is determined by the amount of time that has // passed between the generation of its immediate parent and its // TargetWindow'th parent. The expected number of seconds to have passed is // TargetWindow*BlockFrequency. The target is adjusted in proportion to how // much time has passed vs. the expected amount of time to have passed. // // The target is converted to a big.Rat to provide infinite precision // during the calculation. The big.Rat is just the int representation of a // target. timePassed := pb.Block.Timestamp - timestamp expectedTimePassed := types.BlockFrequency * windowSize return big.NewRat(int64(timePassed), int64(expectedTimePassed)) } // clampTargetAdjustment returns a clamped version of the base adjustment // value. The clamp keeps the maximum adjustment to ~7x every 2000 blocks. This // ensures that raising and lowering the difficulty requires a minimum amount // of total work, which prevents certain classes of difficulty adjusting // attacks. func clampTargetAdjustment(base *big.Rat) *big.Rat { if base.Cmp(types.MaxAdjustmentUp) > 0 { return types.MaxAdjustmentUp } else if base.Cmp(types.MaxAdjustmentDown) < 0 { return types.MaxAdjustmentDown } return base } // setChildTarget computes the target of a blockNode's child. All children of a node // have the same target.
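// // Worked example (editor's note, hypothetical numbers): if the adjustment // window took twice as long as the expected TargetWindow*BlockFrequency // seconds, targetAdjustmentBase returns 2, clampTargetAdjustment caps it at // MaxAdjustmentUp (10001/10000, per the tests below), and the parent's child // target is multiplied by that clamped ratio - so the target moves only // slightly per adjustment, no matter how skewed the window was.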
func (cs *ConsensusSet) setChildTarget(blockMap *bolt.Bucket, pb *processedBlock) { // Fetch the parent block. var parent processedBlock parentBytes := blockMap.Get(pb.Block.ParentID[:]) err := encoding.Unmarshal(parentBytes, &parent) if build.DEBUG && err != nil { panic(err) } if pb.Height%(types.TargetWindow/2) != 0 { pb.ChildTarget = parent.ChildTarget return } adjustment := clampTargetAdjustment(cs.targetAdjustmentBase(blockMap, pb)) adjustedRatTarget := new(big.Rat).Mul(parent.ChildTarget.Rat(), adjustment) pb.ChildTarget = types.RatToTarget(adjustedRatTarget) } // newChild creates a blockNode from a block and adds it to the parent's set of // children. The new node is also returned. It necessarily modifies the database func (cs *ConsensusSet) newChild(tx *bolt.Tx, pb *processedBlock, b types.Block) *processedBlock { // Create the child node. childID := b.ID() child := &processedBlock{ Block: b, Height: pb.Height + 1, Depth: pb.childDepth(), } // Push the total values for this block into the oak difficulty adjustment // bucket. The previous totals are required to compute the new totals. prevTotalTime, prevTotalTarget := cs.getBlockTotals(tx, b.ParentID) _, _, err := cs.storeBlockTotals(tx, child.Height, childID, prevTotalTime, pb.Block.Timestamp, b.Timestamp, prevTotalTarget, pb.ChildTarget) if build.DEBUG && err != nil { panic(err) } // Use the difficulty adjustment algorithm to set the target of the child // block and put the new processed block into the database. blockMap := tx.Bucket(BlockMap) if pb.Height < types.OakHardforkBlock { cs.setChildTarget(blockMap, child) } else { child.ChildTarget = cs.childTargetOak(prevTotalTime, prevTotalTarget, pb.ChildTarget, pb.Height) } err = blockMap.Put(childID[:], encoding.Marshal(*child)) if build.DEBUG && err != nil { panic(err) } return child } Sia-1.3.0/modules/consensus/processedblock_test.go000066400000000000000000000256331313565667000223340ustar00rootroot00000000000000package consensus import ( "path/filepath" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/gateway" "github.com/NebulousLabs/Sia/modules/miner" "github.com/NebulousLabs/Sia/modules/transactionpool" "github.com/NebulousLabs/Sia/modules/wallet" "github.com/NebulousLabs/Sia/types" ) // TestIntegrationMinimumValidChildTimestamp probes the // MinimumValidChildTimestamp method of the consensus type. func TestIntegrationMinimumValidChildTimestamp(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // Create a custom consensus set to control the blocks. testdir := build.TempDir(modules.ConsensusDir, t.Name()) g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { t.Fatal(err) } cs, err := New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { t.Fatal(err) } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { t.Fatal(err) } w, err := wallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { t.Fatal(err) } key := crypto.GenerateTwofishKey() _, err = w.Encrypt(key) if err != nil { t.Fatal(err) } err = w.Unlock(key) if err != nil { t.Fatal(err) } m, err := miner.New(cs, tp, w, filepath.Join(testdir, modules.MinerDir)) if err != nil { t.Fatal(err) } defer g.Close() // The earliest child timestamp of the genesis block should be the // timestamp of the genesis block. 
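// Editor's note: MinimumValidChildTimestamp is a median rule - it takes the // median of a trailing window of block timestamps (11 in the standard // constants, effectively padded with the genesis timestamp while the chain // is short), which is what the offset checks below compute by hand.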
genesisTime := cs.blockRoot.Block.Timestamp earliest, ok := cs.MinimumValidChildTimestamp(cs.blockRoot.Block.ID()) if !ok || genesisTime != earliest { t.Error("genesis block earliest timestamp producing unexpected results") } timestampOffsets := []types.Timestamp{1, 3, 2, 5, 4, 6, 7, 8, 9, 10} blockIDs := []types.BlockID{cs.blockRoot.Block.ID()} for _, offset := range timestampOffsets { bfw, target, err := m.BlockForWork() if err != nil { t.Fatal(err) } bfw.Timestamp = genesisTime + offset solvedBlock, _ := m.SolveBlock(bfw, target) err = cs.AcceptBlock(solvedBlock) if err != nil { t.Fatal(err) } blockIDs = append(blockIDs, solvedBlock.ID()) } // Median should be genesisTime for 6th block. earliest, ok = cs.MinimumValidChildTimestamp(blockIDs[5]) if !ok || earliest != genesisTime { t.Error("incorrect child timestamp") } // Median should be genesisTime+1 for 7th block. earliest, ok = cs.MinimumValidChildTimestamp(blockIDs[6]) if !ok || earliest != genesisTime+1 { t.Error("incorrect child timestamp") } // Median should be genesisTime + 5 for pb11. earliest, ok = cs.MinimumValidChildTimestamp(blockIDs[10]) if !ok || earliest != genesisTime+5 { t.Error("incorrect child timestamp") } } // TestUnitHeavierThan probes the heavierThan method of the processedBlock type. func TestUnitHeavierThan(t *testing.T) { // Create a light node. pbLight := new(processedBlock) pbLight.Depth[0] = 64 pbLight.ChildTarget[0] = 200 // Create a node that's heavier, but not enough to beat the surpass // threshold. pbMiddle := new(processedBlock) pbMiddle.Depth[0] = 60 pbMiddle.ChildTarget[0] = 200 // Create a node that's heavy enough to break the surpass threshold. pbHeavy := new(processedBlock) pbHeavy.Depth[0] = 16 pbHeavy.ChildTarget[0] = 200 // pbLight should not be heavier than pbHeavy. if pbLight.heavierThan(pbHeavy) { t.Error("light heavier than heavy") } // pbLight should not be heavier than middle. if pbLight.heavierThan(pbMiddle) { t.Error("light heavier than middle") } // pbLight should not be heavier than itself. if pbLight.heavierThan(pbLight) { t.Error("light heavier than itself") } // pbMiddle should not be heavier than pbLight. if pbMiddle.heavierThan(pbLight) { t.Error("middle heavier than light - surpass threshold should not have been broken") } // pbHeavy should be heavier than pbLight. if !pbHeavy.heavierThan(pbLight) { t.Error("heavy is not heavier than light") } // pbHeavy should be heavier than pbMiddle. if !pbHeavy.heavierThan(pbMiddle) { t.Error("heavy is not heavier than middle") } } // TestChildDepth probes the childDepth method of the blockNode type. func TestChildDepth(t *testing.T) { // Try adding two equal weight nodes; the result should be half. pb := new(processedBlock) pb.Depth[0] = 64 pb.ChildTarget[0] = 64 childDepth := pb.childDepth() if childDepth[0] != 32 { t.Error("unexpected child depth") } // Try adding nodes of different weights. pb.Depth[0] = 24 pb.ChildTarget[0] = 48 childDepth = pb.childDepth() if childDepth[0] != 16 { t.Error("unexpected child depth") } } /* // TestTargetAdjustmentBase probes the targetAdjustmentBase method of the block // node type.
func TestTargetAdjustmentBase(t *testing.T) { cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a genesis node at timestamp 10,000 genesisNode := &processedBlock{ Block: types.Block{Timestamp: 10000}, } cst.cs.db.addBlockMap(genesisNode) exactTimeNode := &processedBlock{ Block: types.Block{ Nonce: types.BlockNonce{1, 0, 0, 0, 0, 0, 0, 0}, Timestamp: types.Timestamp(10000 + types.BlockFrequency), }, } exactTimeNode.Parent = genesisNode.Block.ID() cst.cs.db.addBlockMap(exactTimeNode) // Base adjustment for the exactTimeNode should be 1. adjustment, exact := cst.cs.targetAdjustmentBase(exactTimeNode).Float64() if !exact { t.Fatal("did not get an exact target adjustment") } if adjustment != 1 { t.Error("block did not adjust itself to the same target") } // Create a double-speed node and get the base adjustment. doubleSpeedNode := &processedBlock{ Block: types.Block{Timestamp: types.Timestamp(10000 + types.BlockFrequency)}, } doubleSpeedNode.Parent = exactTimeNode.Block.ID() cst.cs.db.addBlockMap(doubleSpeedNode) adjustment, exact = cst.cs.targetAdjustmentBase(doubleSpeedNode).Float64() if !exact { t.Fatal("did not get an exact adjustment") } if adjustment != 0.5 { t.Error("double speed node did not get a base to halve the target") } // Create a half-speed node and get the base adjustment. halfSpeedNode := &processedBlock{ Block: types.Block{Timestamp: types.Timestamp(10000 + types.BlockFrequency*6)}, } halfSpeedNode.Parent = doubleSpeedNode.Block.ID() cst.cs.db.addBlockMap(halfSpeedNode) adjustment, exact = cst.cs.targetAdjustmentBase(halfSpeedNode).Float64() if !exact { t.Fatal("did not get an exact adjustment") } if adjustment != 2 { t.Error("half speed node did not get a base to double the target") } if testing.Short() { t.SkipNow() } // Create a chain of nodes so that the genesis node is no longer the point // of comparison. comparisonNode := &processedBlock{ Block: types.Block{Timestamp: 125000}, } comparisonNode.Parent = halfSpeedNode.Block.ID() cst.cs.db.addBlockMap(comparisonNode) startingNode := comparisonNode for i := types.BlockHeight(0); i < types.TargetWindow; i++ { newNode := new(processedBlock) newNode.Parent = startingNode.Block.ID() newNode.Block.Nonce = types.BlockNonce{byte(i), byte(i / 256), 0, 0, 0, 0, 0, 0} cst.cs.db.addBlockMap(newNode) startingNode = newNode } startingNode.Block.Timestamp = types.Timestamp(125000 + types.BlockFrequency*types.TargetWindow) adjustment, exact = cst.cs.targetAdjustmentBase(startingNode).Float64() if !exact { t.Error("failed to get exact result") } if adjustment != 1 { t.Error("got wrong long-range adjustment") } startingNode.Block.Timestamp = types.Timestamp(125000 + 2*types.BlockFrequency*types.TargetWindow) adjustment, exact = cst.cs.targetAdjustmentBase(startingNode).Float64() if !exact { t.Error("failed to get exact result") } if adjustment != 2 { t.Error("got wrong long-range adjustment") } } // TestClampTargetAdjustment probes the clampTargetAdjustment function. func TestClampTargetAdjustment(t *testing.T) { // Check that the MaxAdjustmentUp and MaxAdjustmentDown constants match the // test's expectations. if types.MaxAdjustmentUp.Cmp(big.NewRat(10001, 10000)) != 0 { t.Fatal("MaxAdjustmentUp changed - test now invalid") } if types.MaxAdjustmentDown.Cmp(big.NewRat(9999, 10000)) != 0 { t.Fatal("MaxAdjustmentDown changed - test now invalid") } // Check high and low clamping.
initial := big.NewRat(2, 1) clamped := clampTargetAdjustment(initial) if clamped.Cmp(big.NewRat(10001, 10000)) != 0 { t.Error("clamp not applied to large target adjustment") } initial = big.NewRat(1, 2) clamped = clampTargetAdjustment(initial) if clamped.Cmp(big.NewRat(9999, 10000)) != 0 { t.Error("clamp not applied to small target adjustment") } // Check middle clamping (or lack thereof). initial = big.NewRat(10002, 10001) clamped = clampTargetAdjustment(initial) if clamped.Cmp(initial) != 0 { t.Error("clamp applied to safe target adjustment") } initial = big.NewRat(99999, 100000) clamped = clampTargetAdjustment(initial) if clamped.Cmp(initial) != 0 { t.Error("clamp applied to safe target adjustment") } } // TestSetChildTarget probes the setChildTarget method of the block node type. func TestSetChildTarget(t *testing.T) { cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a genesis node and a child that took 2x as long as expected. genesisNode := &processedBlock{ Block: types.Block{Timestamp: 10000}, } genesisNode.ChildTarget[0] = 64 cst.cs.db.addBlockMap(genesisNode) doubleTimeNode := &processedBlock{ Block: types.Block{Timestamp: types.Timestamp(10000 + types.BlockFrequency*2)}, } doubleTimeNode.Parent = genesisNode.Block.ID() cst.cs.db.addBlockMap(doubleTimeNode) // Check the resulting childTarget of the new node and see that the clamp // was applied. cst.cs.setChildTarget(doubleTimeNode) if doubleTimeNode.ChildTarget.Cmp(genesisNode.ChildTarget) <= 0 { t.Error("double time node target did not increase") } fullAdjustment := genesisNode.ChildTarget.MulDifficulty(big.NewRat(1, 2)) if doubleTimeNode.ChildTarget.Cmp(fullAdjustment) >= 0 { t.Error("clamp was not applied when adjusting target") } } // TestNewChild probes the newChild method of the block node type. func TestNewChild(t *testing.T) { cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() parent := &processedBlock{ Height: 12, } parent.Depth[0] = 45 parent.Block.Timestamp = 100 parent.ChildTarget[0] = 90 cst.cs.db.addBlockMap(parent) child := cst.cs.newChild(parent, types.Block{Timestamp: types.Timestamp(100 + types.BlockFrequency)}) if child.Parent != parent.Block.ID() { t.Error("parent-child relationship incorrect") } if child.Height != 13 { t.Error("child height set incorrectly") } var expectedDepth types.Target expectedDepth[0] = 30 if child.Depth.Cmp(expectedDepth) != 0 { t.Error("child depth did not adjust correctly") } if child.ChildTarget.Cmp(parent.ChildTarget) != 0 { t.Error("child childTarget not adjusted correctly") } } */ Sia-1.3.0/modules/consensus/subscribe.go000066400000000000000000000200431313565667000202420ustar00rootroot00000000000000package consensus import ( "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/bolt" ) // computeConsensusChange computes the consensus change resulting from the // provided change entry. An error is returned if a block referenced by the // entry cannot be found in the block map.
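// // Editor's sketch (hypothetical subscriber, not in the original source): a // module consuming the resulting ConsensusChange typically rolls back the // reverted blocks before replaying the applied ones: // // func (m *myModule) ProcessConsensusChange(cc modules.ConsensusChange) { // for _, b := range cc.RevertedBlocks { /* roll back m's state for b */ } // for _, b := range cc.AppliedBlocks { /* replay m's state for b */ } // }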
func (cs *ConsensusSet) computeConsensusChange(tx *bolt.Tx, ce changeEntry) (modules.ConsensusChange, error) { cc := modules.ConsensusChange{ ID: ce.ID(), } for _, revertedBlockID := range ce.RevertedBlocks { revertedBlock, err := getBlockMap(tx, revertedBlockID) if err != nil { cs.log.Critical("getBlockMap failed in computeConsensusChange:", err) return modules.ConsensusChange{}, err } // Because the direction is 'revert', the order of the diffs needs to // be flipped and the direction of the diffs also needs to be flipped. cc.RevertedBlocks = append(cc.RevertedBlocks, revertedBlock.Block) for i := len(revertedBlock.SiacoinOutputDiffs) - 1; i >= 0; i-- { scod := revertedBlock.SiacoinOutputDiffs[i] scod.Direction = !scod.Direction cc.SiacoinOutputDiffs = append(cc.SiacoinOutputDiffs, scod) } for i := len(revertedBlock.FileContractDiffs) - 1; i >= 0; i-- { fcd := revertedBlock.FileContractDiffs[i] fcd.Direction = !fcd.Direction cc.FileContractDiffs = append(cc.FileContractDiffs, fcd) } for i := len(revertedBlock.SiafundOutputDiffs) - 1; i >= 0; i-- { sfod := revertedBlock.SiafundOutputDiffs[i] sfod.Direction = !sfod.Direction cc.SiafundOutputDiffs = append(cc.SiafundOutputDiffs, sfod) } for i := len(revertedBlock.DelayedSiacoinOutputDiffs) - 1; i >= 0; i-- { dscod := revertedBlock.DelayedSiacoinOutputDiffs[i] dscod.Direction = !dscod.Direction cc.DelayedSiacoinOutputDiffs = append(cc.DelayedSiacoinOutputDiffs, dscod) } for i := len(revertedBlock.SiafundPoolDiffs) - 1; i >= 0; i-- { sfpd := revertedBlock.SiafundPoolDiffs[i] sfpd.Direction = modules.DiffRevert cc.SiafundPoolDiffs = append(cc.SiafundPoolDiffs, sfpd) } } for _, appliedBlockID := range ce.AppliedBlocks { appliedBlock, err := getBlockMap(tx, appliedBlockID) if err != nil { cs.log.Critical("getBlockMap failed in computeConsensusChange:", err) return modules.ConsensusChange{}, err } cc.AppliedBlocks = append(cc.AppliedBlocks, appliedBlock.Block) for _, scod := range appliedBlock.SiacoinOutputDiffs { cc.SiacoinOutputDiffs = append(cc.SiacoinOutputDiffs, scod) } for _, fcd := range appliedBlock.FileContractDiffs { cc.FileContractDiffs = append(cc.FileContractDiffs, fcd) } for _, sfod := range appliedBlock.SiafundOutputDiffs { cc.SiafundOutputDiffs = append(cc.SiafundOutputDiffs, sfod) } for _, dscod := range appliedBlock.DelayedSiacoinOutputDiffs { cc.DelayedSiacoinOutputDiffs = append(cc.DelayedSiacoinOutputDiffs, dscod) } for _, sfpd := range appliedBlock.SiafundPoolDiffs { cc.SiafundPoolDiffs = append(cc.SiafundPoolDiffs, sfpd) } } // Grab the child target and the minimum valid child timestamp. recentBlock := ce.AppliedBlocks[len(ce.AppliedBlocks)-1] pb, err := getBlockMap(tx, recentBlock) if err != nil { cs.log.Critical("could not find processed block for known block") } cc.ChildTarget = pb.ChildTarget cc.MinimumValidChildTimestamp = cs.blockRuleHelper.minimumValidChildTimestamp(tx.Bucket(BlockMap), pb) currentBlock := currentBlockID(tx) if cs.synced && recentBlock == currentBlock { cc.Synced = true } // Add the unexported tryTransactionSet function. cc.TryTransactionSet = cs.tryTransactionSet return cc, nil } // updateSubscribers will inform all subscribers of a new update to the // consensus set. updateSubscribers does not alter the changelog; the changelog // must be updated beforehand. func (cs *ConsensusSet) updateSubscribers(ce changeEntry) { // Get the consensus change and send it to all subscribers.
var cc modules.ConsensusChange err := cs.db.View(func(tx *bolt.Tx) error { // Compute the consensus change so it can be sent to subscribers. var err error cc, err = cs.computeConsensusChange(tx, ce) return err }) if err != nil { cs.log.Critical("computeConsensusChange failed:", err) return } for _, subscriber := range cs.subscribers { subscriber.ProcessConsensusChange(cc) } } // managedInitializeSubscribe will take a subscriber and feed them all of the // consensus changes that have occurred since the change provided. // // As a special case, using an empty id as the start will have all the changes // sent to the modules starting with the genesis block. func (cs *ConsensusSet) managedInitializeSubscribe(subscriber modules.ConsensusSetSubscriber, start modules.ConsensusChangeID) error { if start == modules.ConsensusChangeRecent { return nil } // 'exists' and 'entry' are going to be pointed to the first entry that // has not yet been seen by subscriber. var exists bool var entry changeEntry cs.mu.RLock() err := cs.db.View(func(tx *bolt.Tx) error { if start == modules.ConsensusChangeBeginning { // Special case: for modules.ConsensusChangeBeginning, create an // initial node pointing to the genesis block. The subscriber will // receive the diffs for all blocks in the consensus set, including // the genesis block. entry = cs.genesisEntry() exists = true } else { // The subscriber has provided an existing consensus change. // Because the subscriber already has this consensus change, // 'entry' and 'exists' need to be pointed at the next consensus // change. entry, exists = getEntry(tx, start) if !exists { // modules.ErrInvalidConsensusChangeID is a named error that // signals a break in synchronization between the consensus set // persistence and the subscriber persistence. Typically, // receiving this error means that the subscriber needs to // perform a rescan of the consensus set. return modules.ErrInvalidConsensusChangeID } entry, exists = entry.NextEntry(tx) } return nil }) cs.mu.RUnlock() if err != nil { return err } // Send all remaining consensus changes to the subscriber. for exists { // Send changes in batches of 100 so that we don't hold the // lock for too long. cs.mu.RLock() err = cs.db.View(func(tx *bolt.Tx) error { for i := 0; i < 100 && exists; i++ { cc, err := cs.computeConsensusChange(tx, entry) if err != nil { return err } subscriber.ProcessConsensusChange(cc) entry, exists = entry.NextEntry(tx) } return nil }) cs.mu.RUnlock() if err != nil { return err } } return nil } // ConsensusSetSubscribe adds a subscriber to the list of subscribers, and // gives them every consensus change that has occurred since the change with // the provided id. // // As a special case, using an empty id as the start will have all the changes // sent to the modules starting with the genesis block. func (cs *ConsensusSet) ConsensusSetSubscribe(subscriber modules.ConsensusSetSubscriber, start modules.ConsensusChangeID) error { err := cs.tg.Add() if err != nil { return err } defer cs.tg.Done() // Get the input module caught up to the current consensus set. err = cs.managedInitializeSubscribe(subscriber, start) if err != nil { return err } // Add the module to the list of subscribers. cs.mu.Lock() // Sanity check - subscriber should not be already subscribed. 
for _, s := range cs.subscribers { if s == subscriber { build.Critical("refusing to double-subscribe subscriber") } } cs.subscribers = append(cs.subscribers, subscriber) cs.mu.Unlock() return nil } // Unsubscribe removes a subscriber from the list of subscribers, allowing for // garbage collection and rescanning. If the subscriber is not found in the // subscriber database, no action is taken. func (cs *ConsensusSet) Unsubscribe(subscriber modules.ConsensusSetSubscriber) { if cs.tg.Add() != nil { return } defer cs.tg.Done() cs.mu.Lock() defer cs.mu.Unlock() // Search for the subscriber in the list of subscribers and remove it if // found. for i := range cs.subscribers { if cs.subscribers[i] == subscriber { cs.subscribers = append(cs.subscribers[0:i], cs.subscribers[i+1:]...) break } } } Sia-1.3.0/modules/consensus/subscribe_test.go000066400000000000000000000104271313565667000213060ustar00rootroot00000000000000package consensus import ( "testing" "github.com/NebulousLabs/Sia/modules" ) // mockSubscriber receives and holds changes to the consensus set, remembering // the order in which changes were received. type mockSubscriber struct { updates []modules.ConsensusChange } // newMockSubscriber returns a mockSubscriber that is ready to subscribe to a // consensus set. Currently blank, but can be expanded to support more features // in the future. func newMockSubscriber() mockSubscriber { return mockSubscriber{} } // ProcessConsensusChange adds a consensus change to the mock subscriber. func (ms *mockSubscriber) ProcessConsensusChange(cc modules.ConsensusChange) { ms.updates = append(ms.updates, cc) } // copySub creates and returns a new mock subscriber that has identical // internals to the input mockSubscriber. The copy will not be subscribed to // the consensus set even if the original is. func (ms *mockSubscriber) copySub() (cms mockSubscriber) { cms.updates = make([]modules.ConsensusChange, len(ms.updates)) copy(cms.updates, ms.updates) return cms } // TestInvalidConsensusChangeSubscription checks that the consensus set returns // modules.ErrInvalidConsensusChangeID in the event of a subscriber using an // unrecognized id. func TestInvalidConsensusChangeSubscription(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() ms := newMockSubscriber() badCCID := modules.ConsensusChangeID{255, 255, 255} err = cst.cs.ConsensusSetSubscribe(&ms, badCCID) if err != modules.ErrInvalidConsensusChangeID { t.Error("consensus set returning the wrong error during an invalid subscription:", err) } cst.cs.mu.Lock() for i := range cst.cs.subscribers { if cst.cs.subscribers[i] == &ms { t.Fatal("subscriber was not removed from subscriber list after an erroneous subscription") } } cst.cs.mu.Unlock() } // TestInvalidToValidSubscription is a regression test. Previously, the // consensus set would not unsubscribe a module if it returned an error during // subscription. When the module resubscribed, the module would be // double-subscribed to the consensus set. func TestInvalidToValidSubscription(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Start by performing a bad subscribe.
ms := newMockSubscriber() badCCID := modules.ConsensusChangeID{255, 255, 255} err = cst.cs.ConsensusSetSubscribe(&ms, badCCID) if err != modules.ErrInvalidConsensusChangeID { t.Error("consensus set returning the wrong error during an invalid subscription:", err) } // Perform a correct subscribe. err = cst.cs.ConsensusSetSubscribe(&ms, modules.ConsensusChangeBeginning) if err != nil { t.Fatal(err) } // Mine a block and check that the mock subscriber only got a single // consensus change. numPrevUpdates := len(ms.updates) _, err = cst.miner.AddBlock() if err != nil { t.Fatal(err) } if len(ms.updates) != numPrevUpdates+1 { t.Error("subscriber received two consensus changes for a single block") } } // TestUnsubscribe checks that the consensus set correctly unsubscribes a // subscriber if the Unsubscribe call is made. func TestUnsubscribe(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Subscribe the mock subscriber to the consensus set. ms := newMockSubscriber() err = cst.cs.ConsensusSetSubscribe(&ms, modules.ConsensusChangeBeginning) if err != nil { t.Fatal(err) } // Check that the subscriber is receiving updates. msLen := len(ms.updates) if msLen == 0 { t.Error("mock subscriber is not receiving updates") } _, err = cst.miner.AddBlock() // should cause another update to be sent to the subscriber if err != nil { t.Fatal(err) } if len(ms.updates) != msLen+1 { t.Error("mock subscriber did not receive the correct number of updates") } // Unsubscribe the subscriber and then check that it is no longer receiving // updates. cst.cs.Unsubscribe(&ms) _, err = cst.miner.AddBlock() if err != nil { t.Fatal(err) } if len(ms.updates) != msLen+1 { t.Error("mock subscriber was not correctly unsubscribed") } } Sia-1.3.0/modules/consensus/synchronize.go000066400000000000000000000452201313565667000206400ustar00rootroot00000000000000package consensus import ( "errors" "net" "sync" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) const ( // minNumOutbound is the minimum number of outbound peers required before IBD // is confident we are synced. minNumOutbound = 5 ) var ( // MaxCatchUpBlocks is the maximum number of blocks that can be given to // the consensus set in a single iteration during the initial blockchain // download. MaxCatchUpBlocks = build.Select(build.Var{ Standard: types.BlockHeight(10), Dev: types.BlockHeight(50), Testing: types.BlockHeight(3), }).(types.BlockHeight) // sendBlocksTimeout is the timeout for the SendBlocks RPC. sendBlocksTimeout = build.Select(build.Var{ Standard: 5 * time.Minute, Dev: 40 * time.Second, Testing: 5 * time.Second, }).(time.Duration) // sendBlkTimeout is the timeout for the SendBlk RPC. sendBlkTimeout = build.Select(build.Var{ Standard: 4 * time.Minute, Dev: 30 * time.Second, Testing: 4 * time.Second, }).(time.Duration) // relayHeaderTimeout is the timeout for the RelayHeader RPC. relayHeaderTimeout = build.Select(build.Var{ Standard: 3 * time.Minute, Dev: 20 * time.Second, Testing: 3 * time.Second, }).(time.Duration) // minIBDWaitTime is the time threadedInitialBlockchainDownload waits before // exiting if there are >= 1 and <= minNumOutbound peers synced. This timeout // will primarily affect miners who have multiple nodes daisy chained off each // other.
Those nodes will likely have to wait minIBDWaitTime on every startup // before IBD is done. minIBDWaitTime = build.Select(build.Var{ Standard: 90 * time.Minute, Dev: 80 * time.Second, Testing: 10 * time.Second, }).(time.Duration) // ibdLoopDelay is the time that threadedInitialBlockchainDownload waits // between attempts to synchronize with the network if the last attempt // failed. ibdLoopDelay = build.Select(build.Var{ Standard: 10 * time.Second, Dev: 1 * time.Second, Testing: 100 * time.Millisecond, }).(time.Duration) errEarlyStop = errors.New("initial blockchain download did not complete by the time shutdown was issued") errNilProcBlock = errors.New("nil processed block was fetched from the database") errSendBlocksStalled = errors.New("SendBlocks RPC timed out and never received any blocks") ) // isTimeoutErr is a helper function that returns true if err was caused by a // network timeout. func isTimeoutErr(err error) bool { if err == nil { return false } if netErr, ok := err.(net.Error); ok && netErr.Timeout() { return true } // COMPATv1.3.0 return (err.Error() == "Read timeout" || err.Error() == "Write timeout") } // blockHistory returns up to 32 block ids, starting with recent blocks and // then providing exponentially less recent blocks. The genesis // block is always included as the last block. This block history can be used // to find a common parent that is reasonably recent, usually the most recent // common parent is found, but always a common parent within a factor of 2 is // found. func blockHistory(tx *bolt.Tx) (blockIDs [32]types.BlockID) { height := blockHeight(tx) step := types.BlockHeight(1) // The final step is to include the genesis block, which is why the final // element is skipped during iteration. for i := 0; i < 31; i++ { // Include the next block. blockID, err := getPath(tx, height) if build.DEBUG && err != nil { panic(err) } blockIDs[i] = blockID // Determine the height of the next block to include and then increase // the step size. The height must be decreased first to prevent // underflow. // // `i >= 9` means that the first 10 blocks will be included, and then // skipping will start. if i >= 9 { step *= 2 } if height <= step { break } height -= step } // Include the genesis block as the last element blockID, err := getPath(tx, 0) if build.DEBUG && err != nil { panic(err) } blockIDs[31] = blockID return blockIDs } // managedReceiveBlocks is the calling end of the SendBlocks RPC, without the // threadgroup wrapping. func (cs *ConsensusSet) managedReceiveBlocks(conn modules.PeerConn) (returnErr error) { // Set a deadline after which SendBlocks will timeout. During IBD especially, // SendBlocks will time out. This is by design so that IBD switches peers to // prevent any one peer from stalling IBD. err := conn.SetDeadline(time.Now().Add(sendBlocksTimeout)) if err != nil { return err } finishedChan := make(chan struct{}) defer close(finishedChan) go func() { select { case <-cs.tg.StopChan(): case <-finishedChan: } conn.Close() }() // Check whether this RPC has timed out with the remote peer at the end of // the function, and if so, return a custom error to signal that a new peer // needs to be chosen. stalled := true defer func() { if isTimeoutErr(returnErr) && stalled { returnErr = errSendBlocksStalled } }() // Get blockIDs to send. var history [32]types.BlockID cs.mu.RLock() err = cs.db.View(func(tx *bolt.Tx) error { history = blockHistory(tx) return nil }) cs.mu.RUnlock() if err != nil { return err } // Send the block ids.
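// Editor's note (illustrative): at a hypothetical height of 1000, the // history holds heights 1000-992, then 991, 989, 985, 977, 961, 929, 865, // 737, 481, and finally the genesis block - dense coverage of recent blocks, // exponentially sparser coverage of older ones.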
if err := encoding.WriteObject(conn, history); err != nil { return err } // Broadcast the last block accepted. This functionality is in a defer to // ensure that a block is always broadcast if any blocks are accepted. This // is to stop an attacker from preventing block broadcasts. var initialBlock types.BlockID if build.DEBUG { // Prepare for a sanity check on 'chainExtended' - chain extended should // be set to true if and only if the result of calling dbCurrentBlockID // changes. initialBlock = cs.dbCurrentBlockID() } chainExtended := false defer func() { cs.mu.RLock() synced := cs.synced cs.mu.RUnlock() if synced && chainExtended { if build.DEBUG && initialBlock == cs.dbCurrentBlockID() { panic("blockchain extension reporting is incorrect") } fullBlock := cs.managedCurrentBlock() // TODO: Add caching, replace this line by looking at the cache. go cs.gateway.Broadcast("RelayHeader", fullBlock.Header(), cs.gateway.Peers()) } }() // Read blocks off of the wire and add them to the consensus set until // there are no more blocks available. moreAvailable := true for moreAvailable { // Read a slice of blocks from the wire. var newBlocks []types.Block if err := encoding.ReadObject(conn, &newBlocks, uint64(MaxCatchUpBlocks)*types.BlockSizeLimit); err != nil { return err } if err := encoding.ReadObject(conn, &moreAvailable, 1); err != nil { return err } if len(newBlocks) == 0 { continue } stalled = false // Call managedAcceptBlocks instead of AcceptBlock so as not to broadcast // every block. extended, acceptErr := cs.managedAcceptBlocks(newBlocks) if extended { chainExtended = true } // ErrNonExtendingBlock must be ignored until headers-first block // sharing is implemented; blocks already in the database should also be // ignored. if acceptErr != nil && acceptErr != modules.ErrNonExtendingBlock && acceptErr != modules.ErrBlockKnown { return acceptErr } } return nil } // threadedReceiveBlocks is the calling end of the SendBlocks RPC. func (cs *ConsensusSet) threadedReceiveBlocks(conn modules.PeerConn) error { err := conn.SetDeadline(time.Now().Add(sendBlocksTimeout)) if err != nil { return err } finishedChan := make(chan struct{}) defer close(finishedChan) go func() { select { case <-cs.tg.StopChan(): case <-finishedChan: } conn.Close() }() err = cs.tg.Add() if err != nil { return err } defer cs.tg.Done() return cs.managedReceiveBlocks(conn) } // rpcSendBlocks is the receiving end of the SendBlocks RPC. It returns a // sequential set of blocks based on the 32 input block IDs. The most recent // known ID is used as the starting point, and up to 'MaxCatchUpBlocks' from // that BlockHeight onwards are returned. It also sends a boolean indicating // whether more blocks are available. func (cs *ConsensusSet) rpcSendBlocks(conn modules.PeerConn) error { err := conn.SetDeadline(time.Now().Add(sendBlocksTimeout)) if err != nil { return err } finishedChan := make(chan struct{}) defer close(finishedChan) go func() { select { case <-cs.tg.StopChan(): case <-finishedChan: } conn.Close() }() err = cs.tg.Add() if err != nil { return err } defer cs.tg.Done() // Read a list of blocks known to the requester and find the most recent // block from the current path. var knownBlocks [32]types.BlockID err = encoding.ReadObject(conn, &knownBlocks, 32*crypto.HashSize) if err != nil { return err } // Find the most recent block from knownBlocks in the current path.
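// Editor's note: if the peer's best known block is already our current head // (pb.Height == csHeight below), 'found' deliberately stays false, so the // code falls through to sending zero blocks with moreAvailable set to false.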
found := false var start types.BlockHeight var csHeight types.BlockHeight cs.mu.RLock() err = cs.db.View(func(tx *bolt.Tx) error { csHeight = blockHeight(tx) for _, id := range knownBlocks { pb, err := getBlockMap(tx, id) if err != nil { continue } pathID, err := getPath(tx, pb.Height) if err != nil { continue } if pathID != pb.Block.ID() { continue } if pb.Height == csHeight { break } found = true // Start from the child of the common block. start = pb.Height + 1 break } return nil }) cs.mu.RUnlock() if err != nil { return err } // If no matching blocks are found, or if the caller has all known blocks, // don't send any blocks. if !found { // Send 0 blocks. err = encoding.WriteObject(conn, []types.Block{}) if err != nil { return err } // Indicate that no more blocks are available. return encoding.WriteObject(conn, false) } // Send the caller all of the blocks that they are missing. moreAvailable := true for moreAvailable { // Get the set of blocks to send. var blocks []types.Block cs.mu.RLock() err = cs.db.View(func(tx *bolt.Tx) error { height := blockHeight(tx) for i := start; i <= height && i < start+MaxCatchUpBlocks; i++ { id, err := getPath(tx, i) if err != nil { cs.log.Critical("Unable to get path: height", height, ":: request", i) return err } pb, err := getBlockMap(tx, id) if err != nil { cs.log.Critical("Unable to get block from block map: height", height, ":: request", i, ":: id", id) return err } if pb == nil { cs.log.Critical("getBlockMap yielded 'nil' block:", height, ":: request", i, ":: id", id) return errNilProcBlock } blocks = append(blocks, pb.Block) } moreAvailable = start+MaxCatchUpBlocks <= height start += MaxCatchUpBlocks return nil }) cs.mu.RUnlock() if err != nil { return err } // Send a set of blocks to the caller + a flag indicating whether more // are available. if err = encoding.WriteObject(conn, blocks); err != nil { return err } if err = encoding.WriteObject(conn, moreAvailable); err != nil { return err } } return nil } // threadedRPCRelayHeader is an RPC that accepts a block header from a peer. func (cs *ConsensusSet) threadedRPCRelayHeader(conn modules.PeerConn) error { err := conn.SetDeadline(time.Now().Add(relayHeaderTimeout)) if err != nil { return err } finishedChan := make(chan struct{}) defer close(finishedChan) go func() { select { case <-cs.tg.StopChan(): case <-finishedChan: } conn.Close() }() err = cs.tg.Add() if err != nil { return err } wg := new(sync.WaitGroup) defer func() { go func() { wg.Wait() cs.tg.Done() }() }() // Decode the block header from the connection. var h types.BlockHeader err = encoding.ReadObject(conn, &h, types.BlockHeaderSize) if err != nil { return err } // Start verification inside of a bolt View tx. cs.mu.RLock() err = cs.db.View(func(tx *bolt.Tx) error { // Do some relatively inexpensive checks to validate the header return cs.validateHeader(boltTxWrapper{tx}, h) }) cs.mu.RUnlock() // WARN: orphan multithreading logic (dangerous areas, see below) // // If the header is valid and extends the heaviest chain, fetch the // corresponding block. Call needs to be made in a separate goroutine // because an exported call to the gateway is used, which is a deadlock // risk given that rpcRelayHeader is called from the gateway. // // NOTE: In general this is bad design. Rather than recycling other // calls, the whole protocol should have been kept in a single RPC. // Because it is not, we have to do weird threading to prevent // deadlocks, and we also have to be concerned every time the code in // managedReceiveBlock is adjusted. 
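// // Editor's summary of the two cases below: an orphan header (case #1) // triggers a full SendBlocks exchange to fetch the unknown ancestry, while a // header that passes validation (case #2) fetches only its single // corresponding block via SendBlk.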
if err == errOrphan { // WARN: orphan multithreading logic case #1 wg.Add(1) go func() { defer wg.Done() err := cs.gateway.RPC(conn.RPCAddr(), "SendBlocks", cs.managedReceiveBlocks) if err != nil { cs.log.Debugln("WARN: failed to get parents of orphan header:", err) } }() return nil } else if err != nil { return err } // WARN: orphan multithreading logic case #2 wg.Add(1) go func() { defer wg.Done() err = cs.gateway.RPC(conn.RPCAddr(), "SendBlk", cs.managedReceiveBlock(h.ID())) if err != nil { cs.log.Debugln("WARN: failed to get header's corresponding block:", err) } }() return nil } // rpcSendBlk is an RPC that sends the requested block to the requesting peer. func (cs *ConsensusSet) rpcSendBlk(conn modules.PeerConn) error { err := conn.SetDeadline(time.Now().Add(sendBlkTimeout)) if err != nil { return err } finishedChan := make(chan struct{}) defer close(finishedChan) go func() { select { case <-cs.tg.StopChan(): case <-finishedChan: } conn.Close() }() err = cs.tg.Add() if err != nil { return err } defer cs.tg.Done() // Decode the block id from the connection. var id types.BlockID err = encoding.ReadObject(conn, &id, crypto.HashSize) if err != nil { return err } // Lookup the corresponding block. var b types.Block cs.mu.RLock() err = cs.db.View(func(tx *bolt.Tx) error { pb, err := getBlockMap(tx, id) if err != nil { return err } b = pb.Block return nil }) cs.mu.RUnlock() if err != nil { return err } // Encode and send the block to the caller. err = encoding.WriteObject(conn, b) if err != nil { return err } return nil } // managedReceiveBlock takes a block id and returns an RPCFunc that requests that // block and then calls managedAcceptBlocks on it. The returned function should be used // as the calling end of the SendBlk RPC. func (cs *ConsensusSet) managedReceiveBlock(id types.BlockID) modules.RPCFunc { return func(conn modules.PeerConn) error { if err := encoding.WriteObject(conn, id); err != nil { return err } var block types.Block if err := encoding.ReadObject(conn, &block, types.BlockSizeLimit); err != nil { return err } chainExtended, err := cs.managedAcceptBlocks([]types.Block{block}) if chainExtended { cs.managedBroadcastBlock(block) } if err != nil { return err } return nil } } // threadedInitialBlockchainDownload performs the IBD on outbound peers. Blocks // are downloaded from one peer at a time in 5 minute intervals, so as to // prevent any one peer from significantly slowing down IBD. // // NOTE: IBD will succeed right now when each peer has a different blockchain. // The height and the block id of the remote peers' current blocks are not // checked to be the same. This can cause issues if you are connected to // outbound peers <= v0.5.1 that are stalled in IBD. func (cs *ConsensusSet) threadedInitialBlockchainDownload() error { // The consensus set will not recognize IBD as complete until it has enough // peers. After the deadline though, it will recognize the blockchain // download as complete even with only one peer. This deadline is helpful // to local-net setups, where a machine will frequently only have one peer // (and that peer will be another machine on the same local network, but // within the local network at least one peer is connected to the broad // network).
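// (A sketch paraphrasing the exit condition checked at the bottom of the // loop below: // //	numOutboundSynced > numOutboundNotSynced && (numOutboundSynced >= minNumOutbound || time.Now().After(deadline)) // // i.e. a strict majority of the polled outbound peers must report us as // synced, and before the deadline that majority must also contain at least // minNumOutbound peers.)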
deadline := time.Now().Add(minIBDWaitTime) numOutboundSynced := 0 numOutboundNotSynced := 0 for { numOutboundSynced = 0 numOutboundNotSynced = 0 for _, p := range cs.gateway.Peers() { // We only sync on outbound peers at first to make IBD less susceptible to // fast-mining and other attacks, as outbound peers are more difficult to // manipulate. if p.Inbound { continue } // Put the rest of the iteration inside of a thread group. err := func() error { err := cs.tg.Add() if err != nil { return err } defer cs.tg.Done() // Request blocks from the peer. The error returned will only be // 'nil' if there are no more blocks to receive. err = cs.gateway.RPC(p.NetAddress, "SendBlocks", cs.managedReceiveBlocks) if err == nil { numOutboundSynced++ // In this case, 'return nil' is equivalent to skipping to // the next iteration of the loop. return nil } numOutboundNotSynced++ if !isTimeoutErr(err) { cs.log.Printf("WARN: disconnecting from peer %v because IBD failed: %v", p.NetAddress, err) // Disconnect if there is an unexpected error (not a timeout). This // includes errSendBlocksStalled. // // We disconnect so that these peers are removed from gateway.Peers() and // do not prevent us from marking ourselves as fully synced. err := cs.gateway.Disconnect(p.NetAddress) if err != nil { cs.log.Printf("WARN: disconnecting from peer %v failed: %v", p.NetAddress, err) } } return nil }() if err != nil { return err } } // The consensus set is not considered synced until a majority of // outbound peers say that we are synced. If less than 10 minutes have // passed, a minimum of 'minNumOutbound' peers must say that we are // synced, otherwise a 1 vs 0 majority is sufficient. // // This scheme is used to prevent malicious peers from being able to // barricade the sync'd status of the consensus set, and to make sure // that consensus sets behind a firewall with only one peer // (potentially a local peer) are still able to eventually conclude // that they have synchronized. Miners and hosts will often have setups // behind a firewall where there is a single node with many peers and // then the rest of the nodes only have a few peers. if numOutboundSynced > numOutboundNotSynced && (numOutboundSynced >= minNumOutbound || time.Now().After(deadline)) { break } else { // Sleep so we don't hammer the network with SendBlocks requests. time.Sleep(ibdLoopDelay) } } cs.log.Printf("INFO: IBD done, synced with %v peers", numOutboundSynced) return nil } // Synced returns true if the consensus set is synced with the network. func (cs *ConsensusSet) Synced() bool { err := cs.tg.Add() if err != nil { return false } defer cs.tg.Done() cs.mu.RLock() defer cs.mu.RUnlock() return cs.synced } Sia-1.3.0/modules/consensus/synchronize_ibd_test.go000066400000000000000000000347421313565667000225240ustar00rootroot00000000000000package consensus import ( "errors" "fmt" "io" "net" "path/filepath" "strconv" "sync" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/gateway" "github.com/NebulousLabs/Sia/types" ) // TestSimpleInitialBlockchainDownload tests that // threadedInitialBlockchainDownload synchronizes with peers in the simple case // where there are 8 outbound peers with the same blockchain. func TestSimpleInitialBlockchainDownload(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } // Create 8 remote peers.
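// (Eight peers are used on the assumption that this meets or exceeds // minNumOutbound, letting threadedInitialBlockchainDownload finish via its // majority rule rather than by waiting out minIBDWaitTime.)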
remoteCSTs := make([]*consensusSetTester, 8) for i := range remoteCSTs { cst, err := blankConsensusSetTester(t.Name() + strconv.Itoa(i)) if err != nil { t.Fatal(err) } defer cst.Close() remoteCSTs[i] = cst } // Create the "local" peer. localCST, err := blankConsensusSetTester(t.Name() + "- local") if err != nil { t.Fatal(err) } defer localCST.Close() for _, cst := range remoteCSTs { err = localCST.cs.gateway.Connect(cst.cs.gateway.Address()) if err != nil { t.Fatal(err) } } // Give the OnConnectRPCs time to finish. time.Sleep(5 * time.Second) // Test IBD when all peers have only the genesis block. doneChan := make(chan struct{}) go func() { localCST.cs.threadedInitialBlockchainDownload() doneChan <- struct{}{} }() select { case <-doneChan: case <-time.After(5 * time.Second): t.Fatal("initialBlockchainDownload never completed") } if localCST.cs.CurrentBlock().ID() != remoteCSTs[0].cs.CurrentBlock().ID() { t.Fatalf("current block ids do not match: expected '%v', got '%v'", remoteCSTs[0].cs.CurrentBlock().ID(), localCST.cs.CurrentBlock().ID()) } // Test IBD when all remote peers have the same longest chain. for i := 0; i < 20; i++ { b, err := remoteCSTs[0].miner.FindBlock() if err != nil { t.Fatal(err) } for _, cst := range remoteCSTs { _, err = cst.cs.managedAcceptBlocks([]types.Block{b}) if err != nil && err != modules.ErrBlockKnown && err != modules.ErrNonExtendingBlock { t.Fatal(err) } } } go func() { localCST.cs.threadedInitialBlockchainDownload() doneChan <- struct{}{} }() select { case <-doneChan: case <-time.After(5 * time.Second): t.Fatal("initialBlockchainDownload never completed") } if localCST.cs.CurrentBlock().ID() != remoteCSTs[0].cs.CurrentBlock().ID() { t.Fatalf("current block ids do not match: expected '%v', got '%v'", remoteCSTs[0].cs.CurrentBlock().ID(), localCST.cs.CurrentBlock().ID()) } // Test IBD when not starting from the genesis block. for i := 0; i < 4; i++ { b, err := remoteCSTs[0].miner.FindBlock() if err != nil { t.Fatal(err) } for _, cst := range remoteCSTs { _, err = cst.cs.managedAcceptBlocks([]types.Block{b}) if err != nil && err != modules.ErrBlockKnown { t.Fatal(err) } } } go func() { localCST.cs.threadedInitialBlockchainDownload() doneChan <- struct{}{} }() select { case <-doneChan: case <-time.After(5 * time.Second): t.Fatal("initialBlockchainDownload never completed") } if localCST.cs.CurrentBlock().ID() != remoteCSTs[0].cs.CurrentBlock().ID() { t.Fatalf("current block ids do not match: expected '%v', got '%v'", remoteCSTs[0].cs.CurrentBlock().ID(), localCST.cs.CurrentBlock().ID()) } // Test IBD when the remote peers are on a longer fork. for i := 0; i < 5; i++ { b, err := localCST.miner.FindBlock() if err != nil { t.Fatal(err) } _, err = localCST.cs.managedAcceptBlocks([]types.Block{b}) if err != nil { t.Fatal(err) } } for i := 0; i < 10; i++ { b, err := remoteCSTs[0].miner.FindBlock() if err != nil { t.Fatal(err) } for _, cst := range remoteCSTs { _, err = cst.cs.managedAcceptBlocks([]types.Block{b}) if err != nil && err != modules.ErrBlockKnown { t.Log(i) t.Fatal(err) } } } go func() { localCST.cs.threadedInitialBlockchainDownload() doneChan <- struct{}{} }() select { case <-doneChan: case <-time.After(5 * time.Second): t.Fatal("initialBlockchainDownload never completed") } if localCST.cs.CurrentBlock().ID() != remoteCSTs[0].cs.CurrentBlock().ID() { t.Fatalf("current block ids do not match: expected '%v', got '%v'", remoteCSTs[0].cs.CurrentBlock().ID(), localCST.cs.CurrentBlock().ID()) } // Test IBD when the remote peers are on a shorter fork. 
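// (Because IBD only pulls blocks, the local node should keep its longer // fork here and the remotes should be left untouched; the assertions below // check both directions.)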
for i := 0; i < 10; i++ { b, err := localCST.miner.FindBlock() if err != nil { t.Fatal(err) } _, err = localCST.cs.managedAcceptBlocks([]types.Block{b}) if err != nil { t.Fatal(err) } } for i := 0; i < 5; i++ { b, err := remoteCSTs[0].miner.FindBlock() if err != nil { t.Fatal(err) } for _, cst := range remoteCSTs { _, err = cst.cs.managedAcceptBlocks([]types.Block{b}) if err != nil && err != modules.ErrBlockKnown { t.Log(i) t.Fatal(err) } } } localCurrentBlock := localCST.cs.CurrentBlock() go func() { localCST.cs.threadedInitialBlockchainDownload() doneChan <- struct{}{} }() select { case <-doneChan: case <-time.After(5 * time.Second): t.Fatal("initialBlockchainDownload never completed") } if localCST.cs.CurrentBlock().ID() != localCurrentBlock.ID() { t.Fatalf("local was on a longer fork and should not have reorged") } if localCST.cs.CurrentBlock().ID() == remoteCSTs[0].cs.CurrentBlock().ID() { t.Fatalf("ibd syncing is one way, and a longer fork on the local cs should not cause a reorg on the remote cs's") } } type mockGatewayRPCError struct { modules.Gateway rpcErrs map[modules.NetAddress]error mu sync.Mutex } func (g *mockGatewayRPCError) RPC(addr modules.NetAddress, name string, fn modules.RPCFunc) error { g.mu.Lock() defer g.mu.Unlock() return g.rpcErrs[addr] } // TestInitialBlockchainDownloadDisconnects tests that // threadedInitialBlockchainDownload only disconnects from peers that error // with anything but a timeout. func TestInitialBlockchainDownloadDisconnects(t *testing.T) { if testing.Short() { t.SkipNow() } testdir := build.TempDir(modules.ConsensusDir, t.Name()) g, err := gateway.New("localhost:0", false, filepath.Join(testdir, "local", modules.GatewayDir)) if err != nil { t.Fatal(err) } defer g.Close() mg := mockGatewayRPCError{ Gateway: g, rpcErrs: make(map[modules.NetAddress]error), } localCS, err := New(&mg, false, filepath.Join(testdir, "local", modules.ConsensusDir)) if err != nil { t.Fatal(err) } defer localCS.Close() rpcErrs := []error{ // rpcErrs that should cause a disconnect. io.EOF, errors.New("random error"), errSendBlocksStalled, // rpcErrs that should not cause a disconnect. mockNetError{ error: errors.New("Read timeout"), timeout: true, }, // Need at least minNumOutbound peers that return nil for // threadedInitialBlockchainDownload to mark IBD done. nil, nil, nil, nil, nil, } for i, rpcErr := range rpcErrs { g, err := gateway.New("localhost:0", false, filepath.Join(testdir, "remote - "+strconv.Itoa(i), modules.GatewayDir)) if err != nil { t.Fatal(err) } defer g.Close() err = localCS.gateway.Connect(g.Address()) if err != nil { t.Fatal(err) } mg.rpcErrs[g.Address()] = rpcErr } // Sleep to give the OnConnectRPCs time to finish. time.Sleep(500 * time.Millisecond) // Do IBD. localCS.threadedInitialBlockchainDownload() // Check that localCS disconnected from peers that errored but did not time out during SendBlocks. if len(localCS.gateway.Peers()) != 6 { t.Error("threadedInitialBlockchainDownload disconnected from peers that timed out or didn't error", len(localCS.gateway.Peers())) } for _, p := range localCS.gateway.Peers() { err = mg.rpcErrs[p.NetAddress] if err == nil { continue } if netErr, ok := err.(net.Error); ok && netErr.Timeout() { continue } t.Fatalf("threadedInitialBlockchainDownload didn't disconnect from a peer that returned '%v', %v", err, p.NetAddress) } } // TestInitialBlockchainDownloadDoneRules tests that // threadedInitialBlockchainDownload only terminates under the appropriate // conditions.
Appropriate conditions are: // - at least minNumOutbound synced outbound peers // - or at least 1 synced outbound peer and minIBDWaitTime has passed since beginning IBD. func TestInitialBlockchainDownloadDoneRules(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } testdir := build.TempDir(modules.ConsensusDir, t.Name()) // Create a gateway that can be forced to return errors when its RPC method // is called, then create a consensus set using that gateway. g, err := gateway.New("localhost:0", false, filepath.Join(testdir, "local", modules.GatewayDir)) if err != nil { t.Fatal(err) } defer g.Close() mg := mockGatewayRPCError{ Gateway: g, rpcErrs: make(map[modules.NetAddress]error), } cs, err := New(&mg, false, filepath.Join(testdir, "local", modules.ConsensusDir)) if err != nil { t.Fatal(err) } defer cs.Close() // Verify that the consensus set will not signal IBD completion when it has // zero peers. doneChan := make(chan struct{}) go func() { cs.threadedInitialBlockchainDownload() doneChan <- struct{}{} }() select { case <-doneChan: t.Error("threadedInitialBlockchainDownload finished with 0 synced peers") case <-time.After(minIBDWaitTime + ibdLoopDelay): } // threadedInitialBlockchainDownload is already running. Feed some inbound // peers to the consensus set. The gateway, through its own process of // trying to find outbound peers, will eventually convert one of the // inbound peers to an outbound peer. IBD should not complete until there // is at least one outbound peer. // // After this function has completed, all of the peers will be shutdown, // leaving the consensus set once again with zero peers. func() { inboundCSTs := make([]*consensusSetTester, 8) for i := 0; i < len(inboundCSTs); i++ { inboundCST, err := blankConsensusSetTester(filepath.Join(t.Name(), " - inbound "+strconv.Itoa(i))) if err != nil { t.Fatal(err) } defer inboundCST.Close() inboundCST.cs.gateway.Connect(cs.gateway.Address()) } <-doneChan peers := cs.gateway.Peers() outbound := false for _, p := range peers { if !p.Inbound { outbound = true break } } if !outbound { t.Error("threadedInitialBlockchainDownload finished with only inbound peers") } }() // Try another initial blockchain download, this time with an outbound peer // who is not synced. The consensus set should not determine itself to have // completed IBD with only unsynced peers. // // 'NotSynced' is simulated in this peer by having all RPCs return errors. go func() { cs.threadedInitialBlockchainDownload() doneChan <- struct{}{} }() gatewayTimesout, err := gateway.New("localhost:0", false, filepath.Join(testdir, "remote - timesout", modules.GatewayDir)) if err != nil { t.Fatal(err) } defer gatewayTimesout.Close() mg.mu.Lock() mg.rpcErrs[gatewayTimesout.Address()] = mockNetError{ error: errors.New("Read timeout"), timeout: true, } mg.mu.Unlock() err = cs.gateway.Connect(gatewayTimesout.Address()) if err != nil { t.Fatal(err) } select { case <-doneChan: t.Error("threadedInitialBlockchainDownload finished with 0 synced peers") case <-time.After(minIBDWaitTime + ibdLoopDelay): } // Add a peer that is synced to the peer that is not synced. IBD should not // be considered completed when there is a tie between synced and // not-synced peers. 
gatewayNoTimeout, err := gateway.New("localhost:0", false, filepath.Join(testdir, "remote - no timeout1", modules.GatewayDir)) if err != nil { t.Fatal(err) } defer gatewayNoTimeout.Close() mg.mu.Lock() mg.rpcErrs[gatewayNoTimeout.Address()] = nil mg.mu.Unlock() err = cs.gateway.Connect(gatewayNoTimeout.Address()) if err != nil { t.Fatal(err) } select { case <-doneChan: t.Fatal("threadedInitialBlockchainDownload finished with 1 synced peer and 1 non-synced peer") case <-time.After(minIBDWaitTime + ibdLoopDelay): } // Test when there are 2 peers that are synced and one that is not synced. // There is now a majority of synced peers and the minIBDWaitTime has passed, // so the IBD function should finish. gatewayNoTimeout2, err := gateway.New("localhost:0", false, filepath.Join(testdir, "remote - no timeout2", modules.GatewayDir)) if err != nil { t.Fatal(err) } defer gatewayNoTimeout2.Close() mg.mu.Lock() mg.rpcErrs[gatewayNoTimeout2.Address()] = nil mg.mu.Unlock() err = cs.gateway.Connect(gatewayNoTimeout2.Address()) if err != nil { t.Fatal(err) } select { case <-doneChan: case <-time.After(4 * (minIBDWaitTime + ibdLoopDelay)): t.Fatal("threadedInitialBlockchainDownload never finished with 2 synced peers and 1 non-synced peer") } // Test when there are >= minNumOutbound peers and >= minNumOutbound peers are synced. gatewayNoTimeouts := make([]modules.Gateway, minNumOutbound-1) for i := 0; i < len(gatewayNoTimeouts); i++ { tmpG, err := gateway.New("localhost:0", false, filepath.Join(testdir, fmt.Sprintf("remote - no timeout-auto-%v", i+3), modules.GatewayDir)) if err != nil { t.Fatal(err) } defer tmpG.Close() mg.mu.Lock() mg.rpcErrs[tmpG.Address()] = nil mg.mu.Unlock() gatewayNoTimeouts[i] = tmpG err = cs.gateway.Connect(gatewayNoTimeouts[i].Address()) if err != nil { t.Fatal(err) } } go func() { cs.threadedInitialBlockchainDownload() doneChan <- struct{}{} }() select { case <-doneChan: case <-time.After(minIBDWaitTime): t.Fatal("threadedInitialBlockchainDownload didn't finish in less than minIBDWaitTime") } } // TestGenesisBlockSync is a regression test that checks what happens when two // consensus sets with only the genesis block are connected. They should // determine that they are sync'd; however, previously they would not sync to // each other as they would report EOF instead of performing correct block // exchange. func TestGenesisBlockSync(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } // Create two consensus sets that have zero blocks each (except for the // genesis block). cst1, err := blankConsensusSetTester(t.Name() + "1") if err != nil { t.Fatal(err) } cst2, err := blankConsensusSetTester(t.Name() + "2") if err != nil { t.Fatal(err) } // Connect them. err = cst1.gateway.Connect(cst2.gateway.Address()) if err != nil { t.Fatal(err) } // Block until both report that they are sync'd.
for i := 0; i < 100; i++ { time.Sleep(time.Millisecond * 100) if cst1.cs.Synced() && cst2.cs.Synced() { break } } if !cst1.cs.Synced() || !cst2.cs.Synced() { t.Error("Consensus sets did not synchronize to each other", cst1.cs.Synced(), cst2.cs.Synced()) } time.Sleep(time.Second * 12) if len(cst1.gateway.Peers()) == 0 { t.Error("disconnection occurred!") } } Sia-1.3.0/modules/consensus/synchronize_test.go000066400000000000000000001252121313565667000216770ustar00rootroot00000000000000package consensus import ( "errors" "fmt" "io" "net" "path/filepath" "strconv" "sync" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/gateway" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) // TestSynchronize tests that the consensus set can successfully synchronize // to a peer. func TestSynchronize(t *testing.T) { if testing.Short() { t.SkipNow() } cst1, err := createConsensusSetTester(t.Name() + "1") if err != nil { t.Fatal(err) } defer cst1.Close() cst2, err := createConsensusSetTester(t.Name() + "2") if err != nil { t.Fatal(err) } defer cst2.Close() // mine on cst2 until it is above cst1 for cst1.cs.dbBlockHeight() >= cst2.cs.dbBlockHeight() { b, _ := cst2.miner.FindBlock() err = cst2.cs.AcceptBlock(b) if err != nil { t.Fatal(err) } } // connect gateways, triggering a Synchronize err = cst1.gateway.Connect(cst2.gateway.Address()) if err != nil { t.Fatal(err) } // blockchains should now match for i := 0; i < 50; i++ { if cst1.cs.dbCurrentBlockID() != cst2.cs.dbCurrentBlockID() { time.Sleep(250 * time.Millisecond) } } if cst1.cs.dbCurrentBlockID() != cst2.cs.dbCurrentBlockID() { t.Fatal("Synchronize failed") } // Mine on cst2 until it is more than 'MaxCatchUpBlocks' ahead of cst1. // NOTE: we have to disconnect prior to this, otherwise cst2 will relay // blocks to cst1. cst1.gateway.Disconnect(cst2.gateway.Address()) cst2.gateway.Disconnect(cst1.gateway.Address()) for cst2.cs.dbBlockHeight() < cst1.cs.dbBlockHeight()+3+MaxCatchUpBlocks { _, err := cst2.miner.AddBlock() if err != nil { t.Fatal(err) } } // reconnect err = cst1.gateway.Connect(cst2.gateway.Address()) if err != nil { t.Fatal(err) } // block heights should now match for i := 0; i < 50; i++ { if cst1.cs.dbBlockHeight() != cst2.cs.dbBlockHeight() { time.Sleep(250 * time.Millisecond) } } if cst1.cs.dbBlockHeight() != cst2.cs.dbBlockHeight() { t.Fatal("synchronize failed") } // extend cst2 with a "bad" (old) block, and synchronize. cst1 should // reject the bad block. cst2.cs.mu.Lock() id, err := cst2.cs.dbGetPath(0) if err != nil { t.Fatal(err) } cst2.cs.dbPushPath(id) cst2.cs.mu.Unlock() // Sleep for a few seconds to give the network call between the two time // to occur. time.Sleep(5 * time.Second) if cst1.cs.dbBlockHeight() == cst2.cs.dbBlockHeight() { t.Fatal("cst1 did not reject bad block") } } // TestBlockHistory tests that blockHistory returns the expected sequence of // block IDs.
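// The expected layout, as exercised below: indices 0-9 hold the ten most // recent IDs, indices 10-13 hold IDs at exponentially increasing depths // (11, 15, 23, and 39 blocks back), index 31 holds the genesis ID, and the // slots in between remain empty at this chain height.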
func TestBlockHistory(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // mine until we have enough blocks to test blockHistory for cst.cs.dbBlockHeight() < 50 { b, _ := cst.miner.FindBlock() err = cst.cs.AcceptBlock(b) if err != nil { t.Fatal(err) } } var history [32]types.BlockID _ = cst.cs.db.View(func(tx *bolt.Tx) error { history = blockHistory(tx) return nil }) // validate history cst.cs.mu.Lock() // first 10 IDs are linear for i := types.BlockHeight(0); i < 10; i++ { id, err := cst.cs.dbGetPath(cst.cs.dbBlockHeight() - i) if err != nil { t.Fatal(err) } if history[i] != id { t.Errorf("Wrong ID in history: expected %v, got %v", id, history[i]) } } // next 4 IDs are exponential heights := []types.BlockHeight{11, 15, 23, 39} for i, height := range heights { id, err := cst.cs.dbGetPath(cst.cs.dbBlockHeight() - height) if err != nil { t.Fatal(err) } if history[10+i] != id { t.Errorf("Wrong ID in history: expected %v, got %v", id, history[10+i]) } } // finally, the genesis ID genesisID, err := cst.cs.dbGetPath(0) if err != nil { t.Fatal(err) } if history[31] != genesisID { t.Errorf("Wrong ID in history: expected %v, got %v", genesisID, history[31]) } cst.cs.mu.Unlock() // remaining IDs should be empty var emptyID types.BlockID for i, id := range history[14:31] { if id != emptyID { t.Errorf("Expected empty ID at index %v, got %v", i+14, id) } } } // mockGatewayCountBroadcasts implements modules.Gateway to mock the Broadcast // method. type mockGatewayCountBroadcasts struct { modules.Gateway numBroadcasts int mu sync.RWMutex } // Broadcast is a mock implementation of modules.Gateway.Broadcast that // increments a counter denoting the number of times it's been called. func (g *mockGatewayCountBroadcasts) Broadcast(name string, obj interface{}, peers []modules.Peer) { g.mu.Lock() g.numBroadcasts++ g.mu.Unlock() g.Gateway.Broadcast(name, obj, peers) } // TestSendBlocksBroadcastsOnce tests that the SendBlocks RPC call only // Broadcasts one block, no matter how many blocks are sent. In the case 0 // blocks are sent, tests that Broadcast is never called. func TestSendBlocksBroadcastsOnce(t *testing.T) { if testing.Short() { t.SkipNow() } // Setup consensus sets. cst1, err := blankConsensusSetTester(t.Name() + "1") if err != nil { t.Fatal(err) } defer cst1.Close() cst2, err := blankConsensusSetTester(t.Name() + "2") if err != nil { t.Fatal(err) } defer cst2.Close() // Setup mock gateway. mg := mockGatewayCountBroadcasts{Gateway: cst1.cs.gateway} cst1.cs.gateway = &mg err = cst1.cs.gateway.Connect(cst2.cs.gateway.Address()) if err != nil { t.Fatal(err) } tests := []struct { blocksToMine int expectedNumBroadcasts int synced bool }{ // Test that no blocks are broadcast during IBD. { blocksToMine: 0, expectedNumBroadcasts: 0, synced: false, }, { blocksToMine: 1, expectedNumBroadcasts: 0, synced: false, }, { blocksToMine: 2, expectedNumBroadcasts: 0, synced: false, }, // Test that only one block is broadcast when IBD is done.
{ blocksToMine: 0, expectedNumBroadcasts: 0, synced: true, }, { blocksToMine: 1, expectedNumBroadcasts: 1, synced: true, }, { blocksToMine: 2, expectedNumBroadcasts: 1, synced: true, }, { blocksToMine: int(MaxCatchUpBlocks), expectedNumBroadcasts: 1, synced: true, }, { blocksToMine: 2 * int(MaxCatchUpBlocks), expectedNumBroadcasts: 1, synced: true, }, { blocksToMine: 2*int(MaxCatchUpBlocks) + 1, expectedNumBroadcasts: 1, synced: true, }, } for j, test := range tests { cst1.cs.mu.Lock() cst1.cs.synced = test.synced cst1.cs.mu.Unlock() mg.mu.Lock() mg.numBroadcasts = 0 mg.mu.Unlock() for i := 0; i < test.blocksToMine; i++ { b, minerErr := cst2.miner.FindBlock() if minerErr != nil { t.Fatal(minerErr) } // managedAcceptBlocks is used here instead of AcceptBlock so as not to // call Broadcast outside of the SendBlocks RPC. _, err = cst2.cs.managedAcceptBlocks([]types.Block{b}) if err != nil { t.Fatal(err) } } err = cst1.cs.gateway.RPC(cst2.cs.gateway.Address(), "SendBlocks", cst1.cs.threadedReceiveBlocks) if err != nil { t.Fatal(err) } // Sleep to wait for possible calls to Broadcast to complete. We cannot // wait on a channel because we don't know how many times broadcast has // been called. time.Sleep(10 * time.Millisecond) mg.mu.RLock() numBroadcasts := mg.numBroadcasts mg.mu.RUnlock() if numBroadcasts != test.expectedNumBroadcasts { t.Errorf("test #%d: expected %d number of broadcasts, got %d", j, test.expectedNumBroadcasts, numBroadcasts) } } } // TestIntegrationRPCSendBlocks tests that the SendBlocks RPC adds blocks to // the consensus set, and that the consensus set catches up with the remote peer // and possibly reorgs. func TestIntegrationRPCSendBlocks(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } type sendBlocksTest struct { commonBlocksToMine types.BlockHeight localBlocksToMine types.BlockHeight remoteBlocksToMine types.BlockHeight msg string } tests := []sendBlocksTest{ { msg: "SendBlocks shouldn't do anything when both CSs are at the genesis block", }, { commonBlocksToMine: 10, msg: "SendBlocks shouldn't do anything when both CSs are at the same block", }, { commonBlocksToMine: 10, localBlocksToMine: 5, msg: "SendBlocks shouldn't do anything when the remote CS is behind the local CS", }, { commonBlocksToMine: 10, remoteBlocksToMine: 5, msg: "SendBlocks should catch up the local CS to the remote CS when it is behind", }, { remoteBlocksToMine: 10, localBlocksToMine: 5, msg: "SendBlocks should reorg the local CS when the remote CS's chain is longer", }, { commonBlocksToMine: 10, remoteBlocksToMine: 10, localBlocksToMine: 5, msg: "SendBlocks should reorg the local CS when the remote CS's chain is longer", }, { remoteBlocksToMine: MaxCatchUpBlocks - 1, msg: "SendBlocks should catch up when the remote CS is ahead", }, { remoteBlocksToMine: MaxCatchUpBlocks - 1, localBlocksToMine: MaxCatchUpBlocks - 2, msg: "SendBlocks should reorg the local CS when the remote CS's chain is longer", }, { remoteBlocksToMine: MaxCatchUpBlocks, msg: "SendBlocks should catch up when the remote CS is ahead", }, { remoteBlocksToMine: MaxCatchUpBlocks, localBlocksToMine: MaxCatchUpBlocks - 2, msg: "SendBlocks should reorg the local CS when the remote CS's chain is longer", }, { remoteBlocksToMine: MaxCatchUpBlocks + 1, // There was a bug that caused SendBlocks to be one block behind when its peer was ahead by (k * MaxCatchUpBlocks) + 1 blocks.
msg: "SendBlocks should catch up when the remote CS is ahead", }, { remoteBlocksToMine: MaxCatchUpBlocks + 1, // There was a bug that caused SendBlocks to be one block behind when its peer was ahead by (k * MaxCatchUpBlocks) + 1 blocks. localBlocksToMine: MaxCatchUpBlocks - 2, msg: "SendBlocks should reorg the local CS when the remote CS's chain is longer", }, { remoteBlocksToMine: 2*MaxCatchUpBlocks + 1, msg: "SendBlocks should catch up when the remote CS is ahead", }, { remoteBlocksToMine: 2*MaxCatchUpBlocks + 1, localBlocksToMine: 2*MaxCatchUpBlocks - 2, msg: "SendBlocks should reorg the local CS when the remote CS's chain is longer", }, { remoteBlocksToMine: 12, msg: "SendBlocks should catch up when the remote CS is ahead", }, { remoteBlocksToMine: 15, msg: "SendBlocks should catch up when the remote CS is ahead", }, { remoteBlocksToMine: 16, msg: "SendBlocks should catch up when the remote CS is ahead", }, { remoteBlocksToMine: 17, msg: "SendBlocks should catch up when the remote CS is ahead", }, { remoteBlocksToMine: 23, msg: "SendBlocks should catch up when the remote CS is ahead", }, { remoteBlocksToMine: 31, msg: "SendBlocks should catch up when the remote CS is ahead", }, { remoteBlocksToMine: 32, msg: "SendBlocks should catch up when the remote CS is ahead", }, { remoteBlocksToMine: 33, msg: "SendBlocks should catch up when the remote CS is ahead", }, } for i := 1; i < 10; i++ { tests = append(tests, sendBlocksTest{ remoteBlocksToMine: types.BlockHeight(i), msg: "SendBlocks should catch up when the remote CS is ahead", }) } for i, tt := range tests { // Create the "remote" peer. remoteCST, err := blankConsensusSetTester(filepath.Join(t.Name()+" - remote", strconv.Itoa(i))) if err != nil { t.Fatalf("test #%d, %v: %v", i, tt.msg, err) } // Create the "local" peer. localCST, err := blankConsensusSetTester(filepath.Join(t.Name()+" - local", strconv.Itoa(i))) if err != nil { t.Fatalf("test #%d, %v: %v", i, tt.msg, err) } localCST.cs.gateway.Connect(remoteCST.cs.gateway.Address()) // Wait a second to let the OnConnectRPCs finish time.Sleep(100 * time.Millisecond) // Mine blocks. for i := types.BlockHeight(0); i < tt.commonBlocksToMine; i++ { b, err := remoteCST.miner.FindBlock() if err != nil { t.Fatalf("test #%d, %v: %v", i, tt.msg, err) } _, err = remoteCST.cs.managedAcceptBlocks([]types.Block{b}) if err != nil { t.Fatalf("test #%d, %v: %v", i, tt.msg, err) } _, err = localCST.cs.managedAcceptBlocks([]types.Block{b}) if err != nil { t.Fatalf("test #%d, %v: %v", i, tt.msg, err) } } for i := types.BlockHeight(0); i < tt.remoteBlocksToMine; i++ { b, err := remoteCST.miner.FindBlock() if err != nil { t.Fatalf("test #%d, %v: %v", i, tt.msg, err) } _, err = remoteCST.cs.managedAcceptBlocks([]types.Block{b}) if err != nil { t.Fatalf("test #%d, %v: %v", i, tt.msg, err) } } for i := types.BlockHeight(0); i < tt.localBlocksToMine; i++ { b, err := localCST.miner.FindBlock() if err != nil { t.Fatalf("test #%d, %v: %v", i, tt.msg, err) } _, err = localCST.cs.managedAcceptBlocks([]types.Block{b}) if err != nil { t.Fatalf("test #%d, %v: %v", i, tt.msg, err) } } localCurrentBlockID := localCST.cs.CurrentBlock().ID() remoteCurrentBlockID := remoteCST.cs.CurrentBlock().ID() err = localCST.cs.gateway.RPC(remoteCST.cs.gateway.Address(), "SendBlocks", localCST.cs.threadedReceiveBlocks) if err != nil { t.Errorf("test #%d, %v: %v", i, tt.msg, err) } // Assume that if remoteBlocksToMine is greater than localBlocksToMine, then // the local CS must have received the new blocks (and reorged). 
if tt.remoteBlocksToMine > tt.localBlocksToMine { // Verify that the remote cs did not change. if remoteCST.cs.CurrentBlock().ID() != remoteCurrentBlockID { t.Errorf("test #%d, %v: the remote CS is at a different current block than before SendBlocks", i, tt.msg) } // Verify that the local cs got the new blocks. if localCST.cs.Height() != remoteCST.cs.Height() { t.Errorf("test #%d, %v: expected height %v, got %v", i, tt.msg, remoteCST.cs.Height(), localCST.cs.Height()) } if localCST.cs.CurrentBlock().ID() != remoteCST.cs.CurrentBlock().ID() { t.Errorf("test #%d, %v: remote and local CSTs have different current blocks", i, tt.msg) } } else { // Verify that the local cs did not change. if localCST.cs.CurrentBlock().ID() != localCurrentBlockID { t.Errorf("test #%d, %v: the local CS is at a different current block than before SendBlocks", i, tt.msg) } } // Cleanup. err = localCST.Close() if err != nil { t.Fatalf("test #%d, %v: %v", i, tt.msg, err) } err = remoteCST.Close() if err != nil { t.Fatalf("test #%d, %v: %v", i, tt.msg, err) } } } // TestRPCSendBlockSendsOnlyNecessaryBlocks tests that the SendBlocks RPC only // sends blocks that the caller does not have and that are part of the longest // chain. func TestRPCSendBlockSendsOnlyNecessaryBlocks(t *testing.T) { if testing.Short() { t.SkipNow() } // Create the "remote" peer. cst, err := blankConsensusSetTester(t.Name() + "- remote") if err != nil { t.Fatal(err) } defer cst.Close() // Create the "local" peer. // // We create this peer manually (not using blankConsensusSetTester) so that we // can connect it to the remote peer before calling consensus.New so as to // prevent SendBlocks from triggering on Connect. testdir := build.TempDir(modules.ConsensusDir, t.Name()+" - local") g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { t.Fatal(err) } defer g.Close() err = g.Connect(cst.cs.gateway.Address()) if err != nil { t.Fatal(err) } cs, err := New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { t.Fatal(err) } defer cs.Close() // Add a few initial blocks to both consensus sets. These are the blocks we // want to make sure SendBlocks is not sending unnecessarily as both parties // already have them. knownBlocks := make(map[types.BlockID]struct{}) for i := 0; i < 20; i++ { b, err := cst.miner.FindBlock() if err != nil { t.Fatal(err) } _, err = cst.cs.managedAcceptBlocks([]types.Block{b}) if err != nil { t.Fatal(err) } _, err = cs.managedAcceptBlocks([]types.Block{b}) if err != nil { t.Fatal(err) } knownBlocks[b.ID()] = struct{}{} } // Add a few blocks to only the remote peer and store which blocks we add. addedBlocks := make(map[types.BlockID]struct{}) for i := 0; i < 20; i++ { b, err := cst.miner.FindBlock() if err != nil { t.Fatal(err) } _, err = cst.cs.managedAcceptBlocks([]types.Block{b}) if err != nil { t.Fatal(err) } addedBlocks[b.ID()] = struct{}{} } err = cs.gateway.RPC(cst.cs.gateway.Address(), "SendBlocks", func(conn modules.PeerConn) error { // Get blockIDs to send. var history [32]types.BlockID cs.mu.RLock() err := cs.db.View(func(tx *bolt.Tx) error { history = blockHistory(tx) return nil }) cs.mu.RUnlock() if err != nil { return err } // Send the block ids. if err := encoding.WriteObject(conn, history); err != nil { return err } moreAvailable := true for moreAvailable { // Read a slice of blocks from the wire. 
var newBlocks []types.Block if err := encoding.ReadObject(conn, &newBlocks, uint64(MaxCatchUpBlocks)*types.BlockSizeLimit); err != nil { return err } if err := encoding.ReadObject(conn, &moreAvailable, 1); err != nil { return err } // Check if the block needed to be sent. for _, newB := range newBlocks { _, ok := knownBlocks[newB.ID()] if ok { t.Error("SendBlocks sent an unnecessary block that the caller already had") continue } _, ok = addedBlocks[newB.ID()] if !ok { t.Error("SendBlocks sent an unnecessary block that the caller did not have") } } } return nil }) if err != nil { t.Fatal(err) } } // mock PeerConns for testing peer conns that fail reading or writing. type ( mockPeerConn struct { net.Conn } mockPeerConnFailingReader struct { mockPeerConn } mockPeerConnFailingWriter struct { mockPeerConn } ) var ( errFailingReader = errors.New("failing reader") errFailingWriter = errors.New("failing writer") ) // Close returns 'nil', and does nothing behind the scenes. This is because the // testing reuses pipes, but the consensus code now correctly closes conns after // handling them. func (pc mockPeerConn) Close() error { return nil } // RPCAddr implements this method of the modules.PeerConn interface. func (pc mockPeerConn) RPCAddr() modules.NetAddress { return "mockPeerConn dialback addr" } // SetDeadline returns 'nil', and does nothing behind the scenes. func (pc mockPeerConn) SetDeadline(time.Time) error { return nil } // Read is a mock implementation of modules.PeerConn.Read that always returns // an error. func (mockPeerConnFailingReader) Read([]byte) (int, error) { return 0, errFailingReader } // Write is a mock implementation of modules.PeerConn.Write that always returns // an error. func (mockPeerConnFailingWriter) Write([]byte) (int, error) { return 0, errFailingWriter } // TestSendBlk probes the ConsensusSet.rpcSendBlk method and tests that it // correctly receives block ids and writes out the corresponding blocks. func TestSendBlk(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := blankConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() p1, p2 := net.Pipe() mockP1 := mockPeerConn{p1} fnErr := make(chan error) tests := []struct { id types.BlockID conn modules.PeerConn fn func() // handle reading and writing over the pipe to the mock conn. errWant error msg string }{ // TODO: Test with a failing database. // Test with a failing reader. { conn: mockPeerConnFailingReader{mockP1}, fn: func() { fnErr <- nil }, errWant: errFailingReader, msg: "expected rpcSendBlk to error with a failing reader conn", }, // Test with a block id not found in the blockmap. { conn: mockP1, fn: func() { // Write a block id to the conn. fnErr <- encoding.WriteObject(p2, types.BlockID{}) }, errWant: errNilItem, msg: "expected rpcSendBlk to error with a nonexistent block id", }, // Test with a failing writer. { conn: mockPeerConnFailingWriter{mockP1}, fn: func() { // Write a valid block id to the conn. fnErr <- encoding.WriteObject(p2, types.GenesisID) }, errWant: errFailingWriter, msg: "expected rpcSendBlk to error with a failing writer conn", }, // Test with a valid conn and valid block. { conn: mockP1, fn: func() { // Write a valid block id to the conn. if err := encoding.WriteObject(p2, types.GenesisID); err != nil { fnErr <- err } // Read the block written to the conn. var block types.Block if err := encoding.ReadObject(p2, &block, types.BlockSizeLimit); err != nil { fnErr <- err } // Verify the block is the expected block. 
if block.ID() != types.GenesisID { fnErr <- fmt.Errorf("rpcSendBlk wrote a different block to conn than the block requested. requested block id: %v, received block id: %v", types.GenesisID, block.ID()) } fnErr <- nil }, errWant: nil, msg: "expected rpcSendBlk to succeed with a valid conn and valid block", }, } for _, tt := range tests { go tt.fn() err := cst.cs.rpcSendBlk(tt.conn) if err != tt.errWant { t.Errorf("%s: expected to fail with `%v', got: `%v'", tt.msg, tt.errWant, err) } err = <-fnErr if err != nil { t.Fatal(err) } } } // TestThreadedReceiveBlock probes the RPCFunc returned by // cs.threadedReceiveBlock and tests that it correctly requests a block id and // receives a block. Also tests that the block is correctly (not) accepted into // the consensus set. func TestThreadedReceiveBlock(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := blankConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() p1, p2 := net.Pipe() mockP1 := mockPeerConn{p1} fnErr := make(chan error) tests := []struct { id types.BlockID conn modules.PeerConn fn func() // handle reading and writing over the pipe to the mock conn. errWant error msg string }{ // Test with failing writer. { conn: mockPeerConnFailingWriter{mockP1}, fn: func() { fnErr <- nil }, errWant: errFailingWriter, msg: "the function returned from threadedReceiveBlock should fail with a PeerConn with a failing writer", }, // Test with failing reader. { conn: mockPeerConnFailingReader{mockP1}, fn: func() { // Read the id written to conn. var id types.BlockID if err := encoding.ReadObject(p2, &id, crypto.HashSize); err != nil { fnErr <- err } // Verify the id is the expected id. expectedID := types.BlockID{} if id != expectedID { fnErr <- fmt.Errorf("id written to conn was %v, but id received was %v", expectedID, id) } fnErr <- nil }, errWant: errFailingReader, msg: "the function returned from threadedReceiveBlock should fail with a PeerConn with a failing reader", }, // Test with a valid conn, but an invalid block. { id: types.BlockID{1}, conn: mockP1, fn: func() { // Read the id written to conn. var id types.BlockID if err := encoding.ReadObject(p2, &id, crypto.HashSize); err != nil { fnErr <- err } // Verify the id is the expected id. expectedID := types.BlockID{1} if id != expectedID { fnErr <- fmt.Errorf("id written to conn was %v, but id received was %v", expectedID, id) } // Write an invalid block to conn. block := types.Block{} if err := encoding.WriteObject(p2, block); err != nil { fnErr <- err } fnErr <- nil }, errWant: errOrphan, msg: "the function returned from threadedReceiveBlock should not accept an invalid block", }, // Test with a valid conn and a valid block. { id: types.BlockID{2}, conn: mockP1, fn: func() { // Read the id written to conn. var id types.BlockID if err := encoding.ReadObject(p2, &id, crypto.HashSize); err != nil { fnErr <- err } // Verify the id is the expected id. expectedID := types.BlockID{2} if id != expectedID { fnErr <- fmt.Errorf("id written to conn was %v, but id received was %v", expectedID, id) } // Write a valid block to conn. 
block, err := cst.miner.FindBlock() if err != nil { fnErr <- err } if err := encoding.WriteObject(p2, block); err != nil { fnErr <- err } fnErr <- nil }, errWant: nil, msg: "the function returned from managedReceiveBlock should accept a valid block", }, } for _, tt := range tests { managedReceiveFN := cst.cs.managedReceiveBlock(tt.id) go tt.fn() err := managedReceiveFN(tt.conn) if err != tt.errWant { t.Errorf("%s: expected to fail with `%v', got: `%v'", tt.msg, tt.errWant, err) } err = <-fnErr if err != nil { t.Fatal(err) } } } // TestIntegrationSendBlkRPC probes the SendBlk RPC and tests that blocks are // correctly requested, received, and accepted into the consensus set. func TestIntegrationSendBlkRPC(t *testing.T) { if testing.Short() { t.SkipNow() } cst1, err := blankConsensusSetTester(t.Name() + "1") if err != nil { t.Fatal(err) } defer cst1.Close() cst2, err := blankConsensusSetTester(t.Name() + "2") if err != nil { t.Fatal(err) } defer cst2.Close() err = cst1.cs.gateway.Connect(cst2.cs.gateway.Address()) if err != nil { t.Fatal(err) } // Sleep to give the consensus sets time to finish the background startup // routines - if the block mined below is mined before the sets finish // synchronizing to each other, it screws up the test. time.Sleep(500 * time.Millisecond) // Test that cst1 doesn't accept a block it's already seen (the genesis block). err = cst1.cs.gateway.RPC(cst2.cs.gateway.Address(), "SendBlk", cst1.cs.managedReceiveBlock(types.GenesisID)) if err != modules.ErrBlockKnown && err != modules.ErrNonExtendingBlock { t.Errorf("cst1 should reject known blocks: expected error '%v', got '%v'", modules.ErrBlockKnown, err) } // Test that cst2 errors when it doesn't recognize the requested block. err = cst1.cs.gateway.RPC(cst2.cs.gateway.Address(), "SendBlk", cst1.cs.managedReceiveBlock(types.BlockID{})) if err != io.EOF { t.Errorf("cst2 shouldn't return a block it doesn't recognize: expected error '%v', got '%v'", io.EOF, err) } // Test that cst1 accepts a block that extends its longest chain. block, err := cst2.miner.FindBlock() if err != nil { t.Fatal(err) } _, err = cst2.cs.managedAcceptBlocks([]types.Block{block}) // Call managedAcceptBlocks so that the block isn't broadcast. if err != nil { t.Fatal(err) } err = cst1.cs.gateway.RPC(cst2.cs.gateway.Address(), "SendBlk", cst1.cs.managedReceiveBlock(block.ID())) if err != nil { t.Errorf("cst1 should accept a block that extends its longest chain: expected nil error, got '%v'", err) } // Test that cst2 accepts a block that extends its longest chain. block, err = cst1.miner.FindBlock() if err != nil { t.Fatal(err) } _, err = cst1.cs.managedAcceptBlocks([]types.Block{block}) // Call managedAcceptBlocks so that the block isn't broadcast. if err != nil { t.Fatal(err) } err = cst2.cs.gateway.RPC(cst1.cs.gateway.Address(), "SendBlk", cst2.cs.managedReceiveBlock(block.ID())) if err != nil { t.Errorf("cst2 should accept a block that extends its longest chain: expected nil error, got '%v'", err) } // Test that cst1 doesn't accept an orphan block. block, err = cst2.miner.FindBlock() if err != nil { t.Fatal(err) } _, err = cst2.cs.managedAcceptBlocks([]types.Block{block}) // Call managedAcceptBlocks so that the block isn't broadcast. if err != nil { t.Fatal(err) } block, err = cst2.miner.FindBlock() if err != nil { t.Fatal(err) } _, err = cst2.cs.managedAcceptBlocks([]types.Block{block}) // Call managedAcceptBlocks so that the block isn't broadcast.
if err != nil { t.Fatal(err) } err = cst1.cs.gateway.RPC(cst2.cs.gateway.Address(), "SendBlk", cst1.cs.managedReceiveBlock(block.ID())) if err != errOrphan { t.Errorf("cst1 should not accept an orphan block: expected error '%v', got '%v'", errOrphan, err) } } type mockGatewayCallsRPC struct { modules.Gateway rpcCalled chan string } func (g *mockGatewayCallsRPC) RPC(addr modules.NetAddress, name string, fn modules.RPCFunc) error { g.rpcCalled <- name return nil } // TestRelayHeader tests that rpcRelayHeader requests the blocks corresponding // to valid headers with known parents, and requests the block history for // orphan headers. func TestRelayHeader(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := blankConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() mg := &mockGatewayCallsRPC{ Gateway: cst.cs.gateway, rpcCalled: make(chan string), } cst.cs.gateway = mg p1, p2 := net.Pipe() mockP2 := mockPeerConn{p2} // Valid block that rpcRelayHeader should accept. validBlock, err := cst.miner.FindBlock() if err != nil { t.Fatal(err) } // A block in the near future for which rpcRelayHeader should not return an // error, but should still request the corresponding block. block, target, err := cst.miner.BlockForWork() if err != nil { t.Fatal(err) } block.Timestamp = types.CurrentTimestamp() + 2 + types.FutureThreshold futureBlock, _ := cst.miner.SolveBlock(block, target) tests := []struct { header types.BlockHeader errWant error errMSG string rpcWant string rpcMSG string }{ // Test that rpcRelayHeader rejects known blocks. { header: types.GenesisBlock.Header(), errWant: modules.ErrBlockKnown, errMSG: "rpcRelayHeader should reject headers to known blocks", }, // Test that rpcRelayHeader requests the parent blocks of orphan headers. { header: types.BlockHeader{}, errWant: nil, errMSG: "rpcRelayHeader should not return an error for orphan headers", rpcWant: "SendBlocks", rpcMSG: "rpcRelayHeader should request blocks when the relayed header is an orphan", }, // Test that rpcRelayHeader accepts a valid header that extends the longest chain. { header: validBlock.Header(), errWant: nil, errMSG: "rpcRelayHeader should accept a valid header", rpcWant: "SendBlk", rpcMSG: "rpcRelayHeader should request the block of a valid header", }, // Test that rpcRelayHeader requests a future, but otherwise valid block. { header: futureBlock.Header(), errWant: nil, errMSG: "rpcRelayHeader should not return an error for a future header", rpcWant: "SendBlk", rpcMSG: "rpcRelayHeader should request the corresponding block to a future, but otherwise valid header", }, } errChan := make(chan error) for _, tt := range tests { go func() { errChan <- encoding.WriteObject(p1, tt.header) }() err = cst.cs.threadedRPCRelayHeader(mockP2) if err != tt.errWant { t.Errorf("%s: expected '%v', got '%v'", tt.errMSG, tt.errWant, err) } err = <-errChan if err != nil { t.Fatal(err) } if tt.rpcWant == "" { select { case rpc := <-mg.rpcCalled: t.Errorf("no RPC call expected, but '%v' was called", rpc) case <-time.After(10 * time.Millisecond): } } else { select { case rpc := <-mg.rpcCalled: if rpc != tt.rpcWant { t.Errorf("%s: expected '%v', got '%v'", tt.rpcMSG, tt.rpcWant, rpc) } case <-time.After(10 * time.Millisecond): t.Errorf("%s: expected '%v', but no RPC was called", tt.rpcMSG, tt.rpcWant) } } } } // TestIntegrationBroadcastRelayHeader checks that broadcasting RelayHeader // causes peers to also broadcast the header (if the block is valid).
func TestIntegrationBroadcastRelayHeader(t *testing.T) { if testing.Short() { t.SkipNow() } // Setup consensus sets. cst1, err := blankConsensusSetTester(t.Name() + "1") if err != nil { t.Fatal(err) } defer cst1.Close() cst2, err := blankConsensusSetTester(t.Name() + "2") if err != nil { t.Fatal(err) } defer cst2.Close() // Setup mock gateway. mg := &mockGatewayDoesBroadcast{ Gateway: cst2.cs.gateway, broadcastCalled: make(chan struct{}), } cst2.cs.gateway = mg err = cst1.cs.gateway.Connect(cst2.cs.gateway.Address()) if err != nil { t.Fatal(err) } // Give time for on connect RPCs to finish. time.Sleep(500 * time.Millisecond) // Test that broadcasting an invalid block header over RelayHeader on cst1.cs // does not result in cst2.cs.gateway receiving a broadcast. cst1.cs.gateway.Broadcast("RelayHeader", types.BlockHeader{}, cst1.cs.gateway.Peers()) select { case <-mg.broadcastCalled: t.Fatal("RelayHeader broadcasted an invalid block header") case <-time.After(500 * time.Millisecond): } // Test that broadcasting a valid block header over RelayHeader on cst1.cs // causes cst2.cs.gateway to receive a broadcast. validBlock, err := cst1.miner.FindBlock() if err != nil { t.Fatal(err) } _, err = cst1.cs.managedAcceptBlocks([]types.Block{validBlock}) if err != nil { t.Fatal(err) } cst1.cs.gateway.Broadcast("RelayHeader", validBlock.Header(), cst1.cs.gateway.Peers()) select { case <-mg.broadcastCalled: case <-time.After(1500 * time.Millisecond): t.Fatal("RelayHeader didn't broadcast a valid block header") } } // TestIntegrationRelaySynchronize tests that blocks are relayed as they are // accepted and that peers stay synchronized. func TestIntegrationRelaySynchronize(t *testing.T) { if testing.Short() { t.SkipNow() } cst1, err := blankConsensusSetTester(t.Name() + "1") if err != nil { t.Fatal(err) } defer cst1.Close() cst2, err := blankConsensusSetTester(t.Name() + "2") if err != nil { t.Fatal(err) } defer cst2.Close() cst3, err := blankConsensusSetTester(t.Name() + "3") if err != nil { t.Fatal(err) } defer cst3.Close() // Connect them like so: cst1 <-> cst2 <-> cst3 err = cst1.gateway.Connect(cst2.gateway.Address()) if err != nil { t.Fatal(err) } err = cst2.gateway.Connect(cst3.gateway.Address()) if err != nil { t.Fatal(err) } // Make sure cst1 is not connected to cst3. cst1.gateway.Disconnect(cst3.gateway.Address()) cst3.gateway.Disconnect(cst1.gateway.Address()) // Spin until the connection calls have completed. for i := 0; i < 100; i++ { time.Sleep(150 * time.Millisecond) if len(cst1.gateway.Peers()) >= 1 && len(cst3.gateway.Peers()) >= 1 { break } } if len(cst1.gateway.Peers()) < 1 || len(cst3.gateway.Peers()) < 1 { t.Fatal("Peer connection has failed.") } // Mine a block on cst1, expecting the block to propagate from cst1 to // cst2, and then to cst3. b1, err := cst1.miner.AddBlock() if err != nil { t.Log(b1.ID()) t.Log(cst1.cs.CurrentBlock().ID()) t.Log(cst2.cs.CurrentBlock().ID()) t.Fatal(err) } // Spin until the block has propagated to cst2. for i := 0; i < 100; i++ { time.Sleep(150 * time.Millisecond) if cst2.cs.CurrentBlock().ID() == b1.ID() { break } } if cst2.cs.CurrentBlock().ID() != b1.ID() { t.Fatal("Block propagation has failed") } // Spin until the block has propagated to cst3. for i := 0; i < 100; i++ { time.Sleep(150 * time.Millisecond) if cst3.cs.CurrentBlock().ID() == b1.ID() { break } } if cst3.cs.CurrentBlock().ID() != b1.ID() { t.Fatal("Block propagation has failed") } // Mine a block on cst2. 
b2, err := cst2.miner.AddBlock() if err != nil { t.Log(b1.ID()) t.Log(b2.ID()) t.Log(cst2.cs.CurrentBlock().ID()) t.Log(cst3.cs.CurrentBlock().ID()) t.Fatal(err) } // Spin until the block has propagated to cst1. for i := 0; i < 100; i++ { time.Sleep(150 * time.Millisecond) if cst1.cs.CurrentBlock().ID() == b2.ID() { break } } if cst1.cs.CurrentBlock().ID() != b2.ID() { t.Fatal("block propagation has failed") } // Spin until the block has propagated to cst3. for i := 0; i < 100; i++ { time.Sleep(150 * time.Millisecond) if cst3.cs.CurrentBlock().ID() == b2.ID() { break } } if cst3.cs.CurrentBlock().ID() != b2.ID() { t.Fatal("block propagation has failed") } // Mine a block on cst3. b3, err := cst3.miner.AddBlock() if err != nil { t.Log(b1.ID()) t.Log(b2.ID()) t.Log(b3.ID()) t.Log(cst1.cs.CurrentBlock().ID()) t.Log(cst2.cs.CurrentBlock().ID()) t.Log(cst3.cs.CurrentBlock().ID()) t.Fatal(err) } // Spin until the block has propagated to cst1. for i := 0; i < 100; i++ { time.Sleep(150 * time.Millisecond) if cst1.cs.CurrentBlock().ID() == b3.ID() { break } } if cst1.cs.CurrentBlock().ID() != b3.ID() { t.Fatal("block propagation has failed") } // Spin until the block has propagated to cst2. for i := 0; i < 100; i++ { time.Sleep(150 * time.Millisecond) if cst2.cs.CurrentBlock().ID() == b3.ID() { break } } if cst2.cs.CurrentBlock().ID() != b3.ID() { t.Fatal("block propagation has failed") } // Check that cst1 and cst3 are not peers; if they are peers then this test // is invalid because it has failed to be certain that blocks can make // multiple hops. if len(cst1.gateway.Peers()) != 1 || cst1.gateway.Peers()[0].NetAddress == cst3.gateway.Address() { t.Fatal("Test is invalid, cst1 and cst3 have connected to each other") } if len(cst3.gateway.Peers()) != 1 || cst3.gateway.Peers()[0].NetAddress == cst1.gateway.Address() { t.Fatal("Test is invalid, cst3 and cst1 have connected to each other") } } // mockPeerConnMockReadWrite is a mock implementation of modules.PeerConn that // fails reading or writing if readErr or writeErr is non-nil, // respectively. type mockPeerConnMockReadWrite struct { modules.PeerConn readErr error writeErr error } // Read is a mock implementation of conn.Read that fails with the mock error if // readErr != nil. func (conn mockPeerConnMockReadWrite) Read(b []byte) (n int, err error) { if conn.readErr != nil { return 0, conn.readErr } return conn.PeerConn.Read(b) } // Write is a mock implementation of conn.Write that fails with the mock error // if writeErr != nil. func (conn mockPeerConnMockReadWrite) Write(b []byte) (n int, err error) { if conn.writeErr != nil { return 0, conn.writeErr } return conn.PeerConn.Write(b) } // mockNetError is a mock net.Error. type mockNetError struct { error timeout bool temporary bool } // Timeout is a mock implementation of net.Error.Timeout. func (err mockNetError) Timeout() bool { return err.timeout } // Temporary is a mock implementation of net.Error.Temporary. func (err mockNetError) Temporary() bool { return err.temporary } // TestThreadedReceiveBlocksStalls tests that threadedReceiveBlocks returns // errSendBlocksStalled when the connection times out before a block is // received.
func TestThreadedReceiveBlocksStalls(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := blankConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() p1, p2 := net.Pipe() mockP2 := mockPeerConn{p2} writeTimeoutConn := mockPeerConnMockReadWrite{ PeerConn: mockP2, writeErr: mockNetError{ error: errors.New("Write timeout"), timeout: true, }, } readTimeoutConn := mockPeerConnMockReadWrite{ PeerConn: mockP2, readErr: mockNetError{ error: errors.New("Read timeout"), timeout: true, }, } readNetErrConn := mockPeerConnMockReadWrite{ PeerConn: mockP2, readErr: mockNetError{ error: errors.New("mock read net.Error"), }, } writeNetErrConn := mockPeerConnMockReadWrite{ PeerConn: mockP2, writeErr: mockNetError{ error: errors.New("mock write net.Error"), }, } readErrConn := mockPeerConnMockReadWrite{ PeerConn: mockP2, readErr: errors.New("mock read err"), } writeErrConn := mockPeerConnMockReadWrite{ PeerConn: mockP2, writeErr: errors.New("mock write err"), } // Test that threadedReceiveBlocks errors with errSendBlocksStalled when 0 // blocks have been sent and the conn times out. err = cst.cs.threadedReceiveBlocks(writeTimeoutConn) if err != errSendBlocksStalled { t.Errorf("expected threadedReceiveBlocks to err with \"%v\", got \"%v\"", errSendBlocksStalled, err) } errChan := make(chan error) go func() { var knownBlocks [32]types.BlockID errChan <- encoding.ReadObject(p1, &knownBlocks, 32*crypto.HashSize) }() err = cst.cs.threadedReceiveBlocks(readTimeoutConn) if err != errSendBlocksStalled { t.Errorf("expected threadedReceiveBlocks to err with \"%v\", got \"%v\"", errSendBlocksStalled, err) } err = <-errChan if err != nil { t.Fatal(err) } // Test that threadedReceiveBlocks errors when writing the block history fails. // Test with an error of type net.Error. err = cst.cs.threadedReceiveBlocks(writeNetErrConn) if err != writeNetErrConn.writeErr { t.Errorf("expected threadedReceiveBlocks to err with \"%v\", got \"%v\"", writeNetErrConn.writeErr, err) } // Test with an error of type error. err = cst.cs.threadedReceiveBlocks(writeErrConn) if err != writeErrConn.writeErr { t.Errorf("expected threadedReceiveBlocks to err with \"%v\", got \"%v\"", writeErrConn.writeErr, err) } // Test that threadedReceiveBlocks errors when reading blocks fails. // Test with an error of type net.Error. go func() { var knownBlocks [32]types.BlockID errChan <- encoding.ReadObject(p1, &knownBlocks, 32*crypto.HashSize) }() err = cst.cs.threadedReceiveBlocks(readNetErrConn) if err != readNetErrConn.readErr { t.Errorf("expected threadedReceiveBlocks to err with \"%v\", got \"%v\"", readNetErrConn.readErr, err) } err = <-errChan if err != nil { t.Fatal(err) } // Test with an error of type error. go func() { var knownBlocks [32]types.BlockID errChan <- encoding.ReadObject(p1, &knownBlocks, 32*crypto.HashSize) }() err = cst.cs.threadedReceiveBlocks(readErrConn) if err != readErrConn.readErr { t.Errorf("expected threadedReceiveBlocks to err with \"%v\", got \"%v\"", readErrConn.readErr, err) } err = <-errChan if err != nil { t.Fatal(err) } // TODO: Test that threadedReceiveBlocks doesn't error with a timeout if it has received one block before this timed out read/write. // TODO: Test that threadedReceiveBlocks doesn't error with errSendBlocksStalled if it successfully received one block. } // TestIntegrationSendBlocksStalls tests that the SendBlocks RPC fails with // errSendBlocksStalled when the RPC times out and the requesting end has // received 0 blocks.
func TestIntegrationSendBlocksStalls(t *testing.T) { if testing.Short() { t.SkipNow() } cstLocal, err := blankConsensusSetTester(t.Name() + "- local") if err != nil { t.Fatal(err) } defer cstLocal.Close() cstRemote, err := blankConsensusSetTester(t.Name() + "- remote") if err != nil { t.Fatal(err) } defer cstRemote.Close() cstLocal.cs.gateway.Connect(cstRemote.cs.gateway.Address()) // Lock the remote CST so that SendBlocks blocks and times out. cstRemote.cs.mu.Lock() defer cstRemote.cs.mu.Unlock() err = cstLocal.cs.gateway.RPC(cstRemote.cs.gateway.Address(), "SendBlocks", cstLocal.cs.threadedReceiveBlocks) if err != errSendBlocksStalled { t.Fatal(err) } } Sia-1.3.0/modules/consensus/validtransaction.go000066400000000000000000000322141313565667000216310ustar00rootroot00000000000000package consensus import ( "errors" "math/big" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) var ( errAlteredRevisionPayouts = errors.New("file contract revision has altered payout volume") errInvalidStorageProof = errors.New("provided storage proof is invalid") errLateRevision = errors.New("file contract revision submitted after deadline") errLowRevisionNumber = errors.New("transaction has a file contract with an outdated revision number") errMissingSiacoinOutput = errors.New("transaction spends a nonexistent siacoin output") errMissingSiafundOutput = errors.New("transaction spends a nonexistent siafund output") errSiacoinInputOutputMismatch = errors.New("siacoin inputs do not equal siacoin outputs for transaction") errSiafundInputOutputMismatch = errors.New("siafund inputs do not equal siafund outputs for transaction") errUnfinishedFileContract = errors.New("file contract window has not yet opened") errUnrecognizedFileContractID = errors.New("cannot fetch storage proof segment for unknown file contract") errWrongUnlockConditions = errors.New("transaction contains incorrect unlock conditions") ) // validSiacoins checks that the siacoin inputs and outputs are valid in the // context of the current consensus set. func validSiacoins(tx *bolt.Tx, t types.Transaction) error { scoBucket := tx.Bucket(SiacoinOutputs) var inputSum types.Currency for _, sci := range t.SiacoinInputs { // Check that the input spends an existing output. scoBytes := scoBucket.Get(sci.ParentID[:]) if scoBytes == nil { return errMissingSiacoinOutput } // Check that the unlock conditions match the required unlock hash. var sco types.SiacoinOutput err := encoding.Unmarshal(scoBytes, &sco) if build.DEBUG && err != nil { panic(err) } if sci.UnlockConditions.UnlockHash() != sco.UnlockHash { return errWrongUnlockConditions } inputSum = inputSum.Add(sco.Value) } if !inputSum.Equals(t.SiacoinOutputSum()) { return errSiacoinInputOutputMismatch } return nil } // storageProofSegment returns the index of the segment that needs to be proven // exists in a file contract. func storageProofSegment(tx *bolt.Tx, fcid types.FileContractID) (uint64, error) { // Check that the parent file contract exists. fcBucket := tx.Bucket(FileContracts) fcBytes := fcBucket.Get(fcid[:]) if fcBytes == nil { return 0, errUnrecognizedFileContractID } // Decode the file contract. var fc types.FileContract err := encoding.Unmarshal(fcBytes, &fc) if build.DEBUG && err != nil { panic(err) } // Get the trigger block id.
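// The trigger block is the last block before the contract's proof window // opens (height WindowStart - 1); its ID is unknown until that height is // reached, so the segment selected below cannot be predicted in advance.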
blockPath := tx.Bucket(BlockPath) triggerHeight := fc.WindowStart - 1 if triggerHeight > blockHeight(tx) { return 0, errUnfinishedFileContract } var triggerID types.BlockID copy(triggerID[:], blockPath.Get(encoding.EncUint64(uint64(triggerHeight)))) // Get the index by appending the file contract ID to the trigger block and // taking the hash, then converting the hash to a numerical value and // modding it against the number of segments in the file. The result is a // random number in range [0, numSegments). The distribution is very // slightly weighted towards the beginning of the file, but because of the // size difference between the number of segments and the random number // being modded, the bias is too small to make any practical // difference. seed := crypto.HashAll(triggerID, fcid) numSegments := int64(crypto.CalculateLeaves(fc.FileSize)) seedInt := new(big.Int).SetBytes(seed[:]) index := seedInt.Mod(seedInt, big.NewInt(numSegments)).Uint64() return index, nil } // validStorageProofs100e3 runs the storage proof validation code that was // running before height 100e3, which contains a hardforking bug that was // fixed at block 100e3. // // HARDFORK 100,000 // // Originally, it was impossible to provide a storage proof for data of length // zero. A hardfork was added triggering at block 100,000 to enable an // optimization where hosts could submit empty storage proofs for files of size // 0, saving space on the blockchain in conditions where the renter is content. func validStorageProofs100e3(tx *bolt.Tx, t types.Transaction) error { for _, sp := range t.StorageProofs { // Check that the storage proof itself is valid. segmentIndex, err := storageProofSegment(tx, sp.ParentID) if err != nil { return err } fc, err := getFileContract(tx, sp.ParentID) if err != nil { return err } leaves := crypto.CalculateLeaves(fc.FileSize) segmentLen := uint64(crypto.SegmentSize) if segmentIndex == leaves-1 { segmentLen = fc.FileSize % crypto.SegmentSize } // HARDFORK 21,000 // // Originally, the code used the entire segment to verify the // correctness of the storage proof. This made the code incompatible // with data sizes that did not fill an entire segment. // // This was patched with a hardfork in block 21,000. The new code made // it possible to perform successful storage proofs on the final // segment of a file if the final segment was not crypto.SegmentSize // bytes. // // Unfortunately, a new bug was introduced where storage proofs on the // final segment would fail if the final segment was selected and was // crypto.SegmentSize bytes, because the segmentLen would be set to 0 // instead of crypto.SegmentSize, due to an error with the modulus // math. This new error has been fixed with the block 100,000 hardfork. if (build.Release == "standard" && blockHeight(tx) < 21e3) || (build.Release == "testing" && blockHeight(tx) < 10) { segmentLen = uint64(crypto.SegmentSize) } verified := crypto.VerifySegment( sp.Segment[:segmentLen], sp.HashSet, leaves, segmentIndex, fc.FileMerkleRoot, ) if !verified { return errInvalidStorageProof } } return nil } // validStorageProofs checks that the storage proofs are valid in the context // of the consensus set. func validStorageProofs(tx *bolt.Tx, t types.Transaction) error { if (build.Release == "standard" && blockHeight(tx) < 100e3) || (build.Release == "testing" && blockHeight(tx) < 10) { return validStorageProofs100e3(tx, t) } for _, sp := range t.StorageProofs { // Check that the storage proof itself is valid.
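// The expected segment index is recomputed from consensus data rather than // taken from the transaction, so the prover and the validator always agree // on which segment must be proven.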
segmentIndex, err := storageProofSegment(tx, sp.ParentID) if err != nil { return err } fc, err := getFileContract(tx, sp.ParentID) if err != nil { return err } leaves := crypto.CalculateLeaves(fc.FileSize) segmentLen := uint64(crypto.SegmentSize) // If this segment chosen is the final segment, it should only be as // long as necessary to complete the filesize. if segmentIndex == leaves-1 { segmentLen = fc.FileSize % crypto.SegmentSize } if segmentLen == 0 { segmentLen = uint64(crypto.SegmentSize) } verified := crypto.VerifySegment( sp.Segment[:segmentLen], sp.HashSet, leaves, segmentIndex, fc.FileMerkleRoot, ) if !verified && fc.FileSize > 0 { return errInvalidStorageProof } } return nil } // validFileContractRevisions checks that each file contract revision is valid // in the context of the current consensus set. func validFileContractRevisions(tx *bolt.Tx, t types.Transaction) error { for _, fcr := range t.FileContractRevisions { fc, err := getFileContract(tx, fcr.ParentID) if err != nil { return err } // Check that the height is less than fc.WindowStart - revisions are // not allowed to be submitted once the storage proof window has // opened. This reduces complexity for unconfirmed transactions. if blockHeight(tx) > fc.WindowStart { return errLateRevision } // Check that the revision number of the revision is greater than the // revision number of the existing file contract. if fc.RevisionNumber >= fcr.NewRevisionNumber { return errLowRevisionNumber } // Check that the unlock conditions match the unlock hash. if fcr.UnlockConditions.UnlockHash() != fc.UnlockHash { return errWrongUnlockConditions } // Check that the payout of the revision matches the payout of the // original, and that the payouts match each other. var validPayout, missedPayout, oldPayout types.Currency for _, output := range fcr.NewValidProofOutputs { validPayout = validPayout.Add(output.Value) } for _, output := range fcr.NewMissedProofOutputs { missedPayout = missedPayout.Add(output.Value) } for _, output := range fc.ValidProofOutputs { oldPayout = oldPayout.Add(output.Value) } if !validPayout.Equals(oldPayout) { return errAlteredRevisionPayouts } if !missedPayout.Equals(oldPayout) { return errAlteredRevisionPayouts } } return nil } // validSiafunds checks that the siafund portions of the transaction are valid // in the context of the consensus set. func validSiafunds(tx *bolt.Tx, t types.Transaction) (err error) { // Compare the number of input siafunds to the output siafunds. var siafundInputSum types.Currency var siafundOutputSum types.Currency for _, sfi := range t.SiafundInputs { sfo, err := getSiafundOutput(tx, sfi.ParentID) if err != nil { return err } // Check the unlock conditions match the unlock hash. if sfi.UnlockConditions.UnlockHash() != sfo.UnlockHash { return errWrongUnlockConditions } siafundInputSum = siafundInputSum.Add(sfo.Value) } for _, sfo := range t.SiafundOutputs { siafundOutputSum = siafundOutputSum.Add(sfo.Value) } if !siafundOutputSum.Equals(siafundInputSum) { return errSiafundInputOutputMismatch } return } // validTransaction checks that all fields are valid within the current // consensus state. If not, an error is returned. func validTransaction(tx *bolt.Tx, t types.Transaction) error { // StandaloneValid will check things like signatures and properties that // should be inherent to the transaction. (storage proof rules, etc.) err := t.StandaloneValid(blockHeight(tx)) if err != nil { return err } // Check that each portion of the transaction is legal given the current // consensus set.
err = validSiacoins(tx, t) if err != nil { return err } err = validStorageProofs(tx, t) if err != nil { return err } err = validFileContractRevisions(tx, t) if err != nil { return err } err = validSiafunds(tx, t) if err != nil { return err } return nil } // tryTransactionSet applies the input transactions to the consensus set to // determine if they are valid. An error is returned IFF they are not a valid // set in the current consensus set. The size of the transactions and the set // is not checked. After the transactions have been validated, a consensus // change is returned detailing the diffs that the transaction set would have. func (cs *ConsensusSet) tryTransactionSet(txns []types.Transaction) (modules.ConsensusChange, error) { // applyTransaction will apply the diffs from a transaction and store them // in a block node. diffHolder is the blockNode that tracks the temporary // changes. At the end of the function, all changes that were made to the // consensus set get reverted. diffHolder := new(processedBlock) // Boltdb will only roll back a tx if an error is returned. In the case of // TryTransactionSet, we want to roll back the tx even if there is no // error. So errSuccess is returned. An alternate method would be to // manually manage the tx instead of using 'Update', but that has safety // concerns and is more difficult to implement correctly. errSuccess := errors.New("success") err := cs.db.Update(func(tx *bolt.Tx) error { diffHolder.Height = blockHeight(tx) for _, txn := range txns { err := validTransaction(tx, txn) if err != nil { return err } applyTransaction(tx, diffHolder, txn) } return errSuccess }) if err != errSuccess { return modules.ConsensusChange{}, err } cc := modules.ConsensusChange{ SiacoinOutputDiffs: diffHolder.SiacoinOutputDiffs, FileContractDiffs: diffHolder.FileContractDiffs, SiafundOutputDiffs: diffHolder.SiafundOutputDiffs, DelayedSiacoinOutputDiffs: diffHolder.DelayedSiacoinOutputDiffs, SiafundPoolDiffs: diffHolder.SiafundPoolDiffs, } return cc, nil } // TryTransactionSet applies the input transactions to the consensus set to // determine if they are valid. An error is returned IFF they are not a valid // set in the current consensus set. The size of the transactions and the set // is not checked. After the transactions have been validated, a consensus // change is returned detailing the diffs that the transaction set would have. func (cs *ConsensusSet) TryTransactionSet(txns []types.Transaction) (modules.ConsensusChange, error) { err := cs.tg.Add() if err != nil { return modules.ConsensusChange{}, err } defer cs.tg.Done() cs.mu.RLock() defer cs.mu.RUnlock() return cs.tryTransactionSet(txns) } // LockedTryTransactionSet calls fn while under read-lock, passing it a // version of TryTransactionSet that can be called under read-lock. This fixes // an edge case in the transaction pool.
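// Holding cs.mu.RLock for the duration of fn guarantees that the consensus // set cannot change between successive calls to the passed-in function.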
func (cs *ConsensusSet) LockedTryTransactionSet(fn func(func(txns []types.Transaction) (modules.ConsensusChange, error)) error) error { err := cs.tg.Add() if err != nil { return err } defer cs.tg.Done() cs.mu.RLock() defer cs.mu.RUnlock() return fn(cs.tryTransactionSet) } Sia-1.3.0/modules/consensus/validtransaction_test.go000066400000000000000000000533501313565667000226740ustar00rootroot00000000000000package consensus import ( "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" "github.com/NebulousLabs/bolt" ) // TestTryValidTransactionSet submits a valid transaction set to the // TryTransactionSet method. func TestTryValidTransactionSet(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() initialHash := cst.cs.dbConsensusChecksum() // Try a valid transaction. _, err = cst.wallet.SendSiacoins(types.NewCurrency64(1), types.UnlockHash{}) if err != nil { t.Fatal(err) } txns := cst.tpool.TransactionList() cc, err := cst.cs.TryTransactionSet(txns) if err != nil { t.Error(err) } if cst.cs.dbConsensusChecksum() != initialHash { t.Error("TryTransactionSet did not restore order") } if len(cc.SiacoinOutputDiffs) == 0 { t.Error("consensus change is missing diffs after verifying a transaction clump") } } // TestTryInvalidTransactionSet submits an invalid transaction set to the // TryTransactionSet method. func TestTryInvalidTransactionSet(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() initialHash := cst.cs.dbConsensusChecksum() // Try a valid transaction followed by an invalid transaction. _, err = cst.wallet.SendSiacoins(types.NewCurrency64(1), types.UnlockHash{}) if err != nil { t.Fatal(err) } txns := cst.tpool.TransactionList() txn := types.Transaction{ SiacoinInputs: []types.SiacoinInput{{}}, } txns = append(txns, txn) cc, err := cst.cs.TryTransactionSet(txns) if err == nil { t.Error("bad transaction survived filter") } if cst.cs.dbConsensusChecksum() != initialHash { t.Error("TryTransactionSet did not restore order") } if len(cc.SiacoinOutputDiffs) != 0 { t.Error("consensus change was not empty despite an error being returned") } } // TestStorageProofBoundaries creates file contracts and submits storage proofs // for them, probing segment boundaries (first segment, last segment, // incomplete segment, etc.). func TestStorageProofBoundaries(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Mine enough blocks to put us beyond the testing hardfork. for i := 0; i < 10; i++ { _, err = cst.miner.AddBlock() if err != nil { t.Fatal(err) } } // Try storage proofs on data between 0 bytes and 128 bytes (0 segments and // 1 segment). Perform the operation three times because we can't control // which segment gets selected - it is randomly decided by the block. segmentRange := []int{0, 1, 2, 3, 4, 5, 15, 25, 30, 32, 62, 63, 64, 65, 66, 70, 81, 89, 90, 126, 127, 128, 129} for i := 0; i < 3; i++ { randData := fastrand.Bytes(140) // Create a file contract for all sizes of the data between 0 and 2 // segments and put them in the transaction pool. var fcids []types.FileContractID for _, k := range segmentRange { // Create the data and the file contract around it.
truncatedData := randData[:k] fc := types.FileContract{ FileSize: uint64(k), FileMerkleRoot: crypto.MerkleRoot(truncatedData), WindowStart: cst.cs.dbBlockHeight() + 2, WindowEnd: cst.cs.dbBlockHeight() + 4, Payout: types.NewCurrency64(500), // Too small to be subject to siafund fee. ValidProofOutputs: []types.SiacoinOutput{{Value: types.NewCurrency64(500)}}, MissedProofOutputs: []types.SiacoinOutput{{Value: types.NewCurrency64(500)}}, } // Create a transaction around the file contract and add it to the // transaction pool. b := cst.wallet.StartTransaction() err = b.FundSiacoins(types.NewCurrency64(500)) if err != nil { t.Fatal(err) } b.AddFileContract(fc) txnSet, err := b.Sign(true) if err != nil { t.Fatal(err) } err = cst.tpool.AcceptTransactionSet(txnSet) if err != nil { t.Fatal(err) } // Store the file contract id for later when building the storage // proof. fcids = append(fcids, txnSet[len(txnSet)-1].FileContractID(0)) } // Mine blocks to get the file contracts into the blockchain and // confirmed. for j := 0; j < 2; j++ { _, err = cst.miner.AddBlock() if err != nil { t.Fatal(err) } } // Create storage proofs for the file contracts and submit the proofs // to the blockchain. for j, k := range segmentRange { // Build the storage proof. truncatedData := randData[:k] proofIndex, err := cst.cs.StorageProofSegment(fcids[j]) if err != nil { t.Fatal(err) } base, hashSet := crypto.MerkleProof(truncatedData, proofIndex) sp := types.StorageProof{ ParentID: fcids[j], HashSet: hashSet, } copy(sp.Segment[:], base) if k > 0 { // Try submitting an empty storage proof, to make sure that the // hardfork code didn't accidentally allow empty storage proofs // in situations other than file sizes with 0 bytes. badSP := types.StorageProof{ParentID: fcids[j]} badTxn := types.Transaction{ StorageProofs: []types.StorageProof{badSP}, } if sp.Segment == badSP.Segment { continue } err = cst.tpool.AcceptTransactionSet([]types.Transaction{badTxn}) if err == nil { t.Fatal("An empty storage proof got into the transaction pool with non-empty data") } } // Submit the storage proof to the blockchain in a transaction. txn := types.Transaction{ StorageProofs: []types.StorageProof{sp}, } err = cst.tpool.AcceptTransactionSet([]types.Transaction{txn}) if err != nil { t.Fatal(err, "-", j, k) } } // Mine blocks to get the storage proofs on the blockchain. for j := 0; j < 2; j++ { _, err = cst.miner.AddBlock() if err != nil { t.Fatal(err) } } } } // TestEmptyStorageProof creates file contracts and submits storage proofs for // them, probing segment boundaries (first segment, last segment, incomplete // segment, etc.), including the zero-byte case that yields an empty storage // proof. func TestEmptyStorageProof(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Mine enough blocks to put us beyond the testing hardfork. for i := 0; i < 10; i++ { _, err = cst.miner.AddBlock() if err != nil { t.Fatal(err) } } // Try storage proofs on data between 0 bytes and 128 bytes (0 segments and // 1 segment). Perform the operation three times because we can't control // which segment gets selected - it is randomly decided by the block. segmentRange := []int{0, 1, 2, 3, 4, 5, 15, 25, 30, 32, 62, 63, 64, 65, 66, 70, 81, 89, 90, 126, 127, 128, 129} for i := 0; i < 3; i++ { randData := fastrand.Bytes(140) // Create a file contract for all sizes of the data between 0 and 2 // segments and put them in the transaction pool.
var fcids []types.FileContractID for _, k := range segmentRange { // Create the data and the file contract around it. truncatedData := randData[:k] fc := types.FileContract{ FileSize: uint64(k), FileMerkleRoot: crypto.MerkleRoot(truncatedData), WindowStart: cst.cs.dbBlockHeight() + 2, WindowEnd: cst.cs.dbBlockHeight() + 4, Payout: types.NewCurrency64(500), // Too small to be subject to siafund fee. ValidProofOutputs: []types.SiacoinOutput{{Value: types.NewCurrency64(500)}}, MissedProofOutputs: []types.SiacoinOutput{{Value: types.NewCurrency64(500)}}, } // Create a transaction around the file contract and add it to the // transaction pool. b := cst.wallet.StartTransaction() err = b.FundSiacoins(types.NewCurrency64(500)) if err != nil { t.Fatal(err) } b.AddFileContract(fc) txnSet, err := b.Sign(true) if err != nil { t.Fatal(err) } err = cst.tpool.AcceptTransactionSet(txnSet) if err != nil { t.Fatal(err) } // Store the file contract id for later when building the storage // proof. fcids = append(fcids, txnSet[len(txnSet)-1].FileContractID(0)) } // Mine blocks to get the file contracts into the blockchain and // confirmed. for j := 0; j < 2; j++ { _, err = cst.miner.AddBlock() if err != nil { t.Fatal(err) } } // Create storage proofs for the file contracts and submit the proofs // to the blockchain. for j, k := range segmentRange { // Build the storage proof. truncatedData := randData[:k] proofIndex, err := cst.cs.StorageProofSegment(fcids[j]) if err != nil { t.Fatal(err) } base, hashSet := crypto.MerkleProof(truncatedData, proofIndex) sp := types.StorageProof{ ParentID: fcids[j], HashSet: hashSet, } copy(sp.Segment[:], base) // Submit the storage proof to the blockchain in a transaction. txn := types.Transaction{ StorageProofs: []types.StorageProof{sp}, } err = cst.tpool.AcceptTransactionSet([]types.Transaction{txn}) if err != nil { t.Fatal(err, "-", j, k) } } // Mine blocks to get the storage proofs on the blockchain. for j := 0; j < 2; j++ { _, err = cst.miner.AddBlock() if err != nil { t.Fatal(err) } } } } // TestValidSiacoins probes the validSiacoins method of the consensus set. func TestValidSiacoins(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Create a transaction pointing to a nonexistent siacoin output. txn := types.Transaction{ SiacoinInputs: []types.SiacoinInput{{}}, } err = cst.cs.db.View(func(tx *bolt.Tx) error { err := validSiacoins(tx, txn) if err != errMissingSiacoinOutput { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } // Create a transaction with invalid unlock conditions. scoid, _, err := cst.cs.getArbSiacoinOutput() if err != nil { t.Fatal(err) } txn = types.Transaction{ SiacoinInputs: []types.SiacoinInput{{ ParentID: scoid, }}, } err = cst.cs.db.View(func(tx *bolt.Tx) error { err := validSiacoins(tx, txn) if err != errWrongUnlockConditions { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } // Create a txn with more outputs than inputs. txn = types.Transaction{ SiacoinOutputs: []types.SiacoinOutput{{ Value: types.NewCurrency64(1), }}, } err = cst.cs.db.View(func(tx *bolt.Tx) error { err := validSiacoins(tx, txn) if err != errSiacoinInputOutputMismatch { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } } // TestStorageProofSegment probes the storageProofSegment method of the // consensus set.
func TestStorageProofSegment(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Request the storage proof segment of an unrecognized file contract. _, err = cst.cs.dbStorageProofSegment(types.FileContractID{}) if err != errUnrecognizedFileContractID { t.Error(err) } // Try to get the segment of an unfinished file contract. cst.cs.dbAddFileContract(types.FileContractID{}, types.FileContract{ Payout: types.NewCurrency64(1), WindowStart: 100000, }) _, err = cst.cs.dbStorageProofSegment(types.FileContractID{}) if err != errUnfinishedFileContract { t.Error(err) } } // TestValidStorageProofs probes the validStorageProofs method of the consensus // set. func TestValidStorageProofs(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // COMPATv0.4.0 // // Mine 10 blocks so that the post-hardfork rules are in effect. for i := 0; i < 10; i++ { block, _ := cst.miner.FindBlock() err = cst.cs.AcceptBlock(block) if err != nil { t.Fatal(err) } } // Create a file contract for which a storage proof can be created. var fcid types.FileContractID fcid[0] = 12 simFile := fastrand.Bytes(64 * 1024) root := crypto.MerkleRoot(simFile) fc := types.FileContract{ FileSize: 64 * 1024, FileMerkleRoot: root, Payout: types.NewCurrency64(1), WindowStart: 2, WindowEnd: 1200, } cst.cs.dbAddFileContract(fcid, fc) // Create a transaction with a storage proof. proofIndex, err := cst.cs.dbStorageProofSegment(fcid) if err != nil { t.Fatal(err) } base, proofSet := crypto.MerkleProof(simFile, proofIndex) txn := types.Transaction{ StorageProofs: []types.StorageProof{{ ParentID: fcid, HashSet: proofSet, }}, } copy(txn.StorageProofs[0].Segment[:], base) err = cst.cs.dbValidStorageProofs(txn) if err != nil { t.Error(err) } // Corrupt the proof set. proofSet[0][0]++ txn = types.Transaction{ StorageProofs: []types.StorageProof{{ ParentID: fcid, HashSet: proofSet, }}, } copy(txn.StorageProofs[0].Segment[:], base) err = cst.cs.dbValidStorageProofs(txn) if err != errInvalidStorageProof { t.Error(err) } // Try to validate a proof for a file contract that doesn't exist. txn.StorageProofs[0].ParentID = types.FileContractID{} err = cst.cs.dbValidStorageProofs(txn) if err != errUnrecognizedFileContractID { t.Error(err) } // Try a proof set where there is padding on the last segment in the file. file := fastrand.Bytes(100) root = crypto.MerkleRoot(file) fc = types.FileContract{ FileSize: 100, FileMerkleRoot: root, Payout: types.NewCurrency64(1), WindowStart: 2, WindowEnd: 1200, } // Find a proofIndex that has the value '1'. for { fcid[0]++ cst.cs.dbAddFileContract(fcid, fc) proofIndex, err = cst.cs.dbStorageProofSegment(fcid) if err != nil { t.Fatal(err) } if proofIndex == 1 { break } } base, proofSet = crypto.MerkleProof(file, proofIndex) txn = types.Transaction{ StorageProofs: []types.StorageProof{{ ParentID: fcid, HashSet: proofSet, }}, } copy(txn.StorageProofs[0].Segment[:], base) err = cst.cs.dbValidStorageProofs(txn) if err != nil { t.Fatal(err) } } // HARDFORK 21,000 // // TestPreForkValidStorageProofs checks that storage proofs which are invalid // before the hardfork (but valid afterwards) are still rejected before the // hardfork.
func TestPreForkValidStorageProofs(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Try a proof set where there is padding on the last segment in the file. file := fastrand.Bytes(100) root := crypto.MerkleRoot(file) fc := types.FileContract{ FileSize: 100, FileMerkleRoot: root, Payout: types.NewCurrency64(1), WindowStart: 2, WindowEnd: 1200, } // Find a proofIndex that has the value '1'. var fcid types.FileContractID var proofIndex uint64 for { fcid[0]++ cst.cs.dbAddFileContract(fcid, fc) proofIndex, err = cst.cs.dbStorageProofSegment(fcid) if err != nil { t.Fatal(err) } if proofIndex == 1 { break } } base, proofSet := crypto.MerkleProof(file, proofIndex) txn := types.Transaction{ StorageProofs: []types.StorageProof{{ ParentID: fcid, HashSet: proofSet, }}, } copy(txn.StorageProofs[0].Segment[:], base) err = cst.cs.dbValidStorageProofs(txn) if err != errInvalidStorageProof { t.Log(cst.cs.dbBlockHeight()) t.Fatal(err) } } // TestValidFileContractRevisions probes the validFileContractRevisions method // of the consensus set. func TestValidFileContractRevisions(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.Close() // Grab an address + unlock conditions for the transaction. unlockConditions, err := cst.wallet.NextAddress() if err != nil { t.Fatal(err) } // Create a file contract for which a storage proof can be created. var fcid types.FileContractID fcid[0] = 12 simFile := fastrand.Bytes(64 * 1024) root := crypto.MerkleRoot(simFile) fc := types.FileContract{ FileSize: 64 * 1024, FileMerkleRoot: root, WindowStart: 102, WindowEnd: 1200, Payout: types.NewCurrency64(1), UnlockHash: unlockConditions.UnlockHash(), RevisionNumber: 1, } cst.cs.dbAddFileContract(fcid, fc) // Try a working file contract revision. txn := types.Transaction{ FileContractRevisions: []types.FileContractRevision{ { ParentID: fcid, UnlockConditions: unlockConditions, NewRevisionNumber: 2, }, }, } err = cst.cs.dbValidFileContractRevisions(txn) if err != nil { t.Error(err) } // Try a transaction with an insufficient revision number. txn = types.Transaction{ FileContractRevisions: []types.FileContractRevision{ { ParentID: fcid, UnlockConditions: unlockConditions, NewRevisionNumber: 1, }, }, } err = cst.cs.dbValidFileContractRevisions(txn) if err != errLowRevisionNumber { t.Error(err) } txn = types.Transaction{ FileContractRevisions: []types.FileContractRevision{ { ParentID: fcid, UnlockConditions: unlockConditions, NewRevisionNumber: 0, }, }, } err = cst.cs.dbValidFileContractRevisions(txn) if err != errLowRevisionNumber { t.Error(err) } // Submit a file contract revision pointing to an invalid parent. txn.FileContractRevisions[0].ParentID[0]-- err = cst.cs.dbValidFileContractRevisions(txn) if err != errNilItem { t.Error(err) } txn.FileContractRevisions[0].ParentID[0]++ // Submit a file contract revision for a file contract whose window has // already opened. fc, err = cst.cs.dbGetFileContract(fcid) if err != nil { t.Fatal(err) } fc.WindowStart = 0 cst.cs.dbRemoveFileContract(fcid) cst.cs.dbAddFileContract(fcid, fc) txn.FileContractRevisions[0].NewRevisionNumber = 3 err = cst.cs.dbValidFileContractRevisions(txn) if err != errLateRevision { t.Error(err) } // Submit a file contract revision with incorrect unlock conditions. 
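// Restore a WindowStart in the future first, so that the revision below is // rejected for its unlock conditions rather than for being late.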
fc.WindowStart = 100 cst.cs.dbRemoveFileContract(fcid) cst.cs.dbAddFileContract(fcid, fc) txn.FileContractRevisions[0].UnlockConditions.Timelock++ err = cst.cs.dbValidFileContractRevisions(txn) if err != errWrongUnlockConditions { t.Error(err) } txn.FileContractRevisions[0].UnlockConditions.Timelock-- // Submit file contract revisions for file contracts with altered payouts. txn.FileContractRevisions[0].NewValidProofOutputs = []types.SiacoinOutput{{ Value: types.NewCurrency64(1), }} txn.FileContractRevisions[0].NewMissedProofOutputs = []types.SiacoinOutput{{ Value: types.NewCurrency64(1), }} err = cst.cs.dbValidFileContractRevisions(txn) if err != errAlteredRevisionPayouts { t.Error(err) } txn.FileContractRevisions[0].NewValidProofOutputs = nil err = cst.cs.dbValidFileContractRevisions(txn) if err != errAlteredRevisionPayouts { t.Error(err) } txn.FileContractRevisions[0].NewValidProofOutputs = []types.SiacoinOutput{{ Value: types.NewCurrency64(1), }} txn.FileContractRevisions[0].NewMissedProofOutputs = nil err = cst.cs.dbValidFileContractRevisions(txn) if err != errAlteredRevisionPayouts { t.Error(err) } } /* // TestValidSiafunds probes the validSiafunds method of the consensus set. func TestValidSiafunds(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a transaction pointing to a nonexistent siafund output. txn := types.Transaction{ SiafundInputs: []types.SiafundInput{{}}, } err = cst.cs.validSiafunds(txn) if err != ErrMissingSiafundOutput { t.Error(err) } // Create a transaction with invalid unlock conditions. var sfoid types.SiafundOutputID cst.cs.db.forEachSiafundOutputs(func(mapSfoid types.SiafundOutputID, sfo types.SiafundOutput) { sfoid = mapSfoid // pointless to do this but I can't think of a better way. }) txn = types.Transaction{ SiafundInputs: []types.SiafundInput{{ ParentID: sfoid, UnlockConditions: types.UnlockConditions{Timelock: 12345}, // avoid collisions with existing outputs }}, } err = cst.cs.validSiafunds(txn) if err != ErrWrongUnlockConditions { t.Error(err) } // Create a transaction with more outputs than inputs. txn = types.Transaction{ SiafundOutputs: []types.SiafundOutput{{ Value: types.NewCurrency64(1), }}, } err = cst.cs.validSiafunds(txn) if err != ErrSiafundInputOutputMismatch { t.Error(err) } } // TestValidTransaction probes the validTransaction method of the consensus // set. func TestValidTransaction(t *testing.T) { if testing.Short() { t.SkipNow() } cst, err := createConsensusSetTester(t.Name()) if err != nil { t.Fatal(err) } defer cst.closeCst() // Create a transaction that is not standalone valid. txn := types.Transaction{ FileContracts: []types.FileContract{{ WindowStart: 0, }}, } err = cst.cs.validTransaction(txn) if err == nil { t.Error("transaction is valid") } // Create a transaction with invalid siacoins. txn = types.Transaction{ SiacoinInputs: []types.SiacoinInput{{}}, } err = cst.cs.validTransaction(txn) if err == nil { t.Error("transaction is valid") } // Create a transaction with invalid storage proofs. txn = types.Transaction{ StorageProofs: []types.StorageProof{{}}, } err = cst.cs.validTransaction(txn) if err == nil { t.Error("transaction is valid") } // Create a transaction with invalid file contract revisions.
txn = types.Transaction{ FileContractRevisions: []types.FileContractRevision{{ NewWindowStart: 5000, NewWindowEnd: 5005, ParentID: types.FileContractID{}, }}, } err = cst.cs.validTransaction(txn) if err == nil { t.Error("transaction is valid") } // Create a transaction with invalid siafunds. txn = types.Transaction{ SiafundInputs: []types.SiafundInput{{}}, } err = cst.cs.validTransaction(txn) if err == nil { t.Error("transaction is valid") } } */ Sia-1.3.0/modules/explorer.go000066400000000000000000000106161313565667000161060ustar00rootroot00000000000000package modules import ( "github.com/NebulousLabs/Sia/types" ) const ( // ExplorerDir is the name of the directory that is typically used for the // explorer. ExplorerDir = "explorer" ) type ( // BlockFacts returns a bunch of statistics about the consensus set as they // were at a specific block. BlockFacts struct { BlockID types.BlockID `json:"blockid"` Difficulty types.Currency `json:"difficulty"` EstimatedHashrate types.Currency `json:"estimatedhashrate"` Height types.BlockHeight `json:"height"` MaturityTimestamp types.Timestamp `json:"maturitytimestamp"` Target types.Target `json:"target"` TotalCoins types.Currency `json:"totalcoins"` // Transaction type counts. MinerPayoutCount uint64 `json:"minerpayoutcount"` TransactionCount uint64 `json:"transactioncount"` SiacoinInputCount uint64 `json:"siacoininputcount"` SiacoinOutputCount uint64 `json:"siacoinoutputcount"` FileContractCount uint64 `json:"filecontractcount"` FileContractRevisionCount uint64 `json:"filecontractrevisioncount"` StorageProofCount uint64 `json:"storageproofcount"` SiafundInputCount uint64 `json:"siafundinputcount"` SiafundOutputCount uint64 `json:"siafundoutputcount"` MinerFeeCount uint64 `json:"minerfeecount"` ArbitraryDataCount uint64 `json:"arbitrarydatacount"` TransactionSignatureCount uint64 `json:"transactionsignaturecount"` // Factoids about file contracts. ActiveContractCost types.Currency `json:"activecontractcost"` ActiveContractCount uint64 `json:"activecontractcount"` ActiveContractSize types.Currency `json:"activecontractsize"` TotalContractCost types.Currency `json:"totalcontractcost"` TotalContractSize types.Currency `json:"totalcontractsize"` TotalRevisionVolume types.Currency `json:"totalrevisionvolume"` } // Explorer tracks the blockchain and provides tools for gathering // statistics and finding objects or patterns within the blockchain. Explorer interface { // Block returns the block that matches the input block id. The bool // indicates whether the block appears in the blockchain. Block(types.BlockID) (types.Block, types.BlockHeight, bool) // BlockFacts returns a set of statistics about the blockchain as they // appeared at a given block. BlockFacts(types.BlockHeight) (BlockFacts, bool) // LatestBlockFacts returns the block facts of the last block // in the explorer's database. LatestBlockFacts() BlockFacts // Transaction returns the block that contains the input transaction // id. The transaction itself is either the block (indicating the miner // payouts are somehow involved), or it is a transaction inside of the // block. The bool indicates whether the transaction is found in the // consensus set. Transaction(types.TransactionID) (types.Block, types.BlockHeight, bool) // UnlockHash returns all of the transaction ids associated with the // provided unlock hash. UnlockHash(types.UnlockHash) []types.TransactionID // SiacoinOutput will return the siacoin output associated with the // input id. 
SiacoinOutput(types.SiacoinOutputID) (types.SiacoinOutput, bool) // SiacoinOutputID returns all of the transaction ids associated with // the provided siacoin output id. SiacoinOutputID(types.SiacoinOutputID) []types.TransactionID // FileContractHistory returns the history associated with a file // contract, which includes the file contract itself and all of the // revisions that have been submitted to the blockchain. The first bool // indicates whether the file contract exists, and the second bool // indicates whether a storage proof was successfully submitted for the // file contract. FileContractHistory(types.FileContractID) (fc types.FileContract, fcrs []types.FileContractRevision, fcExists bool, storageProofExists bool) // FileContractID returns all of the transaction ids associated with // the provided file contract id. FileContractID(types.FileContractID) []types.TransactionID // SiafundOutput will return the siafund output associated with the // input id. SiafundOutput(types.SiafundOutputID) (types.SiafundOutput, bool) // SiafundOutputID returns all of the transaction ids associated with // the provided siafund output id. SiafundOutputID(types.SiafundOutputID) []types.TransactionID Close() error } ) Sia-1.3.0/modules/explorer/000077500000000000000000000000001313565667000155535ustar00rootroot00000000000000Sia-1.3.0/modules/explorer/database.go000066400000000000000000000073561313565667000176600ustar00rootroot00000000000000package explorer import ( "errors" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) var ( errNotExist = errors.New("entry does not exist") // database buckets bucketBlockFacts = []byte("BlockFacts") bucketBlockIDs = []byte("BlockIDs") bucketBlocksDifficulty = []byte("BlocksDifficulty") bucketBlockTargets = []byte("BlockTargets") bucketFileContractHistories = []byte("FileContractHistories") bucketFileContractIDs = []byte("FileContractIDs") bucketSiacoinOutputIDs = []byte("SiacoinOutputIDs") bucketSiacoinOutputs = []byte("SiacoinOutputs") bucketSiafundOutputIDs = []byte("SiafundOutputIDs") bucketSiafundOutputs = []byte("SiafundOutputs") bucketTransactionIDs = []byte("TransactionIDs") bucketUnlockHashes = []byte("UnlockHashes") // bucketInternal is used to store values internal to the explorer bucketInternal = []byte("Internal") // keys for bucketInternal internalBlockHeight = []byte("BlockHeight") internalRecentChange = []byte("RecentChange") ) // These functions all return a 'func(*bolt.Tx) error', which allows them to // be called concisely with the db.View and db.Update functions, e.g.: // // var height types.BlockHeight // db.View(dbGetAndDecode(bucketBlockIDs, id, &height)) // // Instead of: // // var height types.BlockHeight // db.View(func(tx *bolt.Tx) error { // bytes := tx.Bucket(bucketBlockIDs).Get(encoding.Marshal(id)) // return encoding.Unmarshal(bytes, &height) // }) // dbGetAndDecode returns a 'func(*bolt.Tx) error' that retrieves and decodes // a value from the specified bucket. If the value does not exist, // dbGetAndDecode returns errNotExist. func dbGetAndDecode(bucket []byte, key, val interface{}) func(*bolt.Tx) error { return func(tx *bolt.Tx) error { valBytes := tx.Bucket(bucket).Get(encoding.Marshal(key)) if valBytes == nil { return errNotExist } return encoding.Unmarshal(valBytes, val) } } // dbGetTransactionIDSet returns a 'func(*bolt.Tx) error' that decodes a // bucket of transaction IDs into a slice. If the bucket is nil, // dbGetTransactionIDSet returns errNotExist.
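// // A typical caller (mirroring UnlockHash in info.go): // //	var ids []types.TransactionID //	err := e.db.View(dbGetTransactionIDSet(bucketUnlockHashes, uh, &ids))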
func dbGetTransactionIDSet(bucket []byte, key interface{}, ids *[]types.TransactionID) func(*bolt.Tx) error { return func(tx *bolt.Tx) error { b := tx.Bucket(bucket).Bucket(encoding.Marshal(key)) if b == nil { return errNotExist } // decode into a local slice var txids []types.TransactionID err := b.ForEach(func(txid, _ []byte) error { var id types.TransactionID err := encoding.Unmarshal(txid, &id) if err != nil { return err } txids = append(txids, id) return nil }) if err != nil { return err } // set pointer *ids = txids return nil } } // dbGetBlockFacts returns a 'func(*bolt.Tx) error' that decodes // the block facts for `height` into blockfacts func (e *Explorer) dbGetBlockFacts(height types.BlockHeight, bf *blockFacts) func(*bolt.Tx) error { return func(tx *bolt.Tx) error { block, exists := e.cs.BlockAtHeight(height) if !exists { return errors.New("requested block facts for a block that does not exist") } return dbGetAndDecode(bucketBlockFacts, block.ID(), bf)(tx) } } // dbSetInternal sets the specified key of bucketInternal to the encoded value. func dbSetInternal(key []byte, val interface{}) func(*bolt.Tx) error { return func(tx *bolt.Tx) error { return tx.Bucket(bucketInternal).Put(key, encoding.Marshal(val)) } } // dbGetInternal decodes the specified key of bucketInternal into the supplied pointer. func dbGetInternal(key []byte, val interface{}) func(*bolt.Tx) error { return func(tx *bolt.Tx) error { return encoding.Unmarshal(tx.Bucket(bucketInternal).Get(key), val) } } Sia-1.3.0/modules/explorer/explorer.go000066400000000000000000000045051313565667000177460ustar00rootroot00000000000000// The explorer module provides a glimpse into what the Sia network // currently looks like. package explorer import ( "errors" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" ) const ( // hashrateEstimationBlocks is the number of blocks that are used to // estimate the current hashrate. hashrateEstimationBlocks = 200 // 33 hours ) var ( errNilCS = errors.New("explorer cannot use a nil consensus set") ) type ( // fileContractHistory stores the original file contract and the chain of // revisions that have affected a file contract through the life of the // blockchain. fileContractHistory struct { Contract types.FileContract Revisions []types.FileContractRevision StorageProof types.StorageProof } // blockFacts contains a set of facts about the consensus set related to a // certain block. The explorer needs some additional information in the // history so that it can calculate certain values, which is one of the // reasons that the explorer uses a separate struct instead of // modules.BlockFacts. blockFacts struct { modules.BlockFacts Timestamp types.Timestamp } // An Explorer contains a more comprehensive view of the blockchain, // including various statistics and metrics. Explorer struct { cs modules.ConsensusSet db *persist.BoltDatabase persistDir string } ) // New creates the internal data structures, and subscribes to // consensus for changes to the blockchain func New(cs modules.ConsensusSet, persistDir string) (*Explorer, error) { // Check that input modules are non-nil if cs == nil { return nil, errNilCS } // Initialize the explorer. e := &Explorer{ cs: cs, persistDir: persistDir, } // Initialize the persistent structures, including the database. 
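// Persistence must be initialized before subscribing below, because the // subscription resumes from the ConsensusChangeID stored in the database.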
err := e.initPersist() if err != nil { return nil, err } // retrieve the current ConsensusChangeID var recentChange modules.ConsensusChangeID err = e.db.View(dbGetInternal(internalRecentChange, &recentChange)) if err != nil { return nil, err } err = cs.ConsensusSetSubscribe(e, recentChange) if err != nil { // TODO: restart from 0 return nil, errors.New("explorer subscription failed: " + err.Error()) } return e, nil } // Close closes the explorer. func (e *Explorer) Close() error { return e.db.Close() } Sia-1.3.0/modules/explorer/explorer_test.go000066400000000000000000000120401313565667000207760ustar00rootroot00000000000000package explorer import ( "path/filepath" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/consensus" "github.com/NebulousLabs/Sia/modules/gateway" "github.com/NebulousLabs/Sia/modules/miner" "github.com/NebulousLabs/Sia/modules/transactionpool" "github.com/NebulousLabs/Sia/modules/wallet" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" ) // Explorer tester struct is the helper object for explorer // testing. It holds the helper modules for its testing type explorerTester struct { cs modules.ConsensusSet gateway modules.Gateway miner modules.TestMiner tpool modules.TransactionPool wallet modules.Wallet walletKey crypto.TwofishKey explorer *Explorer testdir string } // createExplorerTester creates a tester object for the explorer module. func createExplorerTester(name string) (*explorerTester, error) { if testing.Short() { panic("createExplorerTester called when in a short test") } // Create and assemble the dependencies. testdir := build.TempDir(modules.ExplorerDir, name) g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { return nil, err } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { return nil, err } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { return nil, err } w, err := wallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { return nil, err } key := crypto.GenerateTwofishKey() _, err = w.Encrypt(key) if err != nil { return nil, err } err = w.Unlock(key) if err != nil { return nil, err } m, err := miner.New(cs, tp, w, filepath.Join(testdir, modules.RenterDir)) if err != nil { return nil, err } e, err := New(cs, filepath.Join(testdir, modules.ExplorerDir)) if err != nil { return nil, err } et := &explorerTester{ cs: cs, gateway: g, miner: m, tpool: tp, wallet: w, walletKey: key, explorer: e, testdir: testdir, } // Mine until the wallet has money. for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ { b, _ := et.miner.FindBlock() err = et.cs.AcceptBlock(b) if err != nil { return nil, err } } return et, nil } // reorgToBlank creates a bunch of empty blocks on top of the genesis block // that reorgs the explorer to a state of all blank blocks. func (et *explorerTester) reorgToBlank() error { // Get a unique directory name to house the persistence of the miner // dependencies. dir := et.testdir + " - " + persist.RandomSuffix() // Create a miner and all dependencies to create an alternate chain. 
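// The modules are wired together in dependency order: gateway, then // consensus, then transaction pool, then wallet, and finally the miner.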
g, err := gateway.New("localhost:0", false, filepath.Join(dir, modules.GatewayDir)) if err != nil { return err } cs, err := consensus.New(g, false, filepath.Join(dir, modules.ConsensusDir)) if err != nil { return err } tp, err := transactionpool.New(cs, g, filepath.Join(dir, modules.TransactionPoolDir)) if err != nil { return err } w, err := wallet.New(cs, tp, filepath.Join(dir, modules.WalletDir)) if err != nil { return err } key := crypto.GenerateTwofishKey() _, err = w.Encrypt(key) if err != nil { return err } err = w.Unlock(key) if err != nil { return err } m, err := miner.New(cs, tp, w, filepath.Join(dir, modules.RenterDir)) if err != nil { return err } // Mine blocks until the height is higher than the existing consensus, // submitting each block to the explorerTester. currentHeight := cs.Height() for i := types.BlockHeight(0); i <= currentHeight+1; i++ { block, err := m.AddBlock() if err != nil { return err } et.cs.AcceptBlock(block) // error is not checked, will not always be nil } return nil } // TestNilExplorerDependencies tries to initialize an explorer with nil // dependencies and checks that the correct error is returned. func TestNilExplorerDependencies(t *testing.T) { _, err := New(nil, "expdir") if err != errNilCS { t.Fatal("Expecting errNilCS") } } // TestExplorerGenesisHeight checks that when the explorer is initialized and given the // genesis block, the result has the correct height. func TestExplorerGenesisHeight(t *testing.T) { // Create the dependencies. testdir := build.TempDir(modules.HostDir, t.Name()) g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { t.Fatal(err) } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { t.Fatal(err) } // Create the explorer - from the subscription only the genesis block will // be received. e, err := New(cs, testdir) if err != nil { t.Fatal(err) } block, height, exists := e.Block(types.GenesisID) if !exists { t.Error("explorer missing genesis block after initialization") } if block.ID() != types.GenesisID { t.Error("explorer returned wrong genesis block") } if height != 0 { t.Errorf("genesis block has wrong height: expected 0, got %v", height) } } Sia-1.3.0/modules/explorer/info.go000066400000000000000000000140431313565667000170370ustar00rootroot00000000000000package explorer import ( "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) // Block takes a block ID and finds the corresponding block, provided that the // block is in the consensus set. func (e *Explorer) Block(id types.BlockID) (types.Block, types.BlockHeight, bool) { var height types.BlockHeight err := e.db.View(dbGetAndDecode(bucketBlockIDs, id, &height)) if err != nil { return types.Block{}, 0, false } block, exists := e.cs.BlockAtHeight(height) if !exists { return types.Block{}, 0, false } return block, height, true } // BlockFacts returns a set of statistics about the blockchain as they appeared // at a given block height, and a bool indicating whether facts exist for the // given height. func (e *Explorer) BlockFacts(height types.BlockHeight) (modules.BlockFacts, bool) { var bf blockFacts err := e.db.View(e.dbGetBlockFacts(height, &bf)) if err != nil { return modules.BlockFacts{}, false } return bf.BlockFacts, true } // LatestBlockFacts returns a set of statistics about the blockchain as they appeared // at the latest block height in the explorer's consensus set.
func (e *Explorer) LatestBlockFacts() modules.BlockFacts { var bf blockFacts err := e.db.View(func(tx *bolt.Tx) error { var height types.BlockHeight err := dbGetInternal(internalBlockHeight, &height)(tx) if err != nil { return err } return e.dbGetBlockFacts(height, &bf)(tx) }) if err != nil { build.Critical(err) } return bf.BlockFacts } // Transaction takes a transaction ID and finds the block containing the // transaction. Because of the miner payouts, the transaction ID might be a // block ID. To find the transaction, iterate through the block. func (e *Explorer) Transaction(id types.TransactionID) (types.Block, types.BlockHeight, bool) { var height types.BlockHeight err := e.db.View(dbGetAndDecode(bucketTransactionIDs, id, &height)) if err != nil { return types.Block{}, 0, false } block, exists := e.cs.BlockAtHeight(height) if !exists { return types.Block{}, 0, false } return block, height, true } // UnlockHash returns the IDs of all the transactions that contain the unlock // hash. An empty set indicates that the unlock hash does not appear in the // blockchain. func (e *Explorer) UnlockHash(uh types.UnlockHash) []types.TransactionID { var ids []types.TransactionID err := e.db.View(dbGetTransactionIDSet(bucketUnlockHashes, uh, &ids)) if err != nil { ids = nil } return ids } // SiacoinOutput returns the siacoin output associated with the specified ID. func (e *Explorer) SiacoinOutput(id types.SiacoinOutputID) (types.SiacoinOutput, bool) { var sco types.SiacoinOutput err := e.db.View(dbGetAndDecode(bucketSiacoinOutputs, id, &sco)) if err != nil { return types.SiacoinOutput{}, false } return sco, true } // SiacoinOutputID returns all of the transactions that contain the specified // siacoin output ID. An empty set indicates that the siacoin output ID does // not appear in the blockchain. func (e *Explorer) SiacoinOutputID(id types.SiacoinOutputID) []types.TransactionID { var ids []types.TransactionID err := e.db.View(dbGetTransactionIDSet(bucketSiacoinOutputIDs, id, &ids)) if err != nil { ids = nil } return ids } // FileContractHistory returns the history associated with the specified file // contract ID, which includes the file contract itself and all of the // revisions that have been submitted to the blockchain. The first bool // indicates whether the file contract exists, and the second bool indicates // whether a storage proof was successfully submitted for the file contract. func (e *Explorer) FileContractHistory(id types.FileContractID) (fc types.FileContract, fcrs []types.FileContractRevision, fcE bool, spE bool) { var history fileContractHistory err := e.db.View(dbGetAndDecode(bucketFileContractHistories, id, &history)) fc = history.Contract fcrs = history.Revisions fcE = err == nil spE = history.StorageProof.ParentID == id return } // FileContractID returns all of the transactions that contain the specified // file contract ID. An empty set indicates that the file contract ID does not // appear in the blockchain. func (e *Explorer) FileContractID(id types.FileContractID) []types.TransactionID { var ids []types.TransactionID err := e.db.View(dbGetTransactionIDSet(bucketFileContractIDs, id, &ids)) if err != nil { ids = nil } return ids } // FileContractPayouts returns all of the spendable siacoin outputs which are the // result of a FileContract.
An empty set indicates that the file contract is // still open func (e *Explorer) FileContractPayouts(id types.FileContractID) ([]types.SiacoinOutput, error) { var history fileContractHistory err := e.db.View(dbGetAndDecode(bucketFileContractHistories, id, &history)) if err != nil { return []types.SiacoinOutput{}, err } fc := history.Contract var outputs []types.SiacoinOutput for i := range fc.ValidProofOutputs { scoid := id.StorageProofOutputID(types.ProofValid, uint64(i)) sco, found := e.SiacoinOutput(scoid) if found { outputs = append(outputs, sco) } } for i := range fc.MissedProofOutputs { scoid := id.StorageProofOutputID(types.ProofMissed, uint64(i)) sco, found := e.SiacoinOutput(scoid) if found { outputs = append(outputs, sco) } } return outputs, nil } // SiafundOutput returns the siafund output associated with the specified ID. func (e *Explorer) SiafundOutput(id types.SiafundOutputID) (types.SiafundOutput, bool) { var sco types.SiafundOutput err := e.db.View(dbGetAndDecode(bucketSiafundOutputs, id, &sco)) if err != nil { return types.SiafundOutput{}, false } return sco, true } // SiafundOutputID returns all of the transactions that contain the specified // siafund output ID. An empty set indicates that the siafund output ID does // not appear in the blockchain. func (e *Explorer) SiafundOutputID(id types.SiafundOutputID) []types.TransactionID { var ids []types.TransactionID err := e.db.View(dbGetTransactionIDSet(bucketSiafundOutputIDs, id, &ids)) if err != nil { ids = nil } return ids } Sia-1.3.0/modules/explorer/info_test.go000066400000000000000000000150161313565667000200770ustar00rootroot00000000000000package explorer import ( "testing" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) // TestImmediateBlockFacts grabs the block facts object from the block explorer // at the current height and verifies that the data has been filled out. func TestImmediateBlockFacts(t *testing.T) { if testing.Short() { t.SkipNow() } et, err := createExplorerTester(t.Name()) if err != nil { t.Fatal(err) } facts := et.explorer.LatestBlockFacts() var explorerHeight types.BlockHeight err = et.explorer.db.View(dbGetInternal(internalBlockHeight, &explorerHeight)) if err != nil { t.Fatal(err) } if facts.Height != explorerHeight || explorerHeight == 0 { t.Error("wrong height reported in facts object") } if !facts.TotalCoins.Equals(types.CalculateNumSiacoins(et.cs.Height())) { t.Error("wrong number of total coins:", facts.TotalCoins, et.cs.Height()) } } // TestBlock probes the Block function of the explorer. func TestBlock(t *testing.T) { if testing.Short() { t.SkipNow() } et, err := createExplorerTester(t.Name()) if err != nil { t.Fatal(err) } gb := types.GenesisBlock gbFetch, height, exists := et.explorer.Block(gb.ID()) if !exists || height != 0 || gbFetch.ID() != gb.ID() { t.Error("call to 'Block' inside explorer failed") } } // TestBlockFacts checks that the correct block facts are returned for a query. 
func TestBlockFacts(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	et, err := createExplorerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}

	gb := types.GenesisBlock
	bf, exists := et.explorer.BlockFacts(0)
	if !exists || bf.BlockID != gb.ID() || bf.Height != 0 {
		t.Error("call to 'BlockFacts' inside explorer failed")
		t.Error("Expecting true ->", exists)
		t.Error("Expecting", gb.ID(), "->", bf.BlockID)
		t.Error("Expecting 0 ->", bf.Height)
	}

	bf, exists = et.explorer.BlockFacts(1)
	if !exists || bf.Height != 1 {
		t.Error("call to 'BlockFacts' has failed")
	}
}

// TestFileContractPayoutsMissingProof checks that file contract outputs are
// tracked by the explorer when a contract expires without a storage proof.
func TestFileContractPayoutsMissingProof(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	et, err := createExplorerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}

	// Create and fund valid file contracts.
	builder := et.wallet.StartTransaction()
	payout := types.NewCurrency64(1e9)
	err = builder.FundSiacoins(payout)
	if err != nil {
		t.Fatal(err)
	}

	windowStart := et.cs.Height() + 2
	windowEnd := et.cs.Height() + 5
	fc := types.FileContract{
		WindowStart:        windowStart,
		WindowEnd:          windowEnd,
		Payout:             payout,
		ValidProofOutputs:  []types.SiacoinOutput{{Value: types.PostTax(et.cs.Height(), payout)}},
		MissedProofOutputs: []types.SiacoinOutput{{Value: types.PostTax(et.cs.Height(), payout)}},
		UnlockHash:         types.UnlockConditions{}.UnlockHash(),
	}

	fcIndex := builder.AddFileContract(fc)
	tSet, err := builder.Sign(true)
	if err != nil {
		t.Fatal(err)
	}
	err = et.tpool.AcceptTransactionSet(tSet)
	if err != nil {
		t.Fatal(err)
	}

	// Mine until the contract payout is in the consensus set.
	for i := et.cs.Height(); i < windowEnd+types.MaturityDelay; i++ {
		_, err := et.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
	}

	ti := len(tSet) - 1
	fcid := tSet[ti].FileContractID(fcIndex)
	txns := et.explorer.FileContractID(fcid)
	if len(txns) == 0 {
		t.Error("file contract ID does not appear in the blockchain")
	}

	outputs, err := et.explorer.FileContractPayouts(fcid)
	if err != nil {
		t.Fatal(err)
	}

	// Check if MissedProofOutputs were added to spendable outputs
	if len(outputs) != len(fc.MissedProofOutputs) {
		t.Error("Incorrect number of outputs returned")
		t.Error("Expecting -> ", len(fc.MissedProofOutputs))
		t.Error("But was -> ", len(outputs))
	}
}

// TestFileContractsPayoutValidProof checks that file contract outputs are
// tracked by the explorer when a valid storage proof is submitted.
func TestFileContractsPayoutValidProof(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	et, err := createExplorerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}

	// COMPATv0.4.0 - Step the block height up past the hardfork amount. This
	// code stops nondeterministic failures when producing storage proofs that
	// is related to buggy old code.
	for et.cs.Height() <= 10 {
		_, err := et.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
	}

	// Create a file (as a bytes.Buffer) that will be used for the file
	// contract.
	filesize := uint64(4e3)
	file := fastrand.Bytes(int(filesize))
	merkleRoot := crypto.MerkleRoot(file)

	// Create a funded file contract
	payout := types.NewCurrency64(400e6)
	fc := types.FileContract{
		FileSize:           filesize,
		FileMerkleRoot:     merkleRoot,
		WindowStart:        et.cs.Height() + 1,
		WindowEnd:          et.cs.Height() + 2,
		Payout:             payout,
		ValidProofOutputs:  []types.SiacoinOutput{{Value: types.PostTax(et.cs.Height(), payout)}},
		MissedProofOutputs: []types.SiacoinOutput{{Value: types.PostTax(et.cs.Height(), payout)}},
	}
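	// A storage proof must reveal the file segment chosen by the consensus
	// set, together with a Merkle path from that segment to the contract's
	// FileMerkleRoot. A condensed, illustrative sketch of the flow exercised
	// below (error handling elided):
	//
	//	index, _ := et.cs.StorageProofSegment(fcid)         // consensus picks the segment
	//	segment, hashSet := crypto.MerkleProof(file, index) // path from segment to root
	//	sp := types.StorageProof{ParentID: fcid, HashSet: hashSet}
	//	copy(sp.Segment[:], segment)

	// Submit a transaction with the file contract.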
	builder := et.wallet.StartTransaction()
	err = builder.FundSiacoins(payout)
	if err != nil {
		t.Fatal(err)
	}
	fcIndex := builder.AddFileContract(fc)
	tSet, err := builder.Sign(true)
	if err != nil {
		t.Fatal(err)
	}
	err = et.tpool.AcceptTransactionSet(tSet)
	if err != nil {
		t.Fatal(err)
	}
	_, err = et.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}
	ti := len(tSet) - 1
	fcid := tSet[ti].FileContractID(fcIndex)

	// Create and submit a storage proof for the file contract.
	segmentIndex, err := et.cs.StorageProofSegment(fcid)
	if err != nil {
		t.Fatal(err)
	}
	segment, hashSet := crypto.MerkleProof(file, segmentIndex)
	sp := types.StorageProof{
		ParentID: fcid,
		HashSet:  hashSet,
	}
	copy(sp.Segment[:], segment)
	builder = et.wallet.StartTransaction()
	builder.AddStorageProof(sp)
	tSet, err = builder.Sign(true)
	if err != nil {
		t.Fatal(err)
	}
	err = et.tpool.AcceptTransactionSet(tSet)
	if err != nil {
		t.Fatal(err)
	}

	// Mine until the contract payout is in the consensus set.
	for i := types.BlockHeight(0); i < types.MaturityDelay+1; i++ {
		_, err := et.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
	}

	txns := et.explorer.FileContractID(fcid)
	if len(txns) == 0 {
		t.Error("file contract ID does not appear in the blockchain")
	}

	// Check that the storage proof was added to the explorer after the file
	// contract was removed from the consensus set.
	outputs, err := et.explorer.FileContractPayouts(fcid)
	if err != nil {
		t.Fatal(err)
	}
	if len(outputs) != len(fc.ValidProofOutputs) {
		t.Errorf("expected %v outputs, got %v", len(fc.ValidProofOutputs), len(outputs))
	}
}
Sia-1.3.0/modules/explorer/persist.go000066400000000000000000000033351313565667000175770ustar00rootroot00000000000000package explorer

import (
	"os"
	"path/filepath"

	"github.com/NebulousLabs/Sia/encoding"
	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/persist"
	"github.com/NebulousLabs/Sia/types"

	"github.com/NebulousLabs/bolt"
)

var explorerMetadata = persist.Metadata{
	Header:  "Sia Explorer",
	Version: "0.5.2",
}
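// The explorer's state can be read back through the database handle opened in
// initPersist below. A minimal illustrative sketch (assumes an initialized
// *Explorer named e), mirroring the pattern used by this package's tests:
//
//	var height types.BlockHeight
//	err := e.db.View(dbGetInternal(internalBlockHeight, &height))
//	if err == nil {
//		fmt.Println("explorer is synced to height", height)
//	}

// initPersist initializes the persistent structures of the explorer module.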
func (e *Explorer) initPersist() error { // Make the persist directory err := os.MkdirAll(e.persistDir, 0700) if err != nil { return err } // Open the database db, err := persist.OpenDatabase(explorerMetadata, filepath.Join(e.persistDir, "explorer.db")) if err != nil { return err } e.db = db // Initialize the database err = e.db.Update(func(tx *bolt.Tx) error { buckets := [][]byte{ bucketBlockFacts, bucketBlockIDs, bucketBlocksDifficulty, bucketBlockTargets, bucketFileContractHistories, bucketFileContractIDs, bucketInternal, bucketSiacoinOutputIDs, bucketSiacoinOutputs, bucketSiafundOutputIDs, bucketSiafundOutputs, bucketTransactionIDs, bucketUnlockHashes, } for _, b := range buckets { _, err := tx.CreateBucketIfNotExists(b) if err != nil { return err } } // set default values for the bucketInternal internalDefaults := []struct { key, val []byte }{ {internalBlockHeight, encoding.Marshal(types.BlockHeight(0))}, {internalRecentChange, encoding.Marshal(modules.ConsensusChangeID{})}, } b := tx.Bucket(bucketInternal) for _, d := range internalDefaults { if b.Get(d.key) != nil { continue } err := b.Put(d.key, d.val) if err != nil { return err } } return nil }) if err != nil { return err } return nil } Sia-1.3.0/modules/explorer/update.go000066400000000000000000000464221313565667000173740ustar00rootroot00000000000000package explorer import ( "fmt" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) // ProcessConsensusChange follows the most recent changes to the consensus set, // including parsing new blocks and updating the utxo sets. func (e *Explorer) ProcessConsensusChange(cc modules.ConsensusChange) { if len(cc.AppliedBlocks) == 0 { build.Critical("Explorer.ProcessConsensusChange called with a ConsensusChange that has no AppliedBlocks") } err := e.db.Update(func(tx *bolt.Tx) (err error) { // use exception-style error handling to enable more concise update code defer func() { if r := recover(); r != nil { err = fmt.Errorf("%v", r) } }() // get starting block height var blockheight types.BlockHeight err = dbGetInternal(internalBlockHeight, &blockheight)(tx) if err != nil { return err } // Update cumulative stats for reverted blocks. 
for _, block := range cc.RevertedBlocks { bid := block.ID() tbid := types.TransactionID(bid) blockheight-- dbRemoveBlockID(tx, bid) dbRemoveTransactionID(tx, tbid) // Miner payouts are a transaction target, exists := e.cs.ChildTarget(block.ParentID) if !exists { target = types.RootTarget } dbRemoveBlockTarget(tx, bid, target) // Remove miner payouts for j, payout := range block.MinerPayouts { scoid := block.MinerPayoutID(uint64(j)) dbRemoveSiacoinOutputID(tx, scoid, tbid) dbRemoveUnlockHash(tx, payout.UnlockHash, tbid) } // Remove transactions for _, txn := range block.Transactions { txid := txn.ID() dbRemoveTransactionID(tx, txid) for _, sci := range txn.SiacoinInputs { dbRemoveSiacoinOutputID(tx, sci.ParentID, txid) dbRemoveUnlockHash(tx, sci.UnlockConditions.UnlockHash(), txid) } for k, sco := range txn.SiacoinOutputs { scoid := txn.SiacoinOutputID(uint64(k)) dbRemoveSiacoinOutputID(tx, scoid, txid) dbRemoveUnlockHash(tx, sco.UnlockHash, txid) dbRemoveSiacoinOutput(tx, scoid) } for k, fc := range txn.FileContracts { fcid := txn.FileContractID(uint64(k)) dbRemoveFileContractID(tx, fcid, txid) dbRemoveUnlockHash(tx, fc.UnlockHash, txid) for l, sco := range fc.ValidProofOutputs { scoid := fcid.StorageProofOutputID(types.ProofValid, uint64(l)) dbRemoveSiacoinOutputID(tx, scoid, txid) dbRemoveUnlockHash(tx, sco.UnlockHash, txid) } for l, sco := range fc.MissedProofOutputs { scoid := fcid.StorageProofOutputID(types.ProofMissed, uint64(l)) dbRemoveSiacoinOutputID(tx, scoid, txid) dbRemoveUnlockHash(tx, sco.UnlockHash, txid) } dbRemoveFileContract(tx, fcid) } for _, fcr := range txn.FileContractRevisions { dbRemoveFileContractID(tx, fcr.ParentID, txid) dbRemoveUnlockHash(tx, fcr.UnlockConditions.UnlockHash(), txid) dbRemoveUnlockHash(tx, fcr.NewUnlockHash, txid) for l, sco := range fcr.NewValidProofOutputs { scoid := fcr.ParentID.StorageProofOutputID(types.ProofValid, uint64(l)) dbRemoveSiacoinOutputID(tx, scoid, txid) dbRemoveUnlockHash(tx, sco.UnlockHash, txid) } for l, sco := range fcr.NewMissedProofOutputs { scoid := fcr.ParentID.StorageProofOutputID(types.ProofMissed, uint64(l)) dbRemoveSiacoinOutputID(tx, scoid, txid) dbRemoveUnlockHash(tx, sco.UnlockHash, txid) } // Remove the file contract revision from the revision chain. dbRemoveFileContractRevision(tx, fcr.ParentID) } for _, sp := range txn.StorageProofs { dbRemoveStorageProof(tx, sp.ParentID) } for _, sfi := range txn.SiafundInputs { dbRemoveSiafundOutputID(tx, sfi.ParentID, txid) dbRemoveUnlockHash(tx, sfi.UnlockConditions.UnlockHash(), txid) dbRemoveUnlockHash(tx, sfi.ClaimUnlockHash, txid) } for k, sfo := range txn.SiafundOutputs { sfoid := txn.SiafundOutputID(uint64(k)) dbRemoveSiafundOutputID(tx, sfoid, txid) dbRemoveUnlockHash(tx, sfo.UnlockHash, txid) } } // remove the associated block facts dbRemoveBlockFacts(tx, bid) } // Update cumulative stats for applied blocks. for _, block := range cc.AppliedBlocks { bid := block.ID() tbid := types.TransactionID(bid) // special handling for genesis block if bid == types.GenesisID { dbAddGenesisBlock(tx) continue } blockheight++ dbAddBlockID(tx, bid, blockheight) dbAddTransactionID(tx, tbid, blockheight) // Miner payouts are a transaction target, exists := e.cs.ChildTarget(block.ParentID) if !exists { target = types.RootTarget } dbAddBlockTarget(tx, bid, target) // Catalog the new miner payouts. 
for j, payout := range block.MinerPayouts { scoid := block.MinerPayoutID(uint64(j)) dbAddSiacoinOutputID(tx, scoid, tbid) dbAddUnlockHash(tx, payout.UnlockHash, tbid) } // Update cumulative stats for applied transactions. for _, txn := range block.Transactions { // Add the transaction to the list of active transactions. txid := txn.ID() dbAddTransactionID(tx, txid, blockheight) for _, sci := range txn.SiacoinInputs { dbAddSiacoinOutputID(tx, sci.ParentID, txid) dbAddUnlockHash(tx, sci.UnlockConditions.UnlockHash(), txid) } for j, sco := range txn.SiacoinOutputs { scoid := txn.SiacoinOutputID(uint64(j)) dbAddSiacoinOutputID(tx, scoid, txid) dbAddUnlockHash(tx, sco.UnlockHash, txid) } for k, fc := range txn.FileContracts { fcid := txn.FileContractID(uint64(k)) dbAddFileContractID(tx, fcid, txid) dbAddUnlockHash(tx, fc.UnlockHash, txid) dbAddFileContract(tx, fcid, fc) for l, sco := range fc.ValidProofOutputs { scoid := fcid.StorageProofOutputID(types.ProofValid, uint64(l)) dbAddSiacoinOutputID(tx, scoid, txid) dbAddUnlockHash(tx, sco.UnlockHash, txid) } for l, sco := range fc.MissedProofOutputs { scoid := fcid.StorageProofOutputID(types.ProofMissed, uint64(l)) dbAddSiacoinOutputID(tx, scoid, txid) dbAddUnlockHash(tx, sco.UnlockHash, txid) } } for _, fcr := range txn.FileContractRevisions { dbAddFileContractID(tx, fcr.ParentID, txid) dbAddUnlockHash(tx, fcr.UnlockConditions.UnlockHash(), txid) dbAddUnlockHash(tx, fcr.NewUnlockHash, txid) for l, sco := range fcr.NewValidProofOutputs { scoid := fcr.ParentID.StorageProofOutputID(types.ProofValid, uint64(l)) dbAddSiacoinOutputID(tx, scoid, txid) dbAddUnlockHash(tx, sco.UnlockHash, txid) } for l, sco := range fcr.NewMissedProofOutputs { scoid := fcr.ParentID.StorageProofOutputID(types.ProofMissed, uint64(l)) dbAddSiacoinOutputID(tx, scoid, txid) dbAddUnlockHash(tx, sco.UnlockHash, txid) } dbAddFileContractRevision(tx, fcr.ParentID, fcr) } for _, sp := range txn.StorageProofs { dbAddFileContractID(tx, sp.ParentID, txid) dbAddStorageProof(tx, sp.ParentID, sp) } for _, sfi := range txn.SiafundInputs { dbAddSiafundOutputID(tx, sfi.ParentID, txid) dbAddUnlockHash(tx, sfi.UnlockConditions.UnlockHash(), txid) dbAddUnlockHash(tx, sfi.ClaimUnlockHash, txid) } for k, sfo := range txn.SiafundOutputs { sfoid := txn.SiafundOutputID(uint64(k)) dbAddSiafundOutputID(tx, sfoid, txid) dbAddUnlockHash(tx, sfo.UnlockHash, txid) } } // calculate and add new block facts, if possible if tx.Bucket(bucketBlockFacts).Get(encoding.Marshal(block.ParentID)) != nil { facts := dbCalculateBlockFacts(tx, e.cs, block) dbAddBlockFacts(tx, facts) } } // Update stats according to SiacoinOutputDiffs for _, scod := range cc.SiacoinOutputDiffs { if scod.Direction == modules.DiffApply { dbAddSiacoinOutput(tx, scod.ID, scod.SiacoinOutput) } } // Update stats according to SiafundOutputDiffs for _, sfod := range cc.SiafundOutputDiffs { if sfod.Direction == modules.DiffApply { dbAddSiafundOutput(tx, sfod.ID, sfod.SiafundOutput) } } // Compute the changes in the active set. Note, because this is calculated // at the end instead of in a loop, the historic facts may contain // inaccuracies about the active set. This should not be a problem except // for large reorgs. 
// TODO: improve this currentBlock, exists := e.cs.BlockAtHeight(blockheight) if !exists { build.Critical("consensus is missing block", blockheight) } currentID := currentBlock.ID() var facts blockFacts err = dbGetAndDecode(bucketBlockFacts, currentID, &facts)(tx) if err == nil { for _, diff := range cc.FileContractDiffs { if diff.Direction == modules.DiffApply { facts.ActiveContractCount++ facts.ActiveContractCost = facts.ActiveContractCost.Add(diff.FileContract.Payout) facts.ActiveContractSize = facts.ActiveContractSize.Add(types.NewCurrency64(diff.FileContract.FileSize)) } else { facts.ActiveContractCount-- facts.ActiveContractCost = facts.ActiveContractCost.Sub(diff.FileContract.Payout) facts.ActiveContractSize = facts.ActiveContractSize.Sub(types.NewCurrency64(diff.FileContract.FileSize)) } } err = tx.Bucket(bucketBlockFacts).Put(encoding.Marshal(currentID), encoding.Marshal(facts)) if err != nil { return err } } // set final blockheight err = dbSetInternal(internalBlockHeight, blockheight)(tx) if err != nil { return err } // set change ID err = dbSetInternal(internalRecentChange, cc.ID)(tx) if err != nil { return err } return nil }) if err != nil { build.Critical("explorer update failed:", err) } } // helper functions func assertNil(err error) { if err != nil { panic(err) } } func mustPut(bucket *bolt.Bucket, key, val interface{}) { assertNil(bucket.Put(encoding.Marshal(key), encoding.Marshal(val))) } func mustPutSet(bucket *bolt.Bucket, key interface{}) { assertNil(bucket.Put(encoding.Marshal(key), nil)) } func mustDelete(bucket *bolt.Bucket, key interface{}) { assertNil(bucket.Delete(encoding.Marshal(key))) } func bucketIsEmpty(bucket *bolt.Bucket) bool { k, _ := bucket.Cursor().First() return k == nil } // These functions panic on error. The panic will be caught by // ProcessConsensusChange. 
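// The combination of mustPut/mustDelete below and the deferred recover in
// ProcessConsensusChange implements the exception-style error handling noted
// at the top of the update transaction: any bolt failure panics here and is
// converted back into an ordinary error before db.Update returns. An
// illustrative sketch of the same pattern in isolation (db is a placeholder
// *persist.BoltDatabase; the helpers are the ones defined in this file):
//
//	err := db.Update(func(tx *bolt.Tx) (err error) {
//		defer func() {
//			if r := recover(); r != nil {
//				err = fmt.Errorf("%v", r)
//			}
//		}()
//		mustPut(tx.Bucket(bucketInternal), "key", "value") // panics on failure
//		return nil
//	})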
// Add/Remove block ID func dbAddBlockID(tx *bolt.Tx, id types.BlockID, height types.BlockHeight) { mustPut(tx.Bucket(bucketBlockIDs), id, height) } func dbRemoveBlockID(tx *bolt.Tx, id types.BlockID) { mustDelete(tx.Bucket(bucketBlockIDs), id) } // Add/Remove block facts func dbAddBlockFacts(tx *bolt.Tx, facts blockFacts) { mustPut(tx.Bucket(bucketBlockFacts), facts.BlockID, facts) } func dbRemoveBlockFacts(tx *bolt.Tx, id types.BlockID) { mustDelete(tx.Bucket(bucketBlockFacts), id) } // Add/Remove block target func dbAddBlockTarget(tx *bolt.Tx, id types.BlockID, target types.Target) { mustPut(tx.Bucket(bucketBlockTargets), id, target) } func dbRemoveBlockTarget(tx *bolt.Tx, id types.BlockID, target types.Target) { mustDelete(tx.Bucket(bucketBlockTargets), id) } // Add/Remove file contract func dbAddFileContract(tx *bolt.Tx, id types.FileContractID, fc types.FileContract) { history := fileContractHistory{Contract: fc} mustPut(tx.Bucket(bucketFileContractHistories), id, history) } func dbRemoveFileContract(tx *bolt.Tx, id types.FileContractID) { mustDelete(tx.Bucket(bucketFileContractHistories), id) } // Add/Remove txid from file contract ID bucket func dbAddFileContractID(tx *bolt.Tx, id types.FileContractID, txid types.TransactionID) { b, err := tx.Bucket(bucketFileContractIDs).CreateBucketIfNotExists(encoding.Marshal(id)) assertNil(err) mustPutSet(b, txid) } func dbRemoveFileContractID(tx *bolt.Tx, id types.FileContractID, txid types.TransactionID) { bucket := tx.Bucket(bucketFileContractIDs).Bucket(encoding.Marshal(id)) mustDelete(bucket, txid) if bucketIsEmpty(bucket) { tx.Bucket(bucketFileContractIDs).DeleteBucket(encoding.Marshal(id)) } } func dbAddFileContractRevision(tx *bolt.Tx, fcid types.FileContractID, fcr types.FileContractRevision) { var history fileContractHistory assertNil(dbGetAndDecode(bucketFileContractHistories, fcid, &history)(tx)) history.Revisions = append(history.Revisions, fcr) mustPut(tx.Bucket(bucketFileContractHistories), fcid, history) } func dbRemoveFileContractRevision(tx *bolt.Tx, fcid types.FileContractID) { var history fileContractHistory assertNil(dbGetAndDecode(bucketFileContractHistories, fcid, &history)(tx)) // TODO: could be more rigorous history.Revisions = history.Revisions[:len(history.Revisions)-1] mustPut(tx.Bucket(bucketFileContractHistories), fcid, history) } // Add/Remove siacoin output func dbAddSiacoinOutput(tx *bolt.Tx, id types.SiacoinOutputID, output types.SiacoinOutput) { mustPut(tx.Bucket(bucketSiacoinOutputs), id, output) } func dbRemoveSiacoinOutput(tx *bolt.Tx, id types.SiacoinOutputID) { mustDelete(tx.Bucket(bucketSiacoinOutputs), id) } // Add/Remove txid from siacoin output ID bucket func dbAddSiacoinOutputID(tx *bolt.Tx, id types.SiacoinOutputID, txid types.TransactionID) { b, err := tx.Bucket(bucketSiacoinOutputIDs).CreateBucketIfNotExists(encoding.Marshal(id)) assertNil(err) mustPutSet(b, txid) } func dbRemoveSiacoinOutputID(tx *bolt.Tx, id types.SiacoinOutputID, txid types.TransactionID) { bucket := tx.Bucket(bucketSiacoinOutputIDs).Bucket(encoding.Marshal(id)) mustDelete(bucket, txid) if bucketIsEmpty(bucket) { tx.Bucket(bucketSiacoinOutputIDs).DeleteBucket(encoding.Marshal(id)) } } // Add/Remove siafund output func dbAddSiafundOutput(tx *bolt.Tx, id types.SiafundOutputID, output types.SiafundOutput) { mustPut(tx.Bucket(bucketSiafundOutputs), id, output) } func dbRemoveSiafundOutput(tx *bolt.Tx, id types.SiafundOutputID) { mustDelete(tx.Bucket(bucketSiafundOutputs), id) } // Add/Remove txid from siafund output ID bucket func 
dbAddSiafundOutputID(tx *bolt.Tx, id types.SiafundOutputID, txid types.TransactionID) { b, err := tx.Bucket(bucketSiafundOutputIDs).CreateBucketIfNotExists(encoding.Marshal(id)) assertNil(err) mustPutSet(b, txid) } func dbRemoveSiafundOutputID(tx *bolt.Tx, id types.SiafundOutputID, txid types.TransactionID) { bucket := tx.Bucket(bucketSiafundOutputIDs).Bucket(encoding.Marshal(id)) mustDelete(bucket, txid) if bucketIsEmpty(bucket) { tx.Bucket(bucketSiafundOutputIDs).DeleteBucket(encoding.Marshal(id)) } } // Add/Remove storage proof func dbAddStorageProof(tx *bolt.Tx, fcid types.FileContractID, sp types.StorageProof) { var history fileContractHistory assertNil(dbGetAndDecode(bucketFileContractHistories, fcid, &history)(tx)) history.StorageProof = sp mustPut(tx.Bucket(bucketFileContractHistories), fcid, history) } func dbRemoveStorageProof(tx *bolt.Tx, fcid types.FileContractID) { dbAddStorageProof(tx, fcid, types.StorageProof{}) } // Add/Remove transaction ID func dbAddTransactionID(tx *bolt.Tx, id types.TransactionID, height types.BlockHeight) { mustPut(tx.Bucket(bucketTransactionIDs), id, height) } func dbRemoveTransactionID(tx *bolt.Tx, id types.TransactionID) { mustDelete(tx.Bucket(bucketTransactionIDs), id) } // Add/Remove txid from unlock hash bucket func dbAddUnlockHash(tx *bolt.Tx, uh types.UnlockHash, txid types.TransactionID) { b, err := tx.Bucket(bucketUnlockHashes).CreateBucketIfNotExists(encoding.Marshal(uh)) assertNil(err) mustPutSet(b, txid) } func dbRemoveUnlockHash(tx *bolt.Tx, uh types.UnlockHash, txid types.TransactionID) { bucket := tx.Bucket(bucketUnlockHashes).Bucket(encoding.Marshal(uh)) mustDelete(bucket, txid) if bucketIsEmpty(bucket) { tx.Bucket(bucketUnlockHashes).DeleteBucket(encoding.Marshal(uh)) } } func dbCalculateBlockFacts(tx *bolt.Tx, cs modules.ConsensusSet, block types.Block) blockFacts { // get the parent block facts var bf blockFacts err := dbGetAndDecode(bucketBlockFacts, block.ParentID, &bf)(tx) assertNil(err) // get target target, exists := cs.ChildTarget(block.ParentID) if !exists { panic(fmt.Sprint("ConsensusSet is missing target of known block", block.ParentID)) } // update fields bf.BlockID = block.ID() bf.Height++ bf.Difficulty = target.Difficulty() bf.Target = target bf.Timestamp = block.Timestamp bf.TotalCoins = types.CalculateNumSiacoins(bf.Height) // calculate maturity timestamp var maturityTimestamp types.Timestamp if bf.Height > types.MaturityDelay { oldBlock, exists := cs.BlockAtHeight(bf.Height - types.MaturityDelay) if !exists { panic(fmt.Sprint("ConsensusSet is missing block at height", bf.Height-types.MaturityDelay)) } maturityTimestamp = oldBlock.Timestamp } bf.MaturityTimestamp = maturityTimestamp // calculate hashrate by averaging last 'hashrateEstimationBlocks' blocks var estimatedHashrate types.Currency if bf.Height > hashrateEstimationBlocks { var totalDifficulty = bf.Target var oldestTimestamp types.Timestamp for i := types.BlockHeight(1); i < hashrateEstimationBlocks; i++ { b, exists := cs.BlockAtHeight(bf.Height - i) if !exists { panic(fmt.Sprint("ConsensusSet is missing block at height", bf.Height-hashrateEstimationBlocks)) } target, exists := cs.ChildTarget(b.ParentID) if !exists { panic(fmt.Sprint("ConsensusSet is missing target of known block", b.ParentID)) } totalDifficulty = totalDifficulty.AddDifficulties(target) oldestTimestamp = b.Timestamp } secondsPassed := bf.Timestamp - oldestTimestamp estimatedHashrate = totalDifficulty.Difficulty().Div64(uint64(secondsPassed)) } bf.EstimatedHashrate = estimatedHashrate 
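	// The estimate above is (sum of difficulties over the window) divided by
	// the wall-clock seconds the window spanned: difficulty counts the
	// expected number of hashes needed per block, so hashes per second falls
	// out directly. A toy worked example (numbers are illustrative only,
	// assuming Sia's ~10 minute block time):
	//
	//	totalDifficulty := types.NewCurrency64(1e12)     // expected hashes in window
	//	secondsPassed := uint64(200 * 600)               // 200 blocks at ~10 minutes
	//	hashrate := totalDifficulty.Div64(secondsPassed) // ~8.3e6 hashes/sec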
bf.MinerPayoutCount += uint64(len(block.MinerPayouts)) bf.TransactionCount += uint64(len(block.Transactions)) for _, txn := range block.Transactions { bf.SiacoinInputCount += uint64(len(txn.SiacoinInputs)) bf.SiacoinOutputCount += uint64(len(txn.SiacoinOutputs)) bf.FileContractCount += uint64(len(txn.FileContracts)) bf.FileContractRevisionCount += uint64(len(txn.FileContractRevisions)) bf.StorageProofCount += uint64(len(txn.StorageProofs)) bf.SiafundInputCount += uint64(len(txn.SiafundInputs)) bf.SiafundOutputCount += uint64(len(txn.SiafundOutputs)) bf.MinerFeeCount += uint64(len(txn.MinerFees)) bf.ArbitraryDataCount += uint64(len(txn.ArbitraryData)) bf.TransactionSignatureCount += uint64(len(txn.TransactionSignatures)) for _, fc := range txn.FileContracts { bf.TotalContractCost = bf.TotalContractCost.Add(fc.Payout) bf.TotalContractSize = bf.TotalContractSize.Add(types.NewCurrency64(fc.FileSize)) } for _, fcr := range txn.FileContractRevisions { bf.TotalContractSize = bf.TotalContractSize.Add(types.NewCurrency64(fcr.NewFileSize)) bf.TotalRevisionVolume = bf.TotalRevisionVolume.Add(types.NewCurrency64(fcr.NewFileSize)) } } return bf } // Special handling for the genesis block. No other functions are called on it. func dbAddGenesisBlock(tx *bolt.Tx) { id := types.GenesisID dbAddBlockID(tx, id, 0) txid := types.GenesisBlock.Transactions[0].ID() dbAddTransactionID(tx, txid, 0) for i, sfo := range types.GenesisSiafundAllocation { sfoid := types.GenesisBlock.Transactions[0].SiafundOutputID(uint64(i)) dbAddSiafundOutputID(tx, sfoid, txid) dbAddUnlockHash(tx, sfo.UnlockHash, txid) dbAddSiafundOutput(tx, sfoid, sfo) } dbAddBlockFacts(tx, blockFacts{ BlockFacts: modules.BlockFacts{ BlockID: id, Height: 0, Difficulty: types.RootTarget.Difficulty(), Target: types.RootTarget, TotalCoins: types.CalculateCoinbase(0), TransactionCount: 1, SiafundOutputCount: uint64(len(types.GenesisSiafundAllocation)), }, Timestamp: types.GenesisBlock.Timestamp, }) } Sia-1.3.0/modules/explorer/update_test.go000066400000000000000000000136771313565667000204410ustar00rootroot00000000000000package explorer import ( "testing" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) func (et *explorerTester) currentFacts() (facts modules.BlockFacts, exists bool) { var height types.BlockHeight err := et.explorer.db.View(dbGetInternal(internalBlockHeight, &height)) if err != nil { exists = false return } return et.explorer.BlockFacts(height) } // TestIntegrationExplorerFileContractMetrics checks that the siacoin // transfer volume metric is working correctly. func TestIntegrationExplorerFileContractMetrics(t *testing.T) { if testing.Short() { t.Skip() } et, err := createExplorerTester(t.Name()) if err != nil { t.Fatal(err) } // Propel explorer tester past the hardfork height. for i := 0; i < 10; i++ { _, err = et.miner.AddBlock() if err != nil { t.Fatal(err) } } facts, ok := et.currentFacts() if !ok { t.Fatal("couldn't get current facts") } if !facts.ActiveContractCost.IsZero() { t.Error("fresh explorer has nonzero active contract cost") } if facts.ActiveContractCount != 0 { t.Error("active contract count should initialize to zero") } if !facts.ActiveContractSize.IsZero() { t.Error("active contract size should initialize to zero") } // Put a file contract into the chain, and check that the explorer // correctly does all of the counting. 
builder := et.wallet.StartTransaction() builder.FundSiacoins(types.NewCurrency64(5e9)) fcOutputs := []types.SiacoinOutput{{Value: types.NewCurrency64(4805e6)}} fc := types.FileContract{ FileSize: 5e3, WindowStart: et.cs.Height() + 2, WindowEnd: et.cs.Height() + 3, Payout: types.NewCurrency64(5e9), ValidProofOutputs: fcOutputs, MissedProofOutputs: fcOutputs, } _ = builder.AddFileContract(fc) txns, err := builder.Sign(true) if err != nil { t.Fatal(err) } err = et.tpool.AcceptTransactionSet(txns) if err != nil { t.Fatal(err) } _, err = et.miner.AddBlock() if err != nil { t.Fatal(err) } // Check that the stats have updated to represent the file contract. facts, ok = et.currentFacts() if !ok { t.Fatal("couldn't get current facts") } if !facts.ActiveContractCost.Equals64(5e9) { t.Error("active resources providing wrong file contract cost") } if facts.ActiveContractCount != 1 { t.Error("active contract count does not read correctly") } if !facts.ActiveContractSize.Equals64(5e3) { t.Error("active contract size is not correctly reported") } if !facts.TotalContractCost.Equals64(5e9) { t.Error("total cost is not tallied correctly") } if facts.FileContractCount != 1 { t.Error("total contract count is not accurate") } if !facts.TotalContractSize.Equals64(5e3) { t.Error("total contract size is not accurate") } // Put a second file into the explorer to check that multiple files are // handled well. builder = et.wallet.StartTransaction() builder.FundSiacoins(types.NewCurrency64(1e9)) fcOutputs = []types.SiacoinOutput{{Value: types.NewCurrency64(961e6)}} fc = types.FileContract{ FileSize: 15e3, WindowStart: et.cs.Height() + 2, WindowEnd: et.cs.Height() + 3, Payout: types.NewCurrency64(1e9), ValidProofOutputs: fcOutputs, MissedProofOutputs: fcOutputs, } _ = builder.AddFileContract(fc) txns, err = builder.Sign(true) if err != nil { t.Fatal(err) } err = et.tpool.AcceptTransactionSet(txns) if err != nil { t.Fatal(err) } _, err = et.miner.AddBlock() if err != nil { t.Fatal(err) } // Check that the stats have updated to represent the file contracts. facts, ok = et.currentFacts() if !ok { t.Fatal("couldn't get current facts") } if !facts.ActiveContractCost.Equals64(6e9) { t.Error("active resources providing wrong file contract cost") } if facts.ActiveContractCount != 2 { t.Error("active contract count does not read correctly") } if !facts.ActiveContractSize.Equals64(20e3) { t.Error("active contract size is not correctly reported") } if !facts.TotalContractCost.Equals64(6e9) { t.Error("total cost is not tallied correctly") } if facts.FileContractCount != 2 { t.Error("total contract count is not accurate") } if !facts.TotalContractSize.Equals64(20e3) { t.Error("total contract size is not accurate") } // Expire the first file contract but not the second. _, err = et.miner.AddBlock() if err != nil { t.Fatal(err) } // Check that the stats have updated to reflect the expired file contract. 
facts, ok = et.currentFacts() if !ok { t.Fatal("couldn't get current facts") } if !facts.ActiveContractCost.Equals64(1e9) { t.Error("active resources providing wrong file contract cost", facts.ActiveContractCost) } if facts.ActiveContractCount != 1 { t.Error("active contract count does not read correctly") } if !facts.ActiveContractSize.Equals64(15e3) { t.Error("active contract size is not correctly reported") } if !facts.TotalContractCost.Equals64(6e9) { t.Error("total cost is not tallied correctly") } if facts.FileContractCount != 2 { t.Error("total contract count is not accurate") } if !facts.TotalContractSize.Equals64(20e3) { t.Error("total contract size is not accurate") } // Reorg the block explorer to a blank state, see that all of the file // contract statistics got removed. // TODO: broken by new block facts model // err = et.reorgToBlank() // if err != nil { // t.Fatal(err) // } // facts, ok = et.currentFacts() // if !ok { // t.Fatal("couldn't get current facts") // } // if !facts.ActiveContractCost.IsZero() { // t.Error("post reorg active contract cost should be zero, got", facts.ActiveContractCost) // } // if facts.ActiveContractCount != 0 { // t.Error("post reorg active contract count should be zero, got", facts.ActiveContractCount) // } // if !facts.TotalContractCost.IsZero() { // t.Error("post reorg total contract cost should be zero, got", facts.TotalContractCost) // } // if facts.FileContractCount != 0 { // t.Error("post reorg file contract count should be zero, got", facts.FileContractCount) // } } Sia-1.3.0/modules/gateway.go000066400000000000000000000112071313565667000157040ustar00rootroot00000000000000package modules import ( "net" "github.com/NebulousLabs/Sia/build" ) const ( // GatewayDir is the name of the directory used to store the gateway's // persistent data. GatewayDir = "gateway" ) var ( // BootstrapPeers is a list of peers that can be used to find other peers - // when a client first connects to the network, the only options for // finding peers are either manual entry of peers or to use a hardcoded // bootstrap point. While the bootstrap point could be a central service, // it can also be a list of peers that are known to be stable. We have // chosen to hardcode known-stable peers. BootstrapPeers = build.Select(build.Var{ Standard: []NetAddress{ "101.200.214.115:9981", "109.172.42.157:9981", "113.98.98.164:9981", "139.162.152.204:9981", "142.4.209.72:9981", "148.251.221.163:9981", "162.210.249.170:9981", "162.222.23.93:9981", "176.9.72.2:9981", "18.239.0.53:9981", "188.166.61.155:9981", "188.166.61.157:9981", "188.166.61.158:9981", "190.10.8.173:9981", "194.135.90.38:9981", "195.154.243.233:9981", "210.14.155.90:9981", "213.251.158.199:9981", "217.65.8.75:9981", "23.239.14.98:9971", "23.239.14.98:9981", "37.139.28.207:9981", "45.79.159.167:9981", "46.105.118.15:9981", "64.31.31.106:9981", "73.73.50.191:33721", "78.119.218.13:9981", "79.172.204.10:9981", "80.234.37.94:9981", "82.196.11.170:9981", "82.196.5.50:9981", "82.220.99.82:9981", "85.255.197.69:9981", "95.85.14.54:9981", "95.85.15.69:9981", "95.85.15.71:9981", }, Dev: []NetAddress(nil), Testing: []NetAddress(nil), }).([]NetAddress) ) type ( // Peer contains all the info necessary to Broadcast to a peer. Peer struct { Inbound bool `json:"inbound"` Local bool `json:"local"` NetAddress NetAddress `json:"netaddress"` Version string `json:"version"` } // A PeerConn is the connection type used when communicating with peers during // an RPC. It is identical to a net.Conn with the additional RPCAddr method. 
// This method acts as an identifier for peers and is the address that the // peer can be dialed on. It is also the address that should be used when // calling an RPC on the peer. PeerConn interface { net.Conn RPCAddr() NetAddress } // RPCFunc is the type signature of functions that handle RPCs. It is used for // both the caller and the callee. RPCFuncs may perform locking. RPCFuncs may // close the connection early, and it is recommended that they do so to avoid // keeping the connection open after all necessary I/O has been performed. RPCFunc func(PeerConn) error // A Gateway facilitates the interactions between the local node and remote // nodes (peers). It relays incoming blocks and transactions to local modules, // and broadcasts outgoing blocks and transactions to peers. In a broad sense, // it is responsible for ensuring that the local consensus set is consistent // with the "network" consensus set. Gateway interface { // Connect establishes a persistent connection to a peer. Connect(NetAddress) error // Disconnect terminates a connection to a peer. Disconnect(NetAddress) error // Address returns the Gateway's address. Address() NetAddress // Peers returns the addresses that the Gateway is currently connected to. Peers() []Peer // RegisterRPC registers a function to handle incoming connections that // supply the given RPC ID. RegisterRPC(string, RPCFunc) // UnregisterRPC unregisters an RPC and removes all references to the RPCFunc // supplied in the corresponding RegisterRPC call. References to RPCFuncs // registered with RegisterConnectCall are not removed and should be removed // with UnregisterConnectCall. If the RPC does not exist no action is taken. UnregisterRPC(string) // RegisterConnectCall registers an RPC name and function to be called // upon connecting to a peer. RegisterConnectCall(string, RPCFunc) // UnregisterConnectCall unregisters an RPC and removes all references to the // RPCFunc supplied in the corresponding RegisterConnectCall call. References // to RPCFuncs registered with RegisterRPC are not removed and should be // removed with UnregisterRPC. If the RPC does not exist no action is taken. UnregisterConnectCall(string) // RPC calls an RPC on the given address. RPC cannot be called on an // address that the Gateway is not connected to. RPC(NetAddress, string, RPCFunc) error // Broadcast transmits obj, prefaced by the RPC name, to all of the // given peers in parallel. Broadcast(name string, obj interface{}, peers []Peer) // Close safely stops the Gateway's listener process. Close() error } ) Sia-1.3.0/modules/gateway/000077500000000000000000000000001313565667000153545ustar00rootroot00000000000000Sia-1.3.0/modules/gateway/conn.go000066400000000000000000000016311313565667000166410ustar00rootroot00000000000000package gateway import ( "net" "time" "github.com/NebulousLabs/Sia/modules" ) // peerConn is a simple type that implements the modules.PeerConn interface. type peerConn struct { net.Conn dialbackAddr modules.NetAddress } // RPCAddr implements the RPCAddr method of the modules.PeerConn interface. It // is the address that identifies a peer. func (pc peerConn) RPCAddr() modules.NetAddress { return pc.dialbackAddr } // dial will dial the input address and return a connection. dial appropriately // handles things like clean shutdown, fast shutdown, and chooses the correct // communication protocol. 
func (g *Gateway) dial(addr modules.NetAddress) (net.Conn, error) {
	dialer := &net.Dialer{
		Cancel:  g.threads.StopChan(),
		Timeout: dialTimeout,
	}
	conn, err := dialer.Dial("tcp", string(addr))
	if err != nil {
		return nil, err
	}
	conn.SetDeadline(time.Now().Add(connStdDeadline))
	return conn, nil
}
Sia-1.3.0/modules/gateway/consts.go000066400000000000000000000166671313565667000172300ustar00rootroot00000000000000package gateway

import (
	"time"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/modules"
)

const (
	// handshakeUpgradeVersion is the version where the gateway handshake RPC
	// was altered to include additional information transfer.
	handshakeUpgradeVersion = "1.0.0"

	// sessionUpgradeVersion is the version where the gateway handshake RPC
	// was altered to include the ID of the genesis block, the gateway's
	// unique ID, and whether a connection is desired. This version also uses
	// smux instead of muxado for stream multiplexing.
	sessionUpgradeVersion = "1.3.0"

	// maxLocalOutboundPeers is currently set to 3, meaning the gateway will not
	// consider a local node to be an outbound peer if the gateway already has
	// 3 outbound peers. Three is currently needed to handle situations where
	// the gateway is at high risk of connecting to itself (such as a low
	// number of total peers, especially such as in a testing environment).
	// Once the gateway has a proper way to figure out that it's trying to
	// connect to itself, this number can be reduced.
	maxLocalOutboundPeers = 3

	// minAcceptableVersion is the version below which the gateway will refuse to
	// connect to peers and reject connection attempts.
	//
	// Reject peers < v0.4.0 as the previous version is v0.3.3 which is
	// pre-hardfork.
	minAcceptableVersion = "0.4.0"

	// maxEncodedSessionHeaderSize is the maximum allowed size of an encoded
	// sessionHeader object.
	maxEncodedSessionHeaderSize = 40 + modules.MaxEncodedNetAddressLength

	// saveFrequency defines how often the gateway saves its persistence.
	saveFrequency = time.Minute * 2
)

var (
	// fastNodePurgeDelay defines the amount of time that is waited between each
	// iteration of the purge loop when the gateway has enough nodes to be
	// needing to purge quickly.
	fastNodePurgeDelay = build.Select(build.Var{
		Standard: 1 * time.Minute,
		Dev:      5 * time.Second,
		Testing:  200 * time.Millisecond,
	}).(time.Duration)

	// healthyNodeListLen defines the number of nodes that the gateway must
	// have in the node list before it will stop asking peers for more nodes.
	healthyNodeListLen = build.Select(build.Var{
		Standard: int(200),
		Dev:      int(30),
		Testing:  int(15),
	}).(int)

	// maxSharedNodes defines the number of nodes that will be shared between
	// peers when they are expanding their node lists.
	maxSharedNodes = build.Select(build.Var{
		Standard: uint64(10),
		Dev:      uint64(5),
		Testing:  uint64(3),
	}).(uint64)

	// nodePurgeDelay defines the amount of time that is waited between each
	// iteration of the node purge loop.
	nodePurgeDelay = build.Select(build.Var{
		Standard: 10 * time.Minute,
		Dev:      20 * time.Second,
		Testing:  500 * time.Millisecond,
	}).(time.Duration)

	// nodeListDelay defines the amount of time that is waited between each
	// iteration of the node list loop.
	nodeListDelay = build.Select(build.Var{
		Standard: 5 * time.Second,
		Dev:      3 * time.Second,
		Testing:  500 * time.Millisecond,
	}).(time.Duration)
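	// The version constants above gate the handshake. An illustrative sketch
	// of how a remote version would be checked (build.VersionCmp is the
	// comparison helper used elsewhere in this package; remoteVersion is a
	// hypothetical variable holding the peer's reported version):
	//
	//	if build.VersionCmp(remoteVersion, minAcceptableVersion) < 0 {
	//		// reject the peer: it predates the v0.4.0 hardfork
	//	}

	// peerRPCDelay defines the amount of time waited between each RPC accepted
	// from a peer. Without this delay, a peer can force us to spin up thousands
	// of goroutines per second.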
	peerRPCDelay = build.Select(build.Var{
		Standard: 3 * time.Second,
		Dev:      1 * time.Second,
		Testing:  20 * time.Millisecond,
	}).(time.Duration)

	// pruneNodeListLen defines the number of nodes that the gateway must have
	// to be pruning nodes from the node list.
	pruneNodeListLen = build.Select(build.Var{
		Standard: int(50),
		Dev:      int(15),
		Testing:  int(10),
	}).(int)

	// quickPruneListLen defines the number of nodes that the gateway must have
	// to be pruning nodes quickly from the node list.
	quickPruneListLen = build.Select(build.Var{
		Standard: int(250),
		Dev:      int(40),
		Testing:  int(20),
	}).(int)
)

var (
	// The gateway will sleep this long between incoming connections. For
	// attack reasons, the acceptInterval should be longer than the
	// nodeListDelay. Right at startup, a node is vulnerable to being flooded
	// by Sybil attackers. The node's best defense is to wait until it has
	// filled out its nodelist somewhat from the bootstrap nodes. An attacker
	// needs to completely dominate the nodelist and the peerlist to be
	// successful, so just a few honest nodes from requests to the bootstraps
	// should be enough to fend from most attacks.
	acceptInterval = build.Select(build.Var{
		Standard: 6 * time.Second,
		Dev:      3 * time.Second,
		Testing:  100 * time.Millisecond,
	}).(time.Duration)

	// acquiringPeersDelay defines the amount of time that is waited between
	// iterations of the peer acquisition loop if the gateway is actively
	// forming new connections with peers.
	acquiringPeersDelay = build.Select(build.Var{
		Standard: 5 * time.Second,
		Dev:      3 * time.Second,
		Testing:  500 * time.Millisecond,
	}).(time.Duration)

	// fullyConnectedThreshold defines the number of peers that the gateway can
	// have before it stops accepting inbound connections.
	fullyConnectedThreshold = build.Select(build.Var{
		Standard: 128,
		Dev:      20,
		Testing:  10,
	}).(int)

	// maxConcurrentOutboundPeerRequests defines the maximum number of peer
	// connections that the gateway will try to form concurrently.
	maxConcurrentOutboundPeerRequests = build.Select(build.Var{
		Standard: 3,
		Dev:      2,
		Testing:  2,
	}).(int)

	// noNodesDelay defines the amount of time that is waited between
	// iterations of the peer acquisition loop if the gateway does not have any
	// nodes in the nodelist.
	noNodesDelay = build.Select(build.Var{
		Standard: 20 * time.Second,
		Dev:      10 * time.Second,
		Testing:  3 * time.Second,
	}).(time.Duration)

	// unwantedLocalPeerDelay defines the amount of time that is waited
	// between iterations of the permanentPeerManager if the gateway has at
	// least a few outbound peers, but is not well connected, and the recently
	// selected peer was a local peer. The wait is mostly to prevent the
	// gateway from hogging the CPU in the event that all peers are local
	// peers.
	unwantedLocalPeerDelay = build.Select(build.Var{
		Standard: 2 * time.Second,
		Dev:      1 * time.Second,
		Testing:  100 * time.Millisecond,
	}).(time.Duration)

	// wellConnectedDelay defines the amount of time that is waited between
	// iterations of the peer acquisition loop if the gateway is well
	// connected.
	wellConnectedDelay = build.Select(build.Var{
		Standard: 5 * time.Minute,
		Dev:      1 * time.Minute,
		Testing:  3 * time.Second,
	}).(time.Duration)

	// wellConnectedThreshold is the number of outbound connections at which
	// the gateway will not attempt to make new outbound connections.
	wellConnectedThreshold = build.Select(build.Var{
		Standard: 8,
		Dev:      5,
		Testing:  4,
	}).(int)
)
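// build.Select is how this package keeps one source of truth for tunables
// that differ per release mode: whichever arm (Standard, Dev, or Testing)
// matches the current release build is selected at startup. An illustrative
// sketch of adding a new tunable (the name exampleRetryDelay is hypothetical,
// not part of this package):
//
//	var exampleRetryDelay = build.Select(build.Var{
//		Standard: 30 * time.Second,
//		Dev:      5 * time.Second,
//		Testing:  100 * time.Millisecond,
//	}).(time.Duration)

var (
	// connStdDeadline defines the standard deadline that should be used for
	// all temporary connections to the gateway.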
connStdDeadline = build.Select(build.Var{ Standard: 5 * time.Minute, Dev: 2 * time.Minute, Testing: 30 * time.Second, }).(time.Duration) // the gateway will abort a connection attempt after this long dialTimeout = build.Select(build.Var{ Standard: 3 * time.Minute, Dev: 20 * time.Second, Testing: 500 * time.Millisecond, }).(time.Duration) // rpcStdDeadline defines the standard deadline that should be used for all // incoming RPC calls. rpcStdDeadline = build.Select(build.Var{ Standard: 5 * time.Minute, Dev: 3 * time.Minute, Testing: 5 * time.Second, }).(time.Duration) ) Sia-1.3.0/modules/gateway/gateway.go000066400000000000000000000300651313565667000173500ustar00rootroot00000000000000// Package gateway connects a Sia node to the Sia flood network. The flood // network is used to propagate blocks and transactions. The gateway is the // primary avenue that a node uses to hear about transactions and blocks, and // is the primary avenue used to tell the network about blocks that you have // mined or about transactions that you have created. package gateway import ( "time" ) // For the user to be securely connected to the network, the user must be // connected to at least one node which will send them all of the blocks. An // attacker can trick the user into thinking that a different blockchain is the // full blockchain if the user is not connected to any nodes who are seeing + // broadcasting the real chain (and instead is connected only to attacker nodes // or to nodes that are not broadcasting). This situation is called an eclipse // attack. // // Connecting to a large number of nodes increases the resiliancy of the // network, but also puts a networking burden on the nodes and can slow down // block propagation or increase orphan rates. The gateway's job is to keep the // network efficient while also protecting the user against attacks. // // The gateway keeps a list of nodes that it knows about. It uses this list to // form connections with other nodes, and then uses those connections to // participate in the flood network. The primary vector for an attacker to // achieve an eclipse attack is node list domination. If a gateway's nodelist // is heavily dominated by attacking nodes, then when the gateway chooses to // make random connections the gateway is at risk of selecting only attacker // nodes. // // The gateway defends itself from these attacks by minimizing the amount of // control that an attacker has over the node list and peer list. The first // major defense is that the gateway maintains 8 'outbound' relationships, // which means that the gateway created those relationships instead of an // attacker. If a node forms a connection to you, that node is called // 'inbound', and because it may be an attacker node, it is not trusted. // Outbound nodes can also be attacker nodes, but they are less likely to be // attacker nodes because you chose them, instead of them choosing you. // // If the gateway forms too many connections, the gateway will allow incoming // connections by kicking an existing peer. But, to limit the amount of control // that an attacker may have, only inbound peers are selected to be kicked. // Furthermore, to increase the difficulty of attack, if a new inbound // connection shares the same IP address as an existing connection, the shared // connection is the connection that gets dropped (unless that connection is a // local or outbound connection). // // Nodes are added to a peerlist in two methods. 
The first method is that a // gateway will ask its outbound peers for a list of nodes. If the node list is // below a certain size (see consts.go), the gateway will repeatedly ask // outbound peers to expand the list. Nodes are also added to the nodelist // after they successfully form a connection with the gateway. To limit the // attacker's ability to add nodes to the nodelist, connections are // ratelimited. An attacker with lots of IP addresses still has the ability to // fill up the nodelist, however getting 90% dominance of the nodelist requires // forming thousands of connections, which will take hours or days. By that // time, the attacked node should already have its set of outbound peers, // limiting the amount of damage that the attacker can do. // // To limit DNS-based tomfoolry, nodes are only added to the nodelist if their // connection information takes the form of an IP address. // // Some research has been done on Bitcoin's flood networks. The more relevant // research has been listed below. The papers listed first are more relevant. // Eclipse Attacks on Bitcoin's Peer-to-Peer Network (Heilman, Kendler, Zohar, Goldberg) // Stubborn Mining: Generalizing Selfish Mining and Combining with an Eclipse Attack (Nayak, Kumar, Miller, Shi) // An Overview of BGP Hijacking (https://www.bishopfox.com/blog/2015/08/an-overview-of-bgp-hijacking/) // TODO: Currently the gateway does not do much in terms of bucketing. The // gateway should make sure that it has outbound peers from a wide range of IP // addresses, and when kicking inbound peers it shouldn't just favor kicking // peers of the same IP address, it should favor kicking peers of the same ip // address range. // // TODO: There is no public key exchange, so communications cannot be // effectively encrypted or authenticated. // // TODO: Gateway hostname discovery currently has significant centralization, // namely the fallback is a single third-party website that can easily form any // response it wants. Instead, multiple TLS-protected third party websites // should be used, and the plurality answer should be accepted as the true // hostname. // // TODO: The gateway currently does hostname discovery in a non-blocking way, // which means that the first few peers that it connects to may not get the // correct hostname. This means that you may give the remote peer the wrong // hostname, which means they will not be able to dial you back, which means // they will not add you to their node list. // // TODO: The gateway should encrypt and authenticate all communications. Though // the gateway participates in a flood network, practical attacks have been // demonstrated which have been able to confuse nodes by manipulating messages // from their peers. Encryption + authentication would have made the attack // more difficult. import ( "errors" "fmt" "net" "os" "path/filepath" "sync" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" siasync "github.com/NebulousLabs/Sia/sync" "github.com/NebulousLabs/fastrand" ) var ( errNoPeers = errors.New("no peers") errUnreachable = errors.New("peer did not respond to ping") ) // Gateway implements the modules.Gateway interface. type Gateway struct { listener net.Listener myAddr modules.NetAddress port string // handlers are the RPCs that the Gateway can handle. // // initRPCs are the RPCs that the Gateway calls upon connecting to a peer. handlers map[rpcID]modules.RPCFunc initRPCs map[string]modules.RPCFunc // nodes is the set of all known nodes (i.e. potential peers). 
// // peers are the nodes that the gateway is currently connected to. // // peerTG is a special thread group for tracking peer connections, and will // block shutdown until all peer connections have been closed out. The peer // connections are put in a separate TG because of their unique // requirements - they have the potential to live for the lifetime of the // program, but also the potential to close early. Calling threads.OnStop // for each peer could create a huge backlog of functions that do nothing // (because most of the peers disconnected prior to shutdown). And they // can't call threads.Add because they are potentially very long running // and would block any threads.Flush() calls. So a second threadgroup is // added which handles clean-shutdown for the peers, without blocking // threads.Flush() calls. nodes map[modules.NetAddress]*node peers map[modules.NetAddress]*peer peerTG siasync.ThreadGroup // Utilities. log *persist.Logger mu sync.RWMutex persistDir string threads siasync.ThreadGroup // Unique ID id gatewayID } type gatewayID [8]byte // managedSleep will sleep for the given period of time. If the full time // elapses, 'true' is returned. If the sleep is interrupted for shutdown, // 'false' is returned. func (g *Gateway) managedSleep(t time.Duration) (completed bool) { select { case <-time.After(t): return true case <-g.threads.StopChan(): return false } } // Address returns the NetAddress of the Gateway. func (g *Gateway) Address() modules.NetAddress { g.mu.RLock() defer g.mu.RUnlock() return g.myAddr } // Close saves the state of the Gateway and stops its listener process. func (g *Gateway) Close() error { if err := g.threads.Stop(); err != nil { return err } g.mu.Lock() defer g.mu.Unlock() return g.saveSync() } // New returns an initialized Gateway. func New(addr string, bootstrap bool, persistDir string) (*Gateway, error) { // Create the directory if it doesn't exist. err := os.MkdirAll(persistDir, 0700) if err != nil { return nil, err } g := &Gateway{ handlers: make(map[rpcID]modules.RPCFunc), initRPCs: make(map[string]modules.RPCFunc), nodes: make(map[modules.NetAddress]*node), peers: make(map[modules.NetAddress]*peer), persistDir: persistDir, } // Set Unique GatewayID fastrand.Read(g.id[:]) // Create the logger. g.log, err = persist.NewFileLogger(filepath.Join(g.persistDir, logFile)) if err != nil { return nil, err } // Establish the closing of the logger. g.threads.AfterStop(func() { if err := g.log.Close(); err != nil { // The logger may or may not be working here, so use a println // instead. fmt.Println("Failed to close the gateway logger:", err) } }) g.log.Println("INFO: gateway created, started logging") // Establish that the peerTG must complete shutdown before the primary // thread group completes shutdown. g.threads.OnStop(func() { err = g.peerTG.Stop() if err != nil { g.log.Println("ERROR: peerTG experienced errors while shutting down:", err) } }) // Register RPCs. g.RegisterRPC("ShareNodes", g.shareNodes) g.RegisterConnectCall("ShareNodes", g.requestNodes) // Establish the de-registration of the RPCs. g.threads.OnStop(func() { g.UnregisterRPC("ShareNodes") g.UnregisterConnectCall("ShareNodes") }) // Load the old node list. If it doesn't exist, no problem, but if it does, // we want to know about any errors preventing us from loading it. if loadErr := g.load(); loadErr != nil && !os.IsNotExist(loadErr) { return nil, loadErr } // Spawn the thread to periodically save the gateway. go g.threadedSaveLoop() // Make sure that the gateway saves after shutdown. 
	g.threads.AfterStop(func() {
		g.mu.Lock()
		err = g.saveSync()
		g.mu.Unlock()
		if err != nil {
			g.log.Println("ERROR: Unable to save gateway:", err)
		}
	})

	// Add the bootstrap peers to the node list.
	if bootstrap {
		for _, addr := range modules.BootstrapPeers {
			err := g.addNode(addr)
			if err != nil && err != errNodeExists {
				g.log.Printf("WARN: failed to add the bootstrap node '%v': %v", addr, err)
			}
		}
	}

	// Create the listener which will listen for new connections from peers.
	permanentListenClosedChan := make(chan struct{})
	g.listener, err = net.Listen("tcp", addr)
	if err != nil {
		return nil, err
	}
	// Automatically close the listener when g.threads.Stop() is called.
	g.threads.OnStop(func() {
		err := g.listener.Close()
		if err != nil {
			g.log.Println("WARN: closing the listener failed:", err)
		}
		<-permanentListenClosedChan
	})

	// Set the address and port of the gateway.
	_, g.port, err = net.SplitHostPort(g.listener.Addr().String())
	if err != nil {
		return nil, err
	}
	// Set myAddr equal to the address returned by the listener. It will be
	// overwritten by threadedLearnHostname later on.
	g.myAddr = modules.NetAddress(g.listener.Addr().String())

	// Spawn the peer connection listener.
	go g.permanentListen(permanentListenClosedChan)

	// Spawn the peer manager and provide tools for ensuring clean shutdown.
	peerManagerClosedChan := make(chan struct{})
	g.threads.OnStop(func() {
		<-peerManagerClosedChan
	})
	go g.permanentPeerManager(peerManagerClosedChan)

	// Spawn the node manager and provide tools for ensuring clean shutdown.
	nodeManagerClosedChan := make(chan struct{})
	g.threads.OnStop(func() {
		<-nodeManagerClosedChan
	})
	go g.permanentNodeManager(nodeManagerClosedChan)

	// Spawn the node purger and provide tools for ensuring clean shutdown.
	nodePurgerClosedChan := make(chan struct{})
	g.threads.OnStop(func() {
		<-nodePurgerClosedChan
	})
	go g.permanentNodePurger(nodePurgerClosedChan)

	// Spawn threads to take care of port forwarding and hostname discovery.
	go g.threadedForwardPort(g.port)
	go g.threadedLearnHostname()

	return g, nil
}

// enforce that Gateway satisfies the modules.Gateway interface
var _ modules.Gateway = (*Gateway)(nil)
Sia-1.3.0/modules/gateway/gateway_test.go000066400000000000000000000117711313565667000204120ustar00rootroot00000000000000package gateway

import (
	"io/ioutil"
	"net"
	"os"
	"path/filepath"
	"strconv"
	"sync"
	"testing"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/modules"
	siasync "github.com/NebulousLabs/Sia/sync"
)

// newTestingGateway returns a gateway ready to use in a testing environment.
func newTestingGateway(t *testing.T) *Gateway {
	if testing.Short() {
		panic("newTestingGateway called during short test")
	}
	g, err := New("localhost:0", false, build.TempDir("gateway", t.Name()))
	if err != nil {
		panic(err)
	}
	return g
}

// newNamedTestingGateway returns a gateway ready to use in a testing
// environment. The gateway's persist folder will have the specified suffix.
func newNamedTestingGateway(t *testing.T, suffix string) *Gateway {
	if testing.Short() {
		panic("newTestingGateway called during short test")
	}
	g, err := New("localhost:0", false, build.TempDir("gateway", t.Name()+suffix))
	if err != nil {
		panic(err)
	}
	return g
}
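// Typical lifecycle of a gateway, as an illustrative sketch (the address,
// persist directory, RPC name, and peer address are placeholders; error
// handling elided):
//
//	g, err := New("localhost:9981", true, "/path/to/persist/gateway")
//	if err != nil {
//		// handle err
//	}
//	defer g.Close()
//
//	// Serve an RPC, then dial out to a peer.
//	g.RegisterRPC("Example", func(conn modules.PeerConn) error {
//		defer conn.Close()
//		return nil
//	})
//	err = g.Connect("some.peer.example:9981")

// TestExportedMethodsErrAfterClose tests that exported methods like Close and
// Connect error with siasync.ErrStopped after the gateway has been closed.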
func TestExportedMethodsErrAfterClose(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g := newTestingGateway(t) if err := g.Close(); err != nil { t.Fatal(err) } if err := g.Close(); err != siasync.ErrStopped { t.Fatalf("expected %q, got %q", siasync.ErrStopped, err) } if err := g.Connect("localhost:1234"); err != siasync.ErrStopped { t.Fatalf("expected %q, got %q", siasync.ErrStopped, err) } } // TestAddress tests that Gateway.Address returns the address of its listener. // Also tests that the address is not unspecified and is a loopback address. // The address must be a loopback address for testing. func TestAddress(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g := newTestingGateway(t) defer g.Close() if g.Address() != g.myAddr { t.Fatal("Address does not return g.myAddr") } if g.Address() != modules.NetAddress(g.listener.Addr().String()) { t.Fatalf("wrong address: expected %v, got %v", g.listener.Addr(), g.Address()) } host := modules.NetAddress(g.listener.Addr().String()).Host() ip := net.ParseIP(host) if ip == nil { t.Fatal("address is not an IP address") } if ip.IsUnspecified() { t.Fatal("expected a non-unspecified address") } if !ip.IsLoopback() { t.Fatal("expected a loopback address") } } // TestPeers checks that two gateways are able to connect to each other. func TestPeers(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g1 := newNamedTestingGateway(t, "1") defer g1.Close() g2 := newNamedTestingGateway(t, "2") defer g2.Close() err := g1.Connect(g2.Address()) if err != nil { t.Fatal("failed to connect:", err) } peers := g1.Peers() if len(peers) != 1 || peers[0].NetAddress != g2.Address() { t.Fatal("g1 has bad peer list:", peers) } err = g1.Disconnect(g2.Address()) if err != nil { t.Fatal("failed to disconnect:", err) } peers = g1.Peers() if len(peers) != 0 { t.Fatal("g1 has peers after disconnect:", peers) } } // TestNew checks that a call to New is effective. func TestNew(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() if _, err := New("", false, ""); err == nil { t.Fatal("expecting persistDir error, got nil") } if _, err := New("localhost:0", false, ""); err == nil { t.Fatal("expecting persistDir error, got nil") } if g, err := New("foo", false, build.TempDir("gateway", t.Name()+"1")); err == nil { t.Fatal("expecting listener error, got nil", g.myAddr) } // create corrupted nodes.json dir := build.TempDir("gateway", t.Name()+"2") os.MkdirAll(dir, 0700) err := ioutil.WriteFile(filepath.Join(dir, "nodes.json"), []byte{1, 2, 3}, 0660) if err != nil { t.Fatal("couldn't create corrupted file:", err) } if _, err := New("localhost:0", false, dir); err == nil { t.Fatal("expected load error, got nil") } } // TestClose creates and closes a gateway. func TestClose(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g := newTestingGateway(t) err := g.Close() if err != nil { t.Fatal(err) } } // TestParallelClose spins up 3 gateways, connects them all, and then closes // them in parallel. The goal of this test is to make it more vulnerable to any // potential nondeterministic failures. func TestParallelClose(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // Spin up three gateways in parallel. var gs [3]*Gateway var wg sync.WaitGroup wg.Add(3) for i := range gs { go func(i int) { gs[i] = newNamedTestingGateway(t, strconv.Itoa(i)) wg.Done() }(i) } wg.Wait() // Connect g1 to g2, g2 to g3. They may connect to each other further.
wg.Add(2) for i := range gs[:2] { go func(i int) { err := gs[i].Connect(gs[i+1].myAddr) if err != nil { panic(err) } wg.Done() }(i) } wg.Wait() // Close all three gateways in parallel. wg.Add(3) for i := range gs { go func(i int) { err := gs[i].Close() if err != nil { panic(err) } wg.Done() }(i) } wg.Wait() } Sia-1.3.0/modules/gateway/nodes.go000066400000000000000000000236771313565667000170320ustar00rootroot00000000000000package gateway import ( "errors" "fmt" "net" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) var ( errNodeExists = errors.New("node already added") errNoNodes = errors.New("no nodes in the node list") errOurAddress = errors.New("can't add our own address") errPeerGenesisID = errors.New("peer has different genesis ID") ) // A node represents a potential peer on the Sia network. type node struct { NetAddress modules.NetAddress `json:"netaddress"` WasOutboundPeer bool `json:"wasoutboundpeer"` } // addNode adds an address to the set of nodes on the network. func (g *Gateway) addNode(addr modules.NetAddress) error { if addr == g.myAddr { return errOurAddress } else if _, exists := g.nodes[addr]; exists { return errNodeExists } else if addr.IsStdValid() != nil { return errors.New("address is not valid: " + string(addr)) } else if net.ParseIP(addr.Host()) == nil { return errors.New("address must be an IP address: " + string(addr)) } g.nodes[addr] = &node{ NetAddress: addr, WasOutboundPeer: false, } return nil } // pingNode verifies that there is a reachable node at the provided address // by performing the Sia gateway handshake protocol. func (g *Gateway) pingNode(addr modules.NetAddress) error { // Ping the untrusted node to see whether or not there's actually a // reachable node at the provided address. conn, err := g.dial(addr) if err != nil { return err } defer conn.Close() // Read the node's version. remoteVersion, err := connectVersionHandshake(conn, build.Version) if err != nil { return err } if build.VersionCmp(remoteVersion, sessionUpgradeVersion) < 0 { return nil // for older versions, this is where pinging ends } // Send our header. // NOTE: since we don't intend to complete the connection, we can send an // inaccurate NetAddress. ourHeader := sessionHeader{ GenesisID: types.GenesisID, UniqueID: g.id, NetAddress: modules.NetAddress(conn.LocalAddr().String()), } if err := exchangeOurHeader(conn, ourHeader); err != nil { return err } // Read remote header. var remoteHeader sessionHeader if err := encoding.ReadObject(conn, &remoteHeader, maxEncodedSessionHeaderSize); err != nil { return fmt.Errorf("failed to read remote header: %v", err) } else if err := acceptableSessionHeader(ourHeader, remoteHeader, conn.RemoteAddr().String()); err != nil { return err } // Send special rejection string. if err := encoding.WriteObject(conn, modules.StopResponse); err != nil { return fmt.Errorf("failed to write header rejection: %v", err) } return nil } // removeNode will remove a node from the gateway. func (g *Gateway) removeNode(addr modules.NetAddress) error { if _, exists := g.nodes[addr]; !exists { return errors.New("no record of that node") } delete(g.nodes, addr) return nil } // randomNode returns a random node from the gateway. An error can be returned // if there are no nodes in the node list. func (g *Gateway) randomNode() (modules.NetAddress, error) { if len(g.nodes) == 0 { return "", errNoPeers } // Select a random peer. 
Note that the algorithm below is roughly linear in // the number of nodes known by the gateway, and this number can approach // every node on the network. If the network gets large, this algorithm // will either need to be refactored, or more likely a cap on the size of // g.nodes will need to be added. r := fastrand.Intn(len(g.nodes)) for node := range g.nodes { if r <= 0 { return node, nil } r-- } return "", errNoPeers } // shareNodes is the receiving end of the ShareNodes RPC. It writes up to 10 // randomly selected nodes to the caller. func (g *Gateway) shareNodes(conn modules.PeerConn) error { conn.SetDeadline(time.Now().Add(connStdDeadline)) remoteNA := modules.NetAddress(conn.RemoteAddr().String()) // Assemble a list of nodes to send to the peer. var nodes []modules.NetAddress func() { g.mu.RLock() defer g.mu.RUnlock() // Gather candidates for sharing. gnodes := make([]modules.NetAddress, 0, len(g.nodes)) for node := range g.nodes { // Don't share local peers with remote peers. That means that if 'node' // is loopback, it will only be shared if the remote peer is also // loopback. And if 'node' is private, it will only be shared if the // remote peer is either the loopback or is also private. if node.IsLoopback() && !remoteNA.IsLoopback() { continue } if node.IsLocal() && !remoteNA.IsLocal() { continue } gnodes = append(gnodes, node) } // Iterate through the random permutation of nodes and select the // desirable ones. for _, i := range fastrand.Perm(len(gnodes)) { nodes = append(nodes, gnodes[i]) if uint64(len(nodes)) == maxSharedNodes { break } } }() return encoding.WriteObject(conn, nodes) } // requestNodes is the calling end of the ShareNodes RPC. func (g *Gateway) requestNodes(conn modules.PeerConn) error { conn.SetDeadline(time.Now().Add(connStdDeadline)) var nodes []modules.NetAddress if err := encoding.ReadObject(conn, &nodes, maxSharedNodes*modules.MaxEncodedNetAddressLength); err != nil { return err } g.mu.Lock() for _, node := range nodes { err := g.addNode(node) if err != nil && err != errNodeExists && err != errOurAddress { g.log.Printf("WARN: peer '%v' sent the invalid addr '%v'", conn.RPCAddr(), node) } } err := g.saveSync() if err != nil { g.log.Println("ERROR: unable to save new nodes added to the gateway:", err) } g.mu.Unlock() return nil } // permanentNodePurger is a thread that runs throughout the lifetime of the // gateway, purging unconnectable nodes from the node list in a sustainable // way. func (g *Gateway) permanentNodePurger(closeChan chan struct{}) { defer close(closeChan) for { // Choose an amount of time to wait before attempting to prune a node. // Nodes will occasionally go offline for some time, which can even be // days. We don't want to prune nodes with low-moderate uptime too // aggressively, as they are still useful to the network. // // But if there are a lot of nodes, we want to make sure that the node // list does not become saturated with inaccessible / offline nodes. // Pruning happens a lot faster when there are a lot of nodes in the // gateway. // // This value is a ratelimit which tries to keep the nodes list in the // gateway healthy. A more complex algorithm might adjust this number // according to the percentage of prune attempts that are successful // (decrease prune frequency if most nodes in the database are online, // increase prune frequency if more nodes in the database are offline).
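//
// As a purely illustrative sketch (the counters named here are hypothetical
// and do not exist in this codebase), such an adaptive ratelimit might look
// like:
//
//	online := float64(successfulPings) // hypothetical counter
//	total := float64(totalPings)       // hypothetical counter, assumed > 0
//	// Prune more slowly when most sampled nodes turn out to be reachable.
//	waitTime := time.Duration((0.5 + online/total) * float64(nodePurgeDelay))
//
// The implementation below instead uses a fixed two-tier delay.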
waitTime := nodePurgeDelay g.mu.RLock() nodeCount := len(g.nodes) g.mu.RUnlock() if nodeCount > quickPruneListLen { waitTime = fastNodePurgeDelay } // Sleep as a purge ratelimit. select { case <-time.After(waitTime): case <-g.threads.StopChan(): // The gateway is shutting down, close out the thread. return } // Get a random node for scanning. g.mu.RLock() numNodes := len(g.nodes) node, err := g.randomNode() g.mu.RUnlock() if err == errNoNodes { // errNoNodes is a common error that will be resolved by the // bootstrap process. continue } else if err != nil { // Unusual error, create a logging statement. g.log.Println("ERROR: could not pick a random node for uptime check:", err) continue } if numNodes <= pruneNodeListLen { // There are not enough nodes in the gateway - pruning more is // probably a bad idea, and may affect the user's ability to // connect to the network in the future. continue } // Check whether this node is already a peer. If so, no need to dial // them. g.mu.RLock() _, exists := g.peers[node] g.mu.RUnlock() if exists { continue } // Try connecting to the random node. If the node is not reachable, // remove them from the node list. // // NOTE: an error may be returned if the dial is canceled partway // through, which would cause the node to be pruned even though it may // be a good node. Because nodes are plentiful, this is an acceptable // bug. if err = g.pingNode(node); err != nil { g.mu.Lock() g.removeNode(node) g.mu.Unlock() g.log.Debugf("INFO: removing node %q because it could not be reached during a random scan: %v", node, err) } } } // permanentNodeManager tries to keep the Gateway's node list healthy. As long // as the Gateway has fewer than healthyNodeListLen nodes, it asks a random // peer for more nodes. It also continually pings nodes in order to establish // their connectivity. Unresponsive nodes are aggressively removed. func (g *Gateway) permanentNodeManager(closeChan chan struct{}) { defer close(closeChan) for { // Wait 5 seconds so that a controlled number of node requests are made // to peers. select { case <-time.After(nodeListDelay): case <-g.threads.StopChan(): // Gateway is shutting down, close the thread. return } g.mu.RLock() numNodes := len(g.nodes) peer, err := g.randomOutboundPeer() g.mu.RUnlock() if err == errNoPeers { // errNoPeers is a common and expected error, there's no need to // log it. continue } else if err != nil { g.log.Println("ERROR: could not fetch a random peer:", err) continue } // Determine whether there are a satisfactory number of nodes in the // nodelist. If there are not, use the random peer from earlier to // expand the node list. if numNodes < healthyNodeListLen { err := g.managedRPC(peer, "ShareNodes", g.requestNodes) if err != nil { g.log.Debugf("WARN: RPC ShareNodes failed on peer %q: %v", peer, err) continue } } else { // There are enough nodes in the gateway, no need to check for more // every 5 seconds. Wait a while before checking again. select { case <-time.After(wellConnectedDelay): case <-g.threads.StopChan(): // Gateway is shutting down, close the thread. 
return } } } } Sia-1.3.0/modules/gateway/nodes_test.go000066400000000000000000000262431313565667000200610ustar00rootroot00000000000000package gateway import ( "errors" "strconv" "sync" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/fastrand" ) const dummyNode = "111.111.111.111:1111" // TestAddNode tries adding a node to the gateway using the unexported addNode // function. Edge case trials are also performed. func TestAddNode(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g := newTestingGateway(t) defer g.Close() g.mu.Lock() defer g.mu.Unlock() if err := g.addNode(dummyNode); err != nil { t.Fatal("addNode failed:", err) } if err := g.addNode(dummyNode); err != errNodeExists { t.Error("addNode added duplicate node") } if err := g.addNode("foo"); err == nil { t.Error("addNode added unroutable address") } if err := g.addNode("foo:9981"); err == nil { t.Error("addNode added a non-IP address") } if err := g.addNode("[::]:9981"); err == nil { t.Error("addNode added unspecified address") } if err := g.addNode(g.myAddr); err != errOurAddress { t.Error("addNode added our own address") } } // TestRemoveNode tries removing a node from the gateway. func TestRemoveNode(t *testing.T) { if testing.Short() { t.SkipNow() } g := newTestingGateway(t) defer g.Close() t.Parallel() g.mu.Lock() defer g.mu.Unlock() if err := g.addNode(dummyNode); err != nil { t.Fatal("addNode failed:", err) } if err := g.removeNode(dummyNode); err != nil { t.Fatal("removeNode failed:", err) } if err := g.removeNode("bar"); err == nil { t.Fatal("removeNode removed nonexistent node") } } // TestRandomNode tries pulling random nodes from the gateway using // g.randomNode() under a variety of conditions. func TestRandomNode(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g := newTestingGateway(t) defer g.Close() // Test with 0 nodes. g.mu.RLock() _, err := g.randomNode() g.mu.RUnlock() if err != errNoPeers { t.Fatal("randomNode should fail when the gateway has 0 nodes") } // Test with 1 node. g.mu.Lock() if err = g.addNode(dummyNode); err != nil { t.Fatal(err) } g.mu.Unlock() g.mu.RLock() addr, err := g.randomNode() g.mu.RUnlock() if err != nil { t.Fatal("randomNode failed:", err) } else if addr != dummyNode { t.Fatal("randomNode returned wrong address:", addr) } // Test again with 0 nodes. g.mu.Lock() err = g.removeNode(dummyNode) g.mu.Unlock() if err != nil { t.Fatal(err) } g.mu.RLock() _, err = g.randomNode() g.mu.RUnlock() if err != errNoPeers { t.Fatalf("randomNode returned wrong error: expected %v, got %v", errNoPeers, err) } // Test with 3 nodes. nodes := map[modules.NetAddress]int{ "111.111.111.111:1111": 0, "111.111.111.111:2222": 0, "111.111.111.111:3333": 0, } g.mu.Lock() for addr := range nodes { err := g.addNode(addr) if err != nil { t.Error(err) } } g.mu.Unlock() for i := 0; i < len(nodes)*10; i++ { g.mu.RLock() addr, err := g.randomNode() g.mu.RUnlock() if err != nil { t.Fatal("randomNode failed:", err) } nodes[addr]++ } for node, count := range nodes { if count == 0 { // 1-in-200000 chance of occurring naturally t.Errorf("node %v was never selected", node) } } } // TestShareNodes checks that two gateways will share nodes with each other // following the desired sharing strategy.
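// The strategy, as implemented in shareNodes (nodes.go): at most
// maxSharedNodes addresses are returned per request, and loopback or private
// addresses are only shared with peers that are themselves loopback or
// private.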
func TestShareNodes(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g1 := newNamedTestingGateway(t, "1") defer g1.Close() g2 := newNamedTestingGateway(t, "2") defer g2.Close() // add a node to g2 g2.mu.Lock() err := g2.addNode(dummyNode) g2.mu.Unlock() if err != nil { t.Fatal(err) } // connect err = g1.Connect(g2.Address()) if err != nil { t.Fatal("couldn't connect:", err) } err = build.Retry(50, 100*time.Millisecond, func() error { g1.mu.Lock() _, exists := g1.nodes[dummyNode] g1.mu.Unlock() if !exists { return errors.New("node not added") } return nil }) if err != nil { t.Fatal(err) } // g1 should have received the node time.Sleep(100 * time.Millisecond) g1.mu.Lock() err = g1.addNode(dummyNode) g1.mu.Unlock() if err == nil { t.Fatal("gateway did not receive nodes during Connect:", g1.nodes) } // remove all nodes from both peers g1.mu.Lock() g1.nodes = map[modules.NetAddress]*node{} g1.mu.Unlock() g2.mu.Lock() g2.nodes = map[modules.NetAddress]*node{} g2.mu.Unlock() // ShareNodes should now return no nodes var nodes []modules.NetAddress err = g1.RPC(g2.Address(), "ShareNodes", func(conn modules.PeerConn) error { return encoding.ReadObject(conn, &nodes, maxSharedNodes*modules.MaxEncodedNetAddressLength) }) if err != nil { t.Fatal(err) } if len(nodes) != 0 { t.Fatal("gateway gave non-existent addresses:", nodes) } // sharing should be capped at maxSharedNodes for i := 1; i < int(maxSharedNodes)+11; i++ { g2.mu.Lock() err := g2.addNode(modules.NetAddress("111.111.111.111:" + strconv.Itoa(i))) g2.mu.Unlock() if err != nil { t.Fatal(err) } } err = g1.RPC(g2.Address(), "ShareNodes", func(conn modules.PeerConn) error { return encoding.ReadObject(conn, &nodes, maxSharedNodes*modules.MaxEncodedNetAddressLength) }) if err != nil { t.Fatal(err) } if uint64(len(nodes)) != maxSharedNodes { t.Fatalf("gateway gave wrong number of nodes: expected %v, got %v", maxSharedNodes, len(nodes)) } } // TestNodesAreSharedOnConnect tests that nodes that a gateway has never seen // before are added to the node list when connecting to another gateway that // has seen said nodes. func TestNodesAreSharedOnConnect(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g1 := newNamedTestingGateway(t, "1") defer g1.Close() g2 := newNamedTestingGateway(t, "2") defer g2.Close() g3 := newNamedTestingGateway(t, "3") defer g3.Close() // connect g2 to g1 err := g2.Connect(g1.Address()) if err != nil { t.Fatal("couldn't connect:", err) } // connect g3 to g1 err = g3.Connect(g1.Address()) if err != nil { t.Fatal("couldn't connect:", err) } // g3 should have received g2's address from g1 time.Sleep(200 * time.Millisecond) g3.mu.Lock() defer g3.mu.Unlock() if _, ok := g3.nodes[g2.Address()]; !ok { t.Fatal("node was not relayed:", g3.nodes) } } // TestPruneNodeThreshold checks that gateways will not purge nodes if they are // below the purge threshold, even if those nodes are offline. func TestPruneNodeThreshold(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // The next part of the test expects the pruneNodeListLen to be at least // maxSharedNodes*2 + 2 in size. if uint64(pruneNodeListLen) < (maxSharedNodes*2)+2 { t.Fatal("Constants do not match test, please either adjust the constants or refactor this test", maxSharedNodes, pruneNodeListLen) } // Create and connect pruneNodeListLen gateways. var gs []*Gateway for i := 0; i < pruneNodeListLen; i++ { gs = append(gs, newNamedTestingGateway(t, strconv.Itoa(i))) // Connect this gateway to the previous gateway.
if i != 0 { err := gs[i].Connect(gs[i-1].myAddr) if err != nil { t.Fatal(err) } } } // Spin until all gateways have a nearly full node list. success := false for i := 0; i < 50; i++ { success = true for _, g := range gs { g.mu.RLock() gNodeLen := len(g.nodes) g.mu.RUnlock() if gNodeLen < pruneNodeListLen-2 { success = false break } } if !success { time.Sleep(time.Second * 1) } } if !success { t.Fatal("peers are not sharing nodes with each other") } // Gateway node lists have been filled out. Take a bunch of gateways // offline and verify that they do not start pruning each other. var wg sync.WaitGroup for i := 2; i < len(gs); i++ { wg.Add(1) go func(i int) { err := gs[i].Close() if err != nil { panic(err) } wg.Done() }(i) } wg.Wait() // Wait for 5 iterations of the node purge loop. Then verify that the // remaining gateways have not been purging nodes. time.Sleep(nodePurgeDelay * 5) // Check that the remaining gateways have not purged any nodes. gs[0].mu.RLock() gs0Nodes := len(gs[0].nodes) gs[0].mu.RUnlock() gs[1].mu.RLock() gs1Nodes := len(gs[1].nodes) gs[1].mu.RUnlock() if gs0Nodes < pruneNodeListLen-2 { t.Error("gateway seems to be pruning nodes below purge threshold") } if gs1Nodes < pruneNodeListLen-2 { t.Error("gateway seems to be pruning nodes below purge threshold") } // Close the remaining gateways. err := gs[0].Close() if err != nil { t.Error(err) } err = gs[1].Close() if err != nil { t.Error(err) } } // TestHealthyNodeListPruning checks that gateways will purge nodes if they are at // a healthy node threshold and the nodes are offline. func TestHealthyNodeListPruning(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // Create and connect healthyNodeListLen*2 gateways. var gs []*Gateway for i := 0; i < healthyNodeListLen*2; i++ { gs = append(gs, newNamedTestingGateway(t, strconv.Itoa(i))) // Connect this gateway to the previous gateway. if i != 0 { err := gs[i].Connect(gs[i-1].myAddr) if err != nil { t.Fatal(err) } } // To help speed the test up, also connect this gateway to the peer two // back. if i > 1 { err := gs[i].Connect(gs[i-2].myAddr) if err != nil { t.Fatal(err) } } // To help speed the test up, also connect this gateway to a random // previous peer. if i > 2 { err := gs[i].Connect(gs[fastrand.Intn(i-2)].myAddr) if err != nil { t.Fatal(err) } } } // Spin until all gateways have a nearly full node list. success := false for i := 0; i < 80; i++ { success = true for _, g := range gs { g.mu.RLock() gNodeLen := len(g.nodes) g.mu.RUnlock() if gNodeLen < healthyNodeListLen { success = false break } } if !success { time.Sleep(time.Second * 1) } } if !success { t.Fatal("peers are not sharing nodes with each other") } // Gateway node lists have been filled out. Take a bunch of gateways // offline and verify that the remaining gateways begin pruning their // nodelist. var wg sync.WaitGroup for i := 2; i < len(gs); i++ { wg.Add(1) go func(i int) { err := gs[i].Close() if err != nil { panic(err) } wg.Done() }(i) } wg.Wait() // Wait for enough iterations of the node purge loop that over-pruning is // possible. (Over-pruning does not need to be guaranteed; causing this // test to fail once in a while is sufficient.) time.Sleep(nodePurgeDelay * time.Duration(healthyNodeListLen-pruneNodeListLen) * 12) // Check that the remaining gateways have pruned nodes.
gs[0].mu.RLock() gs0Nodes := len(gs[0].nodes) gs[0].mu.RUnlock() gs[1].mu.RLock() gs1Nodes := len(gs[1].nodes) gs[1].mu.RUnlock() if gs0Nodes >= healthyNodeListLen-1 { t.Error("gateway is not pruning nodes", healthyNodeListLen, gs0Nodes) } if gs1Nodes >= healthyNodeListLen-1 { t.Error("gateway is not pruning nodes", healthyNodeListLen, gs1Nodes) } if gs0Nodes < pruneNodeListLen { t.Error("gateway is pruning too many nodes", gs0Nodes, pruneNodeListLen) } if gs1Nodes < pruneNodeListLen { t.Error("gateway is pruning too many nodes", gs1Nodes, pruneNodeListLen) } // Close the remaining gateways. err := gs[0].Close() if err != nil { t.Error(err) } err = gs[1].Close() if err != nil { t.Error(err) } } Sia-1.3.0/modules/gateway/peers.go000066400000000000000000000445731313565667000170360ustar00rootroot00000000000000package gateway import ( "errors" "fmt" "net" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) var ( errPeerExists = errors.New("already connected to this peer") errPeerRejectedConn = errors.New("peer rejected connection") ) // insufficientVersionError indicates a peer's version is insufficient. type insufficientVersionError string // Error implements the error interface for insufficientVersionError. func (s insufficientVersionError) Error() string { return "unacceptable version: " + string(s) } // invalidVersionError indicates a peer's version is not a valid version number. type invalidVersionError string // Error implements the error interface for invalidVersionError. func (s invalidVersionError) Error() string { return "invalid version: " + string(s) } type peer struct { modules.Peer sess streamSession } // sessionHeader is sent after the initial version exchange. It prevents peers // on different blockchains from connecting to each other, and prevents the // gateway from connecting to itself. type sessionHeader struct { GenesisID types.BlockID UniqueID gatewayID NetAddress modules.NetAddress } func (p *peer) open() (modules.PeerConn, error) { conn, err := p.sess.Open() if err != nil { return nil, err } return &peerConn{conn, p.NetAddress}, nil } func (p *peer) accept() (modules.PeerConn, error) { conn, err := p.sess.Accept() if err != nil { return nil, err } return &peerConn{conn, p.NetAddress}, nil } // addPeer adds a peer to the Gateway's peer list and spawns a listener thread // to handle its requests. func (g *Gateway) addPeer(p *peer) { g.peers[p.NetAddress] = p go g.threadedListenPeer(p) } // randomOutboundPeer returns a random outbound peer. func (g *Gateway) randomOutboundPeer() (modules.NetAddress, error) { // Get the list of outbound peers. var addrs []modules.NetAddress for addr, peer := range g.peers { if peer.Inbound { continue } addrs = append(addrs, addr) } if len(addrs) == 0 { return "", errNoPeers } // Of the remaining options, select one at random. return addrs[fastrand.Intn(len(addrs))], nil } // permanentListen handles incoming connection requests. If the connection is // accepted, the peer will be added to the Gateway's peer list. func (g *Gateway) permanentListen(closeChan chan struct{}) { // Signal that the permanentListen thread has completed upon returning. defer close(closeChan) for { conn, err := g.listener.Accept() if err != nil { g.log.Debugln("[PL] Closing permanentListen:", err) return } go g.threadedAcceptConn(conn) // Sleep after each accept. 
This limits the rate at which the Gateway // will accept new connections. The intent here is to prevent new // incoming connections from kicking out old ones before they have a // chance to request additional nodes. select { case <-time.After(acceptInterval): case <-g.threads.StopChan(): return } } } // threadedAcceptConn adds a connecting node as a peer. func (g *Gateway) threadedAcceptConn(conn net.Conn) { if g.threads.Add() != nil { conn.Close() return } defer g.threads.Done() conn.SetDeadline(time.Now().Add(connStdDeadline)) addr := modules.NetAddress(conn.RemoteAddr().String()) g.log.Debugf("INFO: %v wants to connect", addr) remoteVersion, err := acceptVersionHandshake(conn, build.Version) if err != nil { g.log.Debugf("INFO: %v wanted to connect but version handshake failed: %v", addr, err) conn.Close() return } if build.VersionCmp(remoteVersion, sessionUpgradeVersion) >= 0 { err = g.managedAcceptConnv130Peer(conn, remoteVersion) } else if build.VersionCmp(remoteVersion, handshakeUpgradeVersion) >= 0 { err = g.managedAcceptConnv100Peer(conn, remoteVersion) } else { err = g.managedAcceptConnOldPeer(conn, remoteVersion) } if err != nil { g.log.Debugf("INFO: %v wanted to connect, but failed: %v", addr, err) conn.Close() return } // Handshake successful, remove the deadline. conn.SetDeadline(time.Time{}) g.log.Debugf("INFO: accepted connection from new peer %v (v%v)", addr, remoteVersion) } // acceptableSessionHeader returns an error if remoteHeader indicates a peer // that should not be connected to. func acceptableSessionHeader(ourHeader, remoteHeader sessionHeader, remoteAddr string) error { if remoteHeader.GenesisID != ourHeader.GenesisID { return errPeerGenesisID } else if remoteHeader.UniqueID == ourHeader.UniqueID { return errOurAddress } else if err := remoteHeader.NetAddress.IsStdValid(); err != nil { return fmt.Errorf("invalid remote address: %v", err) } // Check that claimed NetAddress matches remoteAddr connHost, _, _ := net.SplitHostPort(remoteAddr) claimedHost, _, _ := net.SplitHostPort(string(remoteHeader.NetAddress)) if connHost != claimedHost { return fmt.Errorf("claimed hostname (%v) does not match conn.RemoteAddr (%v)", claimedHost, connHost) } return nil } // managedAcceptConnv130Peer accepts connection requests from peers >= v1.3.0. // The requesting peer is added as a node and a peer. The peer is only added if // a nil error is returned. func (g *Gateway) managedAcceptConnv130Peer(conn net.Conn, remoteVersion string) error { // Perform header handshake. host, _, _ := net.SplitHostPort(conn.LocalAddr().String()) ourHeader := sessionHeader{ GenesisID: types.GenesisID, UniqueID: g.id, NetAddress: modules.NetAddress(net.JoinHostPort(host, g.port)), } remoteHeader, err := exchangeRemoteHeader(conn, ourHeader) if err != nil { return err } if err := exchangeOurHeader(conn, ourHeader); err != nil { return err } // Accept the peer. peer := &peer{ Peer: modules.Peer{ Inbound: true, // NOTE: local may be true even if the supplied NetAddress is not // actually reachable. Local: remoteHeader.NetAddress.IsLocal(), NetAddress: remoteHeader.NetAddress, Version: remoteVersion, }, sess: newServerStream(conn, remoteVersion), } g.mu.Lock() g.acceptPeer(peer) g.mu.Unlock() // Attempt to ping the supplied address. If successful, we will add // remoteHeader.NetAddress to our node list after accepting the peer. We // do this in a goroutine so that we can begin communicating with the peer // immediately. 
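// (A failed ping is not fatal here: the peer connection stands either way,
// and only the node-list entry for the advertised address is skipped.)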
go func() { err := g.pingNode(remoteHeader.NetAddress) if err == nil { g.mu.Lock() g.addNode(remoteHeader.NetAddress) g.mu.Unlock() } }() return nil } // managedAcceptConnv100Peer accepts connection requests from peers >= v1.0.0. // The requesting peer is added as a node and a peer. The peer is only added if // a nil error is returned. func (g *Gateway) managedAcceptConnv100Peer(conn net.Conn, remoteVersion string) error { // Learn the peer's dialback address. var dialbackPort string err := encoding.ReadObject(conn, &dialbackPort, 13) // Max port # is 65535 (5 digits long) + 8 byte string length prefix if err != nil { return fmt.Errorf("could not read remote peer's port: %v", err) } host, _, _ := net.SplitHostPort(conn.RemoteAddr().String()) remoteAddr := modules.NetAddress(net.JoinHostPort(host, dialbackPort)) if err := remoteAddr.IsStdValid(); err != nil { return fmt.Errorf("peer's address (%v) is invalid: %v", remoteAddr, err) } // Sanity check to ensure that appending the port string to the host didn't // change the host. Only necessary because the peer sends the port as a string // instead of an integer. if remoteAddr.Host() != host { return fmt.Errorf("peer sent a port which modified the host") } g.mu.Lock() defer g.mu.Unlock() // Don't accept a connection from a peer we're already connected to. if _, exists := g.peers[remoteAddr]; exists { return fmt.Errorf("already connected to a peer on that address: %v", remoteAddr) } // Accept the peer. g.acceptPeer(&peer{ Peer: modules.Peer{ Inbound: true, // NOTE: local may be true even if the supplied remoteAddr is not // actually reachable. Local: remoteAddr.IsLocal(), NetAddress: remoteAddr, Version: remoteVersion, }, sess: newServerStream(conn, remoteVersion), }) // Attempt to ping the supplied address. If successful, and a connection is wanted, // we will add remoteAddr to our node list after accepting the peer. We do this in a // goroutine so that we can start communicating with the peer immediately. go func() { err := g.pingNode(remoteAddr) if err == nil { g.mu.Lock() g.addNode(remoteAddr) g.mu.Unlock() } }() return nil } // managedAcceptConnOldPeer accepts a connection request from peers < v1.0.0. // The requesting peer is added as a peer, but is not added to the node list // (older peers do not share their dialback address). The peer is only added if // a nil error is returned. func (g *Gateway) managedAcceptConnOldPeer(conn net.Conn, remoteVersion string) error { addr := modules.NetAddress(conn.RemoteAddr().String()) g.mu.Lock() defer g.mu.Unlock() // Old peers are unable to give us a dialback port, so we can't verify // whether or not they are local peers. g.acceptPeer(&peer{ Peer: modules.Peer{ Inbound: true, Local: false, NetAddress: addr, Version: remoteVersion, }, sess: newServerStream(conn, remoteVersion), }) g.addNode(addr) return nil } // acceptPeer makes room for the peer if necessary by kicking out existing // peers, then adds the peer to the peer list. func (g *Gateway) acceptPeer(p *peer) { // If we are not fully connected, add the peer without kicking any out. if len(g.peers) < fullyConnectedThreshold { g.addPeer(p) return } // Select a peer to kick. Outbound peers and local peers are not // available to be kicked. var addrs []modules.NetAddress for addr, peer := range g.peers { // Do not kick outbound peers or local peers. if !peer.Inbound || peer.Local { continue } // Prefer kicking a peer with the same hostname. 
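// (If an inbound peer from the same host as the requester already exists, it
// becomes the sole kick candidate, which keeps a single host from
// accumulating multiple inbound slots.)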
if addr.Host() == p.NetAddress.Host() { addrs = []modules.NetAddress{addr} break } addrs = append(addrs, addr) } if len(addrs) == 0 { // There is nobody suitable to kick, therefore do not kick anyone. g.addPeer(p) return } // Of the remaining options, select one at random. kick := addrs[fastrand.Intn(len(addrs))] g.peers[kick].sess.Close() delete(g.peers, kick) g.log.Printf("INFO: disconnected from %v to make room for %v\n", kick, p.NetAddress) g.addPeer(p) } // acceptableVersion returns an error if the version is unacceptable. func acceptableVersion(version string) error { if !build.IsVersion(version) { return invalidVersionError(version) } if build.VersionCmp(version, minAcceptableVersion) < 0 { return insufficientVersionError(version) } return nil } // connectVersionHandshake performs the version handshake and should be called // on the side making the connection request. The remote version is only // returned if err == nil. func connectVersionHandshake(conn net.Conn, version string) (remoteVersion string, err error) { // Send our version. if err := encoding.WriteObject(conn, version); err != nil { return "", fmt.Errorf("failed to write version: %v", err) } // Read remote version. if err := encoding.ReadObject(conn, &remoteVersion, build.MaxEncodedVersionLength); err != nil { return "", fmt.Errorf("failed to read remote version: %v", err) } // Check that their version is acceptable. if remoteVersion == "reject" { return "", errPeerRejectedConn } if err := acceptableVersion(remoteVersion); err != nil { return "", err } return remoteVersion, nil } // acceptVersionHandshake performs the version handshake and should be // called on the side accepting a connection request. The remote version is // only returned if err == nil. func acceptVersionHandshake(conn net.Conn, version string) (remoteVersion string, err error) { // Read remote version. if err := encoding.ReadObject(conn, &remoteVersion, build.MaxEncodedVersionLength); err != nil { return "", fmt.Errorf("failed to read remote version: %v", err) } // Check that their version is acceptable. if err := acceptableVersion(remoteVersion); err != nil { if err := encoding.WriteObject(conn, "reject"); err != nil { return "", fmt.Errorf("failed to write reject: %v", err) } return "", err } // Send our version. if err := encoding.WriteObject(conn, version); err != nil { return "", fmt.Errorf("failed to write version: %v", err) } return remoteVersion, nil } // exchangeOurHeader writes ourHeader and reads the remote's error response. func exchangeOurHeader(conn net.Conn, ourHeader sessionHeader) error { // Send our header. if err := encoding.WriteObject(conn, ourHeader); err != nil { return fmt.Errorf("failed to write header: %v", err) } // Read remote response. var response string if err := encoding.ReadObject(conn, &response, 100); err != nil { return fmt.Errorf("failed to read header acceptance: %v", err) } else if response == modules.StopResponse { return errors.New("peer did not want a connection") } else if response != modules.AcceptResponse { return fmt.Errorf("peer rejected our header: %v", response) } return nil } // exchangeRemoteHeader reads the remote header and writes an error response. func exchangeRemoteHeader(conn net.Conn, ourHeader sessionHeader) (sessionHeader, error) { // Read remote header. 
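// (The maxEncodedSessionHeaderSize bound prevents an untrusted peer from
// forcing an arbitrarily large allocation while the header is decoded.)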
var remoteHeader sessionHeader if err := encoding.ReadObject(conn, &remoteHeader, maxEncodedSessionHeaderSize); err != nil { return sessionHeader{}, fmt.Errorf("failed to read remote header: %v", err) } // Validate remote header and write acceptance or rejection. err := acceptableSessionHeader(ourHeader, remoteHeader, conn.RemoteAddr().String()) if err != nil { encoding.WriteObject(conn, err.Error()) // error can be ignored return sessionHeader{}, fmt.Errorf("peer's header was not acceptable: %v", err) } else if err := encoding.WriteObject(conn, modules.AcceptResponse); err != nil { return sessionHeader{}, fmt.Errorf("failed to write header acceptance: %v", err) } return remoteHeader, nil } // managedConnectv130Peer connects to peers >= v1.3.0. The peer is added as a // node and a peer. The peer is only added if a nil error is returned. func (g *Gateway) managedConnectv130Peer(conn net.Conn, remoteVersion string, remoteAddr modules.NetAddress) error { // Perform header handshake. host, _, _ := net.SplitHostPort(conn.LocalAddr().String()) ourHeader := sessionHeader{ GenesisID: types.GenesisID, UniqueID: g.id, NetAddress: modules.NetAddress(net.JoinHostPort(host, g.port)), } if err := exchangeOurHeader(conn, ourHeader); err != nil { return err } else if _, err := exchangeRemoteHeader(conn, ourHeader); err != nil { return err } return nil } // managedConnectv100Peer connects to peers >= v1.0.0 and < v1.3.0. The peer is added as a // node and a peer. The peer is only added if a nil error is returned. func (g *Gateway) managedConnectv100Peer(conn net.Conn, remoteVersion string, remoteAddr modules.NetAddress) error { g.mu.RLock() port := g.port g.mu.RUnlock() // Send our port to the peer so they can dial us back. err := encoding.WriteObject(conn, port) if err != nil { return errors.New("could not write port #: " + err.Error()) } return nil } // managedConnect establishes a persistent connection to a peer, and adds it to // the Gateway's peer list. func (g *Gateway) managedConnect(addr modules.NetAddress) error { // Perform verification on the input address. g.mu.RLock() gaddr := g.myAddr g.mu.RUnlock() if addr == gaddr { return errors.New("can't connect to our own address") } if err := addr.IsStdValid(); err != nil { return errors.New("can't connect to invalid address") } if net.ParseIP(addr.Host()) == nil { return errors.New("address must be an IP address") } g.mu.RLock() _, exists := g.peers[addr] g.mu.RUnlock() if exists { return errPeerExists } // Dial the peer and perform peer initialization. conn, err := g.dial(addr) if err != nil { return err } // Perform peer initialization. remoteVersion, err := connectVersionHandshake(conn, build.Version) if err != nil { conn.Close() return err } if build.VersionCmp(remoteVersion, sessionUpgradeVersion) >= 0 { err = g.managedConnectv130Peer(conn, remoteVersion, addr) } else if build.VersionCmp(remoteVersion, handshakeUpgradeVersion) >= 0 { err = g.managedConnectv100Peer(conn, remoteVersion, addr) } else { // for older nodes, protocol stops here } if err != nil { conn.Close() return err } // Connection successful, clear the timeout as to maintain a persistent // connection to this peer. conn.SetDeadline(time.Time{}) // Add the peer. 
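// (The WasOutboundPeer flag set below is persisted with the node list;
// presumably this lets a restarted gateway recognize addresses that
// previously supported an outbound connection.)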
g.mu.Lock() defer g.mu.Unlock() g.addPeer(&peer{ Peer: modules.Peer{ Inbound: false, Local: addr.IsLocal(), NetAddress: addr, Version: remoteVersion, }, sess: newClientStream(conn, remoteVersion), }) g.addNode(addr) g.nodes[addr].WasOutboundPeer = true if err := g.saveSync(); err != nil { g.log.Println("ERROR: Unable to save new outbound peer to gateway:", err) } g.log.Debugln("INFO: connected to new peer", addr) // call initRPCs for name, fn := range g.initRPCs { go func(name string, fn modules.RPCFunc) { if g.threads.Add() != nil { return } defer g.threads.Done() err := g.managedRPC(addr, name, fn) if err != nil { g.log.Debugf("INFO: RPC %q on peer %q failed: %v", name, addr, err) } }(name, fn) } return nil } // Connect establishes a persistent connection to a peer, and adds it to the // Gateway's peer list. func (g *Gateway) Connect(addr modules.NetAddress) error { if err := g.threads.Add(); err != nil { return err } defer g.threads.Done() return g.managedConnect(addr) } // Disconnect terminates a connection to a peer and removes it from the // Gateway's peer list. The peer's address remains in the node list. func (g *Gateway) Disconnect(addr modules.NetAddress) error { if err := g.threads.Add(); err != nil { return err } defer g.threads.Done() g.mu.RLock() p, exists := g.peers[addr] g.mu.RUnlock() if !exists { return errors.New("not connected to that node") } p.sess.Close() g.mu.Lock() // Peer is removed from the peer list as well as the node list, to prevent // the node from being re-connected while looking for a replacement peer. delete(g.peers, addr) delete(g.nodes, addr) g.mu.Unlock() g.log.Println("INFO: disconnected from peer", addr) return nil } // Peers returns the addresses currently connected to the Gateway. func (g *Gateway) Peers() []modules.Peer { g.mu.RLock() defer g.mu.RUnlock() var peers []modules.Peer for _, p := range g.peers { peers = append(peers, p.Peer) } return peers } Sia-1.3.0/modules/gateway/peers_test.go000066400000000000000000000746031313565667000200700ustar00rootroot00000000000000package gateway import ( "errors" "fmt" "net" "strconv" "strings" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) // dummyConn implements the net.Conn interface, but does not carry any actual // data. type dummyConn struct { net.Conn } func (dc *dummyConn) Read(p []byte) (int, error) { return len(p), nil } func (dc *dummyConn) Write(p []byte) (int, error) { return len(p), nil } func (dc *dummyConn) Close() error { return nil } func (dc *dummyConn) SetWriteDeadline(time.Time) error { return nil } // TestAddPeer tries adding a peer to the gateway. func TestAddPeer(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g := newTestingGateway(t) defer g.Close() g.mu.Lock() defer g.mu.Unlock() g.addPeer(&peer{ Peer: modules.Peer{ NetAddress: "foo.com:123", }, sess: newClientStream(new(dummyConn), build.Version), }) if len(g.peers) != 1 { t.Fatal("gateway did not add peer") } } // TestAcceptPeer tests that acceptPeer doesn't kick outbound or local peers. func TestAcceptPeer(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g := newTestingGateway(t) defer g.Close() g.mu.Lock() defer g.mu.Unlock() // Add only unkickable peers.
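// (acceptPeer never kicks outbound peers or local inbound peers, so after
// the gateway is filled with them, a new inbound peer must be accepted
// without evicting anyone.)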
var unkickablePeers []*peer for i := 0; i < fullyConnectedThreshold+1; i++ { addr := modules.NetAddress(fmt.Sprintf("1.2.3.%d", i)) p := &peer{ Peer: modules.Peer{ NetAddress: addr, Inbound: false, Local: false, }, sess: newClientStream(new(dummyConn), build.Version), } unkickablePeers = append(unkickablePeers, p) } for i := 0; i < fullyConnectedThreshold+1; i++ { addr := modules.NetAddress(fmt.Sprintf("127.0.0.1:%d", i)) p := &peer{ Peer: modules.Peer{ NetAddress: addr, Inbound: true, Local: true, }, sess: newClientStream(new(dummyConn), build.Version), } unkickablePeers = append(unkickablePeers, p) } for _, p := range unkickablePeers { g.addPeer(p) } // Test that accepting another peer doesn't kick any of the peers. g.acceptPeer(&peer{ Peer: modules.Peer{ NetAddress: "9.9.9.9", Inbound: true, }, sess: newClientStream(new(dummyConn), build.Version), }) for _, p := range unkickablePeers { if _, exists := g.peers[p.NetAddress]; !exists { t.Error("accept peer kicked an outbound or local peer") } } // Add a kickable peer. g.addPeer(&peer{ Peer: modules.Peer{ NetAddress: "9.9.9.9", Inbound: true, }, sess: newClientStream(new(dummyConn), build.Version), }) // Test that accepting a local peer will kick a kickable peer. g.acceptPeer(&peer{ Peer: modules.Peer{ NetAddress: "127.0.0.1:99", Inbound: true, Local: true, }, sess: newClientStream(new(dummyConn), build.Version), }) if _, exists := g.peers["9.9.9.9"]; exists { t.Error("acceptPeer didn't kick a peer to make room for a local peer") } } // TestRandomOutboundPeer checks that randomOutboundPeer returns the correct // peer. func TestRandomOutboundPeer(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g := newTestingGateway(t) defer g.Close() g.mu.Lock() defer g.mu.Unlock() _, err := g.randomOutboundPeer() if err != errNoPeers { t.Fatal("expected errNoPeers, got", err) } g.addPeer(&peer{ Peer: modules.Peer{ NetAddress: "foo.com:123", Inbound: false, }, sess: newClientStream(new(dummyConn), build.Version), }) if len(g.peers) != 1 { t.Fatal("gateway did not add peer") } addr, err := g.randomOutboundPeer() if err != nil || addr != "foo.com:123" { t.Fatal("gateway did not select random peer") } } // TestListen is a general test probing the connection listener.
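// A compliant modern dial proceeds in stages: the version handshake
// (connectVersionHandshake), then the sessionHeader exchange
// (exchangeOurHeader / exchangeRemoteHeader), and finally the stream
// multiplexer; the cases below probe failures at each stage.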
func TestListen(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g := newTestingGateway(t) defer g.Close() // compliant connect with old version conn, err := net.Dial("tcp", string(g.Address())) if err != nil { t.Fatal("dial failed:", err) } addr := modules.NetAddress(conn.LocalAddr().String()) ack, err := connectVersionHandshake(conn, "0.1") if err != errPeerRejectedConn { t.Fatal(err) } if ack != "" { t.Fatal("gateway should have rejected old version") } for i := 0; i < 10; i++ { g.mu.RLock() _, ok := g.peers[addr] g.mu.RUnlock() if ok { t.Fatal("gateway should not have added an old peer") } time.Sleep(20 * time.Millisecond) } // a simple 'conn.Close' would not obey the stream disconnect protocol newClientStream(conn, build.Version).Close() // compliant connect with invalid net address conn, err = net.Dial("tcp", string(g.Address())) if err != nil { t.Fatal("dial failed:", err) } addr = modules.NetAddress(conn.LocalAddr().String()) ack, err = connectVersionHandshake(conn, build.Version) if err != nil { t.Fatal(err) } if ack != build.Version { t.Fatal("gateway should have given ack") } header := sessionHeader{ GenesisID: types.GenesisID, UniqueID: gatewayID{}, NetAddress: "fake", } err = exchangeOurHeader(conn, header) if err == nil { t.Fatal("expected error, got nil") } conn.Close() // compliant connect conn, err = net.Dial("tcp", string(g.Address())) if err != nil { t.Fatal("dial failed:", err) } addr = modules.NetAddress(conn.LocalAddr().String()) ack, err = connectVersionHandshake(conn, build.Version) if err != nil { t.Fatal(err) } if ack != build.Version { t.Fatal("gateway should have given ack") } header.NetAddress = modules.NetAddress(conn.LocalAddr().String()) err = exchangeOurHeader(conn, header) if err != nil { t.Fatal(err) } _, err = exchangeRemoteHeader(conn, header) if err != nil { t.Fatal(err) } // g should add the peer err = build.Retry(50, 100*time.Millisecond, func() error { g.mu.RLock() _, ok := g.peers[addr] g.mu.RUnlock() if !ok { return errors.New("g should have added the peer") } return nil }) if err != nil { t.Fatal(err) } // Disconnect. Now that connection has been established, need to shutdown // via the stream multiplexer. newClientStream(conn, build.Version).Close() // g should remove the peer err = build.Retry(50, 100*time.Millisecond, func() error { g.mu.RLock() _, ok := g.peers[addr] g.mu.RUnlock() if ok { return errors.New("g should have removed the peer") } return nil }) if err != nil { t.Fatal(err) } // uncompliant connect conn, err = net.Dial("tcp", string(g.Address())) if err != nil { t.Fatal("dial failed:", err) } if _, err := conn.Write([]byte("missing length prefix")); err != nil { t.Fatal("couldn't write malformed header") } // g should have closed the connection if n, err := conn.Write([]byte("closed")); err != nil && n > 0 { t.Error("write succeeded after closed connection") } } // TestConnect verifies that connecting peers will add peer relationships to // the gateway, and that certain edge cases are properly handled. 
func TestConnect(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // create bootstrap peer bootstrap := newNamedTestingGateway(t, "1") defer bootstrap.Close() // give it a node bootstrap.mu.Lock() bootstrap.addNode(dummyNode) bootstrap.mu.Unlock() // create peer who will connect to bootstrap g := newNamedTestingGateway(t, "2") defer g.Close() // first simulate a "bad" connect, where bootstrap won't share its nodes bootstrap.mu.Lock() bootstrap.handlers[handlerName("ShareNodes")] = func(modules.PeerConn) error { return nil } bootstrap.mu.Unlock() // connect err := g.Connect(bootstrap.Address()) if err != nil { t.Fatal(err) } // g should not have the node if g.removeNode(dummyNode) == nil { t.Fatal("bootstrapper should not have received dummyNode:", g.nodes) } // split 'em up g.Disconnect(bootstrap.Address()) bootstrap.Disconnect(g.Address()) // now restore the correct ShareNodes RPC and try again bootstrap.mu.Lock() bootstrap.handlers[handlerName("ShareNodes")] = bootstrap.shareNodes bootstrap.mu.Unlock() err = g.Connect(bootstrap.Address()) if err != nil { t.Fatal(err) } // g should have the node time.Sleep(200 * time.Millisecond) g.mu.RLock() if _, ok := g.nodes[dummyNode]; !ok { g.mu.RUnlock() // Needed to prevent a deadlock if this error condition is reached. t.Fatal("bootstrapper should have received dummyNode:", g.nodes) } g.mu.RUnlock() } // TestUnitAcceptableVersion tests that the acceptableVersion func returns an // error for unacceptable versions. func TestUnitAcceptableVersion(t *testing.T) { invalidVersions := []string{ // ascii gibberish "foobar", "foobar.0", "foobar.9", "0.foobar", "9.foobar", "foobar.0.0", "foobar.9.9", "0.foobar.0", "9.foobar.9", "0.0.foobar", "9.9.foobar", // utf-8 gibberish "世界", "世界.0", "世界.9", "0.世界", "9.世界", "世界.0.0", "世界.9.9", "0.世界.0", "9.世界.9", "0.0.世界", "9.9.世界", // missing numbers ".", "..", "...", "0.", ".1", "2..", ".3.", "..4", "5.6.", ".7.8", ".9.0.", } for _, v := range invalidVersions { err := acceptableVersion(v) if _, ok := err.(invalidVersionError); err == nil || !ok { t.Errorf("acceptableVersion returned %q for version %q, but expected invalidVersionError", err, v) } } insufficientVersions := []string{ // random small versions "0", "00", "0000000000", "0.0", "0000000000.0", "0.0000000000", "0.0.0.0.0.0.0.0", "0.0.9", "0.0.999", "0.0.99999999999", "0.1.2", "0.1.2.3.4.5.6.7.8.9", // pre-hardfork versions "0.3.3", "0.3.9.9.9.9.9.9.9.9.9.9", "0.3.9999999999", } for _, v := range insufficientVersions { err := acceptableVersion(v) if _, ok := err.(insufficientVersionError); err == nil || !ok { t.Errorf("acceptableVersion returned %q for version %q, but expected insufficientVersionError", err, v) } } validVersions := []string{ minAcceptableVersion, "0.4.0", "0.6.0", "0.6.1", "0.9", "0.999", "0.9999999999", "1", "1.0", "1.0.0", "9", "9.0", "9.0.0", "9.9.9", } for _, v := range validVersions { err := acceptableVersion(v) if err != nil { t.Errorf("acceptableVersion returned %q for version %q, but expected nil", err, v) } } } // TestConnectRejectsInvalidAddrs tests that Connect only connects to valid IP // addresses. 
func TestConnectRejectsInvalidAddrs(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g := newNamedTestingGateway(t, "1") defer g.Close() g2 := newNamedTestingGateway(t, "2") defer g2.Close() _, g2Port, err := net.SplitHostPort(string(g2.Address())) if err != nil { t.Fatal(err) } tests := []struct { addr modules.NetAddress wantErr bool msg string }{ { addr: "127.0.0.1:123", wantErr: true, msg: "Connect should reject unreachable addresses", }, { addr: "111.111.111.111:0", wantErr: true, msg: "Connect should reject invalid NetAddresses", }, { addr: modules.NetAddress(net.JoinHostPort("localhost", g2Port)), wantErr: true, msg: "Connect should reject non-IP addresses", }, { addr: g2.Address(), msg: "Connect failed to connect to another gateway", }, { addr: g2.Address(), wantErr: true, msg: "Connect should reject an address it's already connected to", }, } for _, tt := range tests { err := g.Connect(tt.addr) if tt.wantErr != (err != nil) { t.Errorf("%v, wantErr: %v, err: %v", tt.msg, tt.wantErr, err) } } } // TestConnectRejectsVersions tests that Gateway.Connect only accepts peers // with sufficient and valid versions. func TestConnectRejectsVersions(t *testing.T) { if testing.Short() { t.SkipNow() } g := newTestingGateway(t) defer g.Close() // Setup a listener that mocks Gateway.acceptConn, but sends the // version sent over mockVersionChan instead of build.Version. listener, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatal(err) } defer listener.Close() tests := []struct { version string errWant string localErrWant string invalidVersion bool insufficientVersion bool msg string // version required for this test versionRequired string // 1.2.0 sessionHeader extension to handshake protocol genesisID types.BlockID uniqueID gatewayID }{ // Test that Connect fails when the remote peer's version is "reject". { version: "reject", errWant: errPeerRejectedConn.Error(), msg: "Connect should fail when the remote peer rejects the connection", }, // Test that Connect fails when the remote peer's version is ascii gibberish. { version: "foobar", invalidVersion: true, msg: "Connect should fail when the remote peer's version is ascii gibberish", }, // Test that Connect fails when the remote peer's version is utf8 gibberish. { version: "世界", invalidVersion: true, msg: "Connect should fail when the remote peer's version is utf8 gibberish", }, // Test that Connect fails when the remote peer's version is < 0.4.0 (0). { version: "0", insufficientVersion: true, msg: "Connect should fail when the remote peer's version is 0", }, { version: "0.0.0", insufficientVersion: true, msg: "Connect should fail when the remote peer's version is 0.0.0", }, { version: "0000.0000.0000", insufficientVersion: true, msg: "Connect should fail when the remote peer's version is 0000.0000.0000", }, { version: "0.3.9", insufficientVersion: true, msg: "Connect should fail when the remote peer's version is 0.3.9", }, { version: "0.3.9999", insufficientVersion: true, msg: "Connect should fail when the remote peer's version is 0.3.9999", }, { version: "0.3.9.9.9", insufficientVersion: true, msg: "Connect should fail when the remote peer's version is 0.3.9.9.9", }, // Test that Connect succeeds when the remote peer's version is 0.4.0. { version: "0.4.0", msg: "Connect should succeed when the remote peer's version is 0.4.0", }, // Test that Connect succeeds when the remote peer's version is > 0.4.0. 
{ version: "0.9.0", msg: "Connect should succeed when the remote peer's version is 0.9.0", }, // Test that Connect /could/ succeed when the remote peer's version is >= 1.3.0. { version: sessionUpgradeVersion, msg: "Connect should succeed when the remote peer's version is 1.3.0 and sessionHeader checks out", uniqueID: func() (id gatewayID) { fastrand.Read(id[:]); return }(), genesisID: types.GenesisID, versionRequired: sessionUpgradeVersion, }, { version: sessionUpgradeVersion, msg: "Connect should not succeed when peer is connecting to itself", uniqueID: g.id, genesisID: types.GenesisID, errWant: errOurAddress.Error(), localErrWant: errOurAddress.Error(), versionRequired: sessionUpgradeVersion, }, } for testIndex, tt := range tests { if tt.versionRequired != "" && build.VersionCmp(build.Version, tt.versionRequired) < 0 { continue // skip, as we do not meet the required version } // create the listener doneChan := make(chan struct{}) go func() { defer close(doneChan) conn, err := listener.Accept() if err != nil { panic(fmt.Sprintf("test #%d failed: %s", testIndex, err)) } remoteVersion, err := acceptVersionHandshake(conn, tt.version) if err != nil { panic(fmt.Sprintf("test #%d failed: %s", testIndex, err)) } if remoteVersion != build.Version { panic(fmt.Sprintf("test #%d failed: remoteVersion != build.Version", testIndex)) } if build.VersionCmp(tt.version, sessionUpgradeVersion) >= 0 { ourHeader := sessionHeader{ GenesisID: tt.genesisID, UniqueID: tt.uniqueID, NetAddress: modules.NetAddress(conn.LocalAddr().String()), } _, err = exchangeRemoteHeader(conn, ourHeader) exchangeOurHeader(conn, ourHeader) } else if build.VersionCmp(tt.version, handshakeUpgradeVersion) >= 0 { var dialbackPort string err = encoding.ReadObject(conn, &dialbackPort, 13) } else { // no action taken for old peers } if (err == nil && tt.localErrWant != "") || (err != nil && !strings.Contains(err.Error(), tt.localErrWant)) { panic(fmt.Sprintf("test #%d failed: %v != %v", testIndex, tt.localErrWant, err)) } }() err = g.Connect(modules.NetAddress(listener.Addr().String())) switch { case tt.invalidVersion: // Check that the error is the expected type. if _, ok := err.(invalidVersionError); !ok { t.Fatalf("expected Connect to error with invalidVersionError: %s", tt.msg) } case tt.insufficientVersion: // Check that the error is the expected type. if _, ok := err.(insufficientVersionError); !ok { t.Fatalf("expected Connect to error with insufficientVersionError: %s", tt.msg) } default: // Check that the error is the expected error. if (err == nil && tt.errWant != "") || (err != nil && !strings.Contains(err.Error(), tt.errWant)) { t.Fatalf("expected Connect to error with '%v', but got '%v': %s", tt.errWant, err, tt.msg) } } <-doneChan g.Disconnect(modules.NetAddress(listener.Addr().String())) } } // TestAcceptConnRejectsVersions tests that Gateway.acceptConn only accepts // peers with sufficient and valid versions. func TestAcceptConnRejectsVersions(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g := newTestingGateway(t) defer g.Close() tests := []struct { remoteVersion string versionResponseWant string errWant error msg string }{ // Test that acceptConn fails when the remote peer's version is "reject". { remoteVersion: "reject", versionResponseWant: "", errWant: errPeerRejectedConn, msg: "acceptConn shouldn't accept a remote peer whose version is \"reject\"", }, // Test that acceptConn fails when the remote peer's version is ascii gibberish. 
{ remoteVersion: "foobar", versionResponseWant: "", errWant: errPeerRejectedConn, msg: "acceptConn shouldn't accept a remote peer whose version is ascii gibberish", }, // Test that acceptConn fails when the remote peer's version is utf8 gibberish. { remoteVersion: "世界", versionResponseWant: "", errWant: errPeerRejectedConn, msg: "acceptConn shouldn't accept a remote peer whose version is utf8 gibberish", }, // Test that acceptConn fails when the remote peer's version is < 0.4.0 (0). { remoteVersion: "0", versionResponseWant: "", errWant: errPeerRejectedConn, msg: "acceptConn shouldn't accept a remote peer whose version is 0", }, { remoteVersion: "0.0.0", versionResponseWant: "", errWant: errPeerRejectedConn, msg: "acceptConn shouldn't accept a remote peer whose version is 0.0.0", }, { remoteVersion: "0000.0000.0000", versionResponseWant: "", errWant: errPeerRejectedConn, msg: "acceptConn shouldn't accept a remote peer whose version is 0000.000.000", }, { remoteVersion: "0.3.9", versionResponseWant: "", errWant: errPeerRejectedConn, msg: "acceptConn shouldn't accept a remote peer whose version is 0.3.9", }, { remoteVersion: "0.3.9999", versionResponseWant: "", errWant: errPeerRejectedConn, msg: "acceptConn shouldn't accept a remote peer whose version is 0.3.9999", }, { remoteVersion: "0.3.9.9.9", versionResponseWant: "", errWant: errPeerRejectedConn, msg: "acceptConn shouldn't accept a remote peer whose version is 0.3.9.9.9", }, // Test that acceptConn succeeds when the remote peer's version is 0.4.0. { remoteVersion: "0.4.0", versionResponseWant: build.Version, msg: "acceptConn should accept a remote peer whose version is 0.4.0", }, // Test that acceptConn succeeds when the remote peer's version is > 0.4.0. { remoteVersion: "9", versionResponseWant: build.Version, msg: "acceptConn should accept a remote peer whose version is 9", }, { remoteVersion: "9.9.9", versionResponseWant: build.Version, msg: "acceptConn should accept a remote peer whose version is 9.9.9", }, { remoteVersion: "9999.9999.9999", versionResponseWant: build.Version, msg: "acceptConn should accept a remote peer whose version is 9999.9999.9999", }, } for _, tt := range tests { conn, err := net.DialTimeout("tcp", string(g.Address()), dialTimeout) if err != nil { t.Fatal(err) } remoteVersion, err := connectVersionHandshake(conn, tt.remoteVersion) if err != tt.errWant { t.Fatal(err) } if remoteVersion != tt.versionResponseWant { t.Fatal(tt.msg) } conn.Close() } } // TestDisconnect checks that calls to gateway.Disconnect correctly disconnect // and remove peers from the gateway. func TestDisconnect(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g := newTestingGateway(t) defer g.Close() g2 := newNamedTestingGateway(t, "2") defer g2.Close() // Try disconnecting from a peer that doesn't exist. if err := g.Disconnect("bar.com:123"); err == nil { t.Fatal("disconnect removed unconnected peer") } // Connect two peers to eachother. err := g.Connect(g2.myAddr) if err != nil { t.Fatal(err) } g.mu.Lock() _, exists := g.nodes[g2.myAddr] if !exists { t.Error("peer never made it into node list") } g.mu.Unlock() // Disconnect the peer. 
if err := g.Disconnect(g2.myAddr); err != nil { t.Fatal("disconnect failed:", err) } g2.Disconnect(g.myAddr) // Prevents g2 from connecting back to g peers := g.Peers() for _, peer := range peers { if peer.NetAddress == g2.myAddr { t.Error("disconnect seems to have failed - still have this peer") } } g.mu.Lock() _, exists = g.nodes[g2.myAddr] if exists { t.Error("should be dropping peer from nodelist after disconnect") } g.mu.Unlock() } // TestPeerManager checks that the peer manager is properly spacing out peer // connection requests. func TestPeerManager(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g1 := newNamedTestingGateway(t, "1") defer g1.Close() // create a valid node to connect to g2 := newNamedTestingGateway(t, "2") defer g2.Close() // g1's node list should only contain g2 g1.mu.Lock() g1.nodes = map[modules.NetAddress]*node{} g1.nodes[g2.Address()] = &node{NetAddress: g2.Address()} g1.mu.Unlock() // when peerManager wakes up, it should connect to g2. time.Sleep(time.Second + noNodesDelay) g1.mu.RLock() defer g1.mu.RUnlock() if len(g1.peers) != 1 || g1.peers[g2.Address()] == nil { t.Fatal("gateway did not connect to g2:", g1.peers) } } // TestOverloadedBootstrap creates a bunch of gateways and connects all of them // to the first gateway, the bootstrap gateway. More gateways will be created // than is allowed by the bootstrap for the total number of connections. After // waiting, all peers should eventually get to the full number of outbound // peers. func TestOverloadedBootstrap(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // Create fullyConnectedThreshold*2 peers and connect them all to only the // first node. var gs []*Gateway for i := 0; i < fullyConnectedThreshold*2; i++ { gs = append(gs, newNamedTestingGateway(t, strconv.Itoa(i))) // Connect this gateway to the first gateway. if i == 0 { continue } err := gs[i].Connect(gs[0].myAddr) for j := 0; j < 100 && err != nil; j++ { time.Sleep(time.Millisecond * 250) err = gs[i].Connect(gs[0].myAddr) } if err != nil { panic(err) } } // Spin until all gateways have a complete number of outbound peers. success := false for i := 0; i < 100; i++ { success = true for _, g := range gs { outboundPeers := 0 g.mu.RLock() for _, p := range g.peers { if !p.Inbound { outboundPeers++ } } g.mu.RUnlock() if outboundPeers < wellConnectedThreshold { success = false break } } if !success { time.Sleep(time.Second) } } if !success { for i, g := range gs { outboundPeers := 0 g.mu.RLock() for _, p := range g.peers { if !p.Inbound { outboundPeers++ } } g.mu.RUnlock() t.Log("Gateway", i, ":", outboundPeers) } t.Fatal("after 100 seconds not all gateways able to become well connected") } // Randomly close many of the peers. For many peers, this should put them // below the well connected threshold, but there are still enough nodes on // the network that no partitions should occur. var newGS []*Gateway for _, i := range fastrand.Perm(len(gs)) { newGS = append(newGS, gs[i]) } cutSize := len(newGS) / 4 // Close the first many of the now-randomly-sorted gateways. for _, g := range newGS[:cutSize] { err := g.Close() if err != nil { t.Fatal(err) } } // Set 'gs' equal to the remaining gateways. gs = newGS[cutSize:] // Spin until all gateways have a complete number of outbound peers. The // test can fail if there are network partitions, however not a huge // magnitude of nodes are being removed, and they all started with 4 // connections. A partition is unlikely. 
success = false
for i := 0; i < 100; i++ {
	success = true
	for _, g := range gs {
		outboundPeers := 0
		g.mu.RLock()
		for _, p := range g.peers {
			if !p.Inbound {
				outboundPeers++
			}
		}
		g.mu.RUnlock()
		if outboundPeers < wellConnectedThreshold {
			success = false
			break
		}
	}
	if !success {
		time.Sleep(time.Second)
	}
}
if !success {
	t.Fatal("after 100 seconds not all gateways able to become well connected")
}

// Close all remaining gateways.
for _, g := range gs {
	err := g.Close()
	if err != nil {
		t.Error(err)
	}
}
}

// TestPeerManagerPriority tests that the peer manager will prioritize
// connecting to previous outbound peers before inbound peers.
func TestPeerManagerPriority(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	g1 := newNamedTestingGateway(t, "1")
	defer g1.Close()
	g2 := newNamedTestingGateway(t, "2")
	defer g2.Close()
	g3 := newNamedTestingGateway(t, "3")
	defer g3.Close()

	// Connect g1 to g2. This will cause g2 to be saved as an outbound peer in
	// g1's node list.
	if err := g1.Connect(g2.Address()); err != nil {
		t.Fatal(err)
	}
	// Connect g3 to g1. This will cause g3 to be added to g1's node list, but
	// not as an outbound peer.
	if err := g3.Connect(g1.Address()); err != nil {
		t.Fatal(err)
	}

	// Spin until the connections succeeded.
	for i := 0; i < 50; i++ {
		g1.mu.RLock()
		_, exists2 := g1.nodes[g2.Address()]
		_, exists3 := g1.nodes[g3.Address()]
		g1.mu.RUnlock()
		if exists2 && exists3 {
			break
		}
		time.Sleep(time.Millisecond * 100)
	}
	g1.mu.RLock()
	peer2, exists2 := g1.nodes[g2.Address()]
	peer3, exists3 := g1.nodes[g3.Address()]
	g1.mu.RUnlock()
	if !exists2 {
		t.Fatal("peer 2 not in gateway")
	}
	if !exists3 {
		t.Fatal("peer 3 not found")
	}

	// Verify assumptions about node list.
	g1.mu.RLock()
	g2isOutbound := peer2.WasOutboundPeer
	g3isOutbound := peer3.WasOutboundPeer
	g1.mu.RUnlock()
	if !g2isOutbound {
		t.Fatal("g2 should be an outbound node")
	}
	if g3isOutbound {
		t.Fatal("g3 should not be an outbound node")
	}

	// Disconnect everyone.
	g2.Disconnect(g1.Address())
	g3.Disconnect(g1.Address())

	// Shutdown g1.
	err := g1.Close()
	if err != nil {
		t.Fatal(err)
	}

	// Restart g1. It should immediately reconnect to g2, and then g3 after a
	// delay.
	g1, err = New(string(g1.myAddr), false, g1.persistDir)
	if err != nil {
		t.Fatal(err)
	}
	defer g1.Close()

	// Wait until g1 connects to g2.
	for i := 0; i < 100; i++ {
		if peers := g1.Peers(); len(peers) == 0 {
			time.Sleep(10 * time.Millisecond)
		} else if len(peers) == 1 && peers[0].NetAddress == g2.Address() {
			break
		} else {
			t.Fatal("something wrong with the peer list:", peers)
		}
	}
	// Wait until g1 connects to g3.
	for i := 0; i < 100; i++ {
		if peers := g1.Peers(); len(peers) == 1 {
			time.Sleep(10 * time.Millisecond)
		} else if len(peers) == 2 {
			break
		} else {
			t.Fatal("something wrong with the peer list:", peers)
		}
	}
}

// TestPeerManagerOutboundSave sets up an island of nodes and checks that they
// can all connect to each other, and that they all add each other as
// 'WasOutboundPeer'.
func TestPeerManagerOutboundSave(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()

	// Create enough gateways so that every gateway should automatically end up
	// with every other gateway as an outbound peer.
	var gs []*Gateway
	for i := 0; i < wellConnectedThreshold+1; i++ {
		gs = append(gs, newNamedTestingGateway(t, strconv.Itoa(i)))
	}

	// Connect g1 to each peer. This should be enough that every peer eventually
	// has the full set of outbound peers.
	for _, g := range gs[1:] {
		if err := gs[0].Connect(g.Address()); err != nil {
			t.Fatal(err)
		}
	}

	// Block until every peer has wellConnectedThreshold outbound peers.
	err := build.Retry(100, time.Millisecond*200, func() error {
		for _, g := range gs {
			var outboundNodes, outboundPeers int
			g.mu.RLock()
			for _, node := range g.nodes {
				if node.WasOutboundPeer {
					outboundNodes++
				}
			}
			for _, peer := range g.peers {
				if !peer.Inbound {
					outboundPeers++
				}
			}
			g.mu.RUnlock()
			if outboundNodes < wellConnectedThreshold {
				return errors.New("not enough outbound nodes: " + strconv.Itoa(outboundNodes))
			}
			if outboundPeers < wellConnectedThreshold {
				return errors.New("not enough outbound peers: " + strconv.Itoa(outboundPeers))
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// TestBuildPeerManagerNodeList tests the buildPeerManagerNodeList method.
func TestBuildPeerManagerNodeList(t *testing.T) {
	g := &Gateway{
		nodes: map[modules.NetAddress]*node{
			"foo":  {NetAddress: "foo", WasOutboundPeer: true},
			"bar":  {NetAddress: "bar", WasOutboundPeer: false},
			"baz":  {NetAddress: "baz", WasOutboundPeer: true},
			"quux": {NetAddress: "quux", WasOutboundPeer: false},
		},
	}
	nodelist := g.buildPeerManagerNodeList()
	// all outbound nodes should be at the front of the list
	var i int
	for i < len(nodelist) && g.nodes[nodelist[i]].WasOutboundPeer {
		i++
	}
	for i < len(nodelist) && !g.nodes[nodelist[i]].WasOutboundPeer {
		i++
	}
	if i != len(nodelist) {
		t.Fatal("bad nodelist:", nodelist)
	}
}
Sia-1.3.0/modules/gateway/peersmanager.go000066400000000000000000000137251313565667000203630ustar00rootroot00000000000000package gateway

import (
	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/fastrand"
)

// managedPeerManagerConnect is a blocking function which tries to connect to
// the input address as a peer.
func (g *Gateway) managedPeerManagerConnect(addr modules.NetAddress) {
	g.log.Debugf("[PMC] [%v] Attempting connection", addr)
	err := g.managedConnect(addr)
	if err == errPeerExists {
		// This peer is already connected to us. Safety around the
		// outbound peers relates to the fact that we have picked out
		// the outbound peers instead of allowing the attacker to pick
		// the peers for us. Because we have made the selection, it is
		// okay to set the peer as an outbound peer.
		//
		// The nodelist size check ensures that an attacker can't flood
		// a new node with a bunch of inbound requests. Doing so would
		// result in a nodelist that's entirely full of attacker nodes.
		// There's not much we can do about that anyway, but at least
		// we can hold off making attacker nodes 'outbound' peers until
		// our nodelist has had time to fill up naturally.
		g.mu.Lock()
		p, exists := g.peers[addr]
		if exists {
			// Have to check it exists because we released the lock, a
			// race condition could mean that the peer was disconnected
			// before this code block was reached.
			p.Inbound = false
			if n, ok := g.nodes[p.NetAddress]; ok && !n.WasOutboundPeer {
				n.WasOutboundPeer = true
				g.nodes[n.NetAddress] = n
			}
			g.log.Debugf("[PMC] [SUCCESS] [%v] existing peer has been converted to outbound peer", addr)
		}
		g.mu.Unlock()
	} else if err != nil {
		g.log.Debugf("[PMC] [ERROR] [%v] WARN: removing peer because automatic connect failed: %v\n", addr, err)

		// Remove the node, but only if there are enough nodes in the node list.
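		// (Without the size check below, a string of failed automatic
		// connect attempts could whittle a small nodelist down to nothing;
		// pruneNodeListLen keeps a floor under it.)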
		g.mu.Lock()
		if len(g.nodes) > pruneNodeListLen {
			g.removeNode(addr)
		}
		g.mu.Unlock()
	} else {
		g.log.Debugf("[PMC] [SUCCESS] [%v] peer successfully added", addr)
	}
}

// numOutboundPeers returns the number of outbound peers in the gateway.
func (g *Gateway) numOutboundPeers() int {
	n := 0
	for _, p := range g.peers {
		if !p.Inbound {
			n++
		}
	}
	return n
}

// permanentPeerManager tries to keep the Gateway well-connected. As long as
// the Gateway is not well-connected, it tries to connect to random nodes.
func (g *Gateway) permanentPeerManager(closedChan chan struct{}) {
	// Send a signal upon shutdown.
	defer close(closedChan)
	defer g.log.Debugln("INFO: [PPM] Permanent peer manager is shutting down")

	// permanentPeerManager will attempt to connect to peers asynchronously,
	// such that multiple connection attempts can be open at once, but a
	// limited number.
	connectionLimiterChan := make(chan struct{}, maxConcurrentOutboundPeerRequests)

	g.log.Debugln("INFO: [PPM] Permanent peer manager has started")
	for {
		// Fetch the set of nodes to try.
		g.mu.RLock()
		nodes := g.buildPeerManagerNodeList()
		g.mu.RUnlock()
		if len(nodes) == 0 {
			g.log.Debugln("[PPM] Node list is empty, sleeping")
			if !g.managedSleep(noNodesDelay) {
				return
			}
			continue
		}

		for _, addr := range nodes {
			// Break as soon as we have enough outbound peers.
			g.mu.RLock()
			numOutboundPeers := g.numOutboundPeers()
			isOutboundPeer := g.peers[addr] != nil && !g.peers[addr].Inbound
			g.mu.RUnlock()
			if numOutboundPeers >= wellConnectedThreshold {
				g.log.Debugln("INFO: [PPM] Gateway has enough peers, sleeping.")
				if !g.managedSleep(wellConnectedDelay) {
					return
				}
				break
			}
			if isOutboundPeer {
				// Skip current outbound peers.
				if !g.managedSleep(acquiringPeersDelay) {
					return
				}
				continue
			}
			g.log.Debugln("[PPM] Fetched a random node:", addr)

			// We need at least some of our outbound peers to be remote peers. If
			// we already have reached a certain threshold of outbound peers and
			// this peer is a local peer, do not consider it for an outbound peer.
			// Sleep briefly to prevent the gateway from hogging the CPU if all
			// peers are local.
			if numOutboundPeers >= maxLocalOutboundPeers && addr.IsLocal() && build.Release != "testing" {
				g.log.Debugln("[PPM] Ignoring selected peer; this peer is local and we already have multiple outbound peers:", addr)
				if !g.managedSleep(unwantedLocalPeerDelay) {
					return
				}
				continue
			}

			// Try connecting to that peer in a goroutine. Do not block unless
			// there are currently 3 or more peer connection attempts open at once.
			// Before spawning the thread, make sure that there is enough room by
			// throwing a struct into the buffered channel.
			g.log.Debugln("[PPM] Trying to connect to a node:", addr)
			connectionLimiterChan <- struct{}{}
			go func(addr modules.NetAddress) {
				// After completion, take the struct out of the channel so that the
				// next thread may proceed.
				defer func() {
					<-connectionLimiterChan
				}()
				if err := g.threads.Add(); err != nil {
					return
				}
				defer g.threads.Done()
				// peerManagerConnect will handle all of its own logging.
				g.managedPeerManagerConnect(addr)
			}(addr)

			// Wait a bit before trying the next peer. The peer connections are
			// non-blocking, so they should be spaced out to avoid spinning up an
			// uncontrolled number of threads and therefore peer connections.
			if !g.managedSleep(acquiringPeersDelay) {
				return
			}
		}
	}
}

// buildPeerManagerNodeList returns the gateway's node list in the order that
// permanentPeerManager should attempt to connect to them.
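//
// For example (illustrative only): given nodes A and C with WasOutboundPeer
// set and node B without, the returned slice is a random permutation of
// {A, C} followed by B:
//
//	nodes := g.buildPeerManagerNodeList()
//	// nodes is [A, C, B] or [C, A, B]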
func (g *Gateway) buildPeerManagerNodeList() []modules.NetAddress { // flatten the node map, inserting in random order nodes := make([]modules.NetAddress, len(g.nodes)) perm := fastrand.Perm(len(nodes)) for _, node := range g.nodes { nodes[perm[0]] = node.NetAddress perm = perm[1:] } // swap the outbound nodes to the front of the list numOutbound := 0 for i, node := range nodes { if g.nodes[node].WasOutboundPeer { nodes[numOutbound], nodes[i] = nodes[i], nodes[numOutbound] numOutbound++ } } return nodes } Sia-1.3.0/modules/gateway/persist.go000066400000000000000000000043311313565667000173750ustar00rootroot00000000000000package gateway import ( "path/filepath" "time" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" ) const ( // nodesFile is the name of the file that contains all seen nodes. nodesFile = "nodes.json" // logFile is the name of the log file. logFile = modules.GatewayDir + ".log" ) // persistMetadata contains the header and version strings that identify the // gateway persist file. var persistMetadata = persist.Metadata{ Header: "Sia Node List", Version: "1.3.0", } // persistData returns the data in the Gateway that will be saved to disk. func (g *Gateway) persistData() (nodes []*node) { for _, node := range g.nodes { nodes = append(nodes, node) } return } // load loads the Gateway's persistent data from disk. func (g *Gateway) load() error { var nodes []*node err := persist.LoadJSON(persistMetadata, &nodes, filepath.Join(g.persistDir, nodesFile)) if err != nil { // COMPATv1.3.0 return g.loadv033persist() } for i := range nodes { g.nodes[nodes[i].NetAddress] = nodes[i] } return nil } // saveSync stores the Gateway's persistent data on disk, and then syncs to // disk to minimize the possibility of data loss. func (g *Gateway) saveSync() error { return persist.SaveJSON(persistMetadata, g.persistData(), filepath.Join(g.persistDir, nodesFile)) } // threadedSaveLoop periodically saves the gateway. func (g *Gateway) threadedSaveLoop() { for { select { case <-g.threads.StopChan(): return case <-time.After(saveFrequency): } func() { err := g.threads.Add() if err != nil { return } defer g.threads.Done() g.mu.Lock() err = g.saveSync() g.mu.Unlock() if err != nil { g.log.Println("ERROR: Unable to save gateway persist:", err) } }() } } // loadv033persist loads the v0.3.3 Gateway's persistent data from disk. func (g *Gateway) loadv033persist() error { var nodes []modules.NetAddress err := persist.LoadJSON(persist.Metadata{ Header: "Sia Node List", Version: "0.3.3", }, &nodes, filepath.Join(g.persistDir, nodesFile)) if err != nil { return err } for _, addr := range nodes { err := g.addNode(addr) if err != nil { g.log.Printf("WARN: error loading node '%v' from persist: %v", addr, err) } } return nil } Sia-1.3.0/modules/gateway/persist_test.go000066400000000000000000000024061313565667000204350ustar00rootroot00000000000000package gateway import ( "bytes" "path/filepath" "testing" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" ) func TestLoad(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g := newTestingGateway(t) g.mu.Lock() g.addNode(dummyNode) g.saveSync() g.mu.Unlock() g.Close() g2, err := New("localhost:0", false, g.persistDir) if err != nil { t.Fatal(err) } if _, ok := g2.nodes[dummyNode]; !ok { t.Fatal("gateway did not load old peer list:", g2.nodes) } } // TestLoadv033 tests that the gateway can load a v033 persist file. 
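//
// For reference, the v0.3.3 persist file is JSON consisting of the metadata
// header and version strings followed by a flat array of node addresses,
// e.g. (abbreviated from the testdata file):
//
//	"Sia Node List"
//	"0.3.3"
//	["11.22.33.21:9981", "57.39.64.7:9981"]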
func TestLoadv033(t *testing.T) {
	var buf bytes.Buffer
	log := persist.NewLogger(&buf)
	buf.Reset()
	g := &Gateway{
		nodes:      make(map[modules.NetAddress]*node),
		persistDir: filepath.Join("testdata", t.Name()),
		log:        log,
	}
	if err := g.load(); err != nil {
		t.Fatal(err)
	}

	// All nodes should have been loaded
	if len(g.nodes) != 10 {
		t.Error("expected 10 nodes, got", len(g.nodes))
	}
	// All nodes should be marked as non-outbound
	for _, node := range g.nodes {
		if node.WasOutboundPeer {
			t.Error("v033 nodes should not be marked as outbound peers")
		}
	}
	// The log should be empty
	if buf.Len() != 0 {
		t.Error("expected empty log, got", buf.String())
	}
}
Sia-1.3.0/modules/gateway/rpc.go000066400000000000000000000177521313565667000165010ustar00rootroot00000000000000package gateway

import (
	"errors"
	"sync"
	"time"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/encoding"
	"github.com/NebulousLabs/Sia/modules"
)

// rpcID is an 8-byte signature that is added to all RPCs to tell the gateway
// what to do with the RPC.
type rpcID [8]byte

// String returns a string representation of an rpcID. Empty elements of rpcID
// will be encoded as spaces.
func (id rpcID) String() string {
	for i := range id {
		if id[i] == 0 {
			id[i] = ' '
		}
	}
	return string(id[:])
}

// handlerName truncates a string to 8 bytes. If len(name) < 8, the remaining
// bytes are 0. A handlerName is specified at the beginning of each network
// call, indicating which function should handle the connection.
func handlerName(name string) (id rpcID) {
	copy(id[:], name)
	return
}

// managedRPC calls an RPC on the given address. managedRPC cannot be called on
// an address that the Gateway is not connected to.
func (g *Gateway) managedRPC(addr modules.NetAddress, name string, fn modules.RPCFunc) error {
	g.mu.RLock()
	peer, ok := g.peers[addr]
	g.mu.RUnlock()
	if !ok {
		return errors.New("can't call RPC on unconnected peer " + string(addr))
	}

	conn, err := peer.open()
	if err != nil {
		// peer probably disconnected without sending a shutdown signal;
		// disconnect from them
		g.log.Debugf("Could not initiate RPC with %v; disconnecting", addr)
		peer.sess.Close()
		g.mu.Lock()
		delete(g.peers, addr)
		g.mu.Unlock()
		return err
	}
	defer conn.Close()

	// write header
	conn.SetDeadline(time.Now().Add(rpcStdDeadline))
	if err := encoding.WriteObject(conn, handlerName(name)); err != nil {
		return err
	}
	conn.SetDeadline(time.Time{})
	// call fn
	return fn(conn)
}

// RPC calls an RPC on the given address. RPC cannot be called on an address
// that the Gateway is not connected to.
func (g *Gateway) RPC(addr modules.NetAddress, name string, fn modules.RPCFunc) error {
	if err := g.threads.Add(); err != nil {
		return err
	}
	defer g.threads.Done()
	return g.managedRPC(addr, name, fn)
}

// RegisterRPC registers an RPCFunc as a handler for a given identifier. To
// call an RPC, use gateway.RPC, supplying the same identifier given to
// RegisterRPC. Identifiers should always use PascalCase. The first 8
// characters of an identifier should be unique, as the identifier used
// internally is truncated to 8 bytes.
func (g *Gateway) RegisterRPC(name string, fn modules.RPCFunc) {
	g.mu.Lock()
	defer g.mu.Unlock()
	if _, ok := g.handlers[handlerName(name)]; ok {
		build.Critical("RPC already registered: " + name)
	}
	g.handlers[handlerName(name)] = fn
}

// UnregisterRPC unregisters an RPC and removes the corresponding RPCFunc from
// g.handlers. Future calls to the RPC by peers will fail.
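//
// A minimal usage sketch ("Foo" here is just an example identifier):
//
//	g.RegisterRPC("Foo", fooHandler)
//	// ... peers may call "Foo" ...
//	g.UnregisterRPC("Foo") // subsequent calls from peers will fail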
func (g *Gateway) UnregisterRPC(name string) {
	g.mu.Lock()
	defer g.mu.Unlock()
	if _, ok := g.handlers[handlerName(name)]; !ok {
		build.Critical("RPC not registered: " + name)
	}
	delete(g.handlers, handlerName(name))
}

// RegisterConnectCall registers a name and RPCFunc to be called on a peer
// upon connecting.
func (g *Gateway) RegisterConnectCall(name string, fn modules.RPCFunc) {
	g.mu.Lock()
	defer g.mu.Unlock()
	if _, ok := g.initRPCs[name]; ok {
		build.Critical("ConnectCall already registered: " + name)
	}
	g.initRPCs[name] = fn
}

// UnregisterConnectCall unregisters an on-connect call and removes the
// corresponding RPCFunc from g.initRPCs. Future connections to peers will not
// trigger the RPC to be called on them.
func (g *Gateway) UnregisterConnectCall(name string) {
	g.mu.Lock()
	defer g.mu.Unlock()
	if _, ok := g.initRPCs[name]; !ok {
		build.Critical("ConnectCall not registered: " + name)
	}
	delete(g.initRPCs, name)
}

// threadedListenPeer listens for new streams on a peer connection and serves
// them via threadedHandleConn.
func (g *Gateway) threadedListenPeer(p *peer) {
	// threadedListenPeer registers to the peerTG instead of the primary thread
	// group because peer connections can last for the lifetime of the gateway,
	// but can also be short-lived. The fact that they can be long-lived means
	// that they can't call threads.Add, as they would block calls to
	// threads.Flush. The fact that they can be short-lived means that
	// threads.OnStop is not a good tool for closing out the threads. Instead,
	// they register to peerTG, which is cleanly closed upon gateway shutdown
	// but will not block any calls to threads.Flush()
	if g.peerTG.Add() != nil {
		return
	}
	defer g.peerTG.Done()

	// Spin up a goroutine to listen for a shutdown signal from both the peer
	// and from the gateway. In the event of either, close the muxado session.
	connClosedChan := make(chan struct{})
	peerCloseChan := make(chan struct{})
	go func() {
		// Signal that the muxado session has been successfully closed, and
		// that this goroutine has terminated.
		defer close(connClosedChan)

		// Listen for a stop signal.
		select {
		case <-g.threads.StopChan():
		case <-peerCloseChan:
		}

		// Close the session and remove p from the peer list.
		p.sess.Close()
		g.mu.Lock()
		delete(g.peers, p.NetAddress)
		g.mu.Unlock()
	}()

	for {
		conn, err := p.accept()
		if err != nil {
			g.log.Debugf("Peer connection with %v closed: %v\n", p.NetAddress, err)
			break
		}
		// Set the default deadline on the conn.
		err = conn.SetDeadline(time.Now().Add(rpcStdDeadline))
		if err != nil {
			g.log.Printf("Peer connection (%v) deadline could not be set: %v\n", p.NetAddress, err)
			continue
		}

		// The handler is responsible for closing the connection, though a
		// default deadline has been set.
		go g.threadedHandleConn(conn)
		if !g.managedSleep(peerRPCDelay) {
			break
		}
	}

	// Signal that the goroutine can shut down.
	close(peerCloseChan)

	// Wait for confirmation that the goroutine has shut down before returning
	// and releasing the threadgroup registration.
	<-connClosedChan
}

// threadedHandleConn reads header data from a connection, then routes it to the
// appropriate handler for further processing.
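//
// The header is just the 8-byte rpcID: a caller invoking, for example, the
// "ShareNodes" RPC writes handlerName("ShareNodes"), i.e. the bytes
// "ShareNod", before any RPC-specific payload.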
func (g *Gateway) threadedHandleConn(conn modules.PeerConn) { defer conn.Close() if g.threads.Add() != nil { return } defer g.threads.Done() var id rpcID err := conn.SetDeadline(time.Now().Add(rpcStdDeadline)) if err != nil { return } if err := encoding.ReadObject(conn, &id, 8); err != nil { return } // call registered handler for this ID g.mu.RLock() fn, ok := g.handlers[id] g.mu.RUnlock() if !ok { g.log.Debugf("WARN: incoming conn %v requested unknown RPC \"%v\"", conn.RPCAddr(), id) return } g.log.Debugf("INFO: incoming conn %v requested RPC \"%v\"", conn.RPCAddr(), id) // call fn err = fn(conn) // don't log benign errors if err == modules.ErrDuplicateTransactionSet || err == modules.ErrBlockKnown { err = nil } if err != nil { g.log.Debugf("WARN: incoming RPC \"%v\" from conn %v failed: %v", id, conn.RPCAddr(), err) } } // Broadcast calls an RPC on all of the specified peers. The calls are run in // parallel. Broadcasts are restricted to "one-way" RPCs, which simply write an // object and disconnect. This is why Broadcast takes an interface{} instead of // an RPCFunc. func (g *Gateway) Broadcast(name string, obj interface{}, peers []modules.Peer) { if g.threads.Add() != nil { return } defer g.threads.Done() g.log.Debugf("INFO: broadcasting RPC %q to %v peers", name, len(peers)) // only encode obj once, instead of using WriteObject enc := encoding.Marshal(obj) fn := func(conn modules.PeerConn) error { return encoding.WritePrefix(conn, enc) } var wg sync.WaitGroup for _, p := range peers { wg.Add(1) go func(addr modules.NetAddress) { defer wg.Done() err := g.managedRPC(addr, name, fn) if err != nil { g.log.Debugf("WARN: broadcasting RPC %q to peer %q failed (attempting again in 10 seconds): %v", name, addr, err) // try one more time before giving up select { case <-time.After(10 * time.Second): case <-g.threads.StopChan(): return } err := g.managedRPC(addr, name, fn) if err != nil { g.log.Debugf("WARN: broadcasting RPC %q to peer %q failed twice: %v", name, addr, err) } } }(p.NetAddress) } wg.Wait() } Sia-1.3.0/modules/gateway/rpc_test.go000066400000000000000000000367601313565667000175420ustar00rootroot00000000000000package gateway import ( "errors" "io" "sync" "sync/atomic" "testing" "time" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" ) func TestRPCID(t *testing.T) { cases := map[rpcID]string{ {}: " ", {'f', 'o', 'o'}: "foo ", {'f', 'o', 'o', 'b', 'a', 'r', 'b', 'a'}: "foobarba", } for id, s := range cases { if id.String() != s { t.Errorf("rpcID.String mismatch: expected %v, got %v", s, id.String()) } } } func TestHandlerName(t *testing.T) { cases := map[string]rpcID{ "": {}, "foo": {'f', 'o', 'o'}, "foobarbaz": {'f', 'o', 'o', 'b', 'a', 'r', 'b', 'a'}, } for s, id := range cases { if hid := handlerName(s); hid != id { t.Errorf("handlerName mismatch: expected %v, got %v", id, hid) } } } // TestRegisterRPC tests that registering the same RPC twice causes a panic. func TestRegisterRPC(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g := newTestingGateway(t) defer g.Close() g.RegisterRPC("Foo", func(conn modules.PeerConn) error { return nil }) defer func() { if r := recover(); r == nil { t.Error("Registering the same RPC twice did not cause a panic") } }() g.RegisterRPC("Foo", func(conn modules.PeerConn) error { return nil }) } // TestUnregisterRPC tests that unregistering an RPC causes calls to it to // fail, and checks that unregistering a non-registered RPC causes a panic. 
func TestUnregisterRPC(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g1 := newNamedTestingGateway(t, "1") defer g1.Close() g2 := newNamedTestingGateway(t, "2") defer g2.Close() err := g2.Connect(g1.Address()) if err != nil { t.Fatal(err) } dummyFunc := func(conn modules.PeerConn) error { var str string return encoding.ReadObject(conn, &str, 11) } // Register RPC and check that calling it succeeds. g1.RegisterRPC("Foo", func(conn modules.PeerConn) error { return encoding.WriteObject(conn, "foo") }) err = g2.RPC(g1.Address(), "Foo", dummyFunc) if err != nil { t.Errorf("calling registered RPC on g1 returned %q", err) } // Unregister RPC and check that calling it fails. g1.UnregisterRPC("Foo") err = g2.RPC(g1.Address(), "Foo", dummyFunc) if err != io.EOF { t.Errorf("calling unregistered RPC on g1 returned %q instead of io.EOF", err) } // Unregister again and check that it panics. defer func() { if r := recover(); r == nil { t.Error("Unregistering an unregistered RPC did not cause a panic") } }() g1.UnregisterRPC("Foo") } // TestRegisterConnectCall tests that registering the same on-connect call // twice causes a panic. func TestRegisterConnectCall(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g := newTestingGateway(t) defer g.Close() // Register an on-connect call. g.RegisterConnectCall("Foo", func(conn modules.PeerConn) error { return nil }) defer func() { if r := recover(); r == nil { t.Error("Registering the same on-connect call twice did not cause a panic") } }() g.RegisterConnectCall("Foo", func(conn modules.PeerConn) error { return nil }) } // TestUnregisterConnectCallPanics tests that unregistering the same on-connect // call twice causes a panic. func TestUnregisterConnectCallPanics(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g1 := newNamedTestingGateway(t, "1") defer g1.Close() g2 := newNamedTestingGateway(t, "2") defer g2.Close() rpcChan := make(chan struct{}) // Register on-connect call and test that RPC is called on connect. g1.RegisterConnectCall("Foo", func(conn modules.PeerConn) error { rpcChan <- struct{}{} return nil }) err := g1.Connect(g2.Address()) if err != nil { t.Fatal(err) } select { case <-rpcChan: case <-time.After(200 * time.Millisecond): t.Fatal("ConnectCall not called on Connect after it was registered") } // Disconnect, unregister on-connect call, and test that RPC is not called on connect. err = g1.Disconnect(g2.Address()) if err != nil { t.Fatal(err) } g1.UnregisterConnectCall("Foo") err = g1.Connect(g2.Address()) if err != nil { t.Fatal(err) } select { case <-rpcChan: t.Fatal("ConnectCall called on Connect after it was unregistered") case <-time.After(200 * time.Millisecond): } // Unregister again and check that it panics. 
	defer func() {
		if r := recover(); r == nil {
			t.Error("Unregistering an unregistered on-connect call did not cause a panic")
		}
	}()
	g1.UnregisterConnectCall("Foo")
}

func TestRPC(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	g1 := newNamedTestingGateway(t, "1")
	defer g1.Close()

	if err := g1.RPC("foo.com:123", "", nil); err == nil {
		t.Fatal("RPC on unconnected peer succeeded")
	}

	g2 := newNamedTestingGateway(t, "2")
	defer g2.Close()
	err := g1.Connect(g2.Address())
	if err != nil {
		t.Fatal("failed to connect:", err)
	}

	g2.RegisterRPC("Foo", func(conn modules.PeerConn) error {
		var i uint64
		err := encoding.ReadObject(conn, &i, 8)
		if err != nil {
			return err
		} else if i == 0xdeadbeef {
			return encoding.WriteObject(conn, "foo")
		} else {
			return encoding.WriteObject(conn, "bar")
		}
	})

	var foo string
	err = g1.RPC(g2.Address(), "Foo", func(conn modules.PeerConn) error {
		err := encoding.WriteObject(conn, 0xdeadbeef)
		if err != nil {
			return err
		}
		return encoding.ReadObject(conn, &foo, 11)
	})
	if err != nil {
		t.Fatal(err)
	}
	if foo != "foo" {
		t.Fatal("Foo gave wrong response:", foo)
	}

	// a wrong number should produce the alternate response, not an error
	err = g1.RPC(g2.Address(), "Foo", func(conn modules.PeerConn) error {
		err := encoding.WriteObject(conn, 0xbadbeef)
		if err != nil {
			return err
		}
		return encoding.ReadObject(conn, &foo, 11)
	})
	if err != nil {
		t.Fatal(err)
	}
	if foo != "bar" {
		t.Fatal("Foo gave wrong response:", foo)
	}

	// don't read or write anything
	err = g1.RPC(g2.Address(), "Foo", func(modules.PeerConn) error {
		return errNoPeers // any non-nil error will do
	})
	if err == nil {
		t.Fatal("bad RPC did not produce an error")
	}

	g1.peers[g2.Address()].sess.Close()
	if err := g1.RPC(g2.Address(), "Foo", nil); err == nil {
		t.Fatal("RPC on closed peer connection succeeded")
	}
}

func TestThreadedHandleConn(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	g1 := newNamedTestingGateway(t, "1")
	defer g1.Close()
	g2 := newNamedTestingGateway(t, "2")
	defer g2.Close()

	err := g1.Connect(g2.Address())
	if err != nil {
		t.Fatal("failed to connect:", err)
	}
	g2.RegisterRPC("Foo", func(conn modules.PeerConn) error {
		var i uint64
		err := encoding.ReadObject(conn, &i, 8)
		if err != nil {
			return err
		} else if i == 0xdeadbeef {
			return encoding.WriteObject(conn, "foo")
		} else {
			return encoding.WriteObject(conn, "bar")
		}
	})

	// custom rpc fn (doesn't automatically write rpcID)
	rpcFn := func(fn func(modules.PeerConn) error) error {
		conn, err := g1.peers[g2.Address()].open()
		if err != nil {
			return err
		}
		defer conn.Close()
		return fn(conn)
	}

	// bad rpcID
	err = rpcFn(func(conn modules.PeerConn) error {
		return encoding.WriteObject(conn, [3]byte{1, 2, 3})
	})
	if err != nil {
		t.Fatal("rpcFn failed:", err)
	}

	// unknown rpcID
	err = rpcFn(func(conn modules.PeerConn) error {
		return encoding.WriteObject(conn, handlerName("bar"))
	})
	if err != nil {
		t.Fatal("rpcFn failed:", err)
	}

	// valid rpcID
	err = rpcFn(func(conn modules.PeerConn) error {
		return encoding.WriteObject(conn, handlerName("Foo"))
	})
	if err != nil {
		t.Fatal("rpcFn failed:", err)
	}
}

// TestBroadcast tests that calling broadcast with a slice of peers only
// broadcasts to those peers.
func TestBroadcast(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() g1 := newNamedTestingGateway(t, "1") defer g1.Close() g2 := newNamedTestingGateway(t, "2") defer g2.Close() g3 := newNamedTestingGateway(t, "3") defer g3.Close() err := g1.Connect(g2.Address()) if err != nil { t.Fatal("failed to connect:", err) } err = g1.Connect(g3.Address()) if err != nil { t.Fatal("failed to connect:", err) } var g2Payload, g3Payload string g2DoneChan := make(chan struct{}) g3DoneChan := make(chan struct{}) bothDoneChan := make(chan struct{}) g2.RegisterRPC("Recv", func(conn modules.PeerConn) error { encoding.ReadObject(conn, &g2Payload, 100) g2DoneChan <- struct{}{} return nil }) g3.RegisterRPC("Recv", func(conn modules.PeerConn) error { encoding.ReadObject(conn, &g3Payload, 100) g3DoneChan <- struct{}{} return nil }) // Test that broadcasting to all peers in g1.Peers() broadcasts to all peers. peers := g1.Peers() g1.Broadcast("Recv", "bar", peers) go func() { <-g2DoneChan <-g3DoneChan bothDoneChan <- struct{}{} }() select { case <-bothDoneChan: // Both g2 and g3 should receive the broadcast. case <-time.After(5 * time.Second): t.Fatal("broadcasting to gateway.Peers() should broadcast to all peers") } if g2Payload != "bar" || g3Payload != "bar" { t.Fatal("broadcast failed:", g2Payload, g3Payload) } // Test that broadcasting to only g2 does not broadcast to g3. peers = make([]modules.Peer, 0) for _, p := range g1.Peers() { if p.NetAddress == g2.Address() { peers = append(peers, p) break } } g1.Broadcast("Recv", "baz", peers) select { case <-g2DoneChan: // Only g2 should receive a broadcast. case <-g3DoneChan: t.Error("broadcast broadcasted to peers not in the peers arg") case <-time.After(200 * time.Millisecond): t.Fatal("called broadcast with g2 in peers list, but g2 didn't receive it.") } if g2Payload != "baz" { t.Fatal("broadcast failed:", g2Payload) } // Test that broadcasting to only g3 does not broadcast to g2. peers = make([]modules.Peer, 0) for _, p := range g1.Peers() { if p.NetAddress == g3.Address() { peers = append(peers, p) break } } g1.Broadcast("Recv", "qux", peers) select { case <-g2DoneChan: t.Error("broadcast broadcasted to peers not in the peers arg") case <-g3DoneChan: // Only g3 should receive a broadcast. case <-time.After(200 * time.Millisecond): t.Fatal("called broadcast with g3 in peers list, but g3 didn't receive it.") } if g3Payload != "qux" { t.Fatal("broadcast failed:", g3Payload) } // Test that broadcasting to an empty slice (but not nil!) does not broadcast // to g2 or g3. peers = make([]modules.Peer, 0) g1.Broadcast("Recv", "quux", peers) select { case <-g2DoneChan: t.Error("broadcast broadcasted to peers not in the peers arg") case <-g3DoneChan: t.Error("broadcast broadcasted to peers not in the peers arg") case <-time.After(200 * time.Millisecond): // Neither peer should receive a broadcast. } // Test that calling broadcast with nil peers does not broadcast to g2 or g3. g1.Broadcast("Recv", "foo", nil) select { case <-g2DoneChan: t.Error("broadcast broadcasted to peers not in the peers arg") case <-g3DoneChan: t.Error("broadcast broadcasted to peers not in the peers arg") case <-time.After(200 * time.Millisecond): // Neither peer should receive a broadcast. } } // TestOutboundAndInboundRPCs tests that both inbound and outbound connections // can successfully make RPC calls. 
func TestOutboundAndInboundRPCs(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	g1 := newNamedTestingGateway(t, "1")
	defer g1.Close()
	g2 := newNamedTestingGateway(t, "2")
	defer g2.Close()

	rpcChanG1 := make(chan struct{})
	rpcChanG2 := make(chan struct{})
	g1.RegisterRPC("recv", func(conn modules.PeerConn) error {
		rpcChanG1 <- struct{}{}
		return nil
	})
	g2.RegisterRPC("recv", func(conn modules.PeerConn) error {
		rpcChanG2 <- struct{}{}
		return nil
	})

	err := g1.Connect(g2.Address())
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(10 * time.Millisecond)
	err = g1.RPC(g2.Address(), "recv", func(conn modules.PeerConn) error { return nil })
	if err != nil {
		t.Fatal(err)
	}
	<-rpcChanG2

	// Call the "recv" RPC on g1. We don't know g1's address as g2 sees it, so we
	// get it from the first address in g2's peer list.
	var addr modules.NetAddress
	for pAddr := range g2.peers {
		addr = pAddr
		break
	}
	err = g2.RPC(addr, "recv", func(conn modules.PeerConn) error { return nil })
	if err != nil {
		t.Fatal(err)
	}
	<-rpcChanG1
}

// TestCallingRPCFromRPC tests that calling an RPC from an RPC works.
func TestCallingRPCFromRPC(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	g1 := newNamedTestingGateway(t, "1")
	defer g1.Close()
	g2 := newNamedTestingGateway(t, "2")
	defer g2.Close()

	errChan := make(chan error)
	g1.RegisterRPC("FOO", func(conn modules.PeerConn) error {
		err := g1.RPC(conn.RPCAddr(), "BAR", func(conn modules.PeerConn) error { return nil })
		errChan <- err
		return err
	})

	barChan := make(chan struct{})
	g2.RegisterRPC("BAR", func(conn modules.PeerConn) error {
		barChan <- struct{}{}
		return nil
	})

	err := g1.Connect(g2.Address())
	if err != nil {
		t.Fatal(err)
	}
	// Wait for g2 to accept the connection
	for {
		if len(g2.Peers()) > 0 {
			break
		}
	}

	err = g2.RPC(g1.Address(), "FOO", func(conn modules.PeerConn) error { return nil })
	select {
	case err = <-errChan:
		if err != nil {
			t.Fatal(err)
		}
	case <-time.After(500 * time.Millisecond):
		t.Fatal("expected FOO RPC to be called")
	}
	select {
	case <-barChan:
	case <-time.After(500 * time.Millisecond):
		t.Fatal("expected BAR RPC to be called")
	}
}

// TestRPCRatelimit checks that a peer calling an RPC repeatedly does not result
// in a crash.
func TestRPCRatelimit(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	g1 := newNamedTestingGateway(t, "1")
	defer g1.Close()
	g2 := newNamedTestingGateway(t, "2")
	defer g2.Close()

	var atomicCalls, atomicErrs uint64
	g2.RegisterRPC("recv", func(conn modules.PeerConn) error {
		_, err := conn.Write([]byte("hi"))
		if err != nil {
			atomic.AddUint64(&atomicErrs, 1)
			return err
		}
		atomic.AddUint64(&atomicCalls, 1)
		return nil
	})

	err := g1.Connect(g2.Address())
	if err != nil {
		t.Fatal(err)
	}
	// Block until the connection is confirmed.
	for i := 0; i < 50; i++ {
		time.Sleep(10 * time.Millisecond)
		g1.mu.Lock()
		g1Peers := len(g1.peers)
		g1.mu.Unlock()
		g2.mu.Lock()
		g2Peers := len(g2.peers)
		g2.mu.Unlock()
		if g1Peers > 0 || g2Peers > 0 {
			break
		}
	}
	g1.mu.Lock()
	g1Peers := len(g1.peers)
	g1.mu.Unlock()
	g2.mu.Lock()
	g2Peers := len(g2.peers)
	g2.mu.Unlock()
	if g1Peers == 0 || g2Peers == 0 {
		t.Fatal("Peers did not connect to each other")
	}

	// Call "recv" in a tight loop. Check that the number of successful calls
	// does not exceed the ratelimit.
	start := time.Now()
	var wg sync.WaitGroup
	targetDuration := rpcStdDeadline * 4 / 3
	maxCallsForDuration := targetDuration / peerRPCDelay
	callVolume := int(maxCallsForDuration * 3 / 5)
	for i := 0; i < callVolume; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Call an RPC on our peer.
			// Error is ignored, as many are expected
			// and indicate that the test is working.
			_ = g1.RPC(g2.Address(), "recv", func(conn modules.PeerConn) error {
				buf := make([]byte, 2)
				_, err := conn.Read(buf)
				if err != nil {
					return err
				}
				if string(buf) != "hi" {
					return errors.New("caller rpc failed")
				}
				return nil
			})
		}()

		// Sleep for a little bit so that the connections are coming all in a
		// row instead of all at once. But sleep for little enough time that the
		// number of connections is still far surpassing the allowed ratelimit.
		time.Sleep(peerRPCDelay / 10)
	}
	wg.Wait()

	stop := time.Now()
	elapsed := stop.Sub(start)
	expected := peerRPCDelay * (time.Duration(atomic.LoadUint64(&atomicCalls)) + 1)
	if elapsed*10/9 < expected {
		t.Error("ratelimit does not seem to be effective", expected, elapsed)
	}
}
Sia-1.3.0/modules/gateway/stream.go000066400000000000000000000046701313565667000172050ustar00rootroot00000000000000package gateway

import (
	"net"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/muxado"
	"github.com/xtaci/smux"
)

// A streamSession is a multiplexed transport that can accept or initiate
// streams.
type streamSession interface {
	Accept() (net.Conn, error)
	Open() (net.Conn, error)
	Close() error
}

// newClientStream returns a new client stream, with a protocol that works on
// top of the TCP connection: smux for versions >= 1.3.0, muxado otherwise.
func newClientStream(conn net.Conn, version string) streamSession {
	if build.VersionCmp(version, sessionUpgradeVersion) >= 0 {
		return newSmuxClient(conn)
	}
	return newMuxadoClient(conn)
}

// newServerStream returns a new server stream, with a protocol that works on
// top of the TCP connection: smux for versions >= 1.3.0, muxado otherwise.
func newServerStream(conn net.Conn, version string) streamSession {
	if build.VersionCmp(version, sessionUpgradeVersion) >= 0 {
		return newSmuxServer(conn)
	}
	return newMuxadoServer(conn)
}

// muxado's Session methods do not return a net.Conn, but rather a
// muxado.Stream, necessitating an adaptor.
type muxadoSession struct {
	sess muxado.Session
}

func (m muxadoSession) Accept() (net.Conn, error) { return m.sess.Accept() }
func (m muxadoSession) Open() (net.Conn, error)   { return m.sess.Open() }
func (m muxadoSession) Close() error              { return m.sess.Close() }

func newMuxadoServer(conn net.Conn) streamSession {
	return muxadoSession{muxado.Server(conn)}
}

func newMuxadoClient(conn net.Conn) streamSession {
	return muxadoSession{muxado.Client(conn)}
}

// smux's Session methods do not return a net.Conn, but rather a
// smux.Stream, necessitating an adaptor.
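//
// Which multiplexer a session uses is decided entirely by the peer's
// version: for example, newClientStream(conn, "1.3.0") yields a smux-backed
// session, while newClientStream(conn, "1.2.2") falls back to muxado.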
type smuxSession struct { sess *smux.Session } func (s smuxSession) Accept() (net.Conn, error) { return s.sess.AcceptStream() } func (s smuxSession) Open() (net.Conn, error) { return s.sess.OpenStream() } func (s smuxSession) Close() error { return s.sess.Close() } func newSmuxServer(conn net.Conn) streamSession { sess, err := smux.Server(conn, nil) // default config means no error is possible if err != nil { build.Critical("smux should not fail with default config:", err) } return smuxSession{sess} } func newSmuxClient(conn net.Conn) streamSession { sess, err := smux.Client(conn, nil) // default config means no error is possible if err != nil { build.Critical("smux should not fail with default config:", err) } return smuxSession{sess} } Sia-1.3.0/modules/gateway/testdata/000077500000000000000000000000001313565667000171655ustar00rootroot00000000000000Sia-1.3.0/modules/gateway/testdata/TestLoadv033/000077500000000000000000000000001313565667000213205ustar00rootroot00000000000000Sia-1.3.0/modules/gateway/testdata/TestLoadv033/nodes.json000066400000000000000000000004531313565667000233250ustar00rootroot00000000000000"Sia Node List" "0.3.3" [ "[2009:0:9d38:90d7:10a0:31ab:b4ac:5b9a]:9981", "11.22.33.21:9981", "57.39.64.7:9981", "75.14.173.18:9981", "[2012:57f3:7bd::57f3:7bd]:9981", "[2700:3c03::f03c:91ff:fe3b:5e3d]:9981", "151.69.120.71:9981", "66.204.102.193:9981", "8.8.8.8:9981", "192.168.0.1:9981" ]Sia-1.3.0/modules/gateway/upnp.go000066400000000000000000000061121313565667000166650ustar00rootroot00000000000000package gateway import ( "errors" "io" "io/ioutil" "net" "net/http" "strconv" "strings" "time" "github.com/NebulousLabs/go-upnp" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" ) // myExternalIP discovers the gateway's external IP by querying a centralized // service, http://myexternalip.com. func myExternalIP() (string, error) { // timeout after 10 seconds client := http.Client{Timeout: time.Duration(10 * time.Second)} resp, err := client.Get("http://myexternalip.com/raw") if err != nil { return "", err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { errResp, _ := ioutil.ReadAll(resp.Body) return "", errors.New(string(errResp)) } buf, err := ioutil.ReadAll(io.LimitReader(resp.Body, 64)) if err != nil { return "", err } if len(buf) == 0 { return "", errors.New("myexternalip.com returned a 0 length IP address") } // trim newline return strings.TrimSpace(string(buf)), nil } // threadedLearnHostname discovers the external IP of the Gateway. Once the IP // has been discovered, it registers the ShareNodes RPC to be called on new // connections, advertising the IP to other nodes. func (g *Gateway) threadedLearnHostname() { if err := g.threads.Add(); err != nil { return } defer g.threads.Done() if build.Release == "testing" { return } // try UPnP first, then fallback to myexternalip.com var host string d, err := upnp.Discover() if err == nil { host, err = d.ExternalIP() } if err != nil { host, err = myExternalIP() } if err != nil { g.log.Println("WARN: failed to discover external IP:", err) return } g.mu.RLock() addr := modules.NetAddress(net.JoinHostPort(host, g.port)) g.mu.RUnlock() if err := addr.IsValid(); err != nil { g.log.Printf("WARN: discovered hostname %q is invalid: %v", addr, err) return } g.mu.Lock() g.myAddr = addr g.mu.Unlock() g.log.Println("INFO: our address is", addr) } // threadedForwardPort adds a port mapping to the router. 
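//
// Forwarding is best-effort: failures are only logged. A rough sketch of the
// underlying calls (9981 is just an example port):
//
//	d, err := upnp.Discover()        // find a UPnP-enabled router, if any
//	err = d.Forward(9981, "Sia RPC") // request the mapping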
func (g *Gateway) threadedForwardPort(port string) { if err := g.threads.Add(); err != nil { return } defer g.threads.Done() if build.Release == "testing" { return } d, err := upnp.Discover() if err != nil { g.log.Printf("WARN: could not automatically forward port %s: no UPnP-enabled devices found: %v", port, err) return } portInt, _ := strconv.Atoi(port) err = d.Forward(uint16(portInt), "Sia RPC") if err != nil { g.log.Printf("WARN: could not automatically forward port %s: %v", port, err) return } g.log.Println("INFO: successfully forwarded port", port) // Establish port-clearing at shutdown. g.threads.AfterStop(func() { g.managedClearPort(port) }) } // managedClearPort removes a port mapping from the router. func (g *Gateway) managedClearPort(port string) { if build.Release == "testing" { return } d, err := upnp.Discover() if err != nil { return } portInt, _ := strconv.Atoi(port) err = d.Clear(uint16(portInt)) if err != nil { g.log.Printf("WARN: could not automatically unforward port %s: %v", port, err) return } g.log.Println("INFO: successfully unforwarded port", port) } Sia-1.3.0/modules/host.go000066400000000000000000000170641313565667000152270ustar00rootroot00000000000000package modules import ( "github.com/NebulousLabs/Sia/types" ) const ( // HostDir names the directory that contains the host persistence. HostDir = "host" ) var ( // BytesPerTerabyte is the conversion rate between bytes and terabytes. BytesPerTerabyte = types.NewCurrency64(1e12) // BlockBytesPerMonthTerabyte is the conversion rate between block-bytes and month-TB. BlockBytesPerMonthTerabyte = BytesPerTerabyte.Mul64(4320) // HostWorkingStatusChecking is returned from WorkingStatus() if the host is // still determining if it is working, that is, if settings calls are // incrementing. HostWorkingStatusChecking = HostWorkingStatus("checking") // HostWorkingStatusNotWorking is returned from WorkingStatus() if the host // has not received any settings calls over the duration of // workingStatusFrequency. HostWorkingStatusNotWorking = HostWorkingStatus("not working") // HostWorkingStatusWorking is returned from WorkingStatus() if the host has // received more than workingThreshold settings calls over the duration of // workingStatusFrequency. HostWorkingStatusWorking = HostWorkingStatus("working") // HostConnectabilityStatusChecking is returned from ConnectabilityStatus() // if the host is still determining if it is connectable. HostConnectabilityStatusChecking = HostConnectabilityStatus("checking") // HostConnectabilityStatusConnectable is returned from // ConnectabilityStatus() if the host is connectable at its configured // netaddress. HostConnectabilityStatusConnectable = HostConnectabilityStatus("connectable") // HostConnectabilityStatusNotConnectable is returned from // ConnectabilityStatus() if the host is not connectable at its configured // netaddress. HostConnectabilityStatusNotConnectable = HostConnectabilityStatus("not connectable") ) type ( // HostFinancialMetrics provides financial statistics for the host, // including money that is locked in contracts. Though verbose, these // statistics should provide a clear picture of where the host's money is // currently being used. The front end can consolidate stats where desired. // Potential revenue refers to revenue that is available in a file // contract for which the file contract window has not yet closed. HostFinancialMetrics struct { // Every time a renter forms a contract with a host, a contract fee is // paid by the renter. 
These stats track the total contract fees.
		ContractCount                 uint64         `json:"contractcount"`
		ContractCompensation          types.Currency `json:"contractcompensation"`
		PotentialContractCompensation types.Currency `json:"potentialcontractcompensation"`

		// Metrics related to storage proofs, collateral, and submitting
		// transactions to the blockchain.
		LockedStorageCollateral types.Currency `json:"lockedstoragecollateral"`
		LostRevenue             types.Currency `json:"lostrevenue"`
		LostStorageCollateral   types.Currency `json:"loststoragecollateral"`
		PotentialStorageRevenue types.Currency `json:"potentialstoragerevenue"`
		RiskedStorageCollateral types.Currency `json:"riskedstoragecollateral"`
		StorageRevenue          types.Currency `json:"storagerevenue"`
		TransactionFeeExpenses  types.Currency `json:"transactionfeeexpenses"`

		// Bandwidth financial metrics.
		DownloadBandwidthRevenue          types.Currency `json:"downloadbandwidthrevenue"`
		PotentialDownloadBandwidthRevenue types.Currency `json:"potentialdownloadbandwidthrevenue"`
		PotentialUploadBandwidthRevenue   types.Currency `json:"potentialuploadbandwidthrevenue"`
		UploadBandwidthRevenue            types.Currency `json:"uploadbandwidthrevenue"`
	}

	// HostInternalSettings contains a list of settings that can be changed.
	HostInternalSettings struct {
		AcceptingContracts   bool              `json:"acceptingcontracts"`
		MaxDownloadBatchSize uint64            `json:"maxdownloadbatchsize"`
		MaxDuration          types.BlockHeight `json:"maxduration"`
		MaxReviseBatchSize   uint64            `json:"maxrevisebatchsize"`
		NetAddress           NetAddress        `json:"netaddress"`
		WindowSize           types.BlockHeight `json:"windowsize"`

		Collateral       types.Currency `json:"collateral"`
		CollateralBudget types.Currency `json:"collateralbudget"`
		MaxCollateral    types.Currency `json:"maxcollateral"`

		MinContractPrice          types.Currency `json:"mincontractprice"`
		MinDownloadBandwidthPrice types.Currency `json:"mindownloadbandwidthprice"`
		MinStoragePrice           types.Currency `json:"minstorageprice"`
		MinUploadBandwidthPrice   types.Currency `json:"minuploadbandwidthprice"`
	}

	// HostNetworkMetrics reports the quantity of each type of RPC call that
	// has been made to the host.
	HostNetworkMetrics struct {
		DownloadCalls     uint64 `json:"downloadcalls"`
		ErrorCalls        uint64 `json:"errorcalls"`
		FormContractCalls uint64 `json:"formcontractcalls"`
		RenewCalls        uint64 `json:"renewcalls"`
		ReviseCalls       uint64 `json:"revisecalls"`
		SettingsCalls     uint64 `json:"settingscalls"`
		UnrecognizedCalls uint64 `json:"unrecognizedcalls"`
	}

	// StorageObligation contains information about a storage obligation that
	// the host has accepted.
	StorageObligation struct {
		NegotiationHeight   types.BlockHeight `json:"negotiationheight"`
		OriginConfirmed     bool              `json:"originconfirmed"`
		RevisionConstructed bool              `json:"revisionconstructed"`
		RevisionConfirmed   bool              `json:"revisionconfirmed"`
		ProofConstructed    bool              `json:"proofconstructed"`
		ProofConfirmed      bool              `json:"proofconfirmed"`
		ObligationStatus    uint64            `json:"obligationstatus"`
	}

	// HostWorkingStatus reports the working state of a host. Can be one of
	// "checking", "working", or "not working".
	HostWorkingStatus string

	// HostConnectabilityStatus reports the connectability state of a host. Can
	// be one of "checking", "connectable", or "not connectable".
	HostConnectabilityStatus string

	// A Host can take storage from disk and offer it to the network, managing
	// things such as announcements, settings, and implementing all of the RPCs
	// of the host protocol.
	Host interface {
		// Announce submits a host announcement to the blockchain.
		Announce() error

		// AnnounceAddress submits an announcement using the given address.
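		// The announcement is a signed entry placed in the arbitrary data of
		// an ordinary transaction, so it becomes visible to the rest of the
		// network once that transaction is confirmed.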
AnnounceAddress(NetAddress) error // ExternalSettings returns the settings of the host as seen by an // untrusted node querying the host for settings. ExternalSettings() HostExternalSettings // FinancialMetrics returns the financial statistics of the host. FinancialMetrics() HostFinancialMetrics // InternalSettings returns the host's internal settings, including // potentially private or sensitive information. InternalSettings() HostInternalSettings // NetworkMetrics returns information on the types of RPC calls that // have been made to the host. NetworkMetrics() HostNetworkMetrics // PublicKey returns the public key of the host. PublicKey() types.SiaPublicKey // SetInternalSettings sets the hosting parameters of the host. SetInternalSettings(HostInternalSettings) error // StorageObligations returns the set of storage obligations held by // the host. StorageObligations() []StorageObligation // ConnectabilityStatus returns the connectability status of the host, that // is, if it can connect to itself on the configured NetAddress. ConnectabilityStatus() HostConnectabilityStatus // WorkingStatus returns the working state of the host, determined by if // settings calls are increasing. WorkingStatus() HostWorkingStatus // The storage manager provides an interface for adding and removing // storage folders and data sectors to the host. StorageManager } ) Sia-1.3.0/modules/host/000077500000000000000000000000001313565667000146705ustar00rootroot00000000000000Sia-1.3.0/modules/host/announce.go000066400000000000000000000077711313565667000170410ustar00rootroot00000000000000package host import ( "errors" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" ) var ( // errAnnWalletLocked is returned during a host announcement if the wallet // is locked. errAnnWalletLocked = errors.New("cannot announce the host while the wallet is locked") // errUnknownAddress is returned if the host is unable to determine a // public address for itself to use in the announcement. errUnknownAddress = errors.New("host cannot announce, does not seem to have a valid address.") ) // managedAnnounce creates an announcement transaction and submits it to the network. func (h *Host) managedAnnounce(addr modules.NetAddress) error { // The wallet needs to be unlocked to add fees to the transaction, and the // host needs to have an active unlock hash that renters can make payment // to. if !h.wallet.Unlocked() { return errAnnWalletLocked } h.mu.Lock() pubKey := h.publicKey secKey := h.secretKey err := h.checkUnlockHash() h.mu.Unlock() if err != nil { return err } // Create the announcement that's going to be added to the arbitrary data // field of the transaction. signedAnnouncement, err := modules.CreateAnnouncement(addr, pubKey, secKey) if err != nil { return err } // Create a transaction, with a fee, that contains the full announcement. txnBuilder := h.wallet.StartTransaction() _, fee := h.tpool.FeeEstimation() fee = fee.Mul64(600) // Estimated txn size (in bytes) of a host announcement. err = txnBuilder.FundSiacoins(fee) if err != nil { txnBuilder.Drop() return err } _ = txnBuilder.AddMinerFee(fee) _ = txnBuilder.AddArbitraryData(signedAnnouncement) txnSet, err := txnBuilder.Sign(true) if err != nil { txnBuilder.Drop() return err } // Add the transactions to the transaction pool. 
err = h.tpool.AcceptTransactionSet(txnSet) if err != nil { txnBuilder.Drop() return err } h.mu.Lock() h.announced = true h.mu.Unlock() h.log.Printf("INFO: Successfully announced as %v", addr) return nil } // Announce creates a host announcement transaction. func (h *Host) Announce() error { err := h.tg.Add() if err != nil { return err } defer h.tg.Done() // Grab the internal net address and internal auto address, and compare // them. h.mu.RLock() userSet := h.settings.NetAddress autoSet := h.autoAddress h.mu.RUnlock() // Check that we have at least one address to work with. if userSet == "" && autoSet == "" { return build.ExtendErr("cannot announce because address could not be determined", err) } // Prefer using the userSet address, otherwise use the automatic address. var annAddr modules.NetAddress if userSet != "" { annAddr = userSet } else { annAddr = autoSet } // Check that the address is sane, and that the address is also not local. err = annAddr.IsStdValid() if err != nil { return build.ExtendErr("announcement requested with bad net address", err) } if annAddr.IsLocal() && build.Release != "testing" { return errors.New("announcement requested with local net address") } // Address has cleared inspection, perform the announcement. return h.managedAnnounce(annAddr) } // AnnounceAddress submits a host announcement to the blockchain to announce a // specific address. If there is no error, the host's address will be updated // to the supplied address. func (h *Host) AnnounceAddress(addr modules.NetAddress) error { err := h.tg.Add() if err != nil { return err } defer h.tg.Done() // Check that the address is sane, and that the address is also not local. err = addr.IsStdValid() if err != nil { return build.ExtendErr("announcement requested with bad net address", err) } if addr.IsLocal() { return errors.New("announcement requested with local net address") } // Attempt the actual announcement. err = h.managedAnnounce(addr) if err != nil { return build.ExtendErr("unable to perform manual host announcement", err) } // Address is valid, update the host's internal net address to match the // specified addr. h.mu.Lock() h.settings.NetAddress = addr h.mu.Unlock() return nil } Sia-1.3.0/modules/host/announce_test.go000066400000000000000000000067241313565667000200750ustar00rootroot00000000000000package host import ( "bytes" "testing" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // announcementFinder is a quick module that parses the blockchain for host // announcements, keeping a record of all the announcements that get found. type announcementFinder struct { cs modules.ConsensusSet // Announcements that have been seen. The two slices are wedded. netAddresses []modules.NetAddress publicKeys []types.SiaPublicKey } // ProcessConsensusChange receives consensus changes from the consensus set and // parses them for valid host announcements. func (af *announcementFinder) ProcessConsensusChange(cc modules.ConsensusChange) { for _, block := range cc.AppliedBlocks { for _, txn := range block.Transactions { for _, arb := range txn.ArbitraryData { addr, pubKey, err := modules.DecodeAnnouncement(arb) if err == nil { af.netAddresses = append(af.netAddresses, addr) af.publicKeys = append(af.publicKeys, pubKey) } } } } } // Close will shut down the announcement finder. func (af *announcementFinder) Close() error { af.cs.Unsubscribe(af) return nil } // newAnnouncementFinder will create and return an announcement finder. 
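// A typical use, mirroring the tests below (sketch):
//
//	af, err := newAnnouncementFinder(cs)
//	if err != nil {
//		t.Fatal(err)
//	}
//	defer af.Close()
//	// mine a block containing the announcement, then inspect
//	// af.netAddresses and af.publicKeys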
func newAnnouncementFinder(cs modules.ConsensusSet) (*announcementFinder, error) { af := &announcementFinder{ cs: cs, } err := cs.ConsensusSetSubscribe(af, modules.ConsensusChangeBeginning) if err != nil { return nil, err } return af, nil } // TestHostAnnounce checks that the host announce function is operating // correctly. func TestHostAnnounce(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() ht, err := newHostTester("TestHostAnnounce") if err != nil { t.Fatal(err) } defer ht.Close() // Create an announcement finder to scan the blockchain for host // announcements. af, err := newAnnouncementFinder(ht.cs) if err != nil { t.Fatal(err) } defer af.Close() // Create an announcement, then use the address finding module to scan the // blockchain for the host's address. err = ht.host.Announce() if err != nil { t.Fatal(err) } _, err = ht.miner.AddBlock() if err != nil { t.Fatal(err) } if len(af.publicKeys) != 1 { t.Fatal("could not find host announcement in blockchain") } if af.netAddresses[0] != ht.host.autoAddress { t.Error("announcement has wrong address") } if !bytes.Equal(af.publicKeys[0].Key, ht.host.publicKey.Key) { t.Error("announcement has wrong host key") } } // TestHostAnnounceAddress checks that the host announce address function is // operating correctly. func TestHostAnnounceAddress(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() ht, err := newHostTester("TestHostAnnounceAddress") if err != nil { t.Fatal(err) } defer ht.Close() // Create an announcement finder to scan the blockchain for host // announcements. af, err := newAnnouncementFinder(ht.cs) if err != nil { t.Fatal(err) } defer af.Close() // Create an announcement, then use the address finding module to scan the // blockchain for the host's address. addr := modules.NetAddress("foo.com:1234") err = ht.host.AnnounceAddress(addr) if err != nil { t.Fatal(err) } _, err = ht.miner.AddBlock() if err != nil { t.Fatal(err) } if len(af.netAddresses) != 1 { t.Fatal("could not find host announcement in blockchain") } if af.netAddresses[0] != addr { t.Error("announcement has wrong address") } if !bytes.Equal(af.publicKeys[0].Key, ht.host.publicKey.Key) { t.Error("announcement has wrong host key") } } Sia-1.3.0/modules/host/consts.go000066400000000000000000000262311313565667000165340ustar00rootroot00000000000000package host import ( "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "time" ) const ( // defaultMaxDuration defines the maximum number of blocks into the future // that the host will accept for the duration of an incoming file contract // obligation. 6 months is chosen because hosts are expected to be // long-term entities, and because we want to have a set of hosts that // support 6 month contracts when Sia leaves beta. defaultMaxDuration = 144 * 30 * 6 // 6 months. // fileContractNegotiationTimeout indicates the amount of time that a // renter has to negotiate a file contract with the host. A timeout is // necessary to limit the impact of DoS attacks. fileContractNegotiationTimeout = 120 * time.Second // iteratedConnectionTime is the amount of time that is allowed to pass // before the host will stop accepting new iterations on an iterated // connection. iteratedConnectionTime = 1200 * time.Second // resubmissionTimeout defines the number of blocks that a host will wait // before attempting to resubmit a transaction to the blockchain. 
// Typically, this transaction will contain either a file contract, a file // contract revision, or a storage proof. resubmissionTimeout = 3 ) var ( // defaultCollateral defines the amount of money that the host puts up as // collateral per-byte by default. The collateral should be considered as // an absolute instead of as a percentage, because low prices result in // collaterals which may be significant by percentage, but insignificant // overall. A default of 100 SC / TB / Month has been chosen, which is 2x // the default price for storage. The host is expected to put up a // significant amount of collateral as a commitment to faithfulness, // because this guarantees that the incentives are aligned for the host to // keep the data even if the price of siacoin fluctuates, the price of raw // storage fluctuates, or the host realizes that there is unexpected // opportunity cost in being a host. defaultCollateral = types.SiacoinPrecision.Mul64(100).Div(modules.BlockBytesPerMonthTerabyte) // 100 SC / TB / Month // defaultCollateralBudget defines the maximum number of siacoins that the // host is going to allocate towards collateral. The budget has been chosen // to be large, but not so large that someone would be furious about losing // access to it for a few weeks. defaultCollateralBudget = types.SiacoinPrecision.Mul64(100e3) // defaultContractPrice defines the default price of creating a contract // with the host. The default is set to 3 siacoins, of which half can be // put towards the transaction fees of the file contract revision, and half // towards the fees of the storage proof. defaultContractPrice = types.SiacoinPrecision.Mul64(3) // 3 siacoins // defaultDownloadBandwidthPrice defines the default price of download // bandwidth. The default is set to 25 siacoins per terabyte, because // download bandwidth is expected to be plentiful but also in-demand. defaultDownloadBandwidthPrice = types.SiacoinPrecision.Mul64(25).Div(modules.BytesPerTerabyte) // 25 SC / TB // defaultMaxDownloadBatchSize defines the maximum number of bytes that the // host will allow to be requested by a single download request. 17 MiB has // been chosen because it's 4 full sectors plus some wiggle room. 17 MiB is // a conservative default; most hosts will be fine with a number like 65 // MiB. defaultMaxDownloadBatchSize = 17 * (1 << 20) // defaultMaxReviseBatchSize defines the maximum number of bytes that the // host will allow to be sent during a single batch update in a revision // RPC. 17 MiB has been chosen because it's four full sectors, plus some // wiggle room for the extra data or a few delete operations. The whole // batch will be held in memory, so the batch size should only be increased // substantially if the host has a lot of memory. Additionally, the whole // batch is sent over one network connection, and the renter can steal // funds for upload bandwidth all the way out to the size of a batch. // 17 MiB is a conservative default; most hosts are likely to be just fine // with a number like 65 MiB. defaultMaxReviseBatchSize = 17 * (1 << 20) // defaultMaxCollateral defines the maximum amount of collateral that the // host is comfortable putting into a single file contract. 5e3 siacoins is // a relatively small amount for a single file contract, but millions of // siacoins could be locked away by only a few hundred file contracts. As // the ecosystem matures, it is expected that the safe default for this // value will increase quite a bit.
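// For intuition, at the default collateral rate of 100 SC / TB / Month
// defined above, a contract storing 1 TB for 3 months locks roughly 300 SC.
// A sketch of the arithmetic, assuming modules.BlockBytesPerMonthTerabyte
// is one terabyte multiplied by roughly 4320 blocks (about one month):
//
//	perByteBlock := types.SiacoinPrecision.Mul64(100).Div(modules.BlockBytesPerMonthTerabyte)
//	locked := perByteBlock.Mul64(1e12).Mul64(3 * 4320) // ~300 SC, far below the cap below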
defaultMaxCollateral = types.SiacoinPrecision.Mul64(5e3) // defaultStoragePrice defines the starting price for hosts selling // storage. We try to match a number that is both reasonably profitable and // reasonably competitive. defaultStoragePrice = types.SiacoinPrecision.Mul64(50).Div(modules.BlockBytesPerMonthTerabyte) // 50 SC / TB / Month // defaultUploadBandwidthPrice defines the default price of upload // bandwidth. The default is set to 1 siacoin per GB, because the host is // presumed to have a large amount of downstream bandwidth. Furthermore, // the host is typically only downloading data if it is planning to store // the data, meaning that the host serves to profit from accepting the // data. defaultUploadBandwidthPrice = types.SiacoinPrecision.Mul64(1).Div(modules.BytesPerTerabyte) // 1 SC / TB // workingStatusFirstCheck defines how frequently the Host's working status // check runs workingStatusFirstCheck = build.Select(build.Var{ Standard: time.Minute * 3, Dev: time.Minute * 1, Testing: time.Second * 3, }).(time.Duration) // workingStatusFrequency defines how frequently the Host's working status // check runs workingStatusFrequency = build.Select(build.Var{ Standard: time.Minute * 10, Dev: time.Minute * 5, Testing: time.Second * 10, }).(time.Duration) // workingStatusThreshold defines how many settings calls must occur over the // workingStatusFrequency for the host to be considered working. workingStatusThreshold = build.Select(build.Var{ Standard: uint64(3), Dev: uint64(1), Testing: uint64(1), }).(uint64) // connectablityCheckFirstWait defines how often the host's connectability // check is run. connectabilityCheckFirstWait = build.Select(build.Var{ Standard: time.Minute * 2, Dev: time.Minute * 1, Testing: time.Second * 3, }).(time.Duration) // connectablityCheckFrequency defines how often the host's connectability // check is run. connectabilityCheckFrequency = build.Select(build.Var{ Standard: time.Minute * 10, Dev: time.Minute * 5, Testing: time.Second * 10, }).(time.Duration) // connectabilityCheckTimeout defines how long a connectability check's dial // will be allowed to block before it times out. connectabilityCheckTimeout = build.Select(build.Var{ Standard: time.Minute * 2, Dev: time.Minute * 5, Testing: time.Second * 90, }).(time.Duration) // defaultWindowSize is the size of the proof of storage window requested // by the host. The host will not delete any obligations until the window // has closed and buried under several confirmations. For release builds, // the default is set to 144 blocks, or about 1 day. This gives the host // flexibility to experience downtime without losing file contracts. The // optimal default, especially as the network matures, is probably closer // to 36 blocks. An experienced or high powered host should not be // frustrated by lost coins due to long periods of downtime. defaultWindowSize = build.Select(build.Var{ Dev: types.BlockHeight(36), // 3.6 minutes. Standard: types.BlockHeight(144), // 1 day. Testing: types.BlockHeight(5), // 5 seconds. }).(types.BlockHeight) // logAllLimit is the number of errors of each type that the host will log // before switching to probabilistic logging. If there are not many errors, // it is reasonable that all errors get logged. If there are lots of // errors, to cut down on the noise only some of the errors get logged. 
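// Like most tunables in this file, the limit is selected per release type
// via the build.Select pattern, which evaluates to a different value for
// Standard, Dev, and Testing builds and is unwrapped with a type assertion,
// as in the declaration that follows:
//
//	limit := build.Select(build.Var{
//		Dev:      uint64(50),
//		Standard: uint64(250),
//		Testing:  uint64(100),
//	}).(uint64)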
logAllLimit = build.Select(build.Var{ Dev: uint64(50), Standard: uint64(250), Testing: uint64(100), }).(uint64) // logFewLimit is the number of errors of each type that the host will log // before substantially constricting the amount of logging that it is // doing. logFewLimit = build.Select(build.Var{ Dev: uint64(500), Standard: uint64(2500), Testing: uint64(500), }).(uint64) // maximumLockedStorageObligations sets the maximum number of storage // obligations that are allowed to be locked at a time. The map uses an // in-memory lock, but also a locked storage obligation could be reading a // whole sector into memory, which could use a bunch of system resources. maximumLockedStorageObligations = build.Select(build.Var{ Dev: uint64(20), Standard: uint64(100), Testing: uint64(5), }).(uint64) // obligationLockTimeout defines how long a thread will wait to get a lock // on a storage obligation before timing out and reporting an error to the // renter. obligationLockTimeout = build.Select(build.Var{ Dev: time.Second * 20, Standard: time.Second * 60, Testing: time.Second * 3, }).(time.Duration) // revisionSubmissionBuffer describes the number of blocks ahead of time // that the host will submit a file contract revision. The host will not // accept any more revisions once inside the submission buffer. revisionSubmissionBuffer = build.Select(build.Var{ Dev: types.BlockHeight(20), // About 4 minutes Standard: types.BlockHeight(144), // 1 day. Testing: types.BlockHeight(4), }).(types.BlockHeight) // rpcRatelimit prevents someone from spamming the host with connections, // causing it to spin up enough goroutines to crash. rpcRatelimit = build.Select(build.Var{ Dev: time.Millisecond * 10, Standard: time.Millisecond * 50, Testing: time.Millisecond, }).(time.Duration) ) // All of the following variables define the names of buckets used by the host // in the database. var ( // bucketActionItems maps a blockchain height to a list of storage // obligations that need to be managed in some way at that height. The // height is stored as a big endian uint64, which means that bolt will // store the heights sorted in numerical order. The action item itself is // an array of file contract ids. The host is able to contextually figure // out what the necessary actions for that item are based on the file // contract id and the associated storage obligation that can be retrieved // using the id. bucketActionItems = []byte("BucketActionItems") // bucketStorageObligations contains a set of serialized // 'storageObligations' sorted by their file contract id. bucketStorageObligations = []byte("BucketStorageObligations") ) // init runs a series of sanity checks to verify that the constants have sane // values. func init() { // The revision submission buffer should be greater than the resubmission // timeout, because there should be time to perform resubmission if the // first attempt to submit the revision fails. 
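// With the constants defined above this always holds (in a standard build
// revisionSubmissionBuffer is 144 blocks and resubmissionTimeout is 3
// blocks); the check guards against future edits to either constant.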
if revisionSubmissionBuffer < resubmissionTimeout { build.Critical("revision submission buffer needs to be larger than or equal to the resubmission timeout") } } Sia-1.3.0/modules/host/contractmanager/000077500000000000000000000000001313565667000200405ustar00rootroot00000000000000Sia-1.3.0/modules/host/contractmanager/consts.go000066400000000000000000000107231313565667000217030ustar00rootroot00000000000000package contractmanager import ( "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/persist" ) const ( // logFile is the name of the file that is used for logging in the contract // manager. logFile = "contractmanager.log" // metadataFile is the name of the file that stores all of the sector // metadata associated with a storage folder. metadataFile = "siahostmetadata.dat" // settingsFile is the name of the file that is used to save the contract // manager's settings. settingsFile = "contractmanager.json" // settingsFileTmp is the name of the file that is used to hold unfinished // writes to the contract manager's settings. After this file is completed, // a copy-on-write operation is performed to make sure that the contract // manager's persistent settings are updated atomically. settingsFileTmp = "contractmanager.json_temp" // sectorFile is the file that is placed inside of a storage folder to // house all of the sectors associated with a storage folder. sectorFile = "siahostdata.dat" // walFile is the name of the file that is used to save the write ahead log // for the contract manager. walFile = "contractmanager.wal" // walFileTmp is used for incomplete writes to the WAL. Data could be // interrupted by power outages, etc., and is therefore written to a // temporary file before being atomically renamed to the correct name. walFileTmp = "contractmanager.wal_temp" ) const ( // folderAllocationStepSize is the amount of data that gets allocated at a // time when writing out the sparse sector file during a storageFolderAdd or // a storageFolderGrow. folderAllocationStepSize = 1 << 35 // sectorMetadataDiskSize defines the number of bytes it takes to store the // metadata of a single sector on disk. sectorMetadataDiskSize = 14 // storageFolderGranularity defines the number of sectors that a storage // folder must cleanly divide into. 64 sectors is a requirement due to the // way the storage folder bitfield (field 'Usage') is constructed - the // bitfield defines which sectors are available, and the bitfield must be // constructed 1 uint64 at a time (8 bytes, 64 bits, or 64 sectors). // // This corresponds to a granularity of 256 MiB on the production network, // which is a high granularity relative to the TiBs of storage that hosts // are expected to provide. storageFolderGranularity = 64 ) var ( // settingsMetadata is the header that is used when writing the contract // manager's settings to disk. settingsMetadata = persist.Metadata{ Header: "Sia Contract Manager", Version: "1.2.0", } // walMetadata is the header that is used when writing the write ahead log // to disk, so that it may be identified at startup. walMetadata = persist.Metadata{ Header: "Sia Contract Manager WAL", Version: "1.2.0", } ) var ( // maximumStorageFolders defines the maximum number of storage folders that // the host can support. maximumStorageFolders = build.Select(build.Var{ Dev: uint64(1 << 5), Standard: uint64(1 << 16), Testing: uint64(1 << 3), }).(uint64) // MaximumSectorsPerStorageFolder sets an upper bound on how large storage // folders in the host are allowed to be.
There is a hard limit at 4 // billion sectors because the sector location map only uses 4 bytes to // indicate the location of a sector. MaximumSectorsPerStorageFolder = build.Select(build.Var{ Dev: uint64(1 << 20), // 4 TiB Standard: uint64(1 << 32), // 32 PiB Testing: uint64(1 << 12), // 16 MiB }).(uint64) // MinimumSectorsPerStorageFolder defines the minimum number of sectors // that a storage folder is allowed to have. MinimumSectorsPerStorageFolder = build.Select(build.Var{ Dev: uint64(1 << 6), // 16 MiB Standard: uint64(1 << 6), // 512 MiB Testing: uint64(1 << 6), // 256 KiB }).(uint64) ) var ( // folderRecheckInitialInterval specifies the amount of time that the // contract manager will initially wait when checking to see if an // unavailable storage folder has become available. folderRecheckInitialInterval = build.Select(build.Var{ Dev: time.Second, Standard: time.Second * 5, Testing: time.Second, }).(time.Duration) // maxFolderRecheckInterval specifies the maximum amount of time that the // contract manager will wait between checking if an unavailable storage // folder has become available. maxFolderRecheckInterval = build.Select(build.Var{ Dev: time.Second * 30, Standard: time.Second * 60 * 5, Testing: time.Second * 8, }).(time.Duration) ) Sia-1.3.0/modules/host/contractmanager/contractmanager.go000066400000000000000000000213501313565667000235400ustar00rootroot00000000000000package contractmanager // TODO: Need to sync the directory after doing rename and create operations. // TODO: Use fallocate when adding + growing storage folders. // TODO: Long-running operations (add, empty) don't tally progress, and don't // indicate what operation is running. // TODO: Add disk failure testing. // TODO: Write some code into the production dependencies that will, during // testing, arbitrarily write less than the full data to a file until Sync() // has been called. That way, disruptions can effectively simulate partial // writes even though the disk writes are actually completing. // TODO: emptyStorageFolder should be able to move sectors into folders that // are being resized, into the sectors that are not affected by the resize. // TODO: Re-write the WAL to not need to do group syncing, and also to not need // to use the rename call at all. // TODO: When a storage folder is missing, operations on the sectors in that // storage folder (Add, Remove, Delete, etc.) may result in corruption and // inconsistent internal state for the contractor. For now, this is fine because // it's a rare situation, but it should be addressed eventually. import ( "errors" "path/filepath" "sync/atomic" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/persist" siasync "github.com/NebulousLabs/Sia/sync" ) // ContractManager is responsible for managing contracts that the host has with // renters, including storing the data, submitting storage proofs, and deleting // the data when a contract is complete. type ContractManager struct { // The contract manager controls many resources which are spread across // multiple files yet must all be consistent and durable. ACID properties // have been achieved by using a write-ahead-logger (WAL). The in-memory // state represents currently uncommitted data, however reading from the // uncommitted state does not threaten consistency. It is okay if the user // sees uncommitted data, so long as other ACID operations do not return // early. Any changes to the state must be documented in the WAL to prevent // inconsistency. 
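// A mutation therefore follows a write-ahead pattern. A minimal sketch of
// the idea (illustrative only; these method names are not the actual API of
// this package):
//
//	wal.mu.Lock()
//	wal.appendChange(change) // durably record the intent first
//	wal.mu.Unlock()
//	// apply the change to the in-memory state; the sync loop later
//	// flushes the WAL and commits the change to the settings file
//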
// The contract manager is highly concurrent. Most fields are protected by // the mutex in the WAL, but storage folders and sectors can be accessed // individually. A map of locked sectors ensures that each sector is only // accessed by one thread at a time, but allows many sectors across a // single file to be accessed concurrently. Any interaction with a sector // requires a sector lock. // // If sectors are being added to a storage folder, a readlock is required // on the storage folder. Reads and deletes do not require any locks on the // storage folder. If a storage folder operation is happening (add, resize, // remove), a writelock is required on the storage folder lock. // The contract manager is expected to be consistent, durable, atomic, and // error-free in the face of unclean shutdown and disk error. Failure of // the controlling disk (containing the settings file and WAL file) is not // tolerated and will cause a panic, but any disk failures for the storage // folders should be tolerated gracefully. Threads should perform complete // cleanup before returning, which can be achieved with threadgroups. // sectorSalt is a persistent security field that gets set the first time // the contract manager is initialized and then never gets touched again. // It's used to randomize the location on-disk that a sector gets stored, // so that an adversary cannot maliciously add sectors to specific disks, // or otherwise perform manipulations that may degrade performance. // // sectorLocations is a giant lookup table that keeps a mapping from every // sector in the host to the location on-disk where it is stored. For // performance information, see the BenchmarkSectorLocations docstring. // sectorLocations is persisted on disk through a combination of the WAL // and through metadata that is stored directly in each storage folder. // // The storageFolders field stores information about each storage folder, // including metadata about which sector slots are currently populated vs. // which sector slots are available. For performance information, see // BenchmarkStorageFolders. sectorSalt crypto.Hash sectorLocations map[sectorID]sectorLocation storageFolders map[uint16]*storageFolder // lockedSectors contains a list of sectors that are currently being read // or modified. lockedSectors map[sectorID]*sectorLock // Utilities. dependencies log *persist.Logger persistDir string tg siasync.ThreadGroup wal writeAheadLog } // Close will cleanly shut down the contract manager. func (cm *ContractManager) Close() error { return build.ExtendErr("error while stopping contract manager", cm.tg.Stop()) } // newContractManager returns a contract manager that is ready to be used with // the provided dependencies. func newContractManager(dependencies dependencies, persistDir string) (*ContractManager, error) { cm := &ContractManager{ storageFolders: make(map[uint16]*storageFolder), sectorLocations: make(map[sectorID]sectorLocation), lockedSectors: make(map[sectorID]*sectorLock), dependencies: dependencies, persistDir: persistDir, } cm.wal.cm = cm dependencies.init() cm.tg.AfterStop(func() { dependencies.destruct() }) // Perform clean shutdown of already-initialized features if startup fails. var err error defer func() { if err != nil { err1 := build.ExtendErr("error during contract manager startup", err) err2 := build.ExtendErr("error while stopping a partially started contract manager", cm.tg.Stop()) err = build.ComposeErrors(err1, err2) } }() // Create the persist directory if it does not yet exist.
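// mkdirAll succeeds without side effects when the directory already
// exists, so repeated startups are safe.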
err = dependencies.mkdirAll(cm.persistDir, 0700) if err != nil { return nil, build.ExtendErr("error while creating the persist directory for the contract manager", err) } // Logger is always the first thing initialized. cm.log, err = dependencies.newLogger(filepath.Join(cm.persistDir, logFile)) if err != nil { return nil, build.ExtendErr("error while creating the logger for the contract manager", err) } // Set up the clean shutdown of the logger. cm.tg.AfterStop(func() { err = build.ComposeErrors(cm.log.Close(), err) }) // Load the atomic state of the contract manager. Unclean shutdown may have // wiped out some changes that got made. Anything really important will be // recovered when the WAL is loaded. err = cm.loadSettings() if err != nil { cm.log.Println("ERROR: Unable to load contract manager settings:", err) return nil, build.ExtendErr("error while loading contract manager atomic data", err) } // Load the WAL, repairing any corruption caused by unclean shutdown. err = cm.wal.load() if err != nil { cm.log.Println("ERROR: Unable to load the contract manager write-ahead-log:", err) return nil, build.ExtendErr("error while loading the WAL at startup", err) } // Upon shudown, unload all of the files. cm.tg.AfterStop(func() { cm.wal.mu.Lock() defer cm.wal.mu.Unlock() for _, sf := range cm.storageFolders { // No storage folder to close if the folder is not available. if atomic.LoadUint64(&sf.atomicUnavailable) == 1 { // File handles will either already be closed or may even be // nil. continue } err = sf.metadataFile.Close() if err != nil { cm.log.Println("Error closing the storage folder file handle", err) } err = sf.sectorFile.Close() if err != nil { cm.log.Println("Error closing the storage folder file handle", err) } } }) // The sector location data is loaded last. Any corruption that happened // during unclean shutdown has already been fixed by the WAL. for _, sf := range cm.storageFolders { if atomic.LoadUint64(&sf.atomicUnavailable) == 1 { // Metadata unavailable, just count the number of sectors instead of // loading them. sf.sectors = uint64(len(usageSectors(sf.usage))) continue } cm.loadSectorLocations(sf) } // Launch the sync loop that periodically flushes changes from the WAL to // disk. err = cm.wal.spawnSyncLoop() if err != nil { cm.log.Println("ERROR: Unable to spawn the contract manager synchronization loop:", err) return nil, build.ExtendErr("error while spawning contract manager sync loop", err) } // Spin up the thread that continuously looks for missing storage folders // and adds them if they are discovered. go cm.threadedFolderRecheck() // Simulate an error to make sure the cleanup code is triggered correctly. if cm.dependencies.disrupt("erroredStartup") { err = errors.New("startup disrupted") return nil, err } return cm, nil } // New returns a new ContractManager. func New(persistDir string) (*ContractManager, error) { return newContractManager(new(productionDependencies), persistDir) } Sia-1.3.0/modules/host/contractmanager/contractmanager_bench_test.go000066400000000000000000000047721313565667000257470ustar00rootroot00000000000000package contractmanager import ( "sync" "testing" "github.com/NebulousLabs/fastrand" ) // BenchmarkSectorLocations explores the cost of creating the sectorLocations // map when there are 24 million elements to load. 24 million elements would // cover 96 TiB of data storage. // // On my t540p it takes about 10 seconds to create a map with 24 million // elements in it, via random insertions. 
The map appears to consume // approximately 1.2 GiB of RAM. In terms of performance, lock contention within // the contract manager is by far the bottleneck when compared to the cost of // interacting with massive maps. func BenchmarkSectorLocations(b *testing.B) { // Create a bunch of data to insert into the map - metadata equivalent to // storing 96 TiB in the contract manager. ids := make([][12]byte, 24e6) sectorLocations := make([]sectorLocation, 24e6) // Fill out the arrays in 8 threads. var wg sync.WaitGroup for i := 0; i < 8; i++ { wg.Add(1) go func(i int) { defer wg.Done() for j := i * 3e6; j < i*3e6+3e6; j++ { fastrand.Read(ids[j][:]) sectorLocations[j] = sectorLocation{ index: uint32(fastrand.Intn(1 << 32)), storageFolder: uint16(fastrand.Intn(1 << 16)), count: uint16(fastrand.Intn(1 << 16)), } } }(i) } wg.Wait() // Reset the timer and then benchmark the cost of doing 24 million // insertions into a map - equivalent to initializng the map for a host // storing 96 TiB of data. b.ResetTimer() for i := 0; i < b.N; i++ { m := make(map[sectorID]sectorLocation) for i := 0; i < 24e6; i++ { m[ids[i]] = sectorLocations[i] } } } // BenchmarkStorageFolders explores the cost of maintaining and updating a // massive usage array. The storageFolder object is small and fast with the // exception of the usage array, which is why benchmarking is focused on the // usage array. // // The usage array for 96 TiB of storage consumes less than 10 MB of RAM, far // dwarfed by the size of the corresponding sectorLocations map that is used to // support it. func BenchmarkStorageFolders(b *testing.B) { // Create a massive usage array, matching a 96 TiB storage folder on disk. // The array is a bit-array, so 24e6 sectors (96 TiB) is represented by // 375e3 usage elements. usage := make([]uint64, 375e3) // Fill the folder to ~99.99% capacity, which will degrade performance. for i := 0; i < 23999e3; i++ { randFreeSector(usage) } // Perform insertions and get a benchmark. b.ResetTimer() for i := 0; i < b.N; i++ { randFreeSector(usage) } } Sia-1.3.0/modules/host/contractmanager/contractmanager_test.go000066400000000000000000000107541313565667000246050ustar00rootroot00000000000000package contractmanager import ( "errors" "os" "path/filepath" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" ) // contractManagerTester holds a contract manager along with some other fields // useful for testing, and has methods implemented on it that can assist // testing. type contractManagerTester struct { cm *ContractManager persistDir string } // panicClose will attempt to call Close on the contract manager tester. If // there is an error, the function will panic. A convenient function for making // sure that the cleanup code is always running correctly, without needing to // write a lot of boiler code. func (cmt *contractManagerTester) panicClose() { err := cmt.Close() if err != nil { panic(err) } } // Close will perform clean shutdown on the contract manager tester. func (cmt *contractManagerTester) Close() error { if cmt.cm == nil { return errors.New("nil contract manager") } return cmt.cm.Close() } // newContractManagerTester returns a ready-to-rock contract manager tester. 
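// Typical usage in a test (sketch, mirroring the tests in this package):
//
//	cmt, err := newContractManagerTester(t.Name())
//	if err != nil {
//		t.Fatal(err)
//	}
//	defer cmt.panicClose()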
func newContractManagerTester(name string) (*contractManagerTester, error) { if testing.Short() { panic("use of newContractManagerTester during short testing") } testdir := build.TempDir(modules.ContractManagerDir, name) cm, err := New(filepath.Join(testdir, modules.ContractManagerDir)) if err != nil { return nil, err } cmt := &contractManagerTester{ cm: cm, persistDir: testdir, } return cmt, nil } // newMockedContractManagerTester returns a contract manager tester that uses // the input dependencies instead of the production ones. func newMockedContractManagerTester(d dependencies, name string) (*contractManagerTester, error) { if testing.Short() { panic("use of newContractManagerTester during short testing") } testdir := build.TempDir(modules.ContractManagerDir, name) cm, err := newContractManager(d, filepath.Join(testdir, modules.ContractManagerDir)) if err != nil { return nil, err } cmt := &contractManagerTester{ cm: cm, persistDir: testdir, } return cmt, nil } // TestNewContractManager does basic startup and shutdown of a contract // manager, checking for egregious errors. func TestNewContractManager(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // Create a contract manager. parentDir := build.TempDir(modules.ContractManagerDir, "TestNewContractManager") cmDir := filepath.Join(parentDir, modules.ContractManagerDir) cm, err := New(cmDir) if err != nil { t.Fatal(err) } // Close the contract manager. err = cm.Close() if err != nil { t.Fatal(err) } // Create a new contract manager using the same directory. cm, err = New(cmDir) if err != nil { t.Fatal(err) } // Close it again. err = cm.Close() if err != nil { t.Fatal(err) } } // dependencyErroredStartup is a mocked dependency that will cause the contract // manager to be returned with an error upon startup. type dependencyErroredStartup struct { productionDependencies } // disrupt will cause an error to be returned during startup, forcing the // partially started contract manager to shut down again. func (d *dependencyErroredStartup) disrupt(s string) bool { // Cause an error to be returned during startup. if s == "erroredStartup" { return true } return false } // TestNewContractManagerErroredStartup uses disruption to simulate an error // during startup, allowing the test to verify that the cleanup code ran // correctly. func TestNewContractManagerErroredStartup(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // Create a new contract manager where the startup gets disrupted. d := new(dependencyErroredStartup) testdir := build.TempDir(modules.ContractManagerDir, "TestNewContractManagerErroredStartup") cmd := filepath.Join(testdir, modules.ContractManagerDir) _, err := newContractManager(d, cmd) if err.Error() != "startup disrupted" { t.Fatal("expecting contract manager startup to be disrupted:", err) } // Verify that shutdown was triggered correctly - tmp files should be gone, // WAL file should also be gone.
walFileName := filepath.Join(cmd, walFile) walFileTmpName := filepath.Join(cmd, walFileTmp) settingsFileTmpName := filepath.Join(cmd, settingsFileTmp) _, err = os.Stat(walFileName) if !os.IsNotExist(err) { t.Error("file should have been removed:", err) } _, err = os.Stat(walFileTmpName) if !os.IsNotExist(err) { t.Error("file should have been removed:", err) } _, err = os.Stat(settingsFileTmpName) if !os.IsNotExist(err) { t.Error("file should have been removed:", err) } } Sia-1.3.0/modules/host/contractmanager/dependencies.go000066400000000000000000000170241313565667000230210ustar00rootroot00000000000000package contractmanager import ( "errors" "io" "os" "sync" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/fastrand" ) // Fake errors that get returned when a simulated failure of a dependency is // desired for testing. var ( mockErrListen = errors.New("simulated Listen failure") mockErrLoadFile = errors.New("simulated LoadFile failure") mockErrMkdirAll = errors.New("simulated MkdirAll failure") mockErrNewLogger = errors.New("simulated NewLogger failure") mockErrOpenDatabase = errors.New("simulated OpenDatabase failure") mockErrReadFile = errors.New("simulated ReadFile failure") mockErrRemoveFile = errors.New("simulated RemoveFile faulure") mockErrSymlink = errors.New("simulated Symlink failure") mockErrWriteFile = errors.New("simulated WriteFile failure") ) // These interfaces define the StorageManager's dependencies. Mocking // implementation complexity can be reduced by defining each dependency as the // minimum possible subset of the real dependency. type ( // dependencies defines all of the dependencies of the StorageManager. dependencies interface { // atLeastOne will return a value that is at least one. In production, // the value should always be one. This function is used to test the // idempotency of actions, so during testing sometimes the value // returned will be higher, causing an idempotent action to be // committed multiple times. If the action is truly idempotent, // committing it multiple times should not cause any problems or // changes. atLeastOne() uint64 // createFile gives the host the ability to create files on the // operating system. createFile(string) (file, error) // destruct will clean up the dependencies, panicking if there are // unclosed resources. destruct() // disrupt is a general purpose testing function which will return true // if a disruption is happening and false if a disruption is not. Most // frequently it is used to simulate power-failures by forcing some of // the code to terminate partway through. The string input can be used // by the testing code to distinguish between the many places where // production code can be disrupted. disrupt(string) bool // Init performs any necessary initialization for the set of // dependencies. init() // loadFile allows the host to load a persistence structure form disk. loadFile(persist.Metadata, interface{}, string) error // mkdirAll gives the host the ability to create chains of folders // within the filesystem. mkdirAll(string, os.FileMode) error // newLogger creates a logger that the host can use to log messages and // write critical statements. newLogger(string) (*persist.Logger, error) // openFile opens a file for the host. openFile(string, int, os.FileMode) (file, error) // removeFile removes a file from disk. removeFile(string) error // renameFile renames a file on disk to another name. 
renameFile(string, string) error } // file implements all of the methods that can be called on an os.File. file interface { io.ReadWriteCloser Name() string ReadAt([]byte, int64) (int, error) Sync() error Truncate(int64) error WriteAt([]byte, int64) (int, error) } ) type ( // productionDependencies implements all of the dependencies using full // featured libraries. productionDependencies struct { shouldInit bool openFiles map[string]int mu *sync.Mutex } // productionFile allows the production dependencies to track productionFile struct { pd *productionDependencies *os.File } ) // atLeastOne will return a value that is equal to 1 if debugging is disabled. // If debugging is enabled, a higher value may be returned. func (productionDependencies) atLeastOne() uint64 { if !build.DEBUG { return 1 } // Probabilistically return a number greater than one. var val uint64 for { val++ coin := fastrand.Intn(2) if coin == 0 { break } } return val } // createFile gives the host the ability to create files on the operating // system. func (pd *productionDependencies) createFile(s string) (file, error) { if !build.DEBUG { return os.Create(s) } f, err := os.Create(s) if err != nil { return f, err } pd.mu.Lock() v := pd.openFiles[s] pd.openFiles[s] = v + 1 pd.mu.Unlock() return &productionFile{ pd: pd, File: f, }, nil } // destruct checks that all resources have been cleaned up correctly. func (pd *productionDependencies) destruct() { if !build.DEBUG { return } pd.mu.Lock() l := len(pd.openFiles) pd.mu.Unlock() if l != 0 { panic("unclosed resources - most likely file handles") } } // disrupt will always return false when using the production dependencies, // because production code should never be intentionally disrupted. func (productionDependencies) disrupt(string) bool { return false } // init will create the map and mutex func (pd *productionDependencies) init() { if !build.DEBUG { return } if !pd.shouldInit { pd.shouldInit = true pd.openFiles = make(map[string]int) pd.mu = new(sync.Mutex) } } // loadFile allows the host to load a persistence structure form disk. func (productionDependencies) loadFile(m persist.Metadata, i interface{}, s string) error { return persist.LoadJSON(m, i, s) } // mkdirAll gives the host the ability to create chains of folders within the // filesystem. func (productionDependencies) mkdirAll(s string, fm os.FileMode) error { return os.MkdirAll(s, fm) } // newLogger creates a logger that the host can use to log messages and write // critical statements. func (productionDependencies) newLogger(s string) (*persist.Logger, error) { return persist.NewFileLogger(s) } // openFile opens a file for the contract manager. func (pd *productionDependencies) openFile(s string, i int, fm os.FileMode) (file, error) { if !build.DEBUG { return os.OpenFile(s, i, fm) } f, err := os.OpenFile(s, i, fm) if err != nil { return f, err } pd.mu.Lock() v := pd.openFiles[s] pd.openFiles[s] = v + 1 pd.mu.Unlock() return &productionFile{ pd: pd, File: f, }, nil } // removeFile will remove a file from disk. func (pd *productionDependencies) removeFile(s string) error { if !build.DEBUG { return os.Remove(s) } pd.mu.Lock() v, exists := pd.openFiles[s] pd.mu.Unlock() if exists && v > 0 { return errors.New("cannot remove the file, it's open somewhere else right now") } return os.Remove(s) } // renameFile renames a file on disk. 
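// In debug builds the rename is refused while either path is registered as
// open; this mimics Windows file semantics on Linux so that tests catch
// handle misuse that would otherwise only surface on Windows.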
func (pd *productionDependencies) renameFile(s1 string, s2 string) error { if !build.DEBUG { return os.Rename(s1, s2) } pd.mu.Lock() v1, exists1 := pd.openFiles[s1] v2, exists2 := pd.openFiles[s2] pd.mu.Unlock() if exists1 && v1 > 0 { return errors.New("cannot remove the file, it's open somewhere else right now") } if exists2 && v2 > 0 { return errors.New("cannot remove the file, it's open somewhere else right now") } return os.Rename(s1, s2) } // Close will close a file, checking whether the file handle is open somewhere // else before closing completely. This check is performed on Windows but not // Linux, therefore a mock is used to ensure that linux testing picks up // potential problems that would be seen on Windows. func (pf *productionFile) Close() error { if !build.DEBUG { return pf.File.Close() } pf.pd.mu.Lock() v, exists := pf.pd.openFiles[pf.File.Name()] if !exists { panic("file not registered") } if v == 1 { delete(pf.pd.openFiles, pf.File.Name()) } else if v > 1 { pf.pd.openFiles[pf.File.Name()] = v - 1 } else { panic("inconsistent state") } pf.pd.mu.Unlock() return pf.File.Close() } Sia-1.3.0/modules/host/contractmanager/dependencies_test.go000066400000000000000000000052431313565667000240600ustar00rootroot00000000000000package contractmanager import ( "bytes" "os" "path/filepath" "sync" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/fastrand" ) // TestParallelFileAccess using a single file handle + ReadAt and WriteAt to // write to multiple locations on a file in parallel, verifying that it's a // safe thing to do. func TestParallelFileAccess(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // Create the file that will be used in parallel. testdir := build.TempDir(modules.ContractManagerDir, "TestParallelFileAccess") err := os.MkdirAll(testdir, 0700) if err != nil { t.Fatal(err) } f, err := os.Create(filepath.Join(testdir, "parallelFile")) if err != nil { t.Fatal(err) } defer f.Close() // Create the data that will be writted to the file, such that it can be // verified later. writesPerThread := 200 numThreads := 500 dataSize := 163 // Intentionally overlaps sector boundaries. datas := make([][]byte, numThreads*writesPerThread) for i := 0; i < numThreads*writesPerThread; i++ { datas[i] = make([]byte, dataSize) fastrand.Read(datas[i]) } // Spin up threads to make concurrent writes to the file in different // locations. Have some reads + writes that are trying to overlap. threadingModifier := 71 var wg1 sync.WaitGroup var wg2 sync.WaitGroup for i := 0; i < numThreads; i++ { if i%threadingModifier == 0 { wg1.Add(1) } else { wg2.Add(1) } go func(i int) { if i%threadingModifier == 0 { defer wg1.Done() } else { defer wg2.Done() } for j := 0; j < writesPerThread; j++ { _, err := f.WriteAt(datas[i*j], int64(i*dataSize*j)) if err != nil { t.Error(err) } } }(i) } // Wait for the smaller set of first writes to complete. wg1.Wait() // Verify the results for the smaller set of writes. for i := 0; i < numThreads; i++ { if i%threadingModifier != 0 { continue } wg1.Add(1) go func(i int) { defer wg1.Done() for j := 0; j < writesPerThread; j++ { data := make([]byte, dataSize) _, err := f.ReadAt(data, int64(i*dataSize)) if err != nil { t.Error(err) } if !bytes.Equal(data, datas[i]) { t.Error("data mismatch for value", i) } } }(i) } wg1.Wait() wg2.Wait() // Verify the results for all of the writes. 
for i := 0; i < numThreads; i++ { wg1.Add(1) go func(i int) { defer wg1.Done() for j := 0; j < writesPerThread; j++ { data := make([]byte, dataSize) _, err := f.ReadAt(data, int64(i*dataSize)) if err != nil { t.Error(err) } if !bytes.Equal(data, datas[i]) { t.Error("data mismatch for value", i) } } }(i) } wg1.Wait() } Sia-1.3.0/modules/host/contractmanager/persist.go000066400000000000000000000125611313565667000220650ustar00rootroot00000000000000package contractmanager import ( "encoding/binary" "os" "path/filepath" "sync/atomic" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/fastrand" ) type ( // savedStorageFolder contains fields that are saved automatically to disk // for each storage folder. savedStorageFolder struct { Index uint16 Path string Usage []uint64 } // savedSettings contains fields that are saved atomically to disk inside // of the contract manager directory, alongside the WAL and log. savedSettings struct { SectorSalt crypto.Hash StorageFolders []savedStorageFolder } ) // savedStorageFolder returns the persistent version of the storage folder. func (sf *storageFolder) savedStorageFolder() savedStorageFolder { ssf := savedStorageFolder{ Index: sf.index, Path: sf.path, Usage: make([]uint64, len(sf.usage)), } copy(ssf.Usage, sf.usage) return ssf } // initSettings will set the default settings for the contract manager. // initSettings should only be run for brand new contract maangers. func (cm *ContractManager) initSettings() error { // Initialize the sector salt to a random value. fastrand.Read(cm.sectorSalt[:]) // Ensure that the initialized defaults have stuck. ss := cm.savedSettings() err := persist.SaveJSON(settingsMetadata, &ss, filepath.Join(cm.persistDir, settingsFile)) if err != nil { cm.log.Println("ERROR: unable to initialize settings file for contract manager:", err) return build.ExtendErr("error saving contract manager after initialization", err) } return nil } // loadSettings will load the contract manager settings. func (cm *ContractManager) loadSettings() error { var ss savedSettings err := cm.dependencies.loadFile(settingsMetadata, &ss, filepath.Join(cm.persistDir, settingsFile)) if os.IsNotExist(err) { // There is no settings file, this must be the first time that the // contract manager has been run. Initialize with default settings. return cm.initSettings() } else if err != nil { cm.log.Println("ERROR: unable to load the contract manager settings file:", err) return build.ExtendErr("error loading the contract manager settings file", err) } // Copy the saved settings into the contract manager. cm.sectorSalt = ss.SectorSalt for i := range ss.StorageFolders { sf := new(storageFolder) sf.index = ss.StorageFolders[i].Index sf.path = ss.StorageFolders[i].Path sf.usage = ss.StorageFolders[i].Usage sf.metadataFile, err = cm.dependencies.openFile(filepath.Join(ss.StorageFolders[i].Path, metadataFile), os.O_RDWR, 0700) if err != nil { // Mark the folder as unavailable and log an error. atomic.StoreUint64(&sf.atomicUnavailable, 1) cm.log.Printf("ERROR: unable to open the %v sector metadata file: %v\n", sf.path, err) } sf.sectorFile, err = cm.dependencies.openFile(filepath.Join(ss.StorageFolders[i].Path, sectorFile), os.O_RDWR, 0700) if err != nil { // Mark the folder as unavailable and log an error. 
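// The flag is written with atomic.StoreUint64 because other goroutines
// read it with atomic.LoadUint64 without holding the WAL mutex.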
atomic.StoreUint64(&sf.atomicUnavailable, 1) cm.log.Printf("ERROR: unable to open the %v sector file: %v\n", sf.path, err) if sf.metadataFile != nil { sf.metadataFile.Close() } } sf.availableSectors = make(map[sectorID]uint32) cm.storageFolders[sf.index] = sf } return nil } // loadSectorLocations will read the metadata portion of each storage folder // file and load the sector location information into memory. func (cm *ContractManager) loadSectorLocations(sf *storageFolder) { // Read the sector lookup table for this storage folder into memory. sectorLookupBytes, err := readFullMetadata(sf.metadataFile, len(sf.usage)*storageFolderGranularity) if err != nil { atomic.AddUint64(&sf.atomicFailedReads, 1) atomic.StoreUint64(&sf.atomicUnavailable, 1) err = build.ComposeErrors(err, sf.metadataFile.Close()) err = build.ComposeErrors(err, sf.sectorFile.Close()) cm.log.Printf("ERROR: unable to read sector metadata for folder %v: %v\n", sf.path, err) return } atomic.AddUint64(&sf.atomicSuccessfulReads, 1) // Iterate through the sectors that are in-use and read their storage // locations into memory. sf.sectors = 0 // may be non-zero from WAL operations - they will be double counted here if not reset. for _, sectorIndex := range usageSectors(sf.usage) { readHead := sectorMetadataDiskSize * sectorIndex var id sectorID copy(id[:], sectorLookupBytes[readHead:readHead+12]) count := binary.LittleEndian.Uint16(sectorLookupBytes[readHead+12 : readHead+14]) sl := sectorLocation{ index: sectorIndex, storageFolder: sf.index, count: count, } // Add the sector to the sector location map. cm.sectorLocations[id] = sl sf.sectors++ } atomic.StoreUint64(&sf.atomicUnavailable, 0) } // savedSettings returns the settings of the contract manager in an // easily-serializable form. func (cm *ContractManager) savedSettings() savedSettings { ss := savedSettings{ SectorSalt: cm.sectorSalt, } for _, sf := range cm.storageFolders { // Unset all of the usage bits in the storage folder for the queued sectors. for _, sectorIndex := range sf.availableSectors { sf.clearUsage(sectorIndex) } // Copy over the storage folder. ss.StorageFolders = append(ss.StorageFolders, sf.savedStorageFolder()) // Re-set all of the usage bits for the queued sectors. for _, sectorIndex := range sf.availableSectors { sf.setUsage(sectorIndex) } } return ss } Sia-1.3.0/modules/host/contractmanager/persist_test.go000066400000000000000000000502471313565667000231270ustar00rootroot00000000000000package contractmanager import ( "bytes" "os" "path/filepath" "sync" "testing" "time" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" ) // dependencyNoRecheck prevents the recheck loop from running in the contract // manager. type dependencyNoRecheck struct { productionDependencies } // disrupt prevents the recheck loop from running in the contract manager. func (dependencyNoRecheck) disrupt(s string) bool { if s == "noRecheck" { return true } return false } // TestLoadMissingStorageFolder checks that loading a storage folder which is // missing doesn't result in a complete loss of the storage folder on subsequent // startups. func TestLoadMissingStorageFolder(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cmt, err := newContractManagerTester(t.Name()) if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. 
err = os.MkdirAll(storageFolderDir, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*storageFolderGranularity*2) if err != nil { t.Fatal(err) } // Check that the storage folder has been added. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder reported") } // Check that the storage folder has the right path and size. if sfs[0].Path != storageFolderDir { t.Error("storage folder reported with wrong path") } if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*2 { t.Error("storage folder reported with wrong sector size") } // Add a sector to the storage folder. root, data := randSector() err = cmt.cm.AddSector(root, data) if err != nil { t.Fatal(err) } // Check that the sector was successfully added. sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } sfOneIndex := sfs[0].Index // Try reloading the contract manager after the storage folder has been // moved somewhere else. err = cmt.cm.Close() if err != nil { t.Fatal(err) } // Move the storage folder directory to a new location - hiding it from the // contract manager. err = os.Rename(storageFolderDir, storageFolderDir+"-moved") if err != nil { t.Fatal(err) } // Re-open the contract manager. d := new(dependencyNoRecheck) cmt.cm, err = newContractManager(d, filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // The contract manager should still be reporting the storage folder, but // with errors reported. sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("wrong number of storage folders being reported") } if sfs[0].FailedReads < 100000000 { t.Error("Not enough failures reported for absent storage folder") } if sfs[0].FailedWrites < 100000000 { t.Error("Not enough failures reported for absent storage folder") } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Reload the contract manager and make sure the storage folder is still // there. err = cmt.cm.Close() if err != nil { t.Fatal(err) } // Re-open the contract manager. cmt.cm, err = newContractManager(d, filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // The contract manager should still be reporting the storage folder with // errors. sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("wrong number of storage folders being reported") } if sfs[0].FailedReads < 100000000 { t.Error("Not enough failures reported for absent storage folder") } if sfs[0].FailedWrites < 100000000 { t.Error("Not enough failures reported for absent storage folder") } // Try reading the sector from the missing storage folder. _, err = cmt.cm.ReadSector(root) if err == nil { t.Fatal("Expecting error when reading missing sector.") } // Try adding a sector to the contract manager - no folder can receive it. rootF, dataF := randSector() err = cmt.cm.AddSector(rootF, dataF) if err == nil { t.Error("should not be able to add sector") } // Check that you can add folders, add sectors while the contract manager // correctly works around the missing storage folder. 
storageFolderTwo := filepath.Join(cmt.persistDir, "storageFolderTwo") err = os.MkdirAll(storageFolderTwo, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderTwo, modules.SectorSize*storageFolderGranularity*2) if err != nil { t.Fatal(err) } // Add a sector to the storage folder. root2, data2 := randSector() err = cmt.cm.AddSector(root2, data2) if err != nil { t.Fatal(err) } // Check that the sector was successfully added. sfs = cmt.cm.StorageFolders() if len(sfs) != 2 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } for i := range sfs { if sfs[i].Capacity != sfs[i].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[i].Capacity, sfs[i].CapacityRemaining, sfs[i].Path) } } var sfTwoIndex uint16 if sfs[0].Index == sfOneIndex { sfTwoIndex = sfs[1].Index } else { sfTwoIndex = sfs[0].Index } // Add two more sectors. root3, data3 := randSector() err = cmt.cm.AddSector(root3, data3) if err != nil { t.Fatal(err) } root4, data4 := randSector() err = cmt.cm.AddSector(root4, data4) if err != nil { t.Fatal(err) } // Check that the sector was successfully added. sfs = cmt.cm.StorageFolders() if len(sfs) != 2 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize*3 && sfs[1].Capacity != sfs[1].CapacityRemaining+modules.SectorSize*3 { t.Error("One sector's worth of capacity should be consumed") } // Try to shrink the missing storage folder. err = cmt.cm.ResizeStorageFolder(sfOneIndex, modules.SectorSize*storageFolderGranularity, false) if err == nil { t.Fatal("should not be able to resize a missing storage folder") } err = cmt.cm.ResizeStorageFolder(sfOneIndex, modules.SectorSize*storageFolderGranularity, true) if err == nil { t.Fatal("should not be able to resize a missing storage folder") } // Check that the storage folder is still the original size. sfs = cmt.cm.StorageFolders() if len(sfs) != 2 { t.Fatal("wrong storage folder count") } if sfs[0].Index == sfOneIndex && sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*2 { t.Error("Storage folder has wrong size after failing to resize") } if sfs[1].Index == sfOneIndex && sfs[1].Capacity != modules.SectorSize*storageFolderGranularity*2 { t.Error("Storage folder has wrong size after failing to resize") } // Try to grow the missing storage folder. err = cmt.cm.ResizeStorageFolder(sfOneIndex, modules.SectorSize*storageFolderGranularity*4, false) if err == nil { t.Fatal("should not be able to resize a missing storage folder") } err = cmt.cm.ResizeStorageFolder(sfOneIndex, modules.SectorSize*storageFolderGranularity*4, true) if err == nil { t.Fatal("should not be able to resize a missing storage folder") } // Check that the storage folder is still the original size. sfs = cmt.cm.StorageFolders() if len(sfs) != 2 { t.Fatal("wrong storage folder count") } if sfs[0].Index == sfOneIndex && sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*2 { t.Error("Storage folder has wrong size after failing to resize") } if sfs[1].Index == sfOneIndex && sfs[1].Capacity != modules.SectorSize*storageFolderGranularity*2 { t.Error("Storage folder has wrong size after failing to resize") } // Check that you can delete sectors and have the contract manager work // correctly around the missing storage folder. 
err = cmt.cm.DeleteSector(root2) if err != nil { t.Fatal(err) } err = cmt.cm.DeleteSector(root3) if err != nil { t.Fatal(err) } err = cmt.cm.DeleteSector(root4) if err != nil { t.Fatal(err) } // Check that the sectors are no longer reported. sfs = cmt.cm.StorageFolders() if len(sfs) != 2 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining && sfs[1].Capacity != sfs[1].CapacityRemaining { t.Error("Deleted sector does not seem to have been deleted correctly.") } // Try reading the deleted sector. _, err = cmt.cm.ReadSector(root2) if err == nil { t.Fatal("should get an error when reading a deleted sector") } // Check that it's okay to shrink a storage folder while missing a storage // folder. // // Start by resizing the second storage folder so it can hold a lot of // sectors. err = cmt.cm.ResizeStorageFolder(sfTwoIndex, modules.SectorSize*storageFolderGranularity*4, false) if err != nil { t.Fatal(err) } // Add enough sectors to the storage folder that doing a shrink operation // causes sectors to be moved around. num := int(storageFolderGranularity*3 + 2) roots := make([]crypto.Hash, num) datas := make([][]byte, num) var wg sync.WaitGroup // Add in parallel to get massive performance boost. for i := 0; i < num; i++ { wg.Add(1) go func(i int) { defer wg.Done() rootI, dataI := randSector() roots[i] = rootI datas[i] = dataI err := cmt.cm.AddSector(rootI, dataI) if err != nil { t.Fatal(err) } }(i) } wg.Wait() // Make a new storage folder so the sectors have somewhere to go. storageFolderThree := filepath.Join(cmt.persistDir, "storageFolderThree") err = os.MkdirAll(storageFolderThree, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderThree, modules.SectorSize*storageFolderGranularity) if err != nil { t.Fatal(err) } // Shrink the second storage folder such that some of the sectors are forced // to move. err = cmt.cm.ResizeStorageFolder(sfTwoIndex, modules.SectorSize*storageFolderGranularity*3, false) if err != nil { t.Fatal(err) } // Check that all of the sectors are still recoverable. for i := range roots { data, err := cmt.cm.ReadSector(roots[i]) if err != nil { t.Fatal(err) } if !bytes.Equal(data, datas[i]) { t.Error("read sector does not have the same data that was inserted") } } // Shrink the second storage folder again, such that there is not enough // room in the available storage folders to accept the data. err = cmt.cm.ResizeStorageFolder(sfTwoIndex, modules.SectorSize*storageFolderGranularity*2, false) if err == nil { t.Fatal("expected an error") } // Check that all of the sectors are still recoverable. for i := range roots { data, err := cmt.cm.ReadSector(roots[i]) if err != nil { t.Fatal(err) } if !bytes.Equal(data, datas[i]) { t.Error("read sector does not have the same data that was inserted") } } // Shrink the second storage folder again, such that there is not enough // room in the available storage folders to accept the data. err = cmt.cm.ResizeStorageFolder(sfTwoIndex, modules.SectorSize*storageFolderGranularity, true) if err != nil { t.Fatal(err) } // There is now data loss. // Try deleting the second storage folder, which again will cause data loss. 
err = cmt.cm.RemoveStorageFolder(sfTwoIndex, false) if err == nil { t.Fatal("should have gotten an error when trying to remove the storage folder.") } err = cmt.cm.RemoveStorageFolder(sfTwoIndex, true) if err != nil { t.Fatal(err) } // Try to recover the missing storage folder by closing and moving the // storage folder to the right place. err = cmt.cm.Close() if err != nil { t.Fatal(err) } err = os.Rename(storageFolderDir+"-moved", storageFolderDir) if err != nil { t.Fatal(err) } // Re-open the contract manager. cmt.cm, err = newContractManager(d, filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // The contract manager should still be reporting the storage folder, but // with errors reported. sfs = cmt.cm.StorageFolders() if len(sfs) != 2 { t.Fatal("wrong number of storage folders being reported") } var sfOne modules.StorageFolderMetadata for _, sf := range sfs { if sf.Index == sfOneIndex { sfOne = sf } } if sfOne.FailedReads > 0 { t.Error("folder should be visible again") } if sfOne.FailedWrites > 0 { t.Error("folder should be visible again") } if sfOne.Capacity != sfOne.CapacityRemaining+modules.SectorSize { cmt.cm.wal.mu.Lock() t.Log("Usage len:", len(cmt.cm.storageFolders[sfOne.Index].usage)) t.Log("Reported Sectors:", cmt.cm.storageFolders[sfOne.Index].sectors) t.Log("Avail:", len(cmt.cm.storageFolders[sfOne.Index].availableSectors)) cmt.cm.wal.mu.Unlock() t.Error("One sector's worth of capacity should be consumed:", sfOne.Capacity, sfOne.CapacityRemaining) } // See if the sector is still available. recoveredData, err := cmt.cm.ReadSector(root) if err != nil { t.Fatal(err) } if !bytes.Equal(recoveredData, data) { t.Error("recovered data is not equal to original data") } // Redo the storage folder move, so we can test deleting a missing storage // folder. err = cmt.cm.Close() if err != nil { t.Fatal(err) } // Move the storage folder directory to a new location - hiding it from the // contract manager. err = os.Rename(storageFolderDir, storageFolderDir+"-moved") if err != nil { t.Fatal(err) } // Re-open the contract manager. cmt.cm, err = newContractManager(d, filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Try removing the storage folder without the --force option. It should // fail. err = cmt.cm.RemoveStorageFolder(sfOneIndex, false) if err == nil { t.Fatal("should have gotten an error") } sfs = cmt.cm.StorageFolders() if len(sfs) != 2 { t.Error("there should be two storage folders after a removal failed.") } err = cmt.cm.RemoveStorageFolder(sfOneIndex, true) if err != nil { t.Fatal(err) } sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Error("there should be only one storage folder remaining") } // Close and re-open the contract manager; the storage folder should still be // missing. err = cmt.cm.Close() if err != nil { t.Fatal(err) } // Re-open the contract manager. cmt.cm, err = newContractManager(d, filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Error("there should be only one storage folder remaining") } } // TestFolderRechecker verifies that the folder rechecker is able to discover // when a storage folder has become available again. func TestFolderRechecker(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cmt, err := newContractManagerTester(t.Name()) if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester.
storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderDir, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*storageFolderGranularity*2) if err != nil { t.Fatal(err) } // Check that the storage folder has been added. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder reported") } // Check that the storage folder has the right path and size. if sfs[0].Path != storageFolderDir { t.Error("storage folder reported with wrong path") } if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*2 { t.Error("storage folder reported with wrong sector size") } // Add a sector to the storage folder. root, data := randSector() err = cmt.cm.AddSector(root, data) if err != nil { t.Fatal(err) } // Check that the sector was successfully added. sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Try reloading the contract manager after the storage folder has been // moved somewhere else. err = cmt.cm.Close() if err != nil { t.Fatal(err) } // Move the storage folder directory to a new location - hiding it from the // contract manager. err = os.Rename(storageFolderDir, storageFolderDir+"-moved") if err != nil { t.Fatal(err) } // Re-open the contract manager. cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // The contract manager should still be reporting the storage folder, but // with errors reported. sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("wrong number of storage folders being reported") } if sfs[0].FailedReads < 100000000 { t.Error("Not enough failures reported for absent storage folder") } if sfs[0].FailedWrites < 100000000 { t.Error("Not enough failures reported for absent storage folder") } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Move the storage folder back to where the contract manager can see it. err = os.Rename(storageFolderDir+"-moved", storageFolderDir) if err != nil { t.Fatal(err) } // Sleep until the rechecker can find the storage folder. time.Sleep(maxFolderRecheckInterval) // Check that the storage folder has been found by the rechecker. sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("wrong number of storage folders being reported") } if sfs[0].FailedReads != 0 { t.Error("folder should no longer be reporting failed reads") } if sfs[0].FailedWrites != 0 { t.Error("folder should no longer be reporting failed writes") } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Check that the sector is once again available. recoveredData, err := cmt.cm.ReadSector(root) if err != nil { t.Fatal(err) } if !bytes.Equal(recoveredData, data) { t.Error("recovered data does not equal original data") } // Check that a sector can once again be added to the contract manager.
root2, data2 := randSector() err = cmt.cm.AddSector(root2, data2) if err != nil { t.Error("should be able to add a sector now that the folder has returned:", err) } recoveredData, err = cmt.cm.ReadSector(root2) if err != nil { t.Fatal(err) } if !bytes.Equal(recoveredData, data2) { t.Error("recovered data does not equal original data") } // Grow the storage folder. err = cmt.cm.ResizeStorageFolder(sfs[0].Index, modules.SectorSize*storageFolderGranularity*4, false) if err != nil { t.Fatal(err) } sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("wrong number of storage folders being reported") } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize*2 { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*4 { t.Error("the storage folder growth does not seem to have worked") } // Restart the client. Sector should still be readable, storage folder // should still be grown. err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Check that the sector is once again available. recoveredData, err = cmt.cm.ReadSector(root) if err != nil { t.Fatal(err) } if !bytes.Equal(recoveredData, data) { t.Error("recovered data does not equal original data") } sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("wrong number of storage folders being reported") } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize*2 { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*4 { t.Error("the storage folder growth does not seem to have worked") } } Sia-1.3.0/modules/host/contractmanager/sector.go000066400000000000000000000151731313565667000216750ustar00rootroot00000000000000package contractmanager import ( "encoding/binary" "errors" "sync" "sync/atomic" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" ) var ( // errDiskTrouble is returned when the host is supposed to have enough // storage to hold a new sector but failures that are likely related to the // disk have prevented the host from successfully adding the sector. errDiskTrouble = errors.New("host unable to add sector despite having the storage capacity to do so") // errInsufficientStorageForSector is returned if the host tries to add a // sector when there is not enough storage remaining on the host to accept // the sector. // // Ideally, the host will adjust pricing as the host starts to fill up, so // this error should be pretty rare. Demand should drive the price up // faster than the Host runs out of space, such that the host is always // hovering around 95% capacity and rarely over 98% or under 90% capacity. errInsufficientStorageForSector = errors.New("not enough storage remaining to accept sector") // errMaxVirtualSectors is returned when a sector cannot be added because // the maximum number of virtual sectors for that sector id already exist. errMaxVirtualSectors = errors.New("sector collides with a physical sector that already has the maximum allowed number of virtual sectors") // ErrSectorNotFound is returned when a lookup for a sector fails. ErrSectorNotFound = errors.New("could not find the desired sector") ) // sectorLocation indicates the location of a sector on disk.
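// Each sector's on-disk metadata entry mirrors the fields below: the 12-byte
// truncated id produced by managedSectorID, followed by a little-endian
// uint16 count (see writeSectorMetadata). A minimal decoding sketch - it
// assumes an entry size of 14 bytes; the authoritative value is the package
// constant sectorMetadataDiskSize:
//
//	entry := lookupBytes[i*14 : (i+1)*14]
//	var id sectorID
//	copy(id[:], entry[:12])
//	count := binary.LittleEndian.Uint16(entry[12:14])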
type ( sectorID [12]byte sectorLocation struct { // index indicates the index of the sector's location within the storage // folder. index uint32 // storageFolder indicates the index of the storage folder that the sector // is stored on. storageFolder uint16 // count indicates the number of virtual sectors represented by the // physical sector described by this object. A maximum of 2^16 virtual // sectors are allowed for each sector. Proper use by the renter should // mean that the host never has more than 3 virtual sectors for any sector. count uint16 } // sectorLock contains a lock plus a count of the number of threads // currently waiting to access the lock. sectorLock struct { waiting int mu sync.Mutex } ) // readSector will read the sector in the file, starting from the provided // location. func readSector(f file, sectorIndex uint32) ([]byte, error) { b := make([]byte, modules.SectorSize) _, err := f.ReadAt(b, int64(uint64(sectorIndex)*modules.SectorSize)) if err != nil { return nil, build.ExtendErr("unable to read within storage folder", err) } return b, nil } // readFullMetadata will read a full sector metadata file into memory. func readFullMetadata(f file, numSectors int) ([]byte, error) { sectorLookupBytes := make([]byte, numSectors*sectorMetadataDiskSize) _, err := f.ReadAt(sectorLookupBytes, 0) if err != nil { return nil, build.ExtendErr("unable to read metadata file for target storage folder", err) } return sectorLookupBytes, nil } // writeSector will write the given sector into the given file at the given // index. func writeSector(f file, sectorIndex uint32, data []byte) error { _, err := f.WriteAt(data, int64(uint64(sectorIndex)*modules.SectorSize)) if err != nil { return build.ExtendErr("unable to write within provided file", err) } return nil } // writeSectorMetadata will take a sector update and write the related metadata // to disk. func writeSectorMetadata(f file, sectorIndex uint32, id sectorID, count uint16) error { writeData := make([]byte, sectorMetadataDiskSize) copy(writeData, id[:]) binary.LittleEndian.PutUint16(writeData[12:], count) _, err := f.WriteAt(writeData, sectorMetadataDiskSize*int64(sectorIndex)) if err != nil { return build.ExtendErr("unable to write in given file", err) } return nil } // sectorID returns the id that should be used when referring to a sector. // There are lots of sectors, and to minimize their footprint a reduced size // hash is used. Hashes are typically 256bits to provide collision resistance // when an attacker can perform orders of magnitude more than a billion trials // per second. When attacking the host sector ids though, the attacker can only // do one trial per sector upload, and even then has minimal means to learn // whether or not a collision was successfully achieved. Hash length can safely // be reduced from 32 bytes to 12 bytes, which has a collision resistance of // 2^48. The host however is unlikely to be storing 2^48 sectors, which would // be an exabyte of data. func (cm *ContractManager) managedSectorID(sectorRoot crypto.Hash) (id sectorID) { saltedRoot := crypto.HashAll(sectorRoot, cm.sectorSalt) copy(id[:], saltedRoot[:]) return id } // ReadSector will read a sector from the storage manager, returning the bytes // that match the input sector root. 
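// A minimal usage sketch (hypothetical caller; names are illustrative and
// error handling is elided):
//
//	data, err := cm.ReadSector(root)
//	if err != nil {
//		// the sector is unknown, or its storage folder is unavailable
//	}
//	_ = data // on success, data is exactly modules.SectorSize bytes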
func (cm *ContractManager) ReadSector(root crypto.Hash) ([]byte, error) { err := cm.tg.Add() if err != nil { return nil, err } defer cm.tg.Done() id := cm.managedSectorID(root) cm.wal.managedLockSector(id) defer cm.wal.managedUnlockSector(id) // Fetch the sector metadata. cm.wal.mu.Lock() sl, exists1 := cm.sectorLocations[id] sf, exists2 := cm.storageFolders[sl.storageFolder] cm.wal.mu.Unlock() if !exists1 { return nil, ErrSectorNotFound } if !exists2 { cm.log.Critical("Unable to load storage folder despite having sector metadata") return nil, ErrSectorNotFound } if atomic.LoadUint64(&sf.atomicUnavailable) == 1 { // TODO: Pick a new error instead. return nil, ErrSectorNotFound } // Read the sector. sectorData, err := readSector(sf.sectorFile, sl.index) if err != nil { atomic.AddUint64(&sf.atomicFailedReads, 1) return nil, build.ExtendErr("unable to fetch sector", err) } atomic.AddUint64(&sf.atomicSuccessfulReads, 1) return sectorData, nil } // managedLockSector grabs a sector lock. func (wal *writeAheadLog) managedLockSector(id sectorID) { wal.mu.Lock() sl, exists := wal.cm.lockedSectors[id] if exists { sl.waiting++ } else { sl = &sectorLock{ waiting: 1, } wal.cm.lockedSectors[id] = sl } wal.mu.Unlock() // Block until the sector is available. sl.mu.Lock() } // managedUnlockSector releases a sector lock. func (wal *writeAheadLog) managedUnlockSector(id sectorID) { wal.mu.Lock() defer wal.mu.Unlock() // Release the lock on the sector. sl, exists := wal.cm.lockedSectors[id] if !exists { wal.cm.log.Critical("Unlock of sector that is not locked.") return } sl.waiting-- sl.mu.Unlock() // If nobody else is trying to lock the sector, perform garbage collection. if sl.waiting == 0 { delete(wal.cm.lockedSectors, id) } } Sia-1.3.0/modules/host/contractmanager/sectorupdate.go000066400000000000000000000343601313565667000230770ustar00rootroot00000000000000package contractmanager import ( "errors" "sync" "sync/atomic" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" ) // commitUpdateSector will commit a sector update to the contract manager, // writing in metadata and usage info if the sector still exists, and deleting // the usage info if the sector does not exist. The update is idempotent. func (wal *writeAheadLog) commitUpdateSector(su sectorUpdate) { sf, exists := wal.cm.storageFolders[su.Folder] if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 { wal.cm.log.Printf("ERROR: unable to locate storage folder for a committed sector update.") return } // If the sector is being cleaned from disk, unset the usage flag. if su.Count == 0 { sf.clearUsage(su.Index) return } // Set the usage flag and update the on-disk metadata. Abort if the // metadata write fails. err := wal.writeSectorMetadata(sf, su) if err != nil { wal.cm.log.Printf("ERROR: unable to write sector metadata for %v: %v\n", sf.path, err) return } sf.setUsage(su.Index) } // managedAddPhysicalSector is a WAL operation to add a physical sector to the // contract manager. func (wal *writeAheadLog) managedAddPhysicalSector(id sectorID, data []byte, count uint16) error { // Sanity check - data should have modules.SectorSize bytes. if uint64(len(data)) != modules.SectorSize { wal.cm.log.Critical("sector has the wrong size", modules.SectorSize, len(data)) return errors.New("malformed sector") } // Find a committed storage folder that has enough space to receive // this sector. Keep trying new storage folders if some return // errors during disk operations.
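// In outline, the loop below behaves as follows (a sketch only - the real
// implementation also juggles the WAL mutex and per-folder RLocks):
//
//	for len(storageFolders) > 0 {
//		sf := a storage folder with a vacancy
//		idx := a random free sector index in sf, marked uncommitted
//		write the sector data, then the sector metadata
//		on disk error: roll back the reservation and drop sf from the list
//		on success: append the change to the WAL and wait for the sync
//	}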
wal.mu.Lock() storageFolders := wal.cm.availableStorageFolders() wal.mu.Unlock() var syncChan chan struct{} for len(storageFolders) >= 1 { var storageFolderIndex int err := func() error { // NOTE: Convention is broken when working with WAL lock here, due // to the complexity required with managing both the WAL lock and // the storage folder lock. Pay close attention when reviewing and // modifying. // Grab a vacant storage folder. wal.mu.Lock() var sf *storageFolder sf, storageFolderIndex = vacancyStorageFolder(storageFolders) if sf == nil { // None of the storage folders have enough room to house the // sector. wal.mu.Unlock() return errInsufficientStorageForSector } defer sf.mu.RUnlock() // Grab a sector from the storage folder. WAL lock cannot be // released between grabbing the storage folder and grabbing a // sector lest another thread request the final available sector in // the storage folder. sectorIndex, err := randFreeSector(sf.usage) if err != nil { wal.mu.Unlock() wal.cm.log.Critical("a storage folder with full usage was returned from vacancyStorageFolder") return err } // Set the usage, but mark it as uncommitted. sf.setUsage(sectorIndex) sf.availableSectors[id] = sectorIndex wal.mu.Unlock() // NOTE: The usage has been set, in the event of failure the usage // must be cleared. // Try writing the new sector to disk. err = writeSector(sf.sectorFile, sectorIndex, data) if err != nil { wal.cm.log.Printf("ERROR: Unable to write sector for folder %v: %v\n", sf.path, err) atomic.AddUint64(&sf.atomicFailedWrites, 1) wal.mu.Lock() sf.clearUsage(sectorIndex) delete(sf.availableSectors, id) wal.mu.Unlock() return errDiskTrouble } // Try writing the sector metadata to disk. su := sectorUpdate{ Count: count, ID: id, Folder: sf.index, Index: sectorIndex, } err = wal.writeSectorMetadata(sf, su) if err != nil { wal.cm.log.Printf("ERROR: Unable to write sector metadata for folder %v: %v\n", sf.path, err) atomic.AddUint64(&sf.atomicFailedWrites, 1) wal.mu.Lock() sf.clearUsage(sectorIndex) delete(sf.availableSectors, id) wal.mu.Unlock() return errDiskTrouble } // Sector added successfully, update the WAL and the state. sl := sectorLocation{ index: sectorIndex, storageFolder: sf.index, count: count, } wal.mu.Lock() wal.appendChange(stateChange{ SectorUpdates: []sectorUpdate{su}, }) delete(wal.cm.storageFolders[su.Folder].availableSectors, id) wal.cm.sectorLocations[id] = sl syncChan = wal.syncChan wal.mu.Unlock() return nil }() if err != nil { // End the loop if no storage folder proved suitable. if storageFolderIndex == -1 { storageFolders = nil break } // Remove the storage folder that failed and try the next one. storageFolders = append(storageFolders[:storageFolderIndex], storageFolders[storageFolderIndex+1:]...) continue } // Sector added successfully, break. break } if len(storageFolders) < 1 { return errInsufficientStorageForSector } // Wait for the WAL to synchronize the new sector to disk before returning. <-syncChan return nil } // managedAddVirtualSector will add a virtual sector to the contract manager. func (wal *writeAheadLog) managedAddVirtualSector(id sectorID, location sectorLocation) error { // Update the location count. if location.count == 65535 { return errMaxVirtualSectors } location.count++ // Prepare the sector update. su := sectorUpdate{ Count: location.count, ID: id, Folder: location.storageFolder, Index: location.index, } // Append the sector update to the WAL.
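// The durable-commit idiom used here and throughout this file: append the
// state change and capture the current sync channel while holding the WAL
// mutex, then block on that channel outside the lock. A condensed sketch:
//
//	wal.mu.Lock()
//	wal.appendChange(change)
//	c := wal.syncChan
//	wal.mu.Unlock()
//	<-c // unblocks once the batch containing the change has been synced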
wal.mu.Lock() sf, exists := wal.cm.storageFolders[su.Folder] if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 { // Need to check that the storage folder exists before syncing the // commit that increases the virtual sector count. wal.mu.Unlock() return errStorageFolderNotFound } wal.appendChange(stateChange{ SectorUpdates: []sectorUpdate{su}, }) wal.cm.sectorLocations[id] = location syncChan := wal.syncChan wal.mu.Unlock() <-syncChan // Update the metadata on disk. Metadata is updated on disk after the sync // so that there is no risk of obliterating the previous count in the event // that the change is not fully committed during unclean shutdown. err := wal.writeSectorMetadata(sf, su) if err != nil { // Revert the sector update in the WAL to reflect the fact that adding // the sector has failed. su.Count-- wal.mu.Lock() wal.appendChange(stateChange{ SectorUpdates: []sectorUpdate{su}, }) wal.cm.sectorLocations[id] = location wal.mu.Unlock() <-syncChan return build.ExtendErr("unable to write sector metadata during addSector call", err) } return nil } // managedDeleteSector will delete a sector (physical) from the contract manager. func (wal *writeAheadLog) managedDeleteSector(id sectorID) error { // Write the sector delete to the WAL. var location sectorLocation var syncChan chan struct{} var sf *storageFolder err := func() error { wal.mu.Lock() defer wal.mu.Unlock() // Fetch the metadata related to the sector. var exists bool location, exists = wal.cm.sectorLocations[id] if !exists { return ErrSectorNotFound } sf, exists = wal.cm.storageFolders[location.storageFolder] if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 { wal.cm.log.Critical("deleting a sector from a storage folder that does not exist?") return errStorageFolderNotFound } // Inform the WAL of the sector update. wal.appendChange(stateChange{ SectorUpdates: []sectorUpdate{{ Count: 0, ID: id, Folder: location.storageFolder, Index: location.index, }}, }) // Delete the sector and mark the usage as available. delete(wal.cm.sectorLocations, id) sf.availableSectors[id] = location.index // Block until the change has been committed. syncChan = wal.syncChan return nil }() if err != nil { return err } <-syncChan // Only update the usage after the sector delete has been committed to disk // fully. wal.mu.Lock() delete(sf.availableSectors, id) sf.clearUsage(location.index) wal.mu.Unlock() return nil } // managedRemoveSector will remove a sector (virtual or physical) from the // contract manager. func (wal *writeAheadLog) managedRemoveSector(id sectorID) error { // Inform the WAL of the removed sector. var location sectorLocation var su sectorUpdate var sf *storageFolder var syncChan chan struct{} err := func() error { wal.mu.Lock() defer wal.mu.Unlock() // Grab the number of virtual sectors that have been committed with // this root. var exists bool location, exists = wal.cm.sectorLocations[id] if !exists { return ErrSectorNotFound } sf, exists = wal.cm.storageFolders[location.storageFolder] if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 { wal.cm.log.Critical("deleting a sector from a storage folder that does not exist?") return errStorageFolderNotFound } // Inform the WAL of the sector update. location.count-- su = sectorUpdate{ Count: location.count, ID: id, Folder: location.storageFolder, Index: location.index, } wal.appendChange(stateChange{ SectorUpdates: []sectorUpdate{su}, }) // Update the in-memory representation of the sector. if location.count == 0 { // Delete the sector and mark it as available.
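// Note that the sector's index is parked in availableSectors rather than
// having its usage bit cleared immediately; the usage is only cleared after
// the WAL change has synced, so an unclean shutdown cannot let a new write
// land on top of data that a renter may still need proven.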
delete(wal.cm.sectorLocations, id) sf.availableSectors[id] = location.index } else { // Reduce the sector usage. wal.cm.sectorLocations[id] = location } syncChan = wal.syncChan return nil }() if err != nil { return err } // synchronize before updating the metadata or clearing the usage. <-syncChan // Update the metadata, and the usage. if location.count != 0 { err = wal.writeSectorMetadata(sf, su) if err != nil { // Revert the previous change. wal.mu.Lock() su.Count++ location.count++ wal.appendChange(stateChange{ SectorUpdates: []sectorUpdate{su}, }) wal.cm.sectorLocations[id] = location wal.mu.Unlock() return build.ExtendErr("failed to write sector metadata", err) } } // Only update the usage after the sector removal has been committed to // disk entirely. The usage is not updated until after the commit has // completed to prevent the actual sector data from being overwritten in // the event of unclean shutdown. if location.count == 0 { wal.mu.Lock() sf.clearUsage(location.index) delete(sf.availableSectors, id) wal.mu.Unlock() } return nil } // writeSectorMetadata will take a sector update and write the related metadata // to disk. func (wal *writeAheadLog) writeSectorMetadata(sf *storageFolder, su sectorUpdate) error { err := writeSectorMetadata(sf.metadataFile, su.Index, su.ID, su.Count) if err != nil { wal.cm.log.Printf("ERROR: unable to write sector metadata to folder %v when adding sector: %v\n", su.Folder, err) atomic.AddUint64(&sf.atomicFailedWrites, 1) return err } atomic.AddUint64(&sf.atomicSuccessfulWrites, 1) return nil } // AddSector will add a sector to the contract manager. func (cm *ContractManager) AddSector(root crypto.Hash, sectorData []byte) error { // Prevent shutdown until this function completes. err := cm.tg.Add() if err != nil { return err } defer cm.tg.Done() // Hold a sector lock throughout the duration of the function, but release // before syncing. id := cm.managedSectorID(root) cm.wal.managedLockSector(id) defer cm.wal.managedUnlockSector(id) // Determine whether the sector is virtual or physical. cm.wal.mu.Lock() location, exists := cm.sectorLocations[id] cm.wal.mu.Unlock() if exists { err = cm.wal.managedAddVirtualSector(id, location) } else { err = cm.wal.managedAddPhysicalSector(id, sectorData, 1) } if err != nil { cm.log.Println("ERROR: Unable to add sector:", err) return err } return nil } // AddSectorBatch is a non-ACID call to add a bunch of sectors at once. // Necessary for compatibility with old renters. // // TODO: Make ACID, and definitely improve the performance as well. func (cm *ContractManager) AddSectorBatch(sectorRoots []crypto.Hash) error { // Prevent shutdown until this function completes. err := cm.tg.Add() if err != nil { return err } defer cm.tg.Done() // Add each sector in a separate goroutine. var wg sync.WaitGroup for _, root := range sectorRoots { wg.Add(1) go func(root crypto.Hash) { defer wg.Done() // Hold a sector lock throughout the duration of the function, but release // before syncing. id := cm.managedSectorID(root) cm.wal.managedLockSector(id) defer cm.wal.managedUnlockSector(id) // Add the sector as virtual. cm.wal.mu.Lock() location, exists := cm.sectorLocations[id] cm.wal.mu.Unlock() if exists { cm.wal.managedAddVirtualSector(id, location) } }(root) } wg.Wait() return nil } // DeleteSector will delete a sector from the contract manager. If multiple // copies of the sector exist, all of them will be removed. This should only be // used to remove offensive data, as it will cause corruption in the contract // manager. 
This corruption puts the contract manager at risk of failing // storage proofs. If the amount of data removed is small, the risk is small. // This operation will not destabilize the contract manager. func (cm *ContractManager) DeleteSector(root crypto.Hash) error { cm.tg.Add() defer cm.tg.Done() id := cm.managedSectorID(root) cm.wal.managedLockSector(id) defer cm.wal.managedUnlockSector(id) return cm.wal.managedDeleteSector(id) } // RemoveSector will remove a sector from the contract manager. If multiple // copies of the sector exist, only one will be removed. func (cm *ContractManager) RemoveSector(root crypto.Hash) error { cm.tg.Add() defer cm.tg.Done() id := cm.managedSectorID(root) cm.wal.managedLockSector(id) defer cm.wal.managedUnlockSector(id) return cm.wal.managedRemoveSector(id) } // RemoveSectorBatch is a non-ACID call to remove a bunch of sectors at once. // Necessary for compatibility with old renters. // // TODO: Make ACID, and definitely improve the performance as well. func (cm *ContractManager) RemoveSectorBatch(sectorRoots []crypto.Hash) error { // Prevent shutdown until this function completes. err := cm.tg.Add() if err != nil { return err } defer cm.tg.Done() // Add each sector in a separate goroutine. var wg sync.WaitGroup for _, root := range sectorRoots { wg.Add(1) go func(root crypto.Hash) { id := cm.managedSectorID(root) cm.wal.managedLockSector(id) cm.wal.managedRemoveSector(id) // Error is ignored. cm.wal.managedUnlockSector(id) wg.Done() }(root) } wg.Wait() return nil } Sia-1.3.0/modules/host/contractmanager/sectorupdate_test.go000066400000000000000000001605771313565667000241500ustar00rootroot00000000000000package contractmanager // TODO: Verify that the code gracefully handles multiple storage folders // failing, as well as all of them failing. // Verify that the actual data stored on disk matches the sector roots that it // is supposed to match. Especially for multi-storage folder, // post-resize/remove, after renews and expirations (involving virtual // contracts), with large enough amounts of data that storage folders sometimes // filled up entirely (multiple pages of storageFolderGranularity size). import ( "bytes" "errors" "os" "path/filepath" "strings" "sync" "testing" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/fastrand" ) // randSector creates a random sector that can be added to the contract // manager. func randSector() (root crypto.Hash, data []byte) { data = fastrand.Bytes(int(modules.SectorSize)) root = crypto.MerkleRoot(data) return } // TestAddSector tries to add a sector to the contract manager, blocking until // the add has completed. func TestAddSector(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cmt, err := newContractManagerTester("TestAddSector") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderDir, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*64) if err != nil { t.Fatal(err) } // Fabricate a sector and add it to the contract manager. root, data := randSector() err = cmt.cm.AddSector(root, data) if err != nil { t.Fatal(err) } // Check that the sector was successfully added.
sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. if len(cmt.cm.sectorLocations) != 1 { t.Fatal("there should be one sector reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 1 { t.Fatal("storage folder not being reported correctly") } var index uint16 for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.count != 1 { t.Error("Sector location should only be reporting one sector") } if sl.storageFolder != index { t.Error("Sector location is being reported incorrectly - wrong storage folder") } if sl.index > 64 { t.Error("sector index within storage folder also being reported incorrectly") } } // Check the usage. found := false for _, u := range cmt.cm.storageFolders[index].usage { if u != 0 { found = true break } } if !found { t.Error("usage field does not seem to have been updated") } sectorData, err := cmt.cm.ReadSector(root) if err != nil { t.Fatal(err) } if !bytes.Equal(sectorData, data) { t.Fatal("wrong sector provided") } // Try reloading the contract manager and see if all of the stateful checks // still hold. err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Check that the sector was successfully added. sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. if len(cmt.cm.sectorLocations) != 1 { t.Fatal("there should be one sector reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 1 { t.Fatal("storage folder not being reported correctly") } for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.count != 1 { t.Error("Sector location should only be reporting one sector:", sl.count) } if sl.storageFolder != index { t.Error("Sector location is being reported incorrectly - wrong storage folder", sl.storageFolder, index) } if sl.index > 64 { t.Error("sector index within storage folder also being reported incorrectly") } } // Check the usage. found = false for _, u := range cmt.cm.storageFolders[index].usage { if u != 0 { found = true break } } if !found { t.Error("usage field does not seem to have been updated") } sectorData, err = cmt.cm.ReadSector(root) if err != nil { t.Fatal(err) } if !bytes.Equal(sectorData, data) { t.Fatal("wrong sector provided") } } // TestAddSectorFillFolder adds sectors to a 64 sector storage folder until it // is full. 
func TestAddSectorFillFolder(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cmt, err := newContractManagerTester("TestAddSectorFillFolder") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderDir, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*storageFolderGranularity) if err != nil { t.Fatal(err) } // Fabricate 65 sectors for the storage folder, which can only hold 64. roots := make([]crypto.Hash, storageFolderGranularity+1) datas := make([][]byte, storageFolderGranularity+1) var wg sync.WaitGroup for i := 0; i < storageFolderGranularity+1; i++ { wg.Add(1) go func(i int) { defer wg.Done() root, data := randSector() roots[i] = root datas[i] = data }(i) } wg.Wait() // Add 64 sectors which should fit cleanly. The sectors are added in // parallel to make use of the batching in the contract manager. for i := 0; i < storageFolderGranularity; i++ { wg.Add(1) go func(i int) { defer wg.Done() err := cmt.cm.AddSector(roots[i], datas[i]) if err != nil { t.Fatal(err) } }(i) } wg.Wait() // Check that the sectors were successfully added. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].CapacityRemaining != 0 { t.Error("Storage folder is supposed to be full", sfs[0].CapacityRemaining) } // Try adding a 65th sector, it should not succeed. err = cmt.cm.AddSector(roots[storageFolderGranularity], datas[storageFolderGranularity]) if err == nil { t.Error("expecting the storage folder to be full.") } // Try reading each sector. for i := range roots[:storageFolderGranularity] { data, err := cmt.cm.ReadSector(roots[i]) if err != nil { t.Fatal(err) } if !bytes.Equal(data, datas[i]) { t.Error("Contract manager returned the wrong data on a sector request") } } } // TestAddSectorFillLargerFolder adds sectors to a 128 sector storage folder // until it is full. func TestAddSectorFillLargerFolder(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cmt, err := newContractManagerTester("TestAddSectorFillLargerFolder") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderDir, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*storageFolderGranularity*2) if err != nil { t.Fatal(err) } // Fabricate 65 sectors for the storage folder, which can only hold 64. roots := make([]crypto.Hash, storageFolderGranularity*2+1) datas := make([][]byte, storageFolderGranularity*2+1) var wg sync.WaitGroup for i := 0; i < storageFolderGranularity*2+1; i++ { wg.Add(1) go func(i int) { defer wg.Done() root, data := randSector() roots[i] = root datas[i] = data }(i) } wg.Wait() // Add 64 sectors which should fit cleanly. The sectors are added in // parallel to make use of the batching in the contract manager. for i := 0; i < storageFolderGranularity*2; i++ { wg.Add(1) go func(i int) { defer wg.Done() err := cmt.cm.AddSector(roots[i], datas[i]) if err != nil { t.Fatal(err) } }(i) } wg.Wait() // Check that the sectors were successfully added. 
sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].CapacityRemaining != 0 { t.Error("Storage folder is supposed to be full", sfs[0].CapacityRemaining) } // Try adding a 65th sector, it should not succeed. err = cmt.cm.AddSector(roots[storageFolderGranularity*2], datas[storageFolderGranularity*2]) if err == nil { t.Error("expecting the storage folder to be full.") } // Try reading each sector. for i := range roots[:storageFolderGranularity*2] { data, err := cmt.cm.ReadSector(roots[i]) if err != nil { t.Fatal(err) } if !bytes.Equal(data, datas[i]) { t.Error("Contract manager returned the wrong data on a sector request") } } } // dependencyNoSettingsSave is a mocked dependency that will prevent the // settings file from saving. type dependencyNoSettingsSave struct { productionDependencies triggered bool mu sync.Mutex } // disrupt will disrupt the threadedSyncLoop, causing the loop to terminate as // soon as it is created. func (d *dependencyNoSettingsSave) disrupt(s string) bool { d.mu.Lock() defer d.mu.Unlock() if s == "settingsSyncRename" && d.triggered { // Prevent the settings file from being saved. return true } if s == "walRename" && d.triggered { // Prevent the WAL from being renamed, which will prevent the existing // WAL from being overwritten. return true } if s == "cleanWALFile" { // Prevent the WAL file from being removed. return true } return false } // TestAddSectorRecovery checks that the WAL recovery of an added sector is // graceful / correct in the event of unclean shutdown. func TestAddSectorRecovery(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() d := new(dependencyNoSettingsSave) cmt, err := newMockedContractManagerTester(d, "TestAddSectorRecovery") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderDir, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*64) if err != nil { t.Fatal(err) } // Fabricate a sector and add it to the contract manager. root, data := randSector() // Disrupt the sync loop before adding the sector, such that the add sector // call makes it into the WAL but not into the saved settings. err = cmt.cm.AddSector(root, data) if err != nil { t.Fatal(err) } d.mu.Lock() d.triggered = true d.mu.Unlock() // Check that the sector was successfully added. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. 
if len(cmt.cm.sectorLocations) != 1 { t.Fatal("there should be one sector reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 1 { t.Fatal("storage folder not being reported correctly") } var index uint16 for _, sf := range cmt.cm.storageFolders { index = sf.index if sf.sectors != 1 { t.Error("the number of sectors is being counted incorrectly") } } for _, sl := range cmt.cm.sectorLocations { if sl.count != 1 { t.Error("Sector location should only be reporting one sector") } if sl.storageFolder != index { t.Error("Sector location is being reported incorrectly - wrong storage folder") } if sl.index > 64 { t.Error("sector index within storage folder also being reported incorrectly") } } // Check the usage. found := false for _, u := range cmt.cm.storageFolders[index].usage { if u != 0 { found = true break } } if !found { t.Error("usage field does not seem to have been updated") } // Try reloading the contract manager and see if all of the stateful checks // still hold. err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Check that the sector was successfully added. sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", (sfs[0].Capacity-sfs[0].CapacityRemaining)/modules.SectorSize) } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. if len(cmt.cm.sectorLocations) != 1 { t.Fatal("there should be one sector reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 1 { t.Fatal("storage folder not being reported correctly") } for _, sf := range cmt.cm.storageFolders { index = sf.index if sf.sectors != 1 { t.Error("the number of sectors is being counted incorrectly") } } for _, sl := range cmt.cm.sectorLocations { if sl.count != 1 { t.Error("Sector location should only be reporting one sector:", sl.count) } if sl.storageFolder != index { t.Error("Sector location is being reported incorrectly - wrong storage folder", sl.storageFolder, index) } if sl.index > 64 { t.Error("sector index within storage folder also being reported incorrectly") } } // Check the usage. found = false for _, u := range cmt.cm.storageFolders[index].usage { if u != 0 { found = true break } } if !found { t.Error("usage field does not seem to have been updated") } } // TestAddVirtualSectorSerial adds a sector and a virtual sector in serial to // the contract manager. func TestAddVirtualSectorSerial(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cmt, err := newContractManagerTester("TestAddVirtualSectorSerial") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderDir, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*64) if err != nil { t.Fatal(err) } // Fabricate a sector and add it to the contract manager. root, data := randSector() // Add the sector twice in serial to verify that virtual sector adding is // working correctly.
err = cmt.cm.AddSector(root, data) if err != nil { t.Fatal(err) } err = cmt.cm.AddSector(root, data) if err != nil { t.Fatal(err) } // Check that the sector was successfully added. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. if len(cmt.cm.sectorLocations) != 1 { t.Fatal("there should be one sector reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 1 { t.Fatal("storage folder not being reported correctly") } var index uint16 for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.count != 2 { t.Error("Sector location should be reporting two sectors") } if sl.storageFolder != index { t.Error("Sector location is being reported incorrectly - wrong storage folder") } if sl.index > 64 { t.Error("sector index within storage folder also being reported incorrectly") } } // Check the usage. found := false for _, u := range cmt.cm.storageFolders[index].usage { if u != 0 { found = true break } } if !found { t.Error("usage field does not seem to have been updated") } // Try reloading the contract manager and see if all of the stateful checks // still hold. err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Check that the sector was successfully added. sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. if len(cmt.cm.sectorLocations) != 1 { t.Fatal("there should be one sector reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 1 { t.Fatal("storage folder not being reported correctly") } for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.count != 2 { t.Error("Sector location should be reporting two sectors:", sl.count) } if sl.storageFolder != index { t.Error("Sector location is being reported incorrectly - wrong storage folder", sl.storageFolder, index) } if sl.index > 64 { t.Error("sector index within storage folder also being reported incorrectly") } } // Check the usage. found = false for _, u := range cmt.cm.storageFolders[index].usage { if u != 0 { found = true break } } if !found { t.Error("usage field does not seem to have been updated") } } // TestAddVirtualSectorParallel adds a sector and a virtual sector in parallel // to the contract manager.
func TestAddVirtualSectorParallel(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cmt, err := newContractManagerTester("TestAddVirtualSectorParallel") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderDir, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*64) if err != nil { t.Fatal(err) } // Fabricate a sector and add it to the contract manager. root, data := randSector() // Add the sector twice in serial to verify that virtual sector adding is // working correctly. var wg sync.WaitGroup wg.Add(2) go func() { defer wg.Done() err := cmt.cm.AddSector(root, data) if err != nil { t.Fatal(err) } }() go func() { defer wg.Done() err := cmt.cm.AddSector(root, data) if err != nil { t.Fatal(err) } }() wg.Wait() // Check that the sector was successfully added. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. if len(cmt.cm.sectorLocations) != 1 { t.Fatal("there should be one sector reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 1 { t.Fatal("storage folder not being reported correctly") } var index uint16 for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.count != 2 { t.Error("Sector location should be reporting a count of 2 for this sector:", sl.count) } if sl.storageFolder != index { t.Error("Sector location is being reported incorrectly - wrong storage folder") } if sl.index > 64 { t.Error("sector index within storage folder also being reported incorrectly") } } // Check the usage. found := false for _, u := range cmt.cm.storageFolders[index].usage { if u != 0 { found = true break } } if !found { t.Error("usage field does not seem to have been updated") } // Try reloading the contract manager and see if all of the stateful checks // still hold. err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Check that the sector was successfully added. sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. 
if len(cmt.cm.sectorLocations) != 1 { t.Fatal("there should be one sector reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 1 { t.Fatal("storage folder not being reported correctly") } for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.count != 2 { t.Error("Sector location should only be reporting one sector:", sl.count) } if sl.storageFolder != index { t.Error("Sector location is being reported incorrectly - wrong storage folder", sl.storageFolder, index) } if sl.index > 64 { t.Error("sector index within storage folder also being reported incorrectly") } } // Check the usage. found = false for _, u := range cmt.cm.storageFolders[index].usage { if u != 0 { found = true break } } if !found { t.Error("usage field does not seem to have been updated") } } // TestAddVirtualSectorMassiveParallel adds the same sector many times in // parallel to the contract manager. func TestAddVirtualSectorMassiveParallel(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cmt, err := newContractManagerTester("TestAddVirtualSectorMassiveParallel") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderDir, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*64) if err != nil { t.Fatal(err) } // Fabricate a sector and add it to the contract manager. root, data := randSector() // Add the sector many times in parallel to make sure it is handled // gracefully. var wg sync.WaitGroup parallelAdds := uint16(20) for i := uint16(0); i < parallelAdds; i++ { wg.Add(1) go func() { defer wg.Done() err := cmt.cm.AddSector(root, data) if err != nil { t.Fatal(err) } }() } wg.Wait() // Check that the sector was successfully added. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. if len(cmt.cm.sectorLocations) != 1 { t.Fatal("there should be one sector reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 1 { t.Fatal("storage folder not being reported correctly") } var index uint16 for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.count != parallelAdds { t.Error("Sector location should only be reporting one sector:", sl.count) } if sl.storageFolder != index { t.Error("Sector location is being reported incorrectly - wrong storage folder") } if sl.index > 64 { t.Error("sector index within storage folder also being reported incorrectly") } } // Check the usage. found := false for _, u := range cmt.cm.storageFolders[index].usage { if u != 0 { found = true break } } if !found { t.Error("usage field does not seem to have been updated") } // Try reloading the contract manager and see if all of the stateful checks // still hold. 
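	// Close-then-New is the persistence check used throughout this file:
	// reopening forces the contract manager to rebuild all of its state from
	// the settings file plus a WAL replay, so any assertion that still passes
	// after the reload is backed by disk rather than by in-memory state. The
	// idiom, as used below:
	//
	//	err = cmt.cm.Close()
	//	// ... handle err ...
	//	cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir))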
err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Check that the sector was successfully added. sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. if len(cmt.cm.sectorLocations) != 1 { t.Fatal("there should be one sector reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 1 { t.Fatal("storage folder not being reported correctly") } for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.count != parallelAdds { t.Error("Sector location should only be reporting one sector:", sl.count) } if sl.storageFolder != index { t.Error("Sector location is being reported incorrectly - wrong storage folder", sl.storageFolder, index) } if sl.index > 64 { t.Error("sector index within storage folder also being reported incorrectly") } } // Check the usage. found = false for _, u := range cmt.cm.storageFolders[index].usage { if u != 0 { found = true break } } if !found { t.Error("usage field does not seem to have been updated") } } // TestRemoveSector tries to remove a sector from the contract manager. func TestRemoveSector(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cmt, err := newContractManagerTester("TestRemoveSector") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderDir, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*64) if err != nil { t.Fatal(err) } // Add two sectors, and then remove one of them. root, data := randSector() err = cmt.cm.AddSector(root, data) if err != nil { t.Fatal(err) } root2, data2 := randSector() err = cmt.cm.AddSector(root2, data2) if err != nil { t.Fatal(err) } err = cmt.cm.RemoveSector(root2) if err != nil { t.Fatal(err) } // Check that the sector was successfully added. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. 
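	// The sectorLocations map is the contract manager's in-memory index for
	// stored sectors: each entry records a reference count, the host storage
	// folder, and the slot index within that folder. An illustrative
	// (hypothetical) lookup, assuming a sector id `id`:
	//
	//	sl := cmt.cm.sectorLocations[id]
	//	_ = sl.count         // number of references (virtual sectors)
	//	_ = sl.storageFolder // index of the folder holding the data
	//	_ = sl.index         // sector slot within that folder
	//
	// These are exactly the fields interrogated below.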
if len(cmt.cm.sectorLocations) != 1 { t.Fatal("there should be one sector reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 1 { t.Fatal("storage folder not being reported correctly") } var index uint16 for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.count != 1 { t.Error("Sector location should only be reporting one sector") } if sl.storageFolder != index { t.Error("Sector location is being reported incorrectly - wrong storage folder") } if sl.index > 64 { t.Error("sector index within storage folder also being reported incorrectly") } } // Check the usage. found := false for _, u := range cmt.cm.storageFolders[index].usage { if u != 0 { found = true break } } if !found { t.Error("usage field does not seem to have been updated") } // Try reloading the contract manager and see if all of the stateful checks // still hold. err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Check that the sector was successfully added. sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", (sfs[0].Capacity-sfs[0].CapacityRemaining)/modules.SectorSize) } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. if len(cmt.cm.sectorLocations) != 1 { t.Fatal("there should be one sector reported in the sectorLocations map:", len(cmt.cm.sectorLocations)) } if len(cmt.cm.storageFolders) != 1 { t.Fatal("storage folder not being reported correctly") } for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.count != 1 { t.Error("Sector location should only be reporting one sector:", sl.count) } if sl.storageFolder != index { t.Error("Sector location is being reported incorrectly - wrong storage folder", sl.storageFolder, index) } if sl.index > 64 { t.Error("sector index within storage folder also being reported incorrectly") } } // Check the usage. found = false for _, u := range cmt.cm.storageFolders[index].usage { if u != 0 { found = true break } } if !found { t.Error("usage field does not seem to have been updated") } } // TestRemoveSectorVirtual tries to remove a virtual sector from the contract // manager. func TestRemoveSectorVirtual(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cmt, err := newContractManagerTester("TestRemoveSectorVirtual") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderDir, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*64) if err != nil { t.Fatal(err) } // Add a physical sector, then a virtual sector, and then remove the // virtual one. root, data := randSector() err = cmt.cm.AddSector(root, data) if err != nil { t.Fatal(err) } err = cmt.cm.AddSector(root, data) if err != nil { t.Fatal(err) } err = cmt.cm.RemoveSector(root) if err != nil { t.Fatal(err) } // Check that the sector was successfully added. 
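	// Removing a virtual sector only decrements the reference count for the
	// root; the physical data stays on disk until the count reaches zero. A
	// sketch of the expected accounting for the three calls above:
	//
	//	AddSector(root, data) // count 1 - physical write
	//	AddSector(root, data) // count 2 - virtual, no new data written
	//	RemoveSector(root)    // count 1 - data still present on disk
	//
	// so the folder should still report one sector's worth of consumed space.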
sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. if len(cmt.cm.sectorLocations) != 1 { t.Fatal("there should be one sector reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 1 { t.Fatal("storage folder not being reported correctly") } var index uint16 for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.count != 1 { t.Error("Sector location should only be reporting one sector") } if sl.storageFolder != index { t.Error("Sector location is being reported incorrectly - wrong storage folder") } if sl.index > 64 { t.Error("sector index within storage folder also being reported incorrectly") } } // Check the usage. found := false for _, u := range cmt.cm.storageFolders[index].usage { if u != 0 { found = true break } } if !found { t.Error("usage field does not seem to have been updated") } // Try reloading the contract manager and see if all of the stateful checks // still hold. err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Check that the sector was successfully added. sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. if len(cmt.cm.sectorLocations) != 1 { t.Fatal("there should be one sector reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 1 { t.Fatal("storage folder not being reported correctly") } for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.count != 1 { t.Error("Sector location should only be reporting one sector:", sl.count) } if sl.storageFolder != index { t.Error("Sector location is being reported incorrectly - wrong storage folder", sl.storageFolder, index) } if sl.index > 64 { t.Error("sector index within storage folder also being reported incorrectly") } } // Check the usage. found = false for _, u := range cmt.cm.storageFolders[index].usage { if u != 0 { found = true break } } if !found { t.Error("usage field does not seem to have been updated") } } // TestDeleteSector tries to delete a sector from the contract manager. func TestDeleteSector(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cmt, err := newContractManagerTester("TestDeleteSector") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. 
err = os.MkdirAll(storageFolderDir, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*64) if err != nil { t.Fatal(err) } // Add two sectors, and then delete one of them. root, data := randSector() err = cmt.cm.AddSector(root, data) if err != nil { t.Fatal(err) } root2, data2 := randSector() err = cmt.cm.AddSector(root2, data2) if err != nil { t.Fatal(err) } err = cmt.cm.DeleteSector(root2) if err != nil { t.Fatal(err) } // Check that the sector was successfully added. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. if len(cmt.cm.sectorLocations) != 1 { t.Fatal("there should be one sector reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 1 { t.Fatal("storage folder not being reported correctly") } var index uint16 for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.count != 1 { t.Error("Sector location should only be reporting one sector") } if sl.storageFolder != index { t.Error("Sector location is being reported incorrectly - wrong storage folder") } if sl.index > 64 { t.Error("sector index within storage folder also being reported incorrectly") } } // Check the usage. found := false for _, u := range cmt.cm.storageFolders[index].usage { if u != 0 { found = true break } } if !found { t.Error("usage field does not seem to have been updated") } // Try reloading the contract manager and see if all of the stateful checks // still hold. err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Check that the sector was successfully added. sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity/modules.SectorSize, sfs[0].CapacityRemaining/modules.SectorSize) } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. if len(cmt.cm.sectorLocations) != 1 { t.Fatal("there should be one sector reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 1 { t.Fatal("storage folder not being reported correctly") } for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.count != 1 { t.Error("Sector location should only be reporting one sector:", sl.count) } if sl.storageFolder != index { t.Error("Sector location is being reported incorrectly - wrong storage folder", sl.storageFolder, index) } if sl.index > 64 { t.Error("sector index within storage folder also being reported incorrectly") } } // Check the usage. 
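	// Note that this scan only proves that at least one usage bit is set
	// somewhere in the folder; it does not verify an exact bit count. A
	// stricter (hypothetical) variant could count the set bits explicitly
	// using the math/bits package (Go 1.9+):
	//
	//	total := 0
	//	for _, u := range cmt.cm.storageFolders[index].usage {
	//		total += bits.OnesCount64(u)
	//	}
	//
	// at the cost of making the test even more brittle.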
found = false for _, u := range cmt.cm.storageFolders[index].usage { if u != 0 { found = true break } } if !found { t.Error("usage field does not seem to have been updated") } } // TestDeleteSectorVirtual tries to delete a sector with virtual pieces from // the contract manager. func TestDeleteSectorVirtual(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cmt, err := newContractManagerTester("TestDeleteSectorVirtual") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderDir, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*64) if err != nil { t.Fatal(err) } // Add two sectors, and then delete one of them. root, data := randSector() err = cmt.cm.AddSector(root, data) if err != nil { t.Fatal(err) } root2, data2 := randSector() err = cmt.cm.AddSector(root2, data2) if err != nil { t.Fatal(err) } err = cmt.cm.AddSector(root2, data2) if err != nil { t.Fatal(err) } err = cmt.cm.DeleteSector(root2) if err != nil { t.Fatal(err) } // Check that the sector was successfully added. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. if len(cmt.cm.sectorLocations) != 1 { t.Fatal("there should be one sector reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 1 { t.Fatal("storage folder not being reported correctly") } var index uint16 for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.count != 1 { t.Error("Sector location should only be reporting one sector") } if sl.storageFolder != index { t.Error("Sector location is being reported incorrectly - wrong storage folder") } if sl.index > 64 { t.Error("sector index within storage folder also being reported incorrectly") } } // Check the usage. found := false for _, u := range cmt.cm.storageFolders[index].usage { if u != 0 { found = true break } } if !found { t.Error("usage field does not seem to have been updated") } // Try reloading the contract manager and see if all of the stateful checks // still hold. err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Check that the sector was successfully added. sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder in the contract manager", len(sfs)) } if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize { t.Error("One sector's worth of capacity should be consumed:", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. 
if len(cmt.cm.sectorLocations) != 1 { t.Fatal("there should be one sector reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 1 { t.Fatal("storage folder not being reported correctly") } for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.count != 1 { t.Error("Sector location should only be reporting one sector:", sl.count) } if sl.storageFolder != index { t.Error("Sector location is being reported incorrectly - wrong storage folder", sl.storageFolder, index) } if sl.index > 64 { t.Error("sector index within storage folder also being reported incorrectly") } } // Check the usage. found = false for _, u := range cmt.cm.storageFolders[index].usage { if u != 0 { found = true break } } if !found { t.Error("usage field does not seem to have been updated") } } // TestSectorBalancing checks that the contract manager evenly balances sectors // between storage folders. func TestSectorBalancing(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cmt, err := newContractManagerTester("TestSectorBalancing") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne") err = os.MkdirAll(storageFolderDir, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*64) if err != nil { t.Fatal(err) } // Add a second storage folder. storageFolderDir2 := filepath.Join(cmt.persistDir, "storageFolderTwo") err = os.MkdirAll(storageFolderDir2, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderDir2, modules.SectorSize*64) if err != nil { t.Fatal(err) } // Add a third storage folder, twice as large. storageFolderDir3 := filepath.Join(cmt.persistDir, "storageFolderThree") err = os.MkdirAll(storageFolderDir3, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderDir3, modules.SectorSize*64*2) if err != nil { t.Fatal(err) } // Add 20 sectors. var wg sync.WaitGroup for i := 0; i < 20; i++ { wg.Add(1) go func() { defer wg.Done() err := cmt.cm.AddSector(randSector()) if err != nil { t.Fatal(err) } }() } wg.Wait() // Verify that all 20 sectors were accepted. sfs := cmt.cm.StorageFolders() if len(sfs) != 3 { t.Fatal("There should be three storage folders in the contract manager", len(sfs)) } if sfs[0].Capacity+sfs[1].Capacity+sfs[2].Capacity != sfs[0].CapacityRemaining+sfs[1].CapacityRemaining+sfs[2].CapacityRemaining+modules.SectorSize*20 { t.Error("sectors do not appear to have been added correctly") } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. if len(cmt.cm.sectorLocations) != 20 { t.Fatal("there should be twenty sectors reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 3 { t.Fatal("storage folder not being reported correctly") } // Check a storage folder at random, verify that the sectors are sane.
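	// "At random" leans on the fact that Go randomizes map iteration order:
	// the loop below simply keeps whichever folder index is visited last, so
	// any of the three folders may be selected on a given run. That is
	// sufficient for a spot check, though not an exhaustive verification:
	//
	//	for _, sf := range cmt.cm.storageFolders {
	//		index = sf.index // an arbitrary folder survives the loop
	//	}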
var index uint16 for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.storageFolder != index { continue } if sl.count != 1 { t.Error("Sector location should only be reporting one sector") } if sl.index > 64*2 { t.Error("sector index within storage folder also being reported incorrectly") } } // Try reloading the contract manager and see if all of the stateful checks // still hold. err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Verify that all 20 sectors were accepted, and that they have been // distributed evenly between storage folders. sfs = cmt.cm.StorageFolders() if len(sfs) != 3 { t.Fatal("There should be three storage folders in the contract manager", len(sfs)) } if sfs[0].Capacity+sfs[1].Capacity+sfs[2].Capacity != sfs[0].CapacityRemaining+sfs[1].CapacityRemaining+sfs[2].CapacityRemaining+modules.SectorSize*20 { t.Error("sectors do not appear to have been added correctly") } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. if len(cmt.cm.sectorLocations) != 20 { t.Fatal("there should be twenty sectors reported in the sectorLocations map:", len(cmt.cm.sectorLocations)) } if len(cmt.cm.storageFolders) != 3 { t.Fatal("storage folder not being reported correctly") } // Check a storage folder at random, verify that the sectors are sane. for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.storageFolder != index { continue } if sl.count != 1 { t.Error("Sector location should only be reporting one sector") } if sl.index > 64*2 { t.Error("sector index within storage folder also being reported incorrectly") } } } // dependencyFailingWrites is a mocked dependency that will prevent file writes // for some files. type dependencyFailingWrites struct { productionDependencies triggered *bool mu *sync.Mutex } // failureProneFile will begin returning failures on WriteAt for files with // names/paths containing the string "storageFolderOne" after d.triggered has // been set to "true". type failureProneFile struct { triggered *bool mu *sync.Mutex *os.File } // createFile will return a file which will cause errors on Write calls if // "storageFolderOne" is in the filepath. func (d dependencyFailingWrites) createFile(s string) (file, error) { osfile, err := os.Create(s) if err != nil { return nil, err } fpf := &failureProneFile{ triggered: d.triggered, mu: d.mu, File: osfile, } return fpf, nil } // WriteAt returns an error if the errors in the dependency have been triggered, // and if this file belongs to "storageFolderOne". func (fpf *failureProneFile) WriteAt(b []byte, offset int64) (int, error) { fpf.mu.Lock() triggered := *fpf.triggered fpf.mu.Unlock() name := fpf.Name() if triggered && strings.Contains(name, "storageFolderOne") { return 0, errors.New("storage folder is failing") } return fpf.File.WriteAt(b, offset) } // TestFailingStorageFolder checks that the contract manager can continue when // a storage folder is failing.
func TestFailingStorageFolder(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() d := new(dependencyFailingWrites) d.mu = new(sync.Mutex) d.triggered = new(bool) cmt, err := newMockedContractManagerTester(d, "TestFailingStorageFolder") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne") err = os.MkdirAll(storageFolderDir, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*64*2) if err != nil { t.Fatal(err) } // Add a second storage folder. storageFolderDir2 := filepath.Join(cmt.persistDir, "storageFolderTwo") err = os.MkdirAll(storageFolderDir2, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderDir2, modules.SectorSize*64*2) if err != nil { t.Fatal(err) } // Add 50 sectors. var wg sync.WaitGroup for i := 0; i < 50; i++ { wg.Add(1) go func() { defer wg.Done() err := cmt.cm.AddSector(randSector()) if err != nil { t.Fatal(err) } }() } wg.Wait() // Verify that all 50 sectors were accepted, and that they have been // distributed evenly between storage folders. sfs := cmt.cm.StorageFolders() if len(sfs) != 2 { t.Fatal("There should be two storage folders in the contract manager", len(sfs)) } if sfs[0].Capacity+sfs[1].Capacity != sfs[0].CapacityRemaining+sfs[1].CapacityRemaining+modules.SectorSize*50 { t.Error("expecting 50 sectors consumed:", sfs[0].Capacity+sfs[1].Capacity, sfs[0].CapacityRemaining+sfs[1].CapacityRemaining-modules.SectorSize*50) } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. if len(cmt.cm.sectorLocations) != 50 { t.Fatal("there should be fifty sectors reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 2 { t.Fatal("storage folder not being reported correctly") } // Check a storage folder at random, verify that the sectors are sane. var index uint16 for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.storageFolder != index { continue } if sl.count != 1 { t.Error("Sector location should only be reporting one sector") } if sl.index > 128 { t.Error("sector index within storage folder also being reported incorrectly") } } // Trigger one of the storage folders to begin failing. d.mu.Lock() *d.triggered = true d.mu.Unlock() // Add 50 more sectors. for i := 0; i < 50; i++ { wg.Add(1) go func() { defer wg.Done() err := cmt.cm.AddSector(randSector()) if err != nil { t.Fatal(err) } }() } wg.Wait() // Verify that all 50 new sectors were accepted, and that they have been // added to storageFolderTwo. sfs = cmt.cm.StorageFolders() if len(sfs) != 2 { t.Fatal("There should be two storage folders in the contract manager", len(sfs)) } if strings.Contains(sfs[0].Path, "storageFolderTwo") { // sfs[0] is the working one, should have strictly more than 50 // sectors. if sfs[0].CapacityRemaining+modules.SectorSize*50 >= sfs[0].Capacity { t.Error("expecting more than 50 sectors in sfs0") } if sfs[1].CapacityRemaining+modules.SectorSize*50 <= sfs[1].Capacity { t.Error("expecting less than 50 sectors in sfs1") } if sfs[1].FailedWrites == 0 { t.Error("failed write not reported in storage folder stats") } } else { // sfs[1] is the working one, should have strictly more than 50 // sectors.
if sfs[1].CapacityRemaining+modules.SectorSize*50 >= sfs[1].Capacity { t.Error("expecting more than 50 sectors in sfs1") } if sfs[0].CapacityRemaining+modules.SectorSize*50 <= sfs[0].Capacity { t.Error("expecting less than 50 sectors in sfs0") } if sfs[0].FailedWrites == 0 { t.Error("failed write not reported in storage folder stats") } } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. if len(cmt.cm.sectorLocations) != 100 { t.Fatal("there should be one hundred sectors reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 2 { t.Fatal("storage folder not being reported correctly") } // Check a storage folder at random, verify that the sectors are sane. for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.storageFolder != index { continue } if sl.count != 1 { t.Error("Sector location should only be reporting one sector") } if sl.index > 128 { t.Error("sector index within storage folder also being reported incorrectly") } } // Try reloading the contract manager and see if all of the stateful checks // still hold. err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Verify that all 50 new sectors were accepted, and that they have been // added to storageFolderTwo. sfs = cmt.cm.StorageFolders() if len(sfs) != 2 { t.Fatal("There should be two storage folders in the contract manager", len(sfs)) } if strings.Contains(sfs[0].Path, "storageFolderTwo") { // sfs[0] is the working one, should have strictly more than 50 // sectors. if sfs[0].CapacityRemaining+modules.SectorSize*50 >= sfs[0].Capacity { t.Error("expecting more than 50 sectors in sfs0") } if sfs[1].CapacityRemaining+modules.SectorSize*50 <= sfs[1].Capacity { t.Error("expecting less than 50 sectors in sfs1") } } else { // sfs[1] is the working one, should have strictly more than 50 // sectors. if sfs[1].CapacityRemaining+modules.SectorSize*50 >= sfs[1].Capacity { t.Error("expecting more than 50 sectors in sfs1") } if sfs[0].CapacityRemaining+modules.SectorSize*50 <= sfs[0].Capacity { t.Error("expecting less than 50 sectors in sfs0") } } // Break the rules slightly - make the test brittle by looking at the // internals directly to determine that the sector got added to the right // locations, and that the usage information was updated correctly. if len(cmt.cm.sectorLocations) != 100 { t.Fatal("there should be one hundred sectors reported in the sectorLocations map") } if len(cmt.cm.storageFolders) != 2 { t.Fatal("storage folder not being reported correctly") } // Check a storage folder at random, verify that the sectors are sane.
for _, sf := range cmt.cm.storageFolders { index = sf.index } for _, sl := range cmt.cm.sectorLocations { if sl.storageFolder != index { continue } if sl.count != 1 { t.Error("Sector location should only be reporting one sector") } if sl.index > 128 { t.Error("sector index within storage folder also being reported incorrectly") } } } Sia-1.3.0/modules/host/contractmanager/storagefolder.go000066400000000000000000000354031313565667000232340ustar00rootroot00000000000000package contractmanager import ( "errors" "fmt" "math" "os" "path/filepath" "sync/atomic" "time" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/sync" "github.com/NebulousLabs/fastrand" ) var ( // errBadStorageFolderIndex is returned if a storage folder is requested // that does not have the correct index. errBadStorageFolderIndex = errors.New("no storage folder exists at that index") // errIncompleteOffload is returned when the host is tasked with offloading // sectors from a storage folder but is unable to offload the requested // number - but is able to offload some of them. errIncompleteOffload = errors.New("could not successfully offload specified number of sectors from storage folder") // errInsufficientRemainingStorageForRemoval is returned if the remaining // storage folders do not have enough space remaining to support being // removed. errInsufficientRemainingStorageForRemoval = errors.New("not enough storage remaining to support removal of disk") // errInsufficientRemainingStorageForShrink is returned if the remaining // storage folders do not have enough space remaining to support being // reduced in size. errInsufficientRemainingStorageForShrink = errors.New("not enough storage remaining to support shrinking of disk") // ErrLargeStorageFolder is returned if a new storage folder or a resized // storage folder would exceed the maximum allowed size. ErrLargeStorageFolder = fmt.Errorf("maximum allowed size for a storage folder is %v bytes", MaximumSectorsPerStorageFolder*modules.SectorSize) // errMaxStorageFolders indicates that the limit on the number of allowed // storage folders has been reached. errMaxStorageFolders = fmt.Errorf("host can only accept up to %v storage folders", maximumStorageFolders) // errNoFreeSectors is returned if there are no free sectors in the usage // array fed to randFreeSector. This error should never be returned, as the // contract manager should have sufficient internal consistency to know in // advance that there are no free sectors. errNoFreeSectors = errors.New("could not find a free sector in the usage array") // ErrNoResize is returned if a new size is provided for a storage folder // that is the same as the current size of the storage folder. ErrNoResize = errors.New("storage folder selected for resize, but new size is same as current size") // ErrRepeatFolder is returned if a storage folder is added which links to // a path that is already in use by another storage folder. Only exact path // matches will trigger the error. ErrRepeatFolder = errors.New("selected path is already in use as a storage folder, please use 'resize'") // ErrSmallStorageFolder is returned if a new storage folder is not large // enough to meet the requirements for the minimum storage folder size. 
ErrSmallStorageFolder = fmt.Errorf("minimum allowed size for a storage folder is %v bytes", MinimumSectorsPerStorageFolder*modules.SectorSize) // errStorageFolderGranularity is returned if a call to AddStorageFolder // tries to use a storage folder size that does not evenly fit into a // factor of 8 sectors. errStorageFolderGranularity = fmt.Errorf("storage folder must be a factor of %v sectors", storageFolderGranularity) // errStorageFolderNotFolder is returned if a storage folder gets added // that is not a folder. errStorageFolderNotFolder = errors.New("must use an existing folder") // errStorageFolderNotFound is returned if a storage folder cannot be // found. errStorageFolderNotFound = errors.New("could not find storage folder with that id") // errRelativePath is returned if a path must be absolute. errRelativePath = errors.New("storage folder paths must be absolute") ) // storageFolder contains the metadata for a storage folder, including where // sectors are being stored in the folder. What sectors are being stored is // managed by the contract manager's sectorLocations map. type storageFolder struct { // Progress statistics that can be reported to the user. Typically for long // running actions like adding or resizing a storage folder. atomicProgressNumerator uint64 atomicProgressDenominator uint64 // Disk statistics for this boot cycle. atomicFailedReads uint64 atomicFailedWrites uint64 atomicSuccessfulReads uint64 atomicSuccessfulWrites uint64 // Atomic bool indicating whether or not the storage folder is available. If // the storage folder is not available, it will still be loaded but return // an error if it is queried. atomicUnavailable uint64 // uint64 for alignment // The index, path, and usage are all saved directly to disk. index uint16 path string usage []uint64 // availableSectors indicates sectors which are marked as consumed in the // usage field but are actually available. They cannot be marked as free in // the usage until the action which freed them has synced to disk, but the // settings should mark them as free during syncing. // // sectors is a count of the number of sectors in use according to the // usage field. availableSectors map[sectorID]uint32 sectors uint64 // mu needs to be RLocked to safetly write new sectors into the storage // folder. mu needs to be Locked when the folder is being added, removed, // or resized. mu sync.TryRWMutex // An open file handle is kept so that writes can easily be made to the // storage folder without needing to grab a new file handle. This also // makes it easy to do delayed-syncing. metadataFile file sectorFile file } // mostSignificantBit returns the index of the most significant bit of an input // value. func mostSignificantBit(i uint64) uint64 { if i == 0 { panic("no bits set in input") } bval := []uint64{0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7} r := uint64(0) if i&0xffffffff00000000 != 0 { r += 32 i = i >> 32 } if i&0x00000000ffff0000 != 0 { r += 16 i = i >> 16 } if i&0x000000000000ff00 != 0 { r += 8 i = i >> 8 } if i&0x00000000000000f0 != 0 { r += 4 i = i >> 4 } return r + bval[i] } // randFreeSector will take a usage array and find a random free sector within // the usage array. The uint32 indicates the index of the sector within the // usage array. func randFreeSector(usage []uint64) (uint32, error) { // Pick a random starting location. Scanning the sector in a short amount // of time requires starting from a random place. 
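	// As a worked example of the selection logic below: if usage[i] == 0b0111
	// (slots 0-2 of that word in use), then ^usage[i] has its most significant
	// set bit at position 63, so the chosen sector is:
	//
	//	msz := mostSignificantBit(^usage[i]) // 63 for usage[i] == 0b0111
	//	sector := uint32(uint64(i)*64 + msz) // the highest free slot in word i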
start := fastrand.Intn(len(usage)) // Find the first element of the array that is not completely full. var i int for i = start; i < len(usage); i++ { if usage[i] != math.MaxUint64 { break } } // If nothing was found by the end of the array, a wraparound is needed. if i == len(usage) { for i = 0; i < start; i++ { if usage[i] != math.MaxUint64 { break } } // Return an error if no empty sectors were found. if i == start { return 0, errNoFreeSectors } } // Get the most significant zero. This is achieved by performing a 'most // significant bit' on the XOR of the actual value. Return the index of the // sector that has been selected. msz := mostSignificantBit(^usage[i]) return uint32((uint64(i) * 64) + msz), nil } // usageSectors takes a storage folder usage array and returns a list of active // sectors in that usage array by their index. func usageSectors(usage []uint64) (usageSectors []uint32) { // Iterate through the usage elements. for i, u := range usage { // Each usage element corresponds to storageFolderGranularity sectors. // Iterate through them and append the ones that are present. for j := uint64(0); j < storageFolderGranularity; j++ { uMask := uint64(1) << j if u&uMask == uMask { usageSectors = append(usageSectors, uint32(i)*storageFolderGranularity+uint32(j)) } } } return usageSectors } // vacancyStorageFolder takes a set of storage folders and returns a storage // folder with vacancy for a sector along with its index. 'nil' and '-1' are // returned if none of the storage folders are available to accept a sector. // The returned storage folder will be holding an RLock on its mutex. func vacancyStorageFolder(sfs []*storageFolder) (*storageFolder, int) { enoughRoom := false var winningIndex int // Go through the folders in random order. for _, index := range fastrand.Perm(len(sfs)) { sf := sfs[index] // Skip past this storage folder if there is not enough room for at // least one sector. if sf.sectors >= uint64(len(sf.usage))*storageFolderGranularity { continue } // Skip past this storage folder if it's not available to receive new // data. if !sf.mu.TryRLock() { continue } // Select this storage folder. enoughRoom = true winningIndex = index break } if !enoughRoom { return nil, -1 } return sfs[winningIndex], winningIndex } // clearUsage will unset the usage bit at the provided sector index for this // storage folder. func (sf *storageFolder) clearUsage(sectorIndex uint32) { usageElement := sf.usage[sectorIndex/storageFolderGranularity] bitIndex := sectorIndex % storageFolderGranularity usageElementUpdated := usageElement & (^(1 << bitIndex)) if usageElementUpdated != usageElement { sf.sectors-- sf.usage[sectorIndex/storageFolderGranularity] = usageElementUpdated } } // setUsage will set the usage bit at the provided sector index for this // storage folder. func (sf *storageFolder) setUsage(sectorIndex uint32) { usageElement := sf.usage[sectorIndex/storageFolderGranularity] bitIndex := sectorIndex % storageFolderGranularity usageElementUpdated := usageElement | (1 << bitIndex) if usageElementUpdated != usageElement { sf.sectors++ sf.usage[sectorIndex/storageFolderGranularity] = usageElementUpdated } } // availableStorageFolders returns the contract manager's storage folders as a // slice, excluding any unavailable storeage folders. func (cm *ContractManager) availableStorageFolders() []*storageFolder { sfs := make([]*storageFolder, 0) for _, sf := range cm.storageFolders { // Skip unavailable storage folders. 
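	// atomicUnavailable is read with atomic.LoadUint64 because other
	// goroutines may flip it while this function iterates (for example when a
	// disk disappears). The convention used throughout this file is:
	//
	//	0 - folder is available for reads and writes
	//	1 - folder is loaded but returns errors when queried
	//
	// so only folders with a value of 0 are returned to callers.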
if atomic.LoadUint64(&sf.atomicUnavailable) == 1 { continue } sfs = append(sfs, sf) } return sfs } // threadedFolderRecheck checks the unavailable storage folders and looks to see // if they have been mounted or restored by the user. func (cm *ContractManager) threadedFolderRecheck() { // Don't spawn the loop if 'noRecheck' disruption is set. if cm.dependencies.disrupt("noRecheck") { return } sleepTime := folderRecheckInitialInterval for { // Check for shutdown. select { case <-cm.tg.StopChan(): return case <-time.After(sleepTime): } // Check all of the storage folders and recover any that have been added // to the contract manager. cm.wal.mu.Lock() for _, sf := range cm.storageFolders { if atomic.LoadUint64(&sf.atomicUnavailable) == 1 { var err1, err2 error sf.metadataFile, err1 = cm.dependencies.openFile(filepath.Join(sf.path, metadataFile), os.O_RDWR, 0700) sf.sectorFile, err2 = cm.dependencies.openFile(filepath.Join(sf.path, sectorFile), os.O_RDWR, 0700) if err1 == nil && err2 == nil { // The storage folder has been found, and loading can be // completed. cm.loadSectorLocations(sf) } else { // One of the opens failed, close the file handle for the // opens that did not fail. if err1 == nil { sf.metadataFile.Close() } if err2 == nil { sf.sectorFile.Close() } } } } cm.wal.mu.Unlock() // Increase the sleep time. if sleepTime*2 < maxFolderRecheckInterval { sleepTime *= 2 } } } // ResetStorageFolderHealth will reset the read and write statistics for the // input storage folder. func (cm *ContractManager) ResetStorageFolderHealth(index uint16) error { err := cm.tg.Add() if err != nil { return err } defer cm.tg.Done() cm.wal.mu.Lock() defer cm.wal.mu.Unlock() sf, exists := cm.storageFolders[index] if !exists { return errStorageFolderNotFound } atomic.StoreUint64(&sf.atomicFailedReads, 0) atomic.StoreUint64(&sf.atomicFailedWrites, 0) atomic.StoreUint64(&sf.atomicSuccessfulReads, 0) atomic.StoreUint64(&sf.atomicSuccessfulWrites, 0) return nil } // ResizeStorageFolder will resize a storage folder, moving sectors as // necessary. The resize operation will stop and return an error if any of the // sector move operations fail. If the force flag is set to true, the resize // operation will continue through failures, meaning that data will be lost. func (cm *ContractManager) ResizeStorageFolder(index uint16, newSize uint64, force bool) error { err := cm.tg.Add() if err != nil { return err } defer cm.tg.Done() cm.wal.mu.Lock() sf, exists := cm.storageFolders[index] cm.wal.mu.Unlock() if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 { return errStorageFolderNotFound } if newSize/modules.SectorSize < MinimumSectorsPerStorageFolder { return ErrSmallStorageFolder } if newSize/modules.SectorSize > MaximumSectorsPerStorageFolder { return ErrLargeStorageFolder } oldSize := uint64(len(sf.usage)) * storageFolderGranularity * modules.SectorSize if oldSize == newSize { return ErrNoResize } newSectorCount := uint32(newSize / modules.SectorSize) if oldSize > newSize { return cm.wal.shrinkStorageFolder(index, newSectorCount, force) } return cm.wal.growStorageFolder(index, newSectorCount) } // StorageFolders will return a list of storage folders in the host, each // containing information about the storage folder and any operations currently // being executed on the storage folder. 
func (cm *ContractManager) StorageFolders() []modules.StorageFolderMetadata { err := cm.tg.Add() if err != nil { return nil } defer cm.tg.Done() cm.wal.mu.Lock() defer cm.wal.mu.Unlock() // Iterate over the storage folders that are in memory first, and then // suppliment them with the storage folders that are not in memory. var smfs []modules.StorageFolderMetadata for _, sf := range cm.storageFolders { // Grab the non-computational data. sfm := modules.StorageFolderMetadata{ ProgressNumerator: atomic.LoadUint64(&sf.atomicProgressNumerator), ProgressDenominator: atomic.LoadUint64(&sf.atomicProgressDenominator), FailedReads: atomic.LoadUint64(&sf.atomicFailedReads), FailedWrites: atomic.LoadUint64(&sf.atomicFailedWrites), SuccessfulReads: atomic.LoadUint64(&sf.atomicSuccessfulReads), SuccessfulWrites: atomic.LoadUint64(&sf.atomicSuccessfulWrites), Capacity: modules.SectorSize * 64 * uint64(len(sf.usage)), CapacityRemaining: ((64 * uint64(len(sf.usage))) - sf.sectors) * modules.SectorSize, Index: sf.index, Path: sf.path, } // Set some of the values to extreme numbers if the storage folder is // unavailable, to flag the user's attention. if atomic.LoadUint64(&sf.atomicUnavailable) == 1 { sfm.FailedReads = 9999999999 sfm.FailedWrites = 9999999999 } // Add this storage folder to the list of storage folders. smfs = append(smfs, sfm) } return smfs } Sia-1.3.0/modules/host/contractmanager/storagefolderadd.go000066400000000000000000000304351313565667000237050ustar00rootroot00000000000000package contractmanager import ( "os" "path/filepath" "sync" "sync/atomic" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/fastrand" ) // findUnfinishedStorageFolderAdditions will scroll through a set of state // changes and figure out which of the unfinished storage folder additions are // still unfinished. func findUnfinishedStorageFolderAdditions(scs []stateChange) []savedStorageFolder { // Use a map to figure out what unfinished storage folders exist and use it // to remove the ones that have terminated. usfMap := make(map[uint16]savedStorageFolder) for _, sc := range scs { for _, sf := range sc.UnfinishedStorageFolderAdditions { usfMap[sf.Index] = sf } for _, sf := range sc.StorageFolderAdditions { delete(usfMap, sf.Index) } for _, index := range sc.ErroredStorageFolderAdditions { delete(usfMap, index) } for _, sfr := range sc.StorageFolderRemovals { delete(usfMap, sfr.Index) } } // Return the active unifinished storage folders as a slice. var sfs []savedStorageFolder for _, sf := range usfMap { sfs = append(sfs, sf) } return sfs } // cleanupUnfinishedStorageFolderAdditions will purge any unfinished storage // folder additions from the previous run. func (wal *writeAheadLog) cleanupUnfinishedStorageFolderAdditions(scs []stateChange) { usfs := findUnfinishedStorageFolderAdditions(scs) for _, usf := range usfs { sf, exists := wal.cm.storageFolders[usf.Index] if exists && atomic.LoadUint64(&sf.atomicUnavailable) == 0 { // Close the storage folder file handles. err := sf.metadataFile.Close() if err != nil { wal.cm.log.Println("Unable to close metadata file for storage folder", sf.path) } err = sf.sectorFile.Close() if err != nil { wal.cm.log.Println("Unable to close sector file for storage folder", sf.path) } // Delete the storage folder from the storage folders map. delete(wal.cm.storageFolders, sf.index) } // Remove any leftover files. 
sectorLookupName := filepath.Join(usf.Path, metadataFile) sectorHousingName := filepath.Join(usf.Path, sectorFile) err := wal.cm.dependencies.removeFile(sectorLookupName) if err != nil { wal.cm.log.Println("Unable to remove documented sector metadata lookup:", sectorLookupName, err) } err = wal.cm.dependencies.removeFile(sectorHousingName) if err != nil { wal.cm.log.Println("Unable to remove documented sector housing:", sectorHousingName, err) } // Append an error call to the changeset, indicating that the storage // folder add was not completed successfully. wal.appendChange(stateChange{ ErroredStorageFolderAdditions: []uint16{usf.Index}, }) } } // managedAddStorageFolder will add a storage folder to the contract manager. // The parent function, contractmanager.AddStorageFolder, has already performed // any error checking that can be performed without accessing the contract // manager state. // // managedAddStorageFolder can take a long time, as it writes a giant, zeroed // out file to disk covering the entire range of the storage folder, and // failure can occur late in the operation. The WAL is notified that a long // running operation is in progress, so that any changes to disk can be // reverted in the event of unclean shutdown. func (wal *writeAheadLog) managedAddStorageFolder(sf *storageFolder) error { // Lock the storage folder for the duration of the function. sf.mu.Lock() defer sf.mu.Unlock() numSectors := uint64(len(sf.usage)) * 64 sectorLookupSize := numSectors * sectorMetadataDiskSize sectorHousingSize := numSectors * modules.SectorSize totalSize := sectorLookupSize + sectorHousingSize sectorLookupName := filepath.Join(sf.path, metadataFile) sectorHousingName := filepath.Join(sf.path, sectorFile) // Update the uncommitted state to include the storage folder, returning an // error if any checks fail. var syncChan chan struct{} err := func() error { wal.mu.Lock() defer wal.mu.Unlock() // Check that the storage folder is not a duplicate. That requires // first checking the contract manager and then checking the WAL. The // number of storage folders are also counted, to make sure that the // maximum number of storage folders allowed is not exceeded. for _, csf := range wal.cm.storageFolders { // The conflicting storage folder may e in the process of being // removed, however we refuse to add a replacement storage folder // until the existing one has been removed entirely. if sf.path == csf.path { return ErrRepeatFolder } } // Check that there is room for another storage folder. if uint64(len(wal.cm.storageFolders)) > maximumStorageFolders { return errMaxStorageFolders } // Determine the index of the storage folder by scanning for an empty // spot in the folderLocations map. A random starting place is chosen // to keep good average and worst-case runtime. var iterator int index := uint16(fastrand.Intn(65536)) for iterator = 0; iterator < 65536; iterator++ { // check the list of unique folders we created earlier. _, exists := wal.cm.storageFolders[index] if !exists { break } index++ } if iterator == 65536 { wal.cm.log.Critical("Previous check indicated that there was room to add another storage folder, but folderLocations set is full.") return errMaxStorageFolders } // Assign the empty index to the storage folder. sf.index = index // Create the files that get used with the storage folder. 
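	// Two flat files back every storage folder: a metadata (lookup) file and
	// a sector housing file, with sizes computed near the top of this
	// function. As a concrete example for a minimal 64-sector folder:
	//
	//	numSectors        = 64
	//	sectorLookupSize  = 64 * sectorMetadataDiskSize
	//	sectorHousingSize = 64 * modules.SectorSize
	//
	// so the housing file dominates the on-disk footprint.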
var err error sf.metadataFile, err = wal.cm.dependencies.createFile(sectorLookupName) if err != nil { return build.ExtendErr("could not create storage folder file", err) } sf.sectorFile, err = wal.cm.dependencies.createFile(sectorHousingName) if err != nil { err = build.ComposeErrors(err, sf.metadataFile.Close()) err = build.ComposeErrors(err, wal.cm.dependencies.removeFile(sectorLookupName)) return build.ExtendErr("could not create storage folder file", err) } // Establish the progress fields for the add operation in the storage // folder. atomic.StoreUint64(&sf.atomicProgressDenominator, totalSize) // Add the storage folder to the list of storage folders. wal.cm.storageFolders[index] = sf // Add the storage folder to the list of unfinished storage folder // additions. There should be no chance of error between this append // operation and the completed commitment to the unfinished storage // folder addition (signaled by `<-syncChan` a few lines down). wal.appendChange(stateChange{ UnfinishedStorageFolderAdditions: []savedStorageFolder{sf.savedStorageFolder()}, }) // Grab the sync channel so we know when the unfinished storage folder // addition has been committed to on disk. syncChan = wal.syncChan return nil }() if err != nil { return err } // Block until the commitment to the unfinished storage folder addition is // complete. <-syncChan // Simulate a disk failure at this point. if wal.cm.dependencies.disrupt("storageFolderAddFinish") { return nil } // If there's an error in the rest of the function, the storage folder // needs to be removed from the list of unfinished storage folder // additions. Because the WAL is append-only, a stateChange needs to be // appended which indicates that the storage folder was unable to be added // successfully. defer func(sf *storageFolder) { if err != nil { wal.mu.Lock() defer wal.mu.Unlock() // Delete the storage folder from the storage folders map. delete(wal.cm.storageFolders, sf.index) // Remove the leftover files from the failed operation. err = build.ComposeErrors(err, sf.sectorFile.Close()) err = build.ComposeErrors(err, sf.metadataFile.Close()) err = build.ComposeErrors(err, wal.cm.dependencies.removeFile(sectorLookupName)) err = build.ComposeErrors(err, wal.cm.dependencies.removeFile(sectorHousingName)) // Signal in the WAL that the unfinished storage folder addition // has failed. wal.appendChange(stateChange{ ErroredStorageFolderAdditions: []uint16{sf.index}, }) } }(sf) // Allocate the files on disk for the storage folder. stepCount := sectorHousingSize / folderAllocationStepSize for i := uint64(0); i < stepCount; i++ { err = sf.sectorFile.Truncate(int64(folderAllocationStepSize * (i + 1))) if err != nil { return build.ExtendErr("could not allocate storage folder", err) } // After each iteration, update the progress numerator. atomic.AddUint64(&sf.atomicProgressNumerator, folderAllocationStepSize) } err = sf.sectorFile.Truncate(int64(sectorHousingSize)) if err != nil { return build.ExtendErr("could not allocate sector data file", err) } // Write the metadata file. err = sf.metadataFile.Truncate(int64(sectorLookupSize)) if err != nil { return build.ExtendErr("could not allocate sector metadata file", err) } // The file creation process is essentially complete at this point, report // complete progress. atomic.StoreUint64(&sf.atomicProgressNumerator, totalSize) // Sync the files. 
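	// Both files are synced before the WAL commit below. If the host crashes
	// between allocation and the StorageFolderAdditions entry, only the
	// UnfinishedStorageFolderAdditions record survives, and the cleanup code
	// earlier in this file removes the leftovers on the next startup. The
	// ordering being relied upon:
	//
	//	1. truncate/allocate both files
	//	2. fsync both files            <- this step
	//	3. append StorageFolderAdditions to the WAL and wait on syncChan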
var wg sync.WaitGroup wg.Add(2) go func() { defer wg.Done() err := sf.metadataFile.Sync() if err != nil { wal.cm.log.Println("could not synchronize allocated sector metadata file:", err) } }() go func() { defer wg.Done() err := sf.sectorFile.Sync() if err != nil { wal.cm.log.Println("could not synchronize allocated sector data file:", err) } }() wg.Wait() // TODO: Sync the directory as well (directory data changed as new files // were added) // Simulate power failure at this point for some testing scenarios. if wal.cm.dependencies.disrupt("incompleteAddStorageFolder") { return nil } // Storage folder addition has completed successfully, commit the addition // through the WAL. wal.mu.Lock() wal.cm.storageFolders[sf.index] = sf wal.appendChange(stateChange{ StorageFolderAdditions: []savedStorageFolder{sf.savedStorageFolder()}, }) syncChan = wal.syncChan wal.mu.Unlock() // Wait to confirm the storage folder addition has completed until the WAL // entry has synced. <-syncChan // Set the progress back to '0'. atomic.StoreUint64(&sf.atomicProgressNumerator, 0) atomic.StoreUint64(&sf.atomicProgressDenominator, 0) return nil } // commitAddStorageFolder integrates a pending AddStorageFolder call into the // state. commitAddStorageFolder should only be called during WAL recovery. func (wal *writeAheadLog) commitAddStorageFolder(ssf savedStorageFolder) { sf, exists := wal.cm.storageFolders[ssf.Index] if exists { if sf.metadataFile != nil { sf.metadataFile.Close() } if sf.sectorFile != nil { sf.sectorFile.Close() } } sf = &storageFolder{ index: ssf.Index, path: ssf.Path, usage: ssf.Usage, availableSectors: make(map[sectorID]uint32), } var err error sf.metadataFile, err = wal.cm.dependencies.openFile(filepath.Join(sf.path, metadataFile), os.O_RDWR, 0700) if err != nil { wal.cm.log.Println("Difficulties opening sector metadata file for", sf.path, ":", err) return } sf.sectorFile, err = wal.cm.dependencies.openFile(filepath.Join(sf.path, sectorFile), os.O_RDWR, 0700) if err != nil { wal.cm.log.Println("Difficulties opening sector file for", sf.path, ":", err) sf.metadataFile.Close() return } wal.cm.storageFolders[sf.index] = sf } // AddStorageFolder adds a storage folder to the contract manager. func (cm *ContractManager) AddStorageFolder(path string, size uint64) error { err := cm.tg.Add() if err != nil { return err } defer cm.tg.Done() // Check that the storage folder being added meets the size requirements. sectors := size / modules.SectorSize if sectors > MaximumSectorsPerStorageFolder { return ErrLargeStorageFolder } if sectors < MinimumSectorsPerStorageFolder { return ErrSmallStorageFolder } if sectors%storageFolderGranularity != 0 { return errStorageFolderGranularity } // Check that the path is an absolute path. if !filepath.IsAbs(path) { return errRelativePath } // Check that the folder being linked to both exists and is a folder. pathInfo, err := os.Stat(path) if err != nil { return err } if !pathInfo.Mode().IsDir() { return errStorageFolderNotFolder } // Create a storage folder object and add it to the WAL.
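	// The usage slice below is sized at sectors/64 because each uint64
	// element tracks one bit per sector. For example, a folder of size
	// modules.SectorSize*storageFolderGranularity*2 holds 128 sectors and
	// gets a two-element usage slice covering bits 0-127.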
newSF := &storageFolder{ path: path, usage: make([]uint64, sectors/64), availableSectors: make(map[sectorID]uint32), } err = cm.wal.managedAddStorageFolder(newSF) if err != nil { cm.log.Println("Call to AddStorageFolder has failed:", err) return err } return nil } Sia-1.3.0/modules/host/contractmanager/storagefolderadd_test.go000066400000000000000000000653361313565667000247520ustar00rootroot00000000000000package contractmanager import ( "errors" "io/ioutil" "os" "path/filepath" "strings" "sync" "testing" "time" "github.com/NebulousLabs/Sia/modules" ) // TestAddStorageFolder tries to add a storage folder to the contract manager, // blocking until the add has completed. func TestAddStorageFolder(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cmt, err := newContractManagerTester("TestAddStorageFolder") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderDir, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*storageFolderGranularity*2) if err != nil { t.Fatal(err) } // Check that the storage folder has been added. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder reported") } // Check that the storage folder has the right path and size. if sfs[0].Path != storageFolderDir { t.Error("storage folder reported with wrong path") } if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*2 { t.Error("storage folder reported with wrong capacity") } } // dependencyLargeFolder is a mocked dependency that will return files which // can only handle 1 MiB of data being written to them. type dependencyLargeFolder struct { productionDependencies } // limitFile will return an error if a call to Write is made that will put the // total throughput of the file over 1 MiB. type limitFile struct { throughput int64 mu sync.Mutex *os.File } // createFile will return a file that will return an error if a write will put // the total throughput of the file over 1 MiB. func (dependencyLargeFolder) createFile(s string) (file, error) { osFile, err := os.Create(s) if err != nil { return nil, err } lf := &limitFile{ File: osFile, } return lf, nil } // Truncate returns an error if the operation will put the total throughput of // the file over 1 MiB. func (l *limitFile) Truncate(offset int64) error { l.mu.Lock() defer l.mu.Unlock() // If the limit has already been reached, return an error. if l.throughput >= 1<<20 { return errors.New("limitFile throughput limit reached earlier") } fi, err := l.Stat() if err != nil { return errors.New("limitFile could not fetch fileinfo: " + err.Error()) } // No throughput if file is shrinking. if fi.Size() > offset { return l.File.Truncate(offset) } writeSize := offset - fi.Size() // If the limit has not been reached, pass the call through to the // underlying file. Limit counting assumes that the file being passed in // starts with a size of zero. if l.throughput+writeSize <= 1<<20 { l.throughput += writeSize return l.File.Truncate(offset) } // If the limit has been reached, return an error. return errors.New("limitFile throughput limit reached before all input was written to disk") } // TestAddLargeStorageFolder tries to add a storage folder that is too large to // fit on disk.
This is represented by mocking a file that returns an error // after more than 1 MiB has been written. func TestAddLargeStorageFolder(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() d := new(dependencyLargeFolder) cmt, err := newMockedContractManagerTester(d, "TestAddLargeStorageFolder") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderDir, 0700) if err != nil { t.Fatal(err) } addErr := cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*storageFolderGranularity*16) // Total size must exceed the limit of the limitFile. // The error should be a storage folder error, but the layers of error // context make the exact error type awkward to check. if addErr == nil { t.Fatal("AddStorageFolder should have failed due to the throughput limit") } // Check that the storage folder has not been added. sfs := cmt.cm.StorageFolders() if len(sfs) != 0 { t.Fatal("Storage folder add should have failed.") } // Check that the storage folder is empty - because the operation failed, // any files that got created should have been removed. files, err := ioutil.ReadDir(storageFolderDir) if err != nil { t.Fatal(err) } if len(files) != 0 { t.Log(addErr) t.Error("there should not be any files in the storage folder because the AddStorageFolder operation failed.") t.Error(len(files)) for _, file := range files { t.Error(file.Name()) } } } // TestAddStorageFolderConcurrent adds multiple storage folders concurrently to // the contract manager. func TestAddStorageFolderConcurrent(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cmt, err := newContractManagerTester("TestAddStorageFolderConcurrent") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add three storage folders to the contract manager tester. storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne") storageFolderTwo := filepath.Join(cmt.persistDir, "storageFolderTwo") storageFolderThree := filepath.Join(cmt.persistDir, "storageFolderThree") // Create the storage folder dirs. err = os.MkdirAll(storageFolderOne, 0700) if err != nil { t.Fatal(err) } err = os.MkdirAll(storageFolderTwo, 0700) if err != nil { t.Fatal(err) } err = os.MkdirAll(storageFolderThree, 0700) if err != nil { t.Fatal(err) } // Launch three calls to add simultaneously and wait for all three to // finish. var wg sync.WaitGroup wg.Add(3) go func() { defer wg.Done() err := cmt.cm.AddStorageFolder(storageFolderOne, modules.SectorSize*storageFolderGranularity*8) if err != nil { t.Fatal(err) } }() go func() { defer wg.Done() err := cmt.cm.AddStorageFolder(storageFolderTwo, modules.SectorSize*storageFolderGranularity*8) if err != nil { t.Fatal(err) } }() go func() { defer wg.Done() err := cmt.cm.AddStorageFolder(storageFolderThree, modules.SectorSize*storageFolderGranularity*8) if err != nil { t.Fatal(err) } }() wg.Wait() // Check that the storage folders have been added. sfs := cmt.cm.StorageFolders() if len(sfs) != 3 { t.Fatal("There should be three storage folders reported") } } // dependencyBlockSFOne is a mocked dependency for os.Create that will return a // file for storage folder one only which will block on a call to file.Truncate // until a signal has been given that the block can be released. type dependencyBlockSFOne struct { blockLifted chan struct{} writeCalled chan struct{} productionDependencies } // blockedFile is the file that gets returned by dependencyBlockSFOne to // storageFolderOne.
type blockedFile struct { blockLifted chan struct{} writeCalled chan struct{} *os.File } // Truncate will block until a signal is given that the block may be lifted. // Truncate will signal when it has been called for the first time, so that the // tester knows the function has reached a blocking point. func (bf *blockedFile) Truncate(offset int64) error { if !strings.Contains(bf.File.Name(), "storageFolderOne") || strings.Contains(bf.File.Name(), "siahostmetadata.dat") { return bf.File.Truncate(offset) } close(bf.writeCalled) <-bf.blockLifted return bf.File.Truncate(offset) } // createFile will return a normal file to all callers except for // storageFolderOne, which will have calls to file.Truncate blocked until a // signal is given that the blocks may be released. func (d *dependencyBlockSFOne) createFile(s string) (file, error) { // If storageFolderOne, return a file that will not write until the signal // is sent that writing is okay. if strings.Contains(s, "storageFolderOne") { file, err := os.Create(s) if err != nil { return nil, err } bf := &blockedFile{ blockLifted: d.blockLifted, writeCalled: d.writeCalled, File: file, } return bf, nil } // If not storageFolderOne, return a normal file. return os.Create(s) } // TestAddStorageFolderBlocking adds multiple storage folders concurrently to // the contract manager, blocking on the first one to make sure that the others // are still allowed to complete. func TestAddStorageFolderBlocking(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // Create the mocked dependencies that will block for the first storage // folder. d := &dependencyBlockSFOne{ blockLifted: make(chan struct{}), writeCalled: make(chan struct{}), } // Create a contract manager tester with the mocked dependencies. cmt, err := newMockedContractManagerTester(d, "TestAddStorageFolderBlocking") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add three storage folders to the contract manager tester. storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne") storageFolderTwo := filepath.Join(cmt.persistDir, "storageFolderTwo") storageFolderThree := filepath.Join(cmt.persistDir, "storageFolderThree") // Create the storage folder dirs. err = os.MkdirAll(storageFolderOne, 0700) if err != nil { t.Fatal(err) } err = os.MkdirAll(storageFolderTwo, 0700) if err != nil { t.Fatal(err) } err = os.MkdirAll(storageFolderThree, 0700) if err != nil { t.Fatal(err) } // Spin off the first goroutine, and then wait until Truncate has been // called on the underlying file. sfOneSize := modules.SectorSize * storageFolderGranularity * 8 go func() { err := cmt.cm.AddStorageFolder(storageFolderOne, sfOneSize) if err != nil { t.Fatal(err) } }() select { case <-time.After(time.Second * 5): t.Fatal("storage folder not written out") case <-d.writeCalled: } // Check the status of the storage folder. At this point, the folder should // be returned as an unfinished storage folder addition, with progress // indicating that the storage folder is at 0 bytes progressed out of // sfOneSize.
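// Expected arithmetic for the denominator check below (a sketch derived from // the constants used in this package): progress covers both files, so the // denominator should equal sfOneSize (the sector file bytes) plus // sectorMetadataDiskSize*storageFolderGranularity*8 (the metadata file bytes).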
sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("there should be one storage folder reported") } if sfs[0].ProgressNumerator != 0 { t.Error("storage folder is showing progress despite being blocked") } if sfs[0].ProgressDenominator != sfOneSize+sectorMetadataDiskSize*storageFolderGranularity*8 { t.Error("storage folder is not showing that an action is in progress, though one is", sfs[0].ProgressDenominator, sfOneSize) } var wg sync.WaitGroup wg.Add(2) go func() { defer wg.Done() err := cmt.cm.AddStorageFolder(storageFolderTwo, modules.SectorSize*storageFolderGranularity*8) if err != nil { t.Fatal(err) } }() go func() { defer wg.Done() err := cmt.cm.AddStorageFolder(storageFolderThree, modules.SectorSize*storageFolderGranularity*8) if err != nil { t.Fatal(err) } }() wg.Wait() close(d.blockLifted) cmt.cm.tg.Flush() // Check that the storage folders have been added. sfs = cmt.cm.StorageFolders() if len(sfs) != 3 { t.Fatal("There should be three storage folders reported") } // All actions should have completed, so all storage folders should be // reporting '0' in the progress denominator. for _, sf := range sfs { if sf.ProgressDenominator != 0 { t.Error("ProgressDenominator is indicating that actions still remain") } } } // TestAddStorageFolderConsecutive adds multiple storage folders consecutively // to the contract manager, waiting for each add to complete before beginning // the next. func TestAddStorageFolderConsecutive(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // Create a contract manager tester. cmt, err := newContractManagerTester("TestAddStorageFolderConsecutive") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add three storage folders to the contract manager tester. storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne") storageFolderTwo := filepath.Join(cmt.persistDir, "storageFolderTwo") storageFolderThree := filepath.Join(cmt.persistDir, "storageFolderThree") // Create the storage folder dirs. err = os.MkdirAll(storageFolderOne, 0700) if err != nil { t.Fatal(err) } err = os.MkdirAll(storageFolderTwo, 0700) if err != nil { t.Fatal(err) } err = os.MkdirAll(storageFolderThree, 0700) if err != nil { t.Fatal(err) } // Add the storage folders one after another. sfSize := modules.SectorSize * storageFolderGranularity * 8 err = cmt.cm.AddStorageFolder(storageFolderOne, sfSize) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderTwo, sfSize) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderThree, sfSize) if err != nil { t.Fatal(err) } // Check that the storage folders have been added. sfs := cmt.cm.StorageFolders() if len(sfs) != 3 { t.Fatal("There should be three storage folders reported") } // All actions should have completed, so all storage folders should be // reporting '0' in the progress denominator. for _, sf := range sfs { if sf.ProgressDenominator != 0 { t.Error("ProgressDenominator is indicating that actions still remain") } } } // TestAddStorageFolderDoubleAdd consecutively adds two storage // folders with the same path to the contract manager. func TestAddStorageFolderDoubleAdd(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // Create a contract manager tester. cmt, err := newContractManagerTester("TestAddStorageFolderDoubleAdd") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester.
storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderOne, 0700) if err != nil { t.Fatal(err) } // Add the same storage folder twice in a row; the second add should fail // with ErrRepeatFolder. sfSize := modules.SectorSize * storageFolderGranularity * 8 err = cmt.cm.AddStorageFolder(storageFolderOne, sfSize) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderOne, sfSize*2) if err != ErrRepeatFolder { t.Fatal(err) } // Check that the storage folder has been added. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder reported") } // All actions should have completed, so all storage folders should be // reporting '0' in the progress denominator for _, sf := range sfs { if sf.ProgressDenominator != 0 { t.Error("ProgressDenominator is indicating that actions still remain") } } } // dependencyNoSyncLoop is a mocked dependency that will disable the sync loop. type dependencyNoSyncLoop struct { productionDependencies } // disrupt will disrupt the threadedSyncLoop, causing the loop to terminate as // soon as it is created. func (dependencyNoSyncLoop) disrupt(s string) bool { if s == "threadedSyncLoopStart" || s == "cleanWALFile" { // Disrupt threadedSyncLoop. The sync loop will exit immediately // instead of executing commits. Also disrupt the process that removes // the WAL file following clean shutdown. return true } return false } // TestAddStorageFolderDoubleAddNoCommit hijacks the sync loop in the contract // manager such that the sync loop will not run automatically. Then, without // doing an actual commit, the test will indicate to open functions that a // commit has completed, allowing the next storage folder operation to happen. // Because the changes were finalized but not committed, extra code coverage // should be achieved, though the result of the storage folder being rejected // should be the same. func TestAddStorageFolderDoubleAddNoCommit(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() d := new(dependencyNoSyncLoop) cmt, err := newMockedContractManagerTester(d, "TestAddStorageFolderDoubleAddNoCommit") if err != nil { t.Fatal(err) } // The closing of this channel must happen after the call to panicClose. closeFakeSyncChan := make(chan struct{}) defer close(closeFakeSyncChan) defer cmt.panicClose() // The sync loop will never run, which means naively AddStorageFolder will // never return. To get AddStorageFolder to return before the commit // completes, spin up an alternate sync loop which only performs the // signaling responsibilities of the commit function. go func() { for { select { case <-closeFakeSyncChan: return case <-time.After(time.Millisecond * 250): // Signal that the commit operation has completed, even though // it has not. cmt.cm.wal.mu.Lock() close(cmt.cm.wal.syncChan) cmt.cm.wal.syncChan = make(chan struct{}) cmt.cm.wal.mu.Unlock() } } }() // Add a storage folder to the contract manager tester. storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderOne, 0700) if err != nil { t.Fatal(err) } // Add the same storage folder twice in a row; the second add should fail // with ErrRepeatFolder.
sfSize := modules.SectorSize * storageFolderGranularity * 8 err = cmt.cm.AddStorageFolder(storageFolderOne, sfSize) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderOne, sfSize*2) if err != ErrRepeatFolder { t.Fatal(err) } // Check that the storage folder has been added. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder reported", len(sfs)) } // All actions should have completed, so all storage folders should be // reporting '0' in the progress denominator for _, sf := range sfs { if sf.ProgressDenominator != 0 { t.Error("ProgressDenominator is indicating that actions still remain") } } } // TestAddStorageFolderFailedCommit adds a storage folder without ever saving // the settings. func TestAddStorageFolderFailedCommit(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() d := new(dependencyNoSettingsSave) cmt, err := newMockedContractManagerTester(d, "TestAddStorageFolderFailedCommit") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderOne, 0700) if err != nil { t.Fatal(err) } sfSize := modules.SectorSize * storageFolderGranularity * 8 err = cmt.cm.AddStorageFolder(storageFolderOne, sfSize) if err != nil { t.Fatal(err) } d.mu.Lock() d.triggered = true d.mu.Unlock() // Check that the storage folder has been added. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder reported") } // All actions should have completed, so all storage folders should be // reporting '0' in the progress denominator if sfs[0].ProgressDenominator != 0 { t.Error("ProgressDenominator is indicating that actions still remain") } // Close the contract manager and replace it with a new contract manager. // The new contract manager should have normal dependencies. err = cmt.cm.Close() if err != nil { t.Fatal(err) } // Create the new contract manager using the same persist dir, so that it // will see the uncommitted WAL. cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Check that the storage folder was properly recovered. sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder reported", len(sfs)) } } // dependencySFAddNoFinish is a mocked dependency that will prevent the // storage folder add operation from being finalized. type dependencySFAddNoFinish struct { productionDependencies } // disrupt will prevent the storage folder add from being finalized, and will // also prevent the WAL file from being removed at shutdown. func (d *dependencySFAddNoFinish) disrupt(s string) bool { if s == "storageFolderAddFinish" { return true } if s == "cleanWALFile" { // Prevent the WAL file from being removed. return true } return false } // TestAddStorageFolderUnfinishedCreate hijacks both the sync loop and the // AddStorageFolder code to create a situation where the added storage folder // is started but not seen through to conclusion, and no commit is run. func TestAddStorageFolderUnfinishedCreate(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() d := new(dependencySFAddNoFinish) cmt, err := newMockedContractManagerTester(d, "TestAddStorageFolderUnfinishedCreate") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir.
err = os.MkdirAll(storageFolderOne, 0700) if err != nil { t.Fatal(err) } // Call AddStorageFolder, knowing that the changes will not be properly // committed, and that the call itself will not actually complete. sfSize := modules.SectorSize * storageFolderGranularity * 8 err = cmt.cm.AddStorageFolder(storageFolderOne, sfSize) if err != nil { t.Fatal(err) } // Check that the storage folder has been added. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder reported") } // Close the contract manager and replace it with a new contract manager. // The new contract manager should have normal dependencies. err = cmt.cm.Close() if err != nil { t.Fatal(err) } // Create the new contract manager using the same persist dir, so that it // will see the uncommitted WAL. cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Check that the storage folder was properly removed - incomplete storage // folder adds should be removed upon startup. sfs = cmt.cm.StorageFolders() if len(sfs) != 0 { t.Error("Storage folder add should have failed.") } // Check that the storage folder is empty - because the operation failed, // any files that got created should have been removed. files, err := ioutil.ReadDir(storageFolderOne) if err != nil { t.Error(err) } if len(files) != 0 { t.Error("there should not be any files in the storage folder because the AddStorageFolder operation failed:", len(files)) t.Error(len(files)) for _, file := range files { t.Error(file.Name()) } } } // TestAddStorageFolderDoubleAddConcurrent concurrently adds two storage // folders with the same path to the contract manager. func TestAddStorageFolderDoubleAddConcurrent(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // Create a contract manager tester with the mocked dependencies. cmt, err := newContractManagerTester("TestAddStorageFolderDoubleAddConcurrent") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderOne, 0700) if err != nil { t.Fatal(err) } // Call AddStorageFolder in three separate goroutines, where the same path // is used in each. The errors are not checked because one of the storage // folders will succeed, but it's uncertain which one. var wg sync.WaitGroup sfSize := modules.SectorSize * storageFolderGranularity * 8 wg.Add(3) go func() { _ = cmt.cm.AddStorageFolder(storageFolderOne, sfSize) wg.Done() }() go func() { _ = cmt.cm.AddStorageFolder(storageFolderOne, sfSize*2) wg.Done() }() go func() { _ = cmt.cm.AddStorageFolder(storageFolderOne, sfSize*3) wg.Done() }() wg.Wait() // Check that the storage folder has been added. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder reported") } // All actions should have completed, so all storage folders should be // reporting '0' in the progress denominator. for _, sf := range sfs { if sf.ProgressDenominator != 0 { t.Error("ProgressDenominator is indicating that actions still remain") } } } // TestAddStorageFolderReload adds a storage folder to the contract manager, // and then reloads the contract manager to see if the storage folder is still // there. func TestAddStorageFolderReload(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // Create a contract manager tester with the mocked dependencies. 
cmt, err := newContractManagerTester("TestAddStorageFolderReload") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder to the contract manager tester. storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderOne, 0700) if err != nil { t.Fatal(err) } sfSize := modules.SectorSize * storageFolderGranularity * 24 err = cmt.cm.AddStorageFolder(storageFolderOne, sfSize) if err != nil { t.Fatal(err) } // Check that the storage folder has been added. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder reported") } // Check that the size of the storage folder is correct. if sfs[0].Capacity != sfSize { t.Error("capacity reported by storage folder is not the capacity alloacted") } if sfs[0].CapacityRemaining != sfSize { t.Error("capacity remaining reported by storage folder is not the capacity alloacted") } // All actions should have completed, so all storage folders should be // reporting '0' in the progress denominator. for _, sf := range sfs { if sf.ProgressDenominator != 0 { t.Error("ProgressDenominator is indicating that actions still remain") } } // Close the contract manager and open a new one using the same // persistence. err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Check that the storage folder has been added. sfs = cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("There should be one storage folder reported", len(sfs)) } // Check that the size of the storage folder is correct. if sfs[0].Capacity != sfSize { t.Error("capacity reported by storage folder is not the capacity alloacted") } if sfs[0].CapacityRemaining != sfSize { t.Error("capacity remaining reported by storage folder is not the capacity alloacted", sfs[0].Capacity, sfs[0].CapacityRemaining) } // Check that the storage folder as represented on disk has the correct // size. sectorLookupTableSize := int64(storageFolderGranularity * 24 * sectorMetadataDiskSize) expectedSize := int64(sfSize) fi, err := os.Stat(filepath.Join(storageFolderOne, sectorFile)) if err != nil { t.Fatal(err) } if fi.Size() != expectedSize { t.Error("sector file had unexpected size", fi.Size(), expectedSize) } fi, err = os.Stat(filepath.Join(storageFolderOne, metadataFile)) if err != nil { t.Fatal(err) } if fi.Size() != sectorLookupTableSize { t.Error("sector file had unexpected size", fi.Size(), sectorLookupTableSize) } } Sia-1.3.0/modules/host/contractmanager/storagefolderempty.go000066400000000000000000000175361313565667000243220ustar00rootroot00000000000000package contractmanager import ( "errors" "sync" "sync/atomic" "github.com/NebulousLabs/Sia/build" ) var ( // ErrPartialRelocation is returned during an operation attempting to clear // out the sectors in a storage folder if errors prevented one or more of // the sectors from being properly migrated to a new storage folder. ErrPartialRelocation = errors.New("unable to migrate all sectors") ) // managedMoveSector will move a sector from its current storage folder to // another. func (wal *writeAheadLog) managedMoveSector(id sectorID) error { wal.managedLockSector(id) defer wal.managedUnlockSector(id) // Find the sector to be moved. 
wal.mu.Lock() oldLocation, exists1 := wal.cm.sectorLocations[id] oldFolder, exists2 := wal.cm.storageFolders[oldLocation.storageFolder] wal.mu.Unlock() if !exists1 || !exists2 || atomic.LoadUint64(&oldFolder.atomicUnavailable) == 1 { return errors.New("unable to find sector that is targeted for move") } // Read the sector data from disk so that it can be added correctly to a // new storage folder. sectorData, err := readSector(oldFolder.sectorFile, oldLocation.index) if err != nil { atomic.AddUint64(&oldFolder.atomicFailedReads, 1) return build.ExtendErr("unable to read sector selected for migration", err) } atomic.AddUint64(&oldFolder.atomicSuccessfulReads, 1) // Create the sector update that will remove the old sector. oldSU := sectorUpdate{ Count: 0, ID: id, Folder: oldLocation.storageFolder, Index: oldLocation.index, } // Place the sector into its new folder and add the atomic move to the WAL. wal.mu.Lock() storageFolders := wal.cm.availableStorageFolders() wal.mu.Unlock() var syncChan chan struct{} for len(storageFolders) >= 1 { var storageFolderIndex int err := func() error { // NOTE: Convention is broken when working with WAL lock here, due // to the complexity required with managing both the WAL lock and // the storage folder lock. Pay close attention when reviewing and // modifying. // Grab a vacant storage folder. wal.mu.Lock() var sf *storageFolder sf, storageFolderIndex = vacancyStorageFolder(storageFolders) if sf == nil { // None of the storage folders have enough room to house the // sector. wal.mu.Unlock() return errInsufficientStorageForSector } defer sf.mu.RUnlock() // Grab a sector from the storage folder. WAL lock cannot be // released between grabbing the storage folder and grabbing a // sector lest another thread request the final available sector in // the storage folder. sectorIndex, err := randFreeSector(sf.usage) if err != nil { wal.mu.Unlock() wal.cm.log.Critical("a storage folder with full usage was returned from vacancyStorageFolder") return err } // Set the usage, but mark it as uncommitted. sf.setUsage(sectorIndex) sf.availableSectors[id] = sectorIndex wal.mu.Unlock() // NOTE: The usage has been set, in the event of failure the usage // must be cleared. // Try writing the new sector to disk. err = writeSector(sf.sectorFile, sectorIndex, sectorData) if err != nil { wal.cm.log.Printf("ERROR: Unable to write sector for folder %v: %v\n", sf.path, err) atomic.AddUint64(&sf.atomicFailedWrites, 1) wal.mu.Lock() sf.clearUsage(sectorIndex) delete(sf.availableSectors, id) wal.mu.Unlock() return errDiskTrouble } // Try writing the sector metadata to disk. su := sectorUpdate{ Count: oldLocation.count, ID: id, Folder: sf.index, Index: sectorIndex, } err = wal.writeSectorMetadata(sf, su) if err != nil { wal.cm.log.Printf("ERROR: Unable to write sector metadata for folder %v: %v\n", sf.path, err) atomic.AddUint64(&sf.atomicFailedWrites, 1) wal.mu.Lock() sf.clearUsage(sectorIndex) delete(sf.availableSectors, id) wal.mu.Unlock() return errDiskTrouble } // Sector added successfully, update the WAL and the state.
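// A sketch of why two updates are bundled below: appending the removal // (oldSU, with a count of 0) and the addition (su) in a single stateChange // makes the move atomic with respect to WAL recovery, so a crash can never // replay the removal without the matching addition.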
sl := sectorLocation{ index: sectorIndex, storageFolder: sf.index, count: oldLocation.count, } wal.mu.Lock() wal.appendChange(stateChange{ SectorUpdates: []sectorUpdate{oldSU, su}, }) oldFolder.clearUsage(oldLocation.index) delete(wal.cm.sectorLocations, oldSU.ID) delete(sf.availableSectors, id) wal.cm.sectorLocations[id] = sl syncChan = wal.syncChan wal.mu.Unlock() return nil }() if err == errInsufficientStorageForSector { return err } else if err != nil { // Try the next storage folder. storageFolders = append(storageFolders[:storageFolderIndex], storageFolders[storageFolderIndex+1:]...) continue } // Sector added successfully, break. break } if len(storageFolders) < 1 { return errInsufficientStorageForSector } return nil } // managedEmptyStorageFolder will empty out the storage folder with the // provided index starting with the 'startingPoint'th sector all the way to the // end of the storage folder, allowing the storage folder to be safely // truncated. If 'force' is set to true, the function will not give up when // there is no more space available, instead choosing to lose data. // // This function assumes that the storage folder has already been made // invisible to AddSector, and that this is the only thread that will be // interacting with the storage folder. func (wal *writeAheadLog) managedEmptyStorageFolder(sfIndex uint16, startingPoint uint32) (uint64, error) { // Grab the storage folder in question. wal.mu.Lock() sf, exists := wal.cm.storageFolders[sfIndex] wal.mu.Unlock() if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 { return 0, errBadStorageFolderIndex } // Read the sector lookup bytes into memory; we'll need them to figure out // what sectors are in which locations. sectorLookupBytes, err := readFullMetadata(sf.metadataFile, len(sf.usage)*storageFolderGranularity) if err != nil { atomic.AddUint64(&sf.atomicFailedReads, 1) return 0, build.ExtendErr("unable to read sector metadata", err) } atomic.AddUint64(&sf.atomicSuccessfulReads, 1) // Before iterating through the sectors and moving them, set up a thread // pool that can parallelize the transfers without spinning up 250,000 // goroutines per TB. var errCount uint64 var wg sync.WaitGroup workers := 250 workChan := make(chan sectorID) doneChan := make(chan struct{}) for i := 0; i < workers; i++ { go func() { for { select { case id := <-workChan: err := wal.managedMoveSector(id) if err != nil { atomic.AddUint64(&errCount, 1) wal.cm.log.Println("Unable to write sector:", err) } wg.Done() case <-doneChan: return } } }() } // Iterate through all of the sectors and perform the move operation on // them. readHead := startingPoint * sectorMetadataDiskSize for _, usage := range sf.usage[startingPoint/storageFolderGranularity:] { // The usage is a bitfield indicating where sectors exist. Iterate // through each bit to check for a sector. usageMask := uint64(1) for j := 0; j < storageFolderGranularity; j++ { // Perform a move operation if a sector exists in this location. if usage&usageMask == usageMask { // Fetch the id of the sector in this location. var id sectorID copy(id[:], sectorLookupBytes[readHead:readHead+12]) // Reference the sector locations map to get the most // up-to-date status for the sector. wal.mu.Lock() _, exists := wal.cm.sectorLocations[id] wal.mu.Unlock() if !exists { // The sector has been deleted, but the usage has not been // updated yet. Safe to ignore. continue } // Queue the sector move. 
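// (Backpressure sketch: workChan is unbuffered, so this send blocks until // one of the 250 workers is free, capping the number of in-flight sector // moves without queueing every sector ID in memory.)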
wg.Add(1) workChan <- id } readHead += sectorMetadataDiskSize usageMask = usageMask << 1 } } wg.Wait() close(doneChan) // Return ErrPartialRelocation if not every sector was migrated out // successfully. if errCount > 0 { return errCount, ErrPartialRelocation } return 0, nil } Sia-1.3.0/modules/host/contractmanager/storagefoldergrow.go000066400000000000000000000204711313565667000241320ustar00rootroot00000000000000package contractmanager import ( "errors" "sync" "sync/atomic" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" ) type ( // storageFolderExtension is the data saved to the WAL to indicate that a // storage folder has been extended successfully. storageFolderExtension struct { Index uint16 NewSectorCount uint32 } // unfinishedStorageFolderExtension contains the data necessary to reverse // a storage folder extension that has failed. unfinishedStorageFolderExtension struct { Index uint16 OldSectorCount uint32 } ) // findUnfinishedStorageFolderExtensions will scan through a set of state // changes and pull out all of the storage folder extensions which have not yet // completed. func findUnfinishedStorageFolderExtensions(scs []stateChange) []unfinishedStorageFolderExtension { // Use a map to figure out what unfinished storage folder extensions exist // and use it to remove the ones that have terminated. usfeMap := make(map[uint16]unfinishedStorageFolderExtension) for _, sc := range scs { for _, usfe := range sc.UnfinishedStorageFolderExtensions { usfeMap[usfe.Index] = usfe } for _, sfe := range sc.StorageFolderExtensions { delete(usfeMap, sfe.Index) } for _, index := range sc.ErroredStorageFolderExtensions { delete(usfeMap, index) } for _, sfr := range sc.StorageFolderRemovals { delete(usfeMap, sfr.Index) } } // Return the active unfinished storage folder extensions as a slice. usfes := make([]unfinishedStorageFolderExtension, 0, len(usfeMap)) for _, usfe := range usfeMap { usfes = append(usfes, usfe) } return usfes } // cleanupUnfinishedStorageFolderExtensions will reset any unsuccessful storage // folder extensions from the previous run. func (wal *writeAheadLog) cleanupUnfinishedStorageFolderExtensions(scs []stateChange) { usfes := findUnfinishedStorageFolderExtensions(scs) for _, usfe := range usfes { sf, exists := wal.cm.storageFolders[usfe.Index] if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 { wal.cm.log.Critical("unfinished storage folder extension exists where the storage folder does not exist") continue } // Truncate the files back to their original size. err := sf.metadataFile.Truncate(int64(len(sf.usage) * storageFolderGranularity * sectorMetadataDiskSize)) if err != nil { wal.cm.log.Printf("Error: unable to truncate metadata file as storage folder %v is resized\n", sf.path) } err = sf.sectorFile.Truncate(int64(modules.SectorSize * storageFolderGranularity * uint64(len(sf.usage)))) if err != nil { wal.cm.log.Printf("Error: unable to truncate sector file as storage folder %v is resized\n", sf.path) } // Append an error call to the changeset, indicating that the storage // folder extension was not completed successfully. wal.appendChange(stateChange{ ErroredStorageFolderExtensions: []uint16{sf.index}, }) } } // commitStorageFolderExtension will apply a storage folder extension to the // state.
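// It runs while the WAL is being committed or recovered; a sketch of the // growth arithmetic: the usage bitfield is extended by // NewSectorCount/storageFolderGranularity - len(sf.usage) words, one bit per // sector.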
func (wal *writeAheadLog) commitStorageFolderExtension(sfe storageFolderExtension) { sf, exists := wal.cm.storageFolders[sfe.Index] if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 { wal.cm.log.Critical("ERROR: storage folder extension provided for storage folder that does not exist") return } newUsageSize := sfe.NewSectorCount / storageFolderGranularity appendUsage := make([]uint64, int(newUsageSize)-len(sf.usage)) sf.usage = append(sf.usage, appendUsage...) } // growStorageFolder will extend the storage folder files so that they may hold // more sectors. func (wal *writeAheadLog) growStorageFolder(index uint16, newSectorCount uint32) error { // Retrieve the specified storage folder. wal.mu.Lock() sf, exists := wal.cm.storageFolders[index] wal.mu.Unlock() if !exists || atomic.LoadUint64(&sf.atomicUnavailable) == 1 { return errStorageFolderNotFound } // Lock the storage folder for the duration of the operation. sf.mu.Lock() defer sf.mu.Unlock() // Write the intention to increase the storage folder size to the WAL, // providing enough information to allow a truncation if the growing fails. wal.mu.Lock() wal.appendChange(stateChange{ UnfinishedStorageFolderExtensions: []unfinishedStorageFolderExtension{{ Index: index, OldSectorCount: uint32(len(sf.usage)) * storageFolderGranularity, }}, }) syncChan := wal.syncChan wal.mu.Unlock() <-syncChan // Prepare variables for growing the storage folder. currentHousingSize := int64(len(sf.usage)) * int64(modules.SectorSize) * storageFolderGranularity currentMetadataSize := int64(len(sf.usage)) * sectorMetadataDiskSize * storageFolderGranularity newHousingSize := int64(newSectorCount) * int64(modules.SectorSize) newMetadataSize := int64(newSectorCount) * sectorMetadataDiskSize if newHousingSize <= currentHousingSize || newMetadataSize <= currentMetadataSize { wal.cm.log.Critical("growStorageFolder called without size increase", newHousingSize, currentHousingSize, newMetadataSize, currentMetadataSize) return errors.New("unable to make the requested change, please notify the devs that there is a bug") } housingWriteSize := newHousingSize - currentHousingSize metadataWriteSize := newMetadataSize - currentMetadataSize // If there's an error in the rest of the function, truncate the storage // folder files back to their original sizes. var err error defer func(sf *storageFolder, housingSize, metadataSize int64) { if err != nil { wal.mu.Lock() defer wal.mu.Unlock() // Truncate the files back to their original sizes. err = build.ComposeErrors(err, sf.sectorFile.Truncate(housingSize)) err = build.ComposeErrors(err, sf.metadataFile.Truncate(metadataSize)) // Signal in the WAL that the unfinished storage folder extension // has failed. wal.appendChange(stateChange{ ErroredStorageFolderExtensions: []uint16{sf.index}, }) } }(sf, currentHousingSize, currentMetadataSize) // Extend the sector file and metadata file on disk. atomic.StoreUint64(&sf.atomicProgressDenominator, uint64(housingWriteSize+metadataWriteSize)) stepCount := housingWriteSize / folderAllocationStepSize for i := int64(0); i < stepCount; i++ { err = sf.sectorFile.Truncate(currentHousingSize + (folderAllocationStepSize * (i + 1))) if err != nil { return build.ExtendErr("could not allocate storage folder", err) } // After each iteration, update the progress numerator.
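// (The final Truncate after this loop tops the sector file up to its exact // target size, so housingWriteSize does not need to be a multiple of // folderAllocationStepSize.)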
atomic.AddUint64(&sf.atomicProgressNumerator, folderAllocationStepSize) } err = sf.sectorFile.Truncate(currentHousingSize + housingWriteSize) if err != nil { return build.ExtendErr("could not allocate sector data file", err) } // Write the metadata file. err = sf.metadataFile.Truncate(currentMetadataSize + metadataWriteSize) if err != nil { return build.ExtendErr("could not allocate sector metadata file", err) } // The file creation process is essentially complete at this point, report // complete progress. atomic.StoreUint64(&sf.atomicProgressNumerator, uint64(housingWriteSize+metadataWriteSize)) // Sync the files. var err1, err2 error var wg sync.WaitGroup wg.Add(2) go func() { defer wg.Done() err1 = sf.metadataFile.Sync() if err1 != nil { wal.cm.log.Println("could not synchronize allocated sector metadata file:", err1) } }() go func() { defer wg.Done() err2 = sf.sectorFile.Sync() if err2 != nil { wal.cm.log.Println("could not synchronize allocated sector data file:", err2) } }() wg.Wait() if err1 != nil || err2 != nil { err = build.ComposeErrors(err1, err2) wal.cm.log.Println("could not synchronize storage folder extensions:", err) return build.ExtendErr("unable to synchronize storage folder extensions", err) } // Simulate power failure at this point for some testing scenarios. if wal.cm.dependencies.disrupt("incompleteGrowStorageFolder") { return nil } // Storage folder growth has completed successfully, commit through the // WAL. wal.mu.Lock() wal.cm.storageFolders[sf.index] = sf wal.appendChange(stateChange{ StorageFolderExtensions: []storageFolderExtension{{ Index: sf.index, NewSectorCount: newSectorCount, }}, }) syncChan = wal.syncChan wal.mu.Unlock() // Wait to confirm the storage folder extension has completed until the WAL // entry has synced. <-syncChan // Set the progress back to '0'. atomic.StoreUint64(&sf.atomicProgressNumerator, 0) atomic.StoreUint64(&sf.atomicProgressDenominator, 0) return nil } Sia-1.3.0/modules/host/contractmanager/storagefoldergrow_test.go000066400000000000000000000372071313565667000251740ustar00rootroot00000000000000package contractmanager import ( "errors" "os" "path/filepath" "sync" "testing" "github.com/NebulousLabs/Sia/modules" ) // TestGrowStorageFolder checks that a storage folder can be successfully // increased in size. func TestGrowStorageFolder(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cmt, err := newContractManagerTester("TestGrowStorageFolder") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder. storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderOne, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderOne, modules.SectorSize*storageFolderGranularity) if err != nil { t.Fatal(err) } // Get the index of the storage folder. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("there should only be one storage folder") } sfIndex := sfs[0].Index // Verify that the storage folder has the correct capacity. if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity { t.Error("new storage folder is reporting the wrong capacity") } // Verify that the on-disk files are the right size.
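// Size sketch for the checks below: the metadata file should hold // sectorMetadataDiskSize bytes per sector, and the sector file // modules.SectorSize bytes per sector, for the storageFolderGranularity // sectors allocated above.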
mfn := filepath.Join(storageFolderOne, metadataFile) sfn := filepath.Join(storageFolderOne, sectorFile) mfi, err := os.Stat(mfn) if err != nil { t.Fatal(err) } sfi, err := os.Stat(sfn) if err != nil { t.Fatal(err) } if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity { t.Error("metadata file is the wrong size") } if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity { t.Error("sector file is the wrong size") } // Increase the size of the storage folder. err = cmt.cm.ResizeStorageFolder(sfIndex, modules.SectorSize*storageFolderGranularity*2, false) if err != nil { t.Fatal(err) } // Verify that the capacity and file sizes are correct. sfs = cmt.cm.StorageFolders() if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*2 { t.Error("new storage folder is reporting the wrong capacity") } mfi, err = os.Stat(mfn) if err != nil { t.Fatal(err) } sfi, err = os.Stat(sfn) if err != nil { t.Fatal(err) } if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*2 { t.Error("metadata file is the wrong size") } if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*2 { t.Error("sector file is the wrong size") } // Restart the contract manager to see that the change is persistent. err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Verify that the capacity and file sizes are correct. sfs = cmt.cm.StorageFolders() if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*2 { t.Error("new storage folder is reporting the wrong capacity") } mfi, err = os.Stat(mfn) if err != nil { t.Fatal(err) } sfi, err = os.Stat(sfn) if err != nil { t.Fatal(err) } if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*2 { t.Error("metadata file is the wrong size") } if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*2 { t.Error("sector file is the wrong size") } } // dependencyIncompleteGrow will start to have disk failures after too much // data is written and also after 'triggered' has been set to true. type dependencyIncompleteGrow struct { productionDependencies triggered bool threshold int mu sync.Mutex } // triggerLimitFile will return an error if a call to Write is made that will // put the total throughput of the file over the configured threshold. // Counting only begins once triggered. type triggerLimitFile struct { dig *dependencyIncompleteGrow throughput int mu sync.Mutex *os.File } // createFile will return a file that will return an error if a write will put // the total throughput of the file over the configured threshold. func (dig *dependencyIncompleteGrow) createFile(s string) (file, error) { osFile, err := os.Create(s) if err != nil { return nil, err } tlf := &triggerLimitFile{ dig: dig, File: osFile, } return tlf, nil } // WriteAt returns an error if the operation will put the total throughput of // the file over the configured threshold. The write will proceed all the way // to the threshold before returning the error. func (l *triggerLimitFile) WriteAt(b []byte, offset int64) (int, error) { l.mu.Lock() defer l.mu.Unlock() l.dig.mu.Lock() triggered := l.dig.triggered l.dig.mu.Unlock() if !triggered { return l.File.WriteAt(b, offset) } // If the limit has already been reached, return an error. if l.throughput >= l.dig.threshold { return 0, errors.New("triggerLimitFile throughput limit reached earlier") } // If the limit has not been reached, pass the call through to the // underlying file.
if l.throughput+len(b) <= l.dig.threshold { l.throughput += len(b) return l.File.WriteAt(b, offset) } // If the limit has been reached, write enough bytes to reach the // threshold, then return an error. remaining := l.dig.threshold - l.throughput l.throughput = l.dig.threshold written, err := l.File.WriteAt(b[:remaining], offset) if err != nil { return written, err } return written, errors.New("triggerLimitFile throughput limit reached before all input was written to disk") } // Truncate returns an error if the operation will put the total throughput of // the file over the configured threshold. func (l *triggerLimitFile) Truncate(offset int64) error { l.mu.Lock() defer l.mu.Unlock() l.dig.mu.Lock() triggered := l.dig.triggered l.dig.mu.Unlock() if !triggered { return l.File.Truncate(offset) } // If the limit has already been reached, return an error. if l.throughput >= l.dig.threshold { return errors.New("triggerLimitFile throughput limit reached earlier") } // Get the file size, so we know what the throughput is. fi, err := l.Stat() if err != nil { return errors.New("triggerLimitFile unable to get FileInfo: " + err.Error()) } // Run truncate with 0 throughput if size is larger than offset. if fi.Size() > offset { return l.File.Truncate(offset) } writeSize := int(offset - fi.Size()) // If the limit has not been reached, pass the call through to the // underlying file. if l.throughput+writeSize <= l.dig.threshold { l.throughput += writeSize return l.File.Truncate(offset) } // If the limit has been reached, return an error. return errors.New("triggerLimitFile throughput limit reached, no ability to allocate more") } // TestGrowStorageFolderIncompleteWrite checks that growStorageFolder operates // as intended when the writing to increase the filesize does not complete all // the way. func TestGrowStorageFolderIncompleteWrite(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() d := new(dependencyIncompleteGrow) cmt, err := newMockedContractManagerTester(d, "TestGrowStorageFolderIncompleteWrite") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder. storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderOne, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderOne, modules.SectorSize*storageFolderGranularity*3) if err != nil { t.Fatal(err) } // Get the index of the storage folder. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("there should only be one storage folder") } sfIndex := sfs[0].Index // Verify that the storage folder has the correct capacity. if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*3 { t.Error("new storage folder is reporting the wrong capacity") } // Trigger the dependencies, so that writes begin failing. d.mu.Lock() d.threshold = 1 << 20 d.triggered = true d.mu.Unlock() // Increase the size of the storage folder to a size large enough that the // write will fail. err = cmt.cm.ResizeStorageFolder(sfIndex, modules.SectorSize*storageFolderGranularity*25, false) if err == nil { t.Fatal("expecting error upon resize") } // Verify that the storage folder has the correct capacity. if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*3 { t.Error("new storage folder is reporting the wrong capacity") } // Verify that the on-disk files are the right size.
mfn := filepath.Join(storageFolderOne, metadataFile) sfn := filepath.Join(storageFolderOne, sectorFile) mfi, err := os.Stat(mfn) if err != nil { t.Fatal(err) } sfi, err := os.Stat(sfn) if err != nil { t.Fatal(err) } if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*3 { t.Error("metadata file is the wrong size:", mfi.Size(), sectorMetadataDiskSize*storageFolderGranularity*3) } if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*3 { t.Error("sector file is the wrong size:", sfi.Size(), modules.SectorSize*storageFolderGranularity*3) } // Restart the contract manager. err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Verify that the storage folder has the correct capacity. if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*3 { t.Error("new storage folder is reporting the wrong capacity") } // Verify that the on-disk files are the right size. mfi, err = os.Stat(mfn) if err != nil { t.Fatal(err) } sfi, err = os.Stat(sfn) if err != nil { t.Fatal(err) } if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*3 { t.Error("metadata file is the wrong size:", mfi.Size(), sectorMetadataDiskSize*storageFolderGranularity*3) } if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*3 { t.Error("sector file is the wrong size:", sfi.Size(), modules.SectorSize*storageFolderGranularity*3) } } // dependencyGrowNoFinalize will not add a confirmation to the WAL that a // growStorageFolder operation has completed. type dependencyGrowNoFinalize struct { productionDependencies } // disrupt will prevent the growStorageFolder operation from committing a // finalized growStorageFolder operation to the WAL. func (dependencyGrowNoFinalize) disrupt(s string) bool { if s == "incompleteGrowStorageFolder" { return true } if s == "cleanWALFile" { return true } return false } // TestGrowStorageFolderShutdownAfterWrite simulates an unclean shutdown that // occurs after the storage folder write has completed, but before it has // established through the WAL that the write has completed. The result should // be that the storage folder grow is not accepted after restart. func TestGrowStorageFolderShutdownAfterWrite(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() d := new(dependencyGrowNoFinalize) cmt, err := newMockedContractManagerTester(d, "TestGrowStorageFolderShutdownAfterWrite") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder. storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderOne, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderOne, modules.SectorSize*storageFolderGranularity*3) if err != nil { t.Fatal(err) } // Get the index of the storage folder. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("there should only be one storage folder") } sfIndex := sfs[0].Index // Verify that the storage folder has the correct capacity. if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*3 { t.Error("new storage folder is reporting the wrong capacity") } // Increase the size of the storage folder, to large enough that it will // fail. err = cmt.cm.ResizeStorageFolder(sfIndex, modules.SectorSize*storageFolderGranularity*25, false) if err != nil { t.Fatal(err) } // Restart the contract manager. 
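// Recovery sketch: because the finalizing WAL entry was suppressed by the // mocked dependency, the restart below should detect an unfinished storage // folder extension and truncate the files back to their original sizes (see // cleanupUnfinishedStorageFolderExtensions).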
err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Verify that the storage folder has the correct capacity. if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*3 { t.Error("new storage folder is reporting the wrong capacity") } // Verify that the on-disk files are the right size. mfn := filepath.Join(storageFolderOne, metadataFile) sfn := filepath.Join(storageFolderOne, sectorFile) mfi, err := os.Stat(mfn) if err != nil { t.Fatal(err) } sfi, err := os.Stat(sfn) if err != nil { t.Fatal(err) } if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*3 { t.Error("metadata file is the wrong size:", mfi.Size(), sectorMetadataDiskSize*storageFolderGranularity*3) } if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*3 { t.Error("sector file is the wrong size:", sfi.Size(), modules.SectorSize*storageFolderGranularity*3) } } // dependencyLeaveWAL will leave the WAL on disk during shutdown. type dependencyLeaveWAL struct { mu sync.Mutex productionDependencies triggered bool } // disrupt will prevent the WAL file from being removed at shutdown. func (dlw *dependencyLeaveWAL) disrupt(s string) bool { if s == "cleanWALFile" { return true } dlw.mu.Lock() triggered := dlw.triggered dlw.mu.Unlock() if s == "walRename" && triggered { return true } return false } // TestGrowStorageFolderWAL completes a storage folder growing, but leaves the // WAL behind so that a commit is necessary to finalize things. func TestGrowStorageFolderWAL(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() d := new(dependencyLeaveWAL) cmt, err := newMockedContractManagerTester(d, "TestGrowStorageFolderWAL") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder. storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderOne, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderOne, modules.SectorSize*storageFolderGranularity*3) if err != nil { t.Fatal(err) } // Get the index of the storage folder. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("there should only be one storage folder") } sfIndex := sfs[0].Index // Verify that the storage folder has the correct capacity. if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*3 { t.Error("new storage folder is reporting the wrong capacity") } // Increase the size of the storage folder, to large enough that it will // fail. err = cmt.cm.ResizeStorageFolder(sfIndex, modules.SectorSize*storageFolderGranularity*25, false) if err != nil { t.Fatal(err) } d.mu.Lock() d.triggered = true d.mu.Unlock() // Restart the contract manager. err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Verify that the storage folder has the correct capacity. sfs = cmt.cm.StorageFolders() if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*25 { t.Error("new storage folder is reporting the wrong capacity", sfs[0].Capacity/modules.SectorSize, storageFolderGranularity*25) } // Verify that the on-disk files are the right size. 
	mfn := filepath.Join(storageFolderOne, metadataFile)
	sfn := filepath.Join(storageFolderOne, sectorFile)
	mfi, err := os.Stat(mfn)
	if err != nil {
		t.Fatal(err)
	}
	sfi, err := os.Stat(sfn)
	if err != nil {
		t.Fatal(err)
	}
	if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*25 {
		t.Error("metadata file is the wrong size:", mfi.Size(), sectorMetadataDiskSize*storageFolderGranularity*25)
	}
	if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*25 {
		t.Error("sector file is the wrong size:", sfi.Size(), modules.SectorSize*storageFolderGranularity*25)
	}
}
Sia-1.3.0/modules/host/contractmanager/storagefolderremove.go000066400000000000000000000050611313565667000244470ustar00rootroot00000000000000package contractmanager

import (
	"path/filepath"
)

type (
	// storageFolderRemoval is a state change recorded in the WAL indicating
	// that a storage folder has been removed.
	storageFolderRemoval struct {
		Index uint16
		Path  string
	}
)

// commitStorageFolderRemoval will finalize a storage folder removal from the
// contract manager.
func (wal *writeAheadLog) commitStorageFolderRemoval(sfr storageFolderRemoval) {
	// Close any open file handles.
	sf, exists := wal.cm.storageFolders[sfr.Index]
	if exists {
		delete(wal.cm.storageFolders, sfr.Index)
	}
	if exists && sf.metadataFile != nil {
		err := sf.metadataFile.Close()
		if err != nil {
			wal.cm.log.Printf("Error: unable to close metadata file as storage folder %v is removed\n", sf.path)
		}
	}
	if exists && sf.sectorFile != nil {
		err := sf.sectorFile.Close()
		if err != nil {
			wal.cm.log.Printf("Error: unable to close sector file as storage folder %v is removed\n", sf.path)
		}
	}

	// Delete the files.
	err := wal.cm.dependencies.removeFile(filepath.Join(sfr.Path, metadataFile))
	if err != nil {
		wal.cm.log.Printf("Error: unable to remove metadata file as storage folder %v is removed\n", sfr.Path)
	}
	err = wal.cm.dependencies.removeFile(filepath.Join(sfr.Path, sectorFile))
	if err != nil {
		wal.cm.log.Printf("Error: unable to remove sector file as storage folder %v is removed\n", sfr.Path)
	}
}

// RemoveStorageFolder will delete a storage folder from the contract manager,
// moving all of the sectors in the storage folder to new storage folders.
func (cm *ContractManager) RemoveStorageFolder(index uint16, force bool) error {
	cm.tg.Add()
	defer cm.tg.Done()

	// Retrieve the specified storage folder.
	cm.wal.mu.Lock()
	sf, exists := cm.storageFolders[index]
	if !exists {
		cm.wal.mu.Unlock()
		return errStorageFolderNotFound
	}
	cm.wal.mu.Unlock()

	// Lock the storage folder for the duration of the operation.
	sf.mu.Lock()
	defer sf.mu.Unlock()

	// Clear out the sectors in the storage folder.
	_, err := cm.wal.managedEmptyStorageFolder(index, 0)
	if err != nil && !force {
		return err
	}

	// Wait for a synchronize to confirm that all of the moves have succeeded
	// in full.
	cm.wal.mu.Lock()
	syncChan := cm.wal.syncChan
	cm.wal.mu.Unlock()
	<-syncChan

	// Submit a storage folder removal to the WAL and wait until the update is
	// synced.
	cm.wal.mu.Lock()
	cm.wal.appendChange(stateChange{
		StorageFolderRemovals: []storageFolderRemoval{{
			Index: index,
			Path:  sf.path,
		}},
	})

	// Wait until the removal action has been synchronized.
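	// (The handshake used throughout this file: capture the current syncChan
	// while holding the WAL lock, then block on it. The sync loop closes the
	// channel once the appended change is durably on disk, so receiving here
	// means the removal has been synced.)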
	syncChan = cm.wal.syncChan
	cm.wal.mu.Unlock()
	<-syncChan
	return nil
}
Sia-1.3.0/modules/host/contractmanager/storagefolderremove_test.go000066400000000000000000000253131313565667000255100ustar00rootroot00000000000000package contractmanager

import (
	"bytes"
	"os"
	"path/filepath"
	"sync"
	"testing"

	"github.com/NebulousLabs/Sia/crypto"
	"github.com/NebulousLabs/Sia/modules"
)

// TestRemoveStorageFolder tries removing a storage folder that has no sectors
// in it.
func TestRemoveStorageFolder(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	cmt, err := newContractManagerTester("TestRemoveStorageFolder")
	if err != nil {
		t.Fatal(err)
	}
	defer cmt.panicClose()

	// Add a storage folder to the contract manager tester.
	storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne")
	// Create the storage folder dir.
	err = os.MkdirAll(storageFolderDir, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*storageFolderGranularity*2)
	if err != nil {
		t.Fatal(err)
	}

	// Determine the index of the storage folder.
	sfs := cmt.cm.StorageFolders()
	if len(sfs) != 1 {
		t.Fatal("there should be one storage folder in the contract manager")
	}
	err = cmt.cm.RemoveStorageFolder(sfs[0].Index, false)
	if err != nil {
		t.Fatal(err)
	}

	// Check that the storage folder has been removed.
	sfs = cmt.cm.StorageFolders()
	if len(sfs) != 0 {
		t.Fatal("Storage folder should have been removed")
	}
	// Check that the disk objects were removed.
	_, err = os.Stat(filepath.Join(storageFolderDir, metadataFile))
	if !os.IsNotExist(err) {
		t.Fatal("metadata file should have been removed")
	}
	_, err = os.Stat(filepath.Join(storageFolderDir, sectorFile))
	if !os.IsNotExist(err) {
		t.Fatal("sector file should have been removed")
	}

	// Restart the contract manager to see if the storage folder is still gone.
	err = cmt.cm.Close()
	if err != nil {
		t.Fatal(err)
	}
	// Create the new contract manager using the same persist dir, so that it
	// will see the uncommitted WAL.
	cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir))
	if err != nil {
		t.Fatal(err)
	}

	// Check that the storage folder was properly eliminated.
	sfs = cmt.cm.StorageFolders()
	if len(sfs) != 0 {
		t.Fatal("Storage folder should have been removed")
	}
}

// TestRemoveStorageFolderWithSector tries removing a storage folder that has a
// sector in it.
func TestRemoveStorageFolderWithSector(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	cmt, err := newContractManagerTester("TestRemoveStorageFolderWithSector")
	if err != nil {
		t.Fatal(err)
	}
	defer cmt.panicClose()

	// Add a storage folder to the contract manager tester.
	storageFolderDir := filepath.Join(cmt.persistDir, "storageFolderOne")
	// Create the storage folder dir.
	err = os.MkdirAll(storageFolderDir, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*storageFolderGranularity*2)
	if err != nil {
		t.Fatal(err)
	}
	// Give the storage folder a sector.
	root, data := randSector()
	err = cmt.cm.AddSector(root, data)
	if err != nil {
		t.Fatal(err)
	}

	// Determine the index of the storage folder.
	sfs := cmt.cm.StorageFolders()
	if len(sfs) != 1 {
		t.Fatal("there should be one storage folder in the contract manager")
	}
	if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize {
		t.Fatal("there should be one sector reported in the storage folder")
	}

	// Verify that the data held within the storage folder is the correct data.
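	// (Sectors are addressed by their Merkle root, so a successful ReadSector
	// plus a byte-for-byte comparison checks both the stored data and the
	// root-to-location metadata.)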
	readData, err := cmt.cm.ReadSector(root)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(readData, data) {
		t.Fatal("Reading a sector from the storage folder did not produce the right data")
	}

	// Add a second storage folder, then remove the first storage folder.
	storageFolderTwo := filepath.Join(cmt.persistDir, "storageFolderTwo")
	err = os.MkdirAll(storageFolderTwo, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderTwo, modules.SectorSize*storageFolderGranularity*2)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.RemoveStorageFolder(sfs[0].Index, false)
	if err != nil {
		t.Fatal(err)
	}

	// Check that the remaining storage folder has picked up the right sector.
	sfs = cmt.cm.StorageFolders()
	if len(sfs) != 1 {
		t.Fatal("there should be one storage folder in the contract manager")
	}
	if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize {
		t.Fatal("there should be one sector reported in the storage folder")
	}

	// Verify that the data held within the storage folder is the correct data.
	readData, err = cmt.cm.ReadSector(root)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(readData, data) {
		t.Fatal("Reading a sector from the storage folder did not produce the right data")
	}

	// Check that the disk objects were removed.
	_, err = os.Stat(filepath.Join(storageFolderDir, metadataFile))
	if !os.IsNotExist(err) {
		t.Error("metadata file should have been removed")
	}
	_, err = os.Stat(filepath.Join(storageFolderDir, sectorFile))
	if !os.IsNotExist(err) {
		t.Error("sector file should have been removed")
	}

	// Restart the contract manager to see if the storage folder is still gone.
	err = cmt.cm.Close()
	if err != nil {
		t.Fatal(err)
	}
	// Create the new contract manager using the same persist dir, so that it
	// will see the uncommitted WAL.
	cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir))
	if err != nil {
		t.Fatal(err)
	}
	sfs = cmt.cm.StorageFolders()
	if len(sfs) != 1 {
		t.Fatal("there should be one storage folder in the contract manager")
	}
	if sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize {
		t.Fatal("there should be one sector reported in the storage folder")
	}
	// Verify that the data held within the storage folder is the correct data.
	readData, err = cmt.cm.ReadSector(root)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(readData, data) {
		t.Fatal("Reading a sector from the storage folder did not produce the right data")
	}
}

// TestRemoveStorageFolderConcurrentAddSector will try removing a storage
// folder at the same time that sectors are being added to the contract
// manager.
func TestRemoveStorageFolderConcurrentAddSector(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	cmt, err := newContractManagerTester("TestRemoveStorageFolderConcurrentAddSector")
	if err != nil {
		t.Fatal(err)
	}
	defer cmt.panicClose()

	// Add three storage folders.
	storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne")
	storageFolderTwo := filepath.Join(cmt.persistDir, "storageFolderTwo")
	storageFolderThree := filepath.Join(cmt.persistDir, "storageFolderThree")
	storageFolderFour := filepath.Join(cmt.persistDir, "storageFolderFour")
	// Create the storage folder dir.
	err = os.MkdirAll(storageFolderOne, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderOne, modules.SectorSize*storageFolderGranularity)
	if err != nil {
		t.Fatal(err)
	}
	err = os.MkdirAll(storageFolderTwo, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderTwo, modules.SectorSize*storageFolderGranularity*15)
	if err != nil {
		t.Fatal(err)
	}
	sfs := cmt.cm.StorageFolders()
	err = os.MkdirAll(storageFolderThree, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderThree, modules.SectorSize*storageFolderGranularity*25)
	if err != nil {
		t.Fatal(err)
	}

	// Run goroutines that will continually add sectors to the contract
	// manager.
	var sliceLock sync.Mutex
	var roots []crypto.Hash
	var datas [][]byte
	adderTerminator := make(chan struct{})
	var adderWG sync.WaitGroup
	// Spin up 100 of these threads, putting load on the disk and increasing
	// the chance of complications.
	for i := 0; i < 100; i++ {
		adderWG.Add(1)
		go func() {
			for {
				root, data := randSector()
				err := cmt.cm.AddSector(root, data)
				if err != nil {
					t.Error(err)
				}
				sliceLock.Lock()
				roots = append(roots, root)
				datas = append(datas, data)
				sliceLock.Unlock()

				// See if we are done.
				select {
				case <-adderTerminator:
					adderWG.Done()
					return
				default:
					continue
				}
			}
		}()
	}

	// Add a fourth storage folder, mostly because it takes time and guarantees
	// that a bunch of sectors will be added to the disk.
	err = os.MkdirAll(storageFolderFour, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderFour, modules.SectorSize*storageFolderGranularity*50)
	if err != nil {
		t.Fatal(err)
	}

	// In two separate goroutines, remove storage folders one and two.
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		err := cmt.cm.RemoveStorageFolder(sfs[0].Index, false)
		if err != nil {
			t.Error(err)
		}
	}()
	go func() {
		defer wg.Done()
		err := cmt.cm.RemoveStorageFolder(sfs[1].Index, false)
		if err != nil {
			t.Error(err)
		}
	}()
	wg.Wait()

	// Copy over the sectors that have been added thus far.
	sliceLock.Lock()
	addedRoots := make([]crypto.Hash, len(roots))
	addedDatas := make([][]byte, len(datas))
	copy(addedRoots, roots)
	copy(addedDatas, datas)
	sliceLock.Unlock()

	// Read all of the sectors to verify that consistency is being maintained.
	for i, root := range addedRoots {
		data, err := cmt.cm.ReadSector(root)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(data, addedDatas[i]) {
			t.Error("Retrieved data does not match the intended data")
		}
	}

	// Close the adder threads and wait until all goroutines have finished up.
	close(adderTerminator)
	adderWG.Wait()

	// Count the number of sectors total.
	sfs = cmt.cm.StorageFolders()
	var totalConsumed uint64
	for _, sf := range sfs {
		totalConsumed = totalConsumed + (sf.Capacity - sf.CapacityRemaining)
	}
	if totalConsumed != uint64(len(roots))*modules.SectorSize {
		t.Error("Wrong storage folder consumption being reported.")
	}

	// Make sure that each sector is retrievable.
	for i, root := range roots {
		data, err := cmt.cm.ReadSector(root)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(data, datas[i]) {
			t.Error("Retrieved data does not match the intended data")
		}
	}

	// Restart the contract manager and verify that the changes stuck.
	err = cmt.cm.Close()
	if err != nil {
		t.Fatal(err)
	}
	// Create the new contract manager using the same persist dir, so that it
	// will see the uncommitted WAL.
	cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir))
	if err != nil {
		t.Fatal(err)
	}

	// Count the number of sectors total.
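	// (Consumed space is derived per folder as Capacity - CapacityRemaining;
	// summed across folders it should equal exactly len(roots) sectors' worth
	// of bytes, since every added sector must have landed somewhere.)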
	sfs = cmt.cm.StorageFolders()
	totalConsumed = 0
	for _, sf := range sfs {
		totalConsumed = totalConsumed + (sf.Capacity - sf.CapacityRemaining)
	}
	if totalConsumed != uint64(len(roots))*modules.SectorSize {
		t.Error("Wrong storage folder consumption being reported.")
	}

	// Make sure that each sector is retrievable.
	for i, root := range roots {
		data, err := cmt.cm.ReadSector(root)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(data, datas[i]) {
			t.Error("Retrieved data does not match the intended data")
		}
	}
}
Sia-1.3.0/modules/host/contractmanager/storagefoldershrink.go000066400000000000000000000061421313565667000244510ustar00rootroot00000000000000package contractmanager

import (
	"sync/atomic"

	"github.com/NebulousLabs/Sia/modules"
)

type (
	// storageFolderReduction dictates a completed storage folder reduction to
	// the WAL.
	storageFolderReduction struct {
		Index          uint16
		NewSectorCount uint32
	}
)

// commitStorageFolderReduction commits a storage folder reduction to the state
// and filesystem.
func (wal *writeAheadLog) commitStorageFolderReduction(sfr storageFolderReduction) {
	sf, exists := wal.cm.storageFolders[sfr.Index]
	if !exists {
		wal.cm.log.Critical("ERROR: storage folder reduction established for a storage folder that does not exist")
		return
	}
	if atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
		// Cannot complete the storage folder reduction - storage folder is not
		// available.
		return
	}

	// Shrink the sector usage, but only if the sector usage is not already
	// smaller.
	if uint32(len(sf.usage)) > sfr.NewSectorCount/storageFolderGranularity {
		// Unset the usage bits beyond the new sector count.
		for i := sfr.NewSectorCount; i < uint32(len(sf.usage))*storageFolderGranularity; i++ {
			sf.clearUsage(i)
		}
		// Truncate the usage field.
		sf.usage = sf.usage[:sfr.NewSectorCount/storageFolderGranularity]
	}

	// Truncate the storage folder.
	err := sf.metadataFile.Truncate(int64(sfr.NewSectorCount * sectorMetadataDiskSize))
	if err != nil {
		wal.cm.log.Printf("Error: unable to truncate metadata file as storage folder %v is resized\n", sf.path)
	}
	err = sf.sectorFile.Truncate(int64(modules.SectorSize * uint64(sfr.NewSectorCount)))
	if err != nil {
		wal.cm.log.Printf("Error: unable to truncate sector file as storage folder %v is resized\n", sf.path)
	}
}

// shrinkStorageFolder will truncate a storage folder, moving all of the
// sectors in the truncated space to new storage folders.
func (wal *writeAheadLog) shrinkStorageFolder(index uint16, newSectorCount uint32, force bool) error {
	// Retrieve the specified storage folder.
	wal.mu.Lock()
	sf, exists := wal.cm.storageFolders[index]
	wal.mu.Unlock()
	if !exists {
		return errStorageFolderNotFound
	}
	if atomic.LoadUint64(&sf.atomicUnavailable) == 1 {
		// TODO: Better error.
		return errStorageFolderNotFound
	}

	// Lock the storage folder for the duration of the operation.
	sf.mu.Lock()
	defer sf.mu.Unlock()

	// Clear out the sectors in the storage folder.
	_, err := wal.managedEmptyStorageFolder(index, newSectorCount)
	if err != nil && !force {
		return err
	}

	// Wait for a synchronize to confirm that all of the moves have succeeded
	// in full.
	wal.mu.Lock()
	syncChan := wal.syncChan
	wal.mu.Unlock()
	<-syncChan

	// Allow unclean shutdown to be simulated by returning before the state
	// change gets committed.
	if wal.cm.dependencies.disrupt("incompleteShrinkStorageFolder") {
		return nil
	}

	// Submit a storage folder truncation to the WAL and wait until the update
	// is synced.
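	// (Note on 'force': when it is set, a failure to empty the truncated
	// region is swallowed above and the shrink proceeds anyway, so sectors
	// that could not be relocated are lost. A hypothetical caller opting into
	// that behavior goes through the exported API, e.g.:
	//
	//	err := cm.ResizeStorageFolder(index, newSize, true) // force == true
	//
	// The tests in storagefoldershrink_test.go exercise both modes.)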
wal.mu.Lock() wal.appendChange(stateChange{ StorageFolderReductions: []storageFolderReduction{{ Index: index, NewSectorCount: newSectorCount, }}, }) syncChan = wal.syncChan wal.mu.Unlock() // Wait until the shrink action has been synchronized. <-syncChan return nil } Sia-1.3.0/modules/host/contractmanager/storagefoldershrink_bench_test.go000066400000000000000000000020071313565667000266430ustar00rootroot00000000000000package contractmanager import ( "testing" ) // BenchmarkUnsetManySectors checks that unsetting a bunch of sectors // individually takes an acceptable amount of time. Millions should be possible // in well under a second. // // My laptop is doing 10e6 in 1.2 seconds. This is on the edge of too slow, but // overall it's close enough. func BenchmarkUnsetManySectors(b *testing.B) { // Create a uint64 that has all of the bits set. base := uint64(0) base-- // Benchmark how long it takes to unset the bits. for i := 0; i < b.N; i++ { // Create a usage array with all bits set. b.StopTimer() usageArray := make([]uint64, 10e6) for i := 0; i < len(usageArray); i++ { usageArray[i] = base } b.StartTimer() // Set all of the bits to zero. for j := 0; j < len(usageArray); j++ { // Set each bit to zero. for k := 0; k < storageFolderGranularity; k++ { usageElement := usageArray[j] usageElementUpdated := usageElement & (^(1 << uint64(k))) usageArray[j] = usageElementUpdated } } } } Sia-1.3.0/modules/host/contractmanager/storagefoldershrink_test.go000066400000000000000000001117361313565667000255160ustar00rootroot00000000000000package contractmanager import ( "bytes" "os" "path/filepath" "sync" "sync/atomic" "testing" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" ) // TestShrinkStorageFolder checks that a storage folder can be successfully // decreased in size. func TestShrinkStorageFolder(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cmt, err := newContractManagerTester("TestShrinkStorageFolder") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder. storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderOne, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderOne, modules.SectorSize*storageFolderGranularity*8) if err != nil { t.Fatal(err) } // Get the index of the storage folder. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("there should only be one storage folder") } sfIndex := sfs[0].Index // Verify that the storage folder has the correct capacity. if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*8 { t.Error("new storage folder is reporting the wrong capacity") } // Verify that the on-disk files are the right size. mfn := filepath.Join(storageFolderOne, metadataFile) sfn := filepath.Join(storageFolderOne, sectorFile) mfi, err := os.Stat(mfn) if err != nil { t.Fatal(err) } sfi, err := os.Stat(sfn) if err != nil { t.Fatal(err) } if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*8 { t.Error("metadata file is the wrong size") } if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*8 { t.Error("sector file is the wrong size") } // Decrease the size of the storage folder. err = cmt.cm.ResizeStorageFolder(sfIndex, modules.SectorSize*storageFolderGranularity*2, false) if err != nil { t.Fatal(err) } // Verify that the capacity and file sizes are correct. 
sfs = cmt.cm.StorageFolders() if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*2 { t.Error("new storage folder is reporting the wrong capacity") } mfi, err = os.Stat(mfn) if err != nil { t.Fatal(err) } sfi, err = os.Stat(sfn) if err != nil { t.Fatal(err) } if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*2 { t.Error("metadata file is the wrong size") } if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*2 { t.Error("sector file is the wrong size") } // Restart the contract manager to see that the change is persistent. err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Verify that the capacity and file sizes are correct. sfs = cmt.cm.StorageFolders() if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*2 { t.Error("new storage folder is reporting the wrong capacity") } mfi, err = os.Stat(mfn) if err != nil { t.Fatal(err) } sfi, err = os.Stat(sfn) if err != nil { t.Fatal(err) } if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*2 { t.Error("metadata file is the wrong size") } if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*2 { t.Error("sector file is the wrong size") } } // TestShrinkStorageFolderWithSectors checks that a storage folder can be // successfully decreased in size when it has sectors which would need to be // moved. func TestShrinkStorageFolderWithSectors(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() cmt, err := newContractManagerTester(t.Name()) if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder. storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderOne, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderOne, modules.SectorSize*storageFolderGranularity*8) if err != nil { t.Fatal(err) } // Get the index of the storage folder. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("there should only be one storage folder") } sfIndex := sfs[0].Index // Verify that the storage folder has the correct capacity. if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*8 { t.Error("new storage folder is reporting the wrong capacity") } // Verify that the on-disk files are the right size. mfn := filepath.Join(storageFolderOne, metadataFile) sfn := filepath.Join(storageFolderOne, sectorFile) mfi, err := os.Stat(mfn) if err != nil { t.Fatal(err) } sfi, err := os.Stat(sfn) if err != nil { t.Fatal(err) } if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*8 { t.Error("metadata file is the wrong size") } if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*8 { t.Error("sector file is the wrong size") } // Create some sectors and add them to the storage folder. roots := make([]crypto.Hash, storageFolderGranularity*3) datas := make([][]byte, storageFolderGranularity*3) for i := 0; i < storageFolderGranularity*3; i++ { root, data := randSector() roots[i] = root datas[i] = data } // Add all of the sectors. var wg sync.WaitGroup wg.Add(len(roots)) for i := 0; i < len(roots); i++ { go func(i int) { err := cmt.cm.AddSector(roots[i], datas[i]) if err != nil { t.Error(err) } wg.Done() }(i) } wg.Wait() // Add a second storage folder so that the displaced sectors have somewhere // to go. 
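	// (Folder two is sized at three granularity units - exactly enough to
	// absorb all storageFolderGranularity*3 sectors created above if every
	// one of them has to move out of folder one.)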
storageFolderTwo := filepath.Join(cmt.persistDir, "storageFolderTwo") // Create the storage folder dir. err = os.MkdirAll(storageFolderTwo, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderTwo, modules.SectorSize*storageFolderGranularity*3) if err != nil { t.Fatal(err) } // Verify that every single sector is readable and has the correct data. wg.Add(len(roots)) var misses uint64 for i := 0; i < len(roots); i++ { go func(i int) { data, err := cmt.cm.ReadSector(roots[i]) if err != nil || !bytes.Equal(data, datas[i]) { atomic.AddUint64(&misses, 1) } wg.Done() }(i) } wg.Wait() if misses != 0 { t.Errorf("Could not find all %v sectors: %v\n", len(roots), misses) } // Decrease the size of the storage folder. err = cmt.cm.ResizeStorageFolder(sfIndex, modules.SectorSize*storageFolderGranularity*2, false) if err != nil { t.Fatal(err) } // Verify that the capacity and file sizes are correct. sfs = cmt.cm.StorageFolders() capacity := sfs[0].Capacity + sfs[1].Capacity capacityRemaining := sfs[0].CapacityRemaining + sfs[1].CapacityRemaining if capacity != modules.SectorSize*storageFolderGranularity*5 { t.Error("new storage folder is reporting the wrong capacity") } if capacityRemaining != modules.SectorSize*storageFolderGranularity*2 { t.Error("new storage folder capacity remaining is reporting the wrong remaining capacity") } mfi, err = os.Stat(mfn) if err != nil { t.Fatal(err) } sfi, err = os.Stat(sfn) if err != nil { t.Fatal(err) } if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*2 { t.Error("metadata file is the wrong size") } if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*2 { t.Error("sector file is the wrong size") } // Verify that every single sector is readable and has the correct data. wg.Add(len(roots)) misses = 0 for i := 0; i < len(roots); i++ { go func(i int) { data, err := cmt.cm.ReadSector(roots[i]) if err != nil || !bytes.Equal(data, datas[i]) { atomic.AddUint64(&misses, 1) } wg.Done() }(i) } wg.Wait() if misses != 0 { t.Errorf("Could not find all %v sectors: %v\n", len(roots), misses) } // Restart the contract manager to see that the change is persistent. err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Verify that the capacity and file sizes are correct. sfs = cmt.cm.StorageFolders() capacity = sfs[0].Capacity + sfs[1].Capacity capacityRemaining = sfs[0].CapacityRemaining + sfs[1].CapacityRemaining if capacity != modules.SectorSize*storageFolderGranularity*5 { t.Error("new storage folder is reporting the wrong capacity") } if capacityRemaining != modules.SectorSize*storageFolderGranularity*2 { t.Error("new storage folder capacity remaining is reporting the wrong remaining capacity") } mfi, err = os.Stat(mfn) if err != nil { t.Fatal(err) } sfi, err = os.Stat(sfn) if err != nil { t.Fatal(err) } if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*2 { t.Error("metadata file is the wrong size") } if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*2 { t.Error("sector file is the wrong size") } // Verify that every single sector is readable and has the correct data. 
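	// (The reads are issued concurrently; failures are tallied in an atomic
	// counter and reported once after the WaitGroup settles, which keeps the
	// goroutines themselves free of ordering assumptions.)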
	wg.Add(len(roots))
	misses = 0
	for i := 0; i < len(roots); i++ {
		go func(i int) {
			data, err := cmt.cm.ReadSector(roots[i])
			if err != nil || !bytes.Equal(data, datas[i]) {
				atomic.AddUint64(&misses, 1)
			}
			wg.Done()
		}(i)
	}
	wg.Wait()
	if misses != 0 {
		t.Errorf("Could not find all %v sectors: %v\n", len(roots), misses)
	}
}

// TestShrinkStorageFolderIncompleteWrite checks that shrinkStorageFolder
// operates as intended when the writing to move sectors cannot complete fully.
func TestShrinkStorageFolderIncompleteWrite(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	d := new(dependencyIncompleteGrow)
	cmt, err := newMockedContractManagerTester(d, "TestShrinkStorageFolderIncompleteWrite")
	if err != nil {
		t.Fatal(err)
	}
	defer cmt.panicClose()

	// Add a storage folder.
	storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne")
	// Create the storage folder dir.
	err = os.MkdirAll(storageFolderOne, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderOne, modules.SectorSize*storageFolderGranularity*8)
	if err != nil {
		t.Fatal(err)
	}

	// Get the index of the storage folder.
	sfs := cmt.cm.StorageFolders()
	if len(sfs) != 1 {
		t.Fatal("there should only be one storage folder")
	}
	sfIndex := sfs[0].Index

	// Create some sectors and add them to the storage folder.
	roots := make([]crypto.Hash, storageFolderGranularity*3)
	datas := make([][]byte, storageFolderGranularity*3)
	for i := 0; i < storageFolderGranularity*3; i++ {
		root, data := randSector()
		roots[i] = root
		datas[i] = data
	}
	// Add all of the sectors.
	var wg sync.WaitGroup
	wg.Add(len(roots))
	for i := 0; i < len(roots); i++ {
		go func(i int) {
			err := cmt.cm.AddSector(roots[i], datas[i])
			if err != nil {
				t.Error(err)
			}
			wg.Done()
		}(i)
	}
	wg.Wait()

	// Add a second storage folder so that the displaced sectors have somewhere
	// to go.
	storageFolderTwo := filepath.Join(cmt.persistDir, "storageFolderTwo")
	// Create the storage folder dir.
	err = os.MkdirAll(storageFolderTwo, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderTwo, modules.SectorSize*storageFolderGranularity*3)
	if err != nil {
		t.Fatal(err)
	}

	// Trigger some failures.
	d.mu.Lock()
	d.threshold = 1 << 15
	d.triggered = true
	d.mu.Unlock()

	// Decrease the size of the storage folder.
	err = cmt.cm.ResizeStorageFolder(sfIndex, modules.SectorSize*storageFolderGranularity*2, false)
	if err == nil {
		t.Fatal("expected a failure")
	}

	// Verify that the capacity and file sizes are correct.
	sfs = cmt.cm.StorageFolders()
	capacity := sfs[0].Capacity + sfs[1].Capacity
	capacityRemaining := sfs[0].CapacityRemaining + sfs[1].CapacityRemaining
	if capacity != modules.SectorSize*storageFolderGranularity*11 {
		t.Error("new storage folder is reporting the wrong capacity")
	}
	if capacityRemaining != modules.SectorSize*storageFolderGranularity*8 {
		t.Error("new storage folder capacity remaining is reporting the wrong remaining capacity")
	}
	mfn := filepath.Join(storageFolderOne, metadataFile)
	sfn := filepath.Join(storageFolderOne, sectorFile)
	mfi, err := os.Stat(mfn)
	if err != nil {
		t.Fatal(err)
	}
	sfi, err := os.Stat(sfn)
	if err != nil {
		t.Fatal(err)
	}
	if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*8 {
		t.Error("metadata file is the wrong size")
	}
	if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*8 {
		t.Error("sector file is the wrong size")
	}

	// Verify that every single sector is readable and has the correct data.
	wg.Add(len(roots))
	var misses uint64
	for i := 0; i < len(roots); i++ {
		go func(i int) {
			data, err := cmt.cm.ReadSector(roots[i])
			if err != nil || !bytes.Equal(data, datas[i]) {
				atomic.AddUint64(&misses, 1)
			}
			wg.Done()
		}(i)
	}
	wg.Wait()
	if misses != 0 {
		t.Errorf("Could not find all %v sectors: %v\n", len(roots), misses)
	}

	// Restart the contract manager to see that the change is persistent.
	err = cmt.cm.Close()
	if err != nil {
		t.Fatal(err)
	}
	cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir))
	if err != nil {
		t.Fatal(err)
	}

	// Verify that the capacity and file sizes are correct.
	sfs = cmt.cm.StorageFolders()
	capacity = sfs[0].Capacity + sfs[1].Capacity
	capacityRemaining = sfs[0].CapacityRemaining + sfs[1].CapacityRemaining
	if capacity != modules.SectorSize*storageFolderGranularity*11 {
		t.Error("new storage folder is reporting the wrong capacity")
	}
	if capacityRemaining != modules.SectorSize*storageFolderGranularity*8 {
		t.Error("new storage folder capacity remaining is reporting the wrong remaining capacity")
	}
	mfi, err = os.Stat(mfn)
	if err != nil {
		t.Fatal(err)
	}
	sfi, err = os.Stat(sfn)
	if err != nil {
		t.Fatal(err)
	}
	if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*8 {
		t.Error("metadata file is the wrong size")
	}
	if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*8 {
		t.Error("sector file is the wrong size")
	}

	// Verify that every single sector is readable and has the correct data.
	wg.Add(len(roots))
	misses = 0
	for i := 0; i < len(roots); i++ {
		go func(i int) {
			data, err := cmt.cm.ReadSector(roots[i])
			if err != nil || !bytes.Equal(data, datas[i]) {
				atomic.AddUint64(&misses, 1)
			}
			wg.Done()
		}(i)
	}
	wg.Wait()
	if misses != 0 {
		t.Errorf("Could not find all %v sectors: %v\n", len(roots), misses)
	}
}

// TestShrinkStorageFolderIncompleteWriteForce checks that shrinkStorageFolder
// operates as intended when the writing to move sectors cannot complete fully,
// but the 'force' flag is set.
func TestShrinkStorageFolderIncompleteWriteForce(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	d := new(dependencyIncompleteGrow)
	cmt, err := newMockedContractManagerTester(d, "TestShrinkStorageFolderIncompleteWriteForce")
	if err != nil {
		t.Fatal(err)
	}
	defer cmt.panicClose()

	// Add a storage folder.
	storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne")
	// Create the storage folder dir.
	err = os.MkdirAll(storageFolderOne, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderOne, modules.SectorSize*storageFolderGranularity*8)
	if err != nil {
		t.Fatal(err)
	}

	// Get the index of the storage folder.
	sfs := cmt.cm.StorageFolders()
	if len(sfs) != 1 {
		t.Fatal("there should only be one storage folder")
	}
	sfIndex := sfs[0].Index

	// Create some sectors and add them to the storage folder.
	roots := make([]crypto.Hash, 6)
	datas := make([][]byte, 6)
	for i := 0; i < len(roots); i++ {
		root, data := randSector()
		roots[i] = root
		datas[i] = data
	}
	// Add all of the sectors.
	var wg sync.WaitGroup
	wg.Add(len(roots))
	for i := 0; i < len(roots); i++ {
		go func(i int) {
			err := cmt.cm.AddSector(roots[i], datas[i])
			if err != nil {
				t.Error(err)
			}
			wg.Done()
		}(i)
	}
	wg.Wait()

	// Add a second storage folder so that the displaced sectors have somewhere
	// to go.
	storageFolderTwo := filepath.Join(cmt.persistDir, "storageFolderTwo")
	// Create the storage folder dir.
	err = os.MkdirAll(storageFolderTwo, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderTwo, modules.SectorSize*storageFolderGranularity*3)
	if err != nil {
		t.Fatal(err)
	}

	// Trigger some failures.
	d.mu.Lock()
	d.threshold = 1 << 11
	d.triggered = true
	d.mu.Unlock()

	// Decrease the size of the storage folder. Because the 'force' flag is
	// set, the shrink should succeed even though some sectors cannot be
	// relocated.
	err = cmt.cm.ResizeStorageFolder(sfIndex, modules.SectorSize*storageFolderGranularity*2, true)
	if err != nil {
		t.Fatal(err)
	}

	// Verify that the capacity and file sizes are correct.
	sfs = cmt.cm.StorageFolders()
	capacity := sfs[0].Capacity + sfs[1].Capacity
	capacityRemaining := sfs[0].CapacityRemaining + sfs[1].CapacityRemaining
	if capacity != modules.SectorSize*storageFolderGranularity*5 {
		t.Error("new storage folder is reporting the wrong capacity")
	}
	mfn := filepath.Join(storageFolderOne, metadataFile)
	sfn := filepath.Join(storageFolderOne, sectorFile)
	mfi, err := os.Stat(mfn)
	if err != nil {
		t.Fatal(err)
	}
	sfi, err := os.Stat(sfn)
	if err != nil {
		t.Fatal(err)
	}
	if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*2 {
		t.Error("metadata file is the wrong size")
	}
	if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*2 {
		t.Error("sector file is the wrong size")
	}

	// Data was lost. Count the number of sectors that are still available.
	wg.Add(len(roots))
	var remainingSectors uint64
	for i := 0; i < len(roots); i++ {
		go func(i int) {
			defer wg.Done()
			data, err := cmt.cm.ReadSector(roots[i])
			if err != nil {
				// Sector probably destroyed.
				return
			}
			if !bytes.Equal(data, datas[i]) {
				t.Error("ReadSector has returned the wrong data")
			}
			atomic.AddUint64(&remainingSectors, 1)
		}(i)
	}
	wg.Wait()

	// Check that the capacity remaining matches the number of reachable
	// sectors.
	if capacityRemaining != capacity-remainingSectors*modules.SectorSize {
		t.Error(capacityRemaining/modules.SectorSize, capacity/modules.SectorSize, remainingSectors)
	}

	// Restart the contract manager to see that the change is persistent.
	err = cmt.cm.Close()
	if err != nil {
		t.Fatal(err)
	}
	cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir))
	if err != nil {
		t.Fatal(err)
	}

	// Verify that the capacity and file sizes are correct.
	sfs = cmt.cm.StorageFolders()
	capacity = sfs[0].Capacity + sfs[1].Capacity
	capacityRemaining = sfs[0].CapacityRemaining + sfs[1].CapacityRemaining
	if capacity != modules.SectorSize*storageFolderGranularity*5 {
		t.Error("new storage folder is reporting the wrong capacity")
	}
	mfi, err = os.Stat(mfn)
	if err != nil {
		t.Fatal(err)
	}
	sfi, err = os.Stat(sfn)
	if err != nil {
		t.Fatal(err)
	}
	if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*2 {
		t.Error("metadata file is the wrong size")
	}
	if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*2 {
		t.Error("sector file is the wrong size")
	}

	// Check that the same number of sectors are still available.
	wg.Add(len(roots))
	var nowRemainingSectors uint64
	for i := 0; i < len(roots); i++ {
		go func(i int) {
			defer wg.Done()
			data, err := cmt.cm.ReadSector(roots[i])
			if err != nil {
				// Sector probably destroyed.
				return
			}
			if !bytes.Equal(data, datas[i]) {
				t.Error("ReadSector has returned the wrong data")
			}
			atomic.AddUint64(&nowRemainingSectors, 1)
		}(i)
	}
	wg.Wait()

	// Check that the capacity remaining matches the number of reachable
	// sectors.
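	// (Invariant being asserted: capacityRemaining == capacity -
	// remainingSectors*modules.SectorSize. Any sector destroyed by the forced
	// shrink must also have been released from the usage accounting, or the
	// two sides will disagree.)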
	if capacityRemaining != capacity-remainingSectors*modules.SectorSize {
		t.Error(capacityRemaining/modules.SectorSize, capacity/modules.SectorSize, remainingSectors)
	}
	if remainingSectors != nowRemainingSectors {
		t.Error("available sector set changed after restart", remainingSectors, nowRemainingSectors)
	}
}

// dependencyShrinkNoFinalize will not add a confirmation to the WAL that a
// shrink storage folder operation has completed.
type dependencyShrinkNoFinalize struct {
	productionDependencies
}

// disrupt will prevent the shrinkStorageFolder operation from committing a
// finalized shrinkStorageFolder operation to the WAL.
func (dependencyShrinkNoFinalize) disrupt(s string) bool {
	if s == "incompleteShrinkStorageFolder" {
		return true
	}
	if s == "cleanWALFile" {
		return true
	}
	return false
}

// TestShrinkStorageFolderShutdownAfterMove simulates an unclean shutdown that
// occurs after the storage folder sector move has completed, but before it has
// established through the WAL that the move has completed. The result should
// be that the storage folder shrink is not accepted after restart.
func TestShrinkStorageFolderShutdownAfterMove(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	d := new(dependencyShrinkNoFinalize)
	cmt, err := newMockedContractManagerTester(d, "TestShrinkStorageFolderShutdownAfterMove")
	if err != nil {
		t.Fatal(err)
	}
	defer cmt.panicClose()

	// Add a storage folder.
	storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne")
	// Create the storage folder dir.
	err = os.MkdirAll(storageFolderOne, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderOne, modules.SectorSize*storageFolderGranularity*8)
	if err != nil {
		t.Fatal(err)
	}

	// Get the index of the storage folder.
	sfs := cmt.cm.StorageFolders()
	if len(sfs) != 1 {
		t.Fatal("there should only be one storage folder")
	}
	sfIndex := sfs[0].Index
	// Verify that the storage folder has the correct capacity.
	if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*8 {
		t.Error("new storage folder is reporting the wrong capacity")
	}
	// Verify that the on-disk files are the right size.
	mfn := filepath.Join(storageFolderOne, metadataFile)
	sfn := filepath.Join(storageFolderOne, sectorFile)
	mfi, err := os.Stat(mfn)
	if err != nil {
		t.Fatal(err)
	}
	sfi, err := os.Stat(sfn)
	if err != nil {
		t.Fatal(err)
	}
	if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*8 {
		t.Error("metadata file is the wrong size")
	}
	if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*8 {
		t.Error("sector file is the wrong size")
	}

	// Create some sectors and add them to the storage folder.
	roots := make([]crypto.Hash, storageFolderGranularity*3)
	datas := make([][]byte, storageFolderGranularity*3)
	for i := 0; i < storageFolderGranularity*3; i++ {
		root, data := randSector()
		roots[i] = root
		datas[i] = data
	}
	// Add all of the sectors.
	var wg sync.WaitGroup
	wg.Add(len(roots))
	for i := 0; i < len(roots); i++ {
		go func(i int) {
			err := cmt.cm.AddSector(roots[i], datas[i])
			if err != nil {
				t.Error(err)
			}
			wg.Done()
		}(i)
	}
	wg.Wait()

	// Add a second storage folder so that the displaced sectors have somewhere
	// to go.
	storageFolderTwo := filepath.Join(cmt.persistDir, "storageFolderTwo")
	// Create the storage folder dir.
	err = os.MkdirAll(storageFolderTwo, 0700)
	if err != nil {
		t.Fatal(err)
	}
	err = cmt.cm.AddStorageFolder(storageFolderTwo, modules.SectorSize*storageFolderGranularity*3)
	if err != nil {
		t.Fatal(err)
	}

	// Verify that every single sector is readable and has the correct data.
wg.Add(len(roots)) var misses uint64 for i := 0; i < len(roots); i++ { go func(i int) { data, err := cmt.cm.ReadSector(roots[i]) if err != nil || !bytes.Equal(data, datas[i]) { atomic.AddUint64(&misses, 1) } wg.Done() }(i) } wg.Wait() if misses != 0 { t.Errorf("Could not find all %v sectors: %v\n", len(roots), misses) } // Decrease the size of the storage folder. err = cmt.cm.ResizeStorageFolder(sfIndex, modules.SectorSize*storageFolderGranularity*2, false) if err != nil { t.Fatal(err) } // Verify that the capacity and file sizes are correct. sfs = cmt.cm.StorageFolders() capacity := sfs[0].Capacity + sfs[1].Capacity capacityRemaining := sfs[0].CapacityRemaining + sfs[1].CapacityRemaining if capacity != modules.SectorSize*storageFolderGranularity*11 { t.Error("new storage folder is reporting the wrong capacity") } if capacityRemaining != modules.SectorSize*storageFolderGranularity*8 { t.Error("new storage folder capacity remaining is reporting the wrong remaining capacity") } mfi, err = os.Stat(mfn) if err != nil { t.Fatal(err) } sfi, err = os.Stat(sfn) if err != nil { t.Fatal(err) } if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*8 { t.Error("metadata file is the wrong size") } if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*8 { t.Error("sector file is the wrong size") } // Verify that every single sector is readable and has the correct data. wg.Add(len(roots)) misses = 0 for i := 0; i < len(roots); i++ { go func(i int) { data, err := cmt.cm.ReadSector(roots[i]) if err != nil || !bytes.Equal(data, datas[i]) { atomic.AddUint64(&misses, 1) } wg.Done() }(i) } wg.Wait() if misses != 0 { t.Errorf("Could not find all %v sectors: %v\n", len(roots), misses) } // Restart the contract manager. WAL update was not completed, so changes // should not have persisted. All sectors should still be available though, // and they may have moved around but the capacity reporting should align // correctly. err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Verify that the capacity and file sizes are correct. sfs = cmt.cm.StorageFolders() capacity = sfs[0].Capacity + sfs[1].Capacity capacityRemaining = sfs[0].CapacityRemaining + sfs[1].CapacityRemaining if capacity != modules.SectorSize*storageFolderGranularity*11 { t.Error("new storage folder is reporting the wrong capacity", capacity/modules.SectorSize, storageFolderGranularity*11) } if capacityRemaining != modules.SectorSize*storageFolderGranularity*8 { t.Error("new storage folder capacity remaining is reporting the wrong remaining capacity") } mfi, err = os.Stat(mfn) if err != nil { t.Fatal(err) } sfi, err = os.Stat(sfn) if err != nil { t.Fatal(err) } if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*8 { t.Error("metadata file is the wrong size") } if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*8 { t.Error("sector file is the wrong size") } // Verify that every single sector is readable and has the correct data. 
wg.Add(len(roots)) misses = 0 for i := 0; i < len(roots); i++ { go func(i int) { data, err := cmt.cm.ReadSector(roots[i]) if err != nil || !bytes.Equal(data, datas[i]) { atomic.AddUint64(&misses, 1) } wg.Done() }(i) } wg.Wait() if misses != 0 { t.Errorf("Could not find all %v sectors: %v\n", len(roots), misses) } } // TestShrinkStorageFolderWAL completes a storage folder shrinking, but leaves // the WAL behind so that a commit is necessary to finalize things. func TestShrinkStorageFolderWAL(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() d := new(dependencyLeaveWAL) cmt, err := newMockedContractManagerTester(d, "TestShrinkStorageFolderWAL") if err != nil { t.Fatal(err) } defer cmt.panicClose() // Add a storage folder. storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne") // Create the storage folder dir. err = os.MkdirAll(storageFolderOne, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderOne, modules.SectorSize*storageFolderGranularity*8) if err != nil { t.Fatal(err) } // Get the index of the storage folder. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("there should only be one storage folder") } sfIndex := sfs[0].Index // Verify that the storage folder has the correct capacity. if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*8 { t.Error("new storage folder is reporting the wrong capacity") } // Verify that the on-disk files are the right size. mfn := filepath.Join(storageFolderOne, metadataFile) sfn := filepath.Join(storageFolderOne, sectorFile) mfi, err := os.Stat(mfn) if err != nil { t.Fatal(err) } sfi, err := os.Stat(sfn) if err != nil { t.Fatal(err) } if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*8 { t.Error("metadata file is the wrong size") } if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*8 { t.Error("sector file is the wrong size") } // Create some sectors and add them to the storage folder. roots := make([]crypto.Hash, storageFolderGranularity*3) datas := make([][]byte, storageFolderGranularity*3) for i := 0; i < storageFolderGranularity*3; i++ { root, data := randSector() roots[i] = root datas[i] = data } // Add all of the sectors. var wg sync.WaitGroup wg.Add(len(roots)) for i := 0; i < len(roots); i++ { go func(i int) { err := cmt.cm.AddSector(roots[i], datas[i]) if err != nil { t.Error(err) } wg.Done() }(i) } wg.Wait() // Add a second storage folder so that the displaced sectors have somewhere // to go. storageFolderTwo := filepath.Join(cmt.persistDir, "storageFolderTwo") // Create the storage folder dir. err = os.MkdirAll(storageFolderTwo, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderTwo, modules.SectorSize*storageFolderGranularity*3) if err != nil { t.Fatal(err) } // Verify that every single sector is readable and has the correct data. wg.Add(len(roots)) var misses uint64 for i := 0; i < len(roots); i++ { go func(i int) { data, err := cmt.cm.ReadSector(roots[i]) if err != nil || !bytes.Equal(data, datas[i]) { atomic.AddUint64(&misses, 1) } wg.Done() }(i) } wg.Wait() if misses != 0 { t.Errorf("Could not find all %v sectors: %v\n", len(roots), misses) } // Decrease the size of the storage folder. err = cmt.cm.ResizeStorageFolder(sfIndex, modules.SectorSize*storageFolderGranularity*2, false) if err != nil { t.Fatal(err) } // Verify that the capacity and file sizes are correct. 
	sfs = cmt.cm.StorageFolders()
	capacity := sfs[0].Capacity + sfs[1].Capacity
	capacityRemaining := sfs[0].CapacityRemaining + sfs[1].CapacityRemaining
	if capacity != modules.SectorSize*storageFolderGranularity*5 {
		t.Error("new storage folder is reporting the wrong capacity")
	}
	if capacityRemaining != modules.SectorSize*storageFolderGranularity*2 {
		t.Error("new storage folder capacity remaining is reporting the wrong remaining capacity")
	}
	mfi, err = os.Stat(mfn)
	if err != nil {
		t.Fatal(err)
	}
	sfi, err = os.Stat(sfn)
	if err != nil {
		t.Fatal(err)
	}
	if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*2 {
		t.Error("metadata file is the wrong size")
	}
	if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*2 {
		t.Error("sector file is the wrong size")
	}

	// Verify that every single sector is readable and has the correct data.
	wg.Add(len(roots))
	misses = 0
	for i := 0; i < len(roots); i++ {
		go func(i int) {
			data, err := cmt.cm.ReadSector(roots[i])
			if err != nil || !bytes.Equal(data, datas[i]) {
				atomic.AddUint64(&misses, 1)
			}
			wg.Done()
		}(i)
	}
	wg.Wait()
	if misses != 0 {
		t.Errorf("Could not find all %v sectors: %v\n", len(roots), misses)
	}

	// Restart the contract manager to see that the change is persistent.
	err = cmt.cm.Close()
	if err != nil {
		t.Fatal(err)
	}
	cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir))
	if err != nil {
		t.Fatal(err)
	}

	// Verify that the capacity and file sizes are correct.
	sfs = cmt.cm.StorageFolders()
	capacity = sfs[0].Capacity + sfs[1].Capacity
	capacityRemaining = sfs[0].CapacityRemaining + sfs[1].CapacityRemaining
	if capacity != modules.SectorSize*storageFolderGranularity*5 {
		t.Error("new storage folder is reporting the wrong capacity")
	}
	if capacityRemaining != modules.SectorSize*storageFolderGranularity*2 {
		t.Error("new storage folder capacity remaining is reporting the wrong remaining capacity")
	}
	mfi, err = os.Stat(mfn)
	if err != nil {
		t.Fatal(err)
	}
	sfi, err = os.Stat(sfn)
	if err != nil {
		t.Fatal(err)
	}
	if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*2 {
		t.Error("metadata file is the wrong size")
	}
	if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*2 {
		t.Error("sector file is the wrong size")
	}

	// Verify that every single sector is readable and has the correct data.
	wg.Add(len(roots))
	misses = 0
	for i := 0; i < len(roots); i++ {
		go func(i int) {
			data, err := cmt.cm.ReadSector(roots[i])
			if err != nil || !bytes.Equal(data, datas[i]) {
				atomic.AddUint64(&misses, 1)
			}
			wg.Done()
		}(i)
	}
	wg.Wait()
	if misses != 0 {
		t.Errorf("Could not find all %v sectors: %v\n", len(roots), misses)
	}
}

// TestShrinkSingleStorageFolder verifies that it's possible to shrink a single
// storage folder with no destination for the sectors.
func TestShrinkSingleStorageFolder(t *testing.T) {
	// TODO: Supporting in-place storage folder shrinking requires the
	// move-sector function to be able to recognize the storage folder that it
	// is currently using - right now it needs a storage folder lock to migrate
	// a sector in, and a storage folder lock to migrate a sector out, and
	// these locks are independent, so it cannot move a sector into the folder
	// that the sector is being moved out of.
	t.Skip("In-place shrinking not currently supported")

	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	cmt, err := newContractManagerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer cmt.panicClose()

	// Add a storage folder.
storageFolderOne := filepath.Join(cmt.persistDir, "storageFolderOne") mfn := filepath.Join(storageFolderOne, metadataFile) sfn := filepath.Join(storageFolderOne, sectorFile) // Create the storage folder dir. err = os.MkdirAll(storageFolderOne, 0700) if err != nil { t.Fatal(err) } err = cmt.cm.AddStorageFolder(storageFolderOne, modules.SectorSize*storageFolderGranularity*8) if err != nil { t.Fatal(err) } // Get the index of the storage folder. sfs := cmt.cm.StorageFolders() if len(sfs) != 1 { t.Fatal("there should only be one storage folder") } sfIndex := sfs[0].Index // Create some sectors and add them to the storage folder. roots := make([]crypto.Hash, storageFolderGranularity*3) datas := make([][]byte, storageFolderGranularity*3) for i := 0; i < storageFolderGranularity*3; i++ { root, data := randSector() roots[i] = root datas[i] = data } // Add all of the sectors. var wg sync.WaitGroup wg.Add(len(roots)) for i := 0; i < len(roots); i++ { go func(i int) { err := cmt.cm.AddSector(roots[i], datas[i]) if err != nil { t.Error(err) } wg.Done() }(i) } wg.Wait() // Decrease the size of the storage folder. err = cmt.cm.ResizeStorageFolder(sfIndex, modules.SectorSize*storageFolderGranularity*4, false) if err != nil { t.Fatal(err) } // Verify that the capacity and file sizes are correct. sfs = cmt.cm.StorageFolders() if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*4 { t.Error("new storage folder is reporting the wrong capacity") } if sfs[0].CapacityRemaining != modules.SectorSize*storageFolderGranularity*1 { t.Error("new storage folder capacity remaining is reporting the wrong remaining capacity") } mfi, err := os.Stat(mfn) if err != nil { t.Fatal(err) } sfi, err := os.Stat(sfn) if err != nil { t.Fatal(err) } if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*4 { t.Error("metadata file is the wrong size") } if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*4 { t.Error("sector file is the wrong size") } // Verify that every single sector is readable and has the correct data. wg.Add(len(roots)) misses := uint64(0) for i := 0; i < len(roots); i++ { go func(i int) { data, err := cmt.cm.ReadSector(roots[i]) if err != nil || !bytes.Equal(data, datas[i]) { atomic.AddUint64(&misses, 1) } wg.Done() }(i) } wg.Wait() if misses != 0 { t.Errorf("Could not find all %v sectors: %v\n", len(roots), misses) } // Restart the contract manager to see that the change is persistent. err = cmt.cm.Close() if err != nil { t.Fatal(err) } cmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir)) if err != nil { t.Fatal(err) } // Verify that the capacity and file sizes are correct. sfs = cmt.cm.StorageFolders() if sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*4 { t.Error("new storage folder is reporting the wrong capacity") } if sfs[0].CapacityRemaining != modules.SectorSize*storageFolderGranularity*1 { t.Error("new storage folder capacity remaining is reporting the wrong remaining capacity") } mfi, err = os.Stat(mfn) if err != nil { t.Fatal(err) } sfi, err = os.Stat(sfn) if err != nil { t.Fatal(err) } if uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*4 { t.Error("metadata file is the wrong size") } if uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*4 { t.Error("sector file is the wrong size") } // Verify that every single sector is readable and has the correct data. 
	wg.Add(len(roots))
	misses = 0
	for i := 0; i < len(roots); i++ {
		go func(i int) {
			data, err := cmt.cm.ReadSector(roots[i])
			if err != nil || !bytes.Equal(data, datas[i]) {
				atomic.AddUint64(&misses, 1)
			}
			wg.Done()
		}(i)
	}
	wg.Wait()
	if misses != 0 {
		t.Errorf("Could not find all %v sectors: %v\n", len(roots), misses)
	}
}
Sia-1.3.0/modules/host/contractmanager/writeaheadlog.go000066400000000000000000000311001313565667000232010ustar00rootroot00000000000000package contractmanager

import (
	"encoding/json"
	"errors"
	"io"
	"os"
	"path/filepath"
	"sync"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/persist"
)

type (
	// sectorUpdate is an idempotent update to the sector metadata.
	sectorUpdate struct {
		Count  uint16
		Folder uint16
		ID     sectorID
		Index  uint32
	}

	// stateChange defines an idempotent change to the state that has not yet
	// been applied to the contract manager. The state change is a single
	// transaction in the WAL.
	//
	// All changes in the stateChange object need to be idempotent, as it's
	// possible that consecutive unclean shutdowns will result in changes being
	// committed to the state multiple times.
	stateChange struct {
		// These fields relate to adding a storage folder. Adding a storage
		// folder happens in several stages.
		//
		// First the storage folder is added as an
		// 'UnfinishedStorageFolderAddition', because there is a large amount
		// of I/O preprocessing that is performed when adding a storage folder.
		// This I/O must be nonblocking and must resume in the event of unclean
		// or early shutdown.
		//
		// When the preprocessing is complete, the storage folder is moved to a
		// 'StorageFolderAddition', which can be safely applied to the contract
		// manager but hasn't yet.
		//
		// ErroredStorageFolderAdditions are signals to the WAL that an
		// unfinished storage folder addition has failed and can be cleared
		// out. The WAL is append-only, which is why an error needs to be
		// logged instead of just automatically clearing out the unfinished
		// storage folder addition.
		ErroredStorageFolderAdditions     []uint16
		ErroredStorageFolderExtensions    []uint16
		StorageFolderAdditions            []savedStorageFolder
		StorageFolderExtensions           []storageFolderExtension
		StorageFolderRemovals             []storageFolderRemoval
		StorageFolderReductions           []storageFolderReduction
		UnfinishedStorageFolderAdditions  []savedStorageFolder
		UnfinishedStorageFolderExtensions []unfinishedStorageFolderExtension

		// Updates to the sector metadata. Careful ordering of events ensures
		// that a sector update will not make it into the synced WAL unless the
		// sector data is already on-disk and synced.
		SectorUpdates []sectorUpdate
	}

	// writeAheadLog coordinates ACID transactions which update the state of
	// the contract manager. Consistency on a field is only guaranteed by
	// looking it up through the WAL, and is not guaranteed by direct access.
	writeAheadLog struct {
		// The primary feature of the WAL is a file on disk that records all of
		// the changes which have been proposed. The data is written to a temp
		// file and then renamed atomically to a non-corrupt commitment of
		// actions to be committed to the state. Data is written to the temp
		// file continuously for performance reasons - when a Sync() ->
		// Rename() occurs, most of the data will have already been flushed to
		// disk, making the operation faster. The same is done with the
		// settings file, which might be multiple MiB large for larger storage
		// arrays.
		//
		// To further increase throughput, the WAL will batch as many
		// operations as possible.
These operations can happen concurrently, // and will block until the contract manager can provide an ACID // guarantee that the operation has completed. Syncing of multiple // operations happens all at once, and the syncChan is used to signal // that a sync operation has completed, providing ACID guarantees to // any operation waiting on it. The mechanism of announcing is to close // the syncChan, and then to create a new one for new operations to // listen on. // // uncommittedChanges details a list of operations which have been // suggested or queued to be made to the state, but are not yet // guaranteed to have completed. fileSettingsTmp file fileWALTmp file syncChan chan struct{} uncommittedChanges []stateChange // Utilities. The WAL needs access to the ContractManager because all // mutations to ACID fields of the contract manager happen through the // WAL. cm *ContractManager mu sync.Mutex } ) // readWALMetadata reads WAL metadata from the input file, returning an error // if the result is unexpected. func readWALMetadata(decoder *json.Decoder) error { var md persist.Metadata err := decoder.Decode(&md) if err != nil { return build.ExtendErr("error reading WAL metadata", err) } if md.Header != walMetadata.Header { return errors.New("WAL metadata header does not match header found in WAL file") } if md.Version != walMetadata.Version { return errors.New("WAL metadata version does not match version found in WAL file") } return nil } // writeWALMetadata writes WAL metadata to the input file. func writeWALMetadata(f file) error { changeBytes, err := json.MarshalIndent(walMetadata, "", "\t") if err != nil { return build.ExtendErr("could not marshal WAL metadata", err) } _, err = f.Write(changeBytes) if err != nil { return build.ExtendErr("unable to write WAL metadata", err) } return nil } // appendChange will add a change to the WAL, writing the details of the change // to the WAL file but not syncing - syncing is orchestrated by the sync loop. // // The WAL is append-only, which means that changes can only be revoked by // appending an error. This is common for long running operations like adding a // storage folder. func (wal *writeAheadLog) appendChange(sc stateChange) { // Marshal the change and then write the change to the WAL file. Syncing // happens in the sync loop. changeBytes, err := json.MarshalIndent(sc, "", "\t") if err != nil { wal.cm.log.Severe("Unable to marshal state change:", err) panic("unable to append a change to the WAL, crashing to prevent corruption") } _, err = wal.fileWALTmp.Write(changeBytes) if err != nil { wal.cm.log.Severe("Unable to write state change to WAL:", err) panic("unable to append a change to the WAL, crashing to prevent corruption") } // Update the WAL to include the new state change in the uncommitted // changes. wal.uncommittedChanges = append(wal.uncommittedChanges, sc) } // commitChange will commit the provided change to the contract manager, // updating both the in-memory state and the on-disk state. // // It should be noted that long running tasks are ignored during calls to // commitChange, as they haven't completed and are being managed by a separate // thread. Upon completion, they will be converted into a different type of // commitment. 
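// // Because the WAL is replayed after an unclean shutdown, commitChange may // run more than once for the same stateChange. This is safe only because // every change recorded in a stateChange is required to be idempotent (see // the stateChange documentation above).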
func (wal *writeAheadLog) commitChange(sc stateChange) { for _, sfa := range sc.StorageFolderAdditions { for i := uint64(0); i < wal.cm.dependencies.atLeastOne(); i++ { wal.commitAddStorageFolder(sfa) } } for _, sfe := range sc.StorageFolderExtensions { for i := uint64(0); i < wal.cm.dependencies.atLeastOne(); i++ { wal.commitStorageFolderExtension(sfe) } } for _, sfr := range sc.StorageFolderReductions { for i := uint64(0); i < wal.cm.dependencies.atLeastOne(); i++ { wal.commitStorageFolderReduction(sfr) } } for _, sfr := range sc.StorageFolderRemovals { for i := uint64(0); i < wal.cm.dependencies.atLeastOne(); i++ { wal.commitStorageFolderRemoval(sfr) } } for _, su := range sc.SectorUpdates { for i := uint64(0); i < wal.cm.dependencies.atLeastOne(); i++ { wal.commitUpdateSector(su) } } } // createWALTmp will open up the temporary WAL file. func (wal *writeAheadLog) createWALTmp() { var err error walTmpName := filepath.Join(wal.cm.persistDir, walFileTmp) wal.fileWALTmp, err = wal.cm.dependencies.createFile(walTmpName) if err != nil { wal.cm.log.Severe("Unable to create WAL temporary file:", err) panic("unable to create WAL temporary file, crashing to avoid corruption") } err = writeWALMetadata(wal.fileWALTmp) if err != nil { wal.cm.log.Severe("Unable to write WAL metadata:", err) panic("unable to create WAL temporary file, crashing to prevent corruption") } } // recoverWAL will read a previous WAL and re-commit all of the changes inside, // restoring the program to consistency after an unclean shutdown. The tmp WAL // file needs to be open before this function is called. func (wal *writeAheadLog) recoverWAL(walFile file) error { // Read the WAL metadata to make sure that the version is correct. decoder := json.NewDecoder(walFile) err := readWALMetadata(decoder) if err != nil { wal.cm.log.Println("ERROR: error while reading WAL metadata:", err) return build.ExtendErr("walFile metadata mismatch", err) } // Read changes from the WAL one at a time and load them back into memory. // A full list of changes is kept so that modifications to long running // changes can be parsed properly. var sc stateChange var scs []stateChange for err == nil { err = decoder.Decode(&sc) if err == nil { // The uncommitted changes are loaded into memory using a simple // append, because the tmp WAL file has not been created yet, and // will not be created until the sync loop is spawned. The sync // loop spawner will make sure that the uncommitted changes are // written to the tmp WAL file. wal.commitChange(sc) scs = append(scs, sc) } } if err != io.EOF { wal.cm.log.Println("ERROR: could not load WAL json:", err) return build.ExtendErr("error loading WAL json", err) } // Do any cleanup regarding long-running unfinished tasks. Long running // task cleanup cannot be handled in the 'commitChange' loop because future // state changes may indicate that the long running task has actually been // completed. wal.cleanupUnfinishedStorageFolderAdditions(scs) wal.cleanupUnfinishedStorageFolderExtensions(scs) return nil } // load will pull any changes from the uncommitted WAL into memory, decoding // them and doing any necessary preprocessing. In the most common case (any // time the previous shutdown was clean), there will not be a WAL file. func (wal *writeAheadLog) load() error { // Create the walTmpFile, which needs to be open before recovery can start. wal.createWALTmp() // Close the WAL tmp file upon shutdown. 
wal.cm.tg.AfterStop(func() { wal.mu.Lock() defer wal.mu.Unlock() err := wal.fileWALTmp.Close() if err != nil { wal.cm.log.Println("ERROR: error closing wal file during contract manager shutdown:", err) return } err = wal.cm.dependencies.removeFile(filepath.Join(wal.cm.persistDir, walFileTmp)) if err != nil { wal.cm.log.Println("ERROR: error removing temporary WAL during contract manager shutdown:", err) return } }) // Try opening the WAL file. walFileName := filepath.Join(wal.cm.persistDir, walFile) walFile, err := wal.cm.dependencies.openFile(walFileName, os.O_RDONLY, 0600) if err == nil { // err == nil indicates that there is a WAL file, which means that the // previous shutdown was not clean. Re-commit the changes in the WAL to // bring the program back to consistency. wal.cm.log.Println("WARN: WAL file detected, performing recovery after unclean shutdown.") err = wal.recoverWAL(walFile) if err != nil { return build.ExtendErr("failed to recover WAL", err) } err = walFile.Close() if err != nil { return build.ExtendErr("error closing WAL after performing a recovery", err) } } else if !os.IsNotExist(err) { return build.ExtendErr("walFile was not opened successfully", err) } // Otherwise os.IsNotExist(err) is true, indicating a successful, clean // shutdown. No action is taken. // Create the tmp settings file and initialize the first write to it. This // is necessary before kicking off the sync loop. wal.fileSettingsTmp, err = wal.cm.dependencies.createFile(filepath.Join(wal.cm.persistDir, settingsFileTmp)) if err != nil { return build.ExtendErr("unable to prepare the settings temp file", err) } wal.cm.tg.AfterStop(func() { wal.mu.Lock() defer wal.mu.Unlock() err := wal.fileSettingsTmp.Close() if err != nil { wal.cm.log.Println("ERROR: unable to close settings temporary file") return } err = wal.cm.dependencies.removeFile(filepath.Join(wal.cm.persistDir, settingsFileTmp)) if err != nil { wal.cm.log.Println("ERROR: unable to remove settings temporary file") return } }) ss := wal.cm.savedSettings() b, err := json.MarshalIndent(ss, "", "\t") if err != nil { return build.ExtendErr("unable to marshal settings data", err) } enc := json.NewEncoder(wal.fileSettingsTmp) if err := enc.Encode(settingsMetadata.Header); err != nil { return build.ExtendErr("unable to write header to settings temp file", err) } if err := enc.Encode(settingsMetadata.Version); err != nil { return build.ExtendErr("unable to write version to settings temp file", err) } if _, err = wal.fileSettingsTmp.Write(b); err != nil { return build.ExtendErr("unable to write data to settings temp file", err) } return nil } Sia-1.3.0/modules/host/contractmanager/writeaheadlogsync.go000066400000000000000000000224261313565667000241110ustar00rootroot00000000000000package contractmanager import ( "encoding/json" "path/filepath" "sync" "sync/atomic" "time" "github.com/NebulousLabs/Sia/build" ) // syncResources will call Sync on all resources that the WAL has open. The // storage folder files will be left open, as they are not updated atomically. // The settings file and WAL tmp files will be synced and closed, to perform an // atomic update to the files. func (wal *writeAheadLog) syncResources() { // Syncing occurs over multiple files and disks, and is done in parallel to // minimize the amount of time that a lock is held over the contract // manager. var wg sync.WaitGroup // Sync the settings file. 
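// (A general note on the sequence used below - a standard crash-safety // pattern, not anything Sia-specific: the new contents are written to a // temp file, the temp file is Sync()'d and closed, and only then is it // renamed over the live file. Because rename is atomic on POSIX // filesystems, an unclean shutdown at any point leaves either the old file // or the new file fully intact, never a partially written mix.)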
wg.Add(1) go func() { defer wg.Done() tmpFilename := filepath.Join(wal.cm.persistDir, settingsFileTmp) filename := filepath.Join(wal.cm.persistDir, settingsFile) err := wal.fileSettingsTmp.Sync() if err != nil { wal.cm.log.Severe("ERROR: unable to sync the contract manager settings:", err) } err = wal.fileSettingsTmp.Close() if err != nil { wal.cm.log.Println("unable to close the temporary contract manager settings file:", err) } // For testing, provide a place to interrupt the saving of the sync // file. This makes it easy to simulate certain types of unclean // shutdown. if wal.cm.dependencies.disrupt("settingsSyncRename") { // The current settings file that is being re-written will not be // saved. return } err = wal.cm.dependencies.renameFile(tmpFilename, filename) if err != nil { wal.cm.log.Severe("ERROR: unable to atomically copy the contract manager settings:", err) } }() // Sync all of the storage folders. for _, sf := range wal.cm.storageFolders { // Skip operation on unavailable storage folders. if atomic.LoadUint64(&sf.atomicUnavailable) == 1 { continue } wg.Add(2) go func(sf *storageFolder) { defer wg.Done() err := sf.metadataFile.Sync() if err != nil { wal.cm.log.Severe("ERROR: unable to sync a storage folder:", err) } }(sf) go func(sf *storageFolder) { defer wg.Done() err := sf.sectorFile.Sync() if err != nil { wal.cm.log.Severe("ERROR: unable to sync a storage folder:", err) } }(sf) } // Sync the temp WAL file, but do not perform the atomic rename - the // atomic rename must be guaranteed to happen after all of the other files // have been synced. wg.Add(1) go func() { defer wg.Done() err := wal.fileWALTmp.Sync() if err != nil { wal.cm.log.Severe("Unable to sync the write-ahead-log:", err) } err = wal.fileWALTmp.Close() if err != nil { // Log that the host is having trouble saving the uncommitted changes. // Crash if the list of uncommitted changes has grown very large. wal.cm.log.Println("ERROR: could not close temporary write-ahead-log in contract manager:", err) return } }() // Wait for all of the sync calls to finish. wg.Wait() // Now that all the Sync calls have completed, rename the WAL tmp file to // update the WAL. if !wal.cm.dependencies.disrupt("walRename") { walTmpName := filepath.Join(wal.cm.persistDir, walFileTmp) walFileName := filepath.Join(wal.cm.persistDir, walFile) err := wal.cm.dependencies.renameFile(walTmpName, walFileName) if err != nil { // Log that the host is having trouble saving the uncommitted changes. // Crash if the list of uncommitted changes has grown very large. wal.cm.log.Severe("ERROR: could not rename temporary write-ahead-log in contract manager:", err) } } // Perform any cleanup actions on the updates. for _, sc := range wal.uncommittedChanges { for _, sfe := range sc.StorageFolderExtensions { wal.commitStorageFolderExtension(sfe) } for _, sfr := range sc.StorageFolderReductions { wal.commitStorageFolderReduction(sfr) } for _, sfr := range sc.StorageFolderRemovals { wal.commitStorageFolderRemoval(sfr) } // TODO: Virtual sector handling here. } // Now that the WAL is sync'd and updated, any calls waiting on ACID // guarantees can safely return. close(wal.syncChan) wal.syncChan = make(chan struct{}) } // commit will take all of the changes that have been added to the WAL and // atomically commit the WAL to disk, then apply the actions in the WAL to the // state. commit will do lots of syncing disk I/O, and so can take a while, // especially if there are a large number of actions queued up. 
// // commit does not return a value; errors encountered while syncing and // rewriting files are logged as they occur, and the WAL itself provides // recovery after an unclean shutdown. // // commit should only be called from threadedSyncLoop. func (wal *writeAheadLog) commit() { // Sync all open, non-WAL files on the host. wal.syncResources() // Extract any unfinished long-running jobs from the list of WAL items. unfinishedAdditions := findUnfinishedStorageFolderAdditions(wal.uncommittedChanges) unfinishedExtensions := findUnfinishedStorageFolderExtensions(wal.uncommittedChanges) // Clear the set of uncommitted changes. wal.uncommittedChanges = nil // Begin writing to the settings file. var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() // Begin writing to the settings file, which will be synced during the // next iteration of the sync loop. var err error wal.fileSettingsTmp, err = wal.cm.dependencies.createFile(filepath.Join(wal.cm.persistDir, settingsFileTmp)) if err != nil { wal.cm.log.Severe("Unable to open temporary settings file for writing:", err) } ss := wal.cm.savedSettings() b, err := json.MarshalIndent(ss, "", "\t") if err != nil { wal.cm.log.Severe("unable to marshal settings data:", err) } enc := json.NewEncoder(wal.fileSettingsTmp) if err := enc.Encode(settingsMetadata.Header); err != nil { wal.cm.log.Severe("unable to write header to settings temp file:", err) } if err := enc.Encode(settingsMetadata.Version); err != nil { wal.cm.log.Severe("unable to write version to settings temp file:", err) } if _, err = wal.fileSettingsTmp.Write(b); err != nil { wal.cm.log.Severe("unable to write data to settings temp file:", err) } }() // Begin writing new changes to the WAL. wg.Add(1) go func() { defer wg.Done() // Recreate the wal file so that it can receive new updates. var err error walTmpName := filepath.Join(wal.cm.persistDir, walFileTmp) wal.fileWALTmp, err = wal.cm.dependencies.createFile(walTmpName) if err != nil { wal.cm.log.Severe("ERROR: unable to create write-ahead-log:", err) } // Write the metadata into the WAL. err = writeWALMetadata(wal.fileWALTmp) if err != nil { wal.cm.log.Severe("Unable to properly initialize WAL file, crashing to prevent corruption:", err) } // Append all of the remaining long running uncommitted changes to the WAL. wal.appendChange(stateChange{ UnfinishedStorageFolderAdditions: unfinishedAdditions, UnfinishedStorageFolderExtensions: unfinishedExtensions, }) }() wg.Wait() } // spawnSyncLoop prepares and establishes the loop which will be running in the // background to coordinate disk synchronizations. Disk syncing is done in a // background loop to help with performance, and to allow multiple things to // modify the WAL simultaneously. func (wal *writeAheadLog) spawnSyncLoop() (err error) { // Create a signal so we know when the sync loop has stopped, which means // there will be no more open commits. threadsStopped := make(chan struct{}) syncLoopStopped := make(chan struct{}) wal.syncChan = make(chan struct{}) go wal.threadedSyncLoop(threadsStopped, syncLoopStopped) wal.cm.tg.AfterStop(func() { // Wait for another iteration of the sync loop, so that the in-progress // settings can be saved atomically to disk. wal.mu.Lock() syncChan := wal.syncChan wal.mu.Unlock() <-syncChan // Close the threadsStopped channel to let the sync loop know that all // calls to tg.Add() in the contract manager have cleaned up. 
close(threadsStopped) // Because this is being called in an 'AfterStop' routine, all open // calls to the contract manager should have completed, and all open // threads should have closed. The last call to change the contract // manager should have completed, so the number of uncommitted changes // should be zero. <-syncLoopStopped // Wait for the sync loop to signal proper termination. // Allow unclean shutdown to be simulated by disrupting the removal of // the WAL file. if !wal.cm.dependencies.disrupt("cleanWALFile") { err = wal.cm.dependencies.removeFile(filepath.Join(wal.cm.persistDir, walFile)) if err != nil { wal.cm.log.Println("Error removing WAL during contract manager shutdown:", err) } } }) return nil } // threadedSyncLoop is a background thread that occasionally commits the WAL to // the state as an ACID transaction. This process can be very slow, so // transactions to the contract manager are batched automatically and // occasionally committed together. func (wal *writeAheadLog) threadedSyncLoop(threadsStopped chan struct{}, syncLoopStopped chan struct{}) { // Provide a place for the testing to disable the sync loop. if wal.cm.dependencies.disrupt("threadedSyncLoopStart") { close(syncLoopStopped) return } syncInterval := 500 * time.Millisecond for { select { case <-threadsStopped: close(syncLoopStopped) return case <-time.After(syncInterval): // Commit all of the changes in the WAL to disk, and then apply the // changes. wal.mu.Lock() wal.commit() wal.mu.Unlock() } } } Sia-1.3.0/modules/host/dependencies.go000066400000000000000000000112371313565667000176510ustar00rootroot00000000000000package host import ( "errors" "io/ioutil" "net" "os" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/fastrand" ) // Fake errors that get returned when a simulated failure of a dependency is // desired for testing. var ( mockErrListen = errors.New("simulated Listen failure") mockErrLoadFile = errors.New("simulated LoadFile failure") mockErrMkdirAll = errors.New("simulated MkdirAll failure") mockErrNewLogger = errors.New("simulated NewLogger failure") mockErrOpenDatabase = errors.New("simulated OpenDatabase failure") mockErrReadFile = errors.New("simulated ReadFile failure") mockErrRemoveFile = errors.New("simulated RemoveFile failure") mockErrSymlink = errors.New("simulated Symlink failure") mockErrWriteFile = errors.New("simulated WriteFile failure") ) // These interfaces define the Host's dependencies. Mocking implementation // complexity can be reduced by defining each dependency as the minimum // possible subset of the real dependency. type ( // dependencies defines all of the dependencies of the Host. dependencies interface { // disrupt can be inserted in the code as a way to inject problems, // such as a network call that takes 10 minutes or a disk write that // never completes. disrupt will return true if the disruption is // forcibly triggered. In production, disrupt will always return false. disrupt(string) bool // listen gives the host the ability to receive incoming connections. listen(string, string) (net.Listener, error) // loadFile allows the host to load a persistence structure from disk. loadFile(persist.Metadata, interface{}, string) error // mkdirAll gives the host the ability to create chains of folders // within the filesystem. mkdirAll(string, os.FileMode) error // newLogger creates a logger that the host can use to log messages and // write critical statements. 
newLogger(string) (*persist.Logger, error) // openDatabase creates a database that the host can use to interact // with large volumes of persistent data. openDatabase(persist.Metadata, string) (*persist.BoltDatabase, error) // randRead fills the input bytes with random data. randRead([]byte) (int, error) // readFile reads a file in full from the filesystem. readFile(string) ([]byte, error) // removeFile removes a file from the filesystem. removeFile(string) error // symlink creates a symlink between a source and a destination. symlink(s1, s2 string) error // writeFile writes data to the filesystem using the provided filename. writeFile(string, []byte, os.FileMode) error } ) type ( // productionDependencies is an empty struct that implements all of the // dependencies using full featured libraries. productionDependencies struct{} ) // disrupt will always return false, but can be over-written during testing to // trigger disruptions. func (productionDependencies) disrupt(string) bool { return false } // listen gives the host the ability to receive incoming connections. func (productionDependencies) listen(s1, s2 string) (net.Listener, error) { return net.Listen(s1, s2) } // loadFile allows the host to load a persistence structure from disk. func (productionDependencies) loadFile(m persist.Metadata, i interface{}, s string) error { return persist.LoadJSON(m, i, s) } // mkdirAll gives the host the ability to create chains of folders within the // filesystem. func (productionDependencies) mkdirAll(s string, fm os.FileMode) error { return os.MkdirAll(s, fm) } // newLogger creates a logger that the host can use to log messages and write // critical statements. func (productionDependencies) newLogger(s string) (*persist.Logger, error) { return persist.NewFileLogger(s) } // openDatabase creates a database that the host can use to interact with large // volumes of persistent data. func (productionDependencies) openDatabase(m persist.Metadata, s string) (*persist.BoltDatabase, error) { return persist.OpenDatabase(m, s) } // randRead fills the input bytes with random data. func (productionDependencies) randRead(b []byte) (int, error) { return fastrand.Reader.Read(b) } // readFile reads a file from the filesystem. func (productionDependencies) readFile(s string) ([]byte, error) { return ioutil.ReadFile(s) } // removeFile removes a file from the filesystem. func (productionDependencies) removeFile(s string) error { return os.Remove(s) } // symlink creates a symlink between a source and a destination file. func (productionDependencies) symlink(s1, s2 string) error { return os.Symlink(s1, s2) } // writeFile writes a file to the filesystem. func (productionDependencies) writeFile(s string, b []byte, fm os.FileMode) error { return ioutil.WriteFile(s, b, fm) } Sia-1.3.0/modules/host/errors.go000066400000000000000000000125741313565667000165420ustar00rootroot00000000000000package host // errors.go is responsible for logging the various errors that the host runs // into related to operations that cannot immediately provide feedback to the // user. (e.g. network failures, disk failures, etc.). Different errors should // be handled and logged differently, depending on severity and frequency, such // that the person reading the logs is able to see all of the major issues // without having them obstructed by the minor ones. 
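// As a purely illustrative sketch of the intended flow (hypothetical code, // not taken from this package): a handler that receives an underpriced // payment might build its error as // // err := extendErr("payment rejected: ", ErrorCommunication("insufficient price")) // // and pass the result to managedLogError, which throttles how often that // class of error reaches the log.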
import ( "errors" "strings" "sync/atomic" "github.com/NebulousLabs/fastrand" ) const ( errorCommunicationProbability = 5 errorConnectionProbability = 20 errorConsensusProbability = 1 errorInternalProbability = 3 errorNormalProbability = 20 ) type ( // ErrorCommunication errors are meant to be returned if the host and the // renter seem to be miscommunicating. For example, if the renter attempts // to pay an insufficient price, there has been a communication error. ErrorCommunication string // ErrorConnection is meant to be used on errors where the network is // returning unexpected errors. For example, sudden disconnects or // connection write failures. ErrorConnection string // ErrorConsensus errors are meant to be used when there are problems // related to consensus, such as an inability to submit a storage proof to // the blockchain, or an inability to get a file contract revision on to // the blockchain. ErrorConsensus string // ErrorInternal errors are meant to be used if an internal process in the // host is malfunctioning, for example if the disk is failing. ErrorInternal string ) // composeErrors will take multiple errors and compose them into a single // error with a longer message. Any nil errors used as inputs will be stripped // out, and if there are zero non-nil inputs then 'nil' will be returned. // // The original types of the errors are not preserved at all. func composeErrors(errs ...error) error { // Strip out any nil errors. var errStrings []string for _, err := range errs { if err != nil { errStrings = append(errStrings, err.Error()) } } // Return nil if there are no non-nil errors in the input. if len(errStrings) <= 0 { return nil } // Combine all of the non-nil errors into one larger return value. return errors.New(strings.Join(errStrings, "; ")) } // extendErr will return an error that is the same type as the input error, but // prefixed with the provided context. This only works for the error types // defined in the host package. If the input error is nil, the extension is // ignored and nil will be returned. func extendErr(s string, err error) error { if err == nil { return nil } switch v := err.(type) { case ErrorCommunication: return ErrorCommunication(s) + v case ErrorConnection: return ErrorConnection(s) + v case ErrorConsensus: return ErrorConsensus(s) + v case ErrorInternal: return ErrorInternal(s) + v default: return errors.New(s + err.Error()) } } // Error satisfies the Error interface for the ErrorCommunication type. func (ec ErrorCommunication) Error() string { return "communication error: " + string(ec) } // Error satisfies the Error interface for the ErrorConnection type. func (ec ErrorConnection) Error() string { return "connection error: " + string(ec) } // Error satisfies the Error interface for the ErrorConsensus type. func (ec ErrorConsensus) Error() string { return "consensus error: " + string(ec) } // Error satisfies the Error interface for the ErrorInternal type. func (ec ErrorInternal) Error() string { return "internal error: " + string(ec) } // managedLogError will take an error and log it to the host, depending on the // type of error and whether or not the DEBUG flag has been set. func (h *Host) managedLogError(err error) { // Determine the type of error and the number of times that this error has // been logged. var num uint64 var probability int // Error will be logged with 1/probability chance. 
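// Illustrative arithmetic, derived from the constants and logic below: once // more than logAllLimit connection errors have been logged, each further // ErrorConnection is logged with chance 1/(errorConnectionProbability+1), // i.e. roughly 1 in 21. Past logFewLimit, the probability factor is // multiplied by 25, dropping the chance to roughly 1 in 501.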
switch err.(type) { case ErrorCommunication: num = atomic.LoadUint64(&h.atomicCommunicationErrors) probability = errorCommunicationProbability case ErrorConnection: num = atomic.LoadUint64(&h.atomicConnectionErrors) probability = errorConnectionProbability case ErrorConsensus: num = atomic.LoadUint64(&h.atomicConsensusErrors) probability = errorConsensusProbability case ErrorInternal: num = atomic.LoadUint64(&h.atomicInternalErrors) probability = errorInternalProbability default: num = atomic.LoadUint64(&h.atomicNormalErrors) probability = errorNormalProbability } // If num > logFewLimit, substantially decrease the probability that the error // gets logged. if num > logFewLimit { probability = probability * 25 } // If we've seen less than logAllLimit of that type of error before, log // the error as a normal logging statement. Otherwise, probabilistically // log the statement. In debugging mode, log all statements. shouldLog := num < logAllLimit || fastrand.Intn(probability+1) == probability if shouldLog { h.log.Println(err) } else { h.log.Debugln(err) return } // Increment the log counter. switch err.(type) { case ErrorCommunication: atomic.AddUint64(&h.atomicCommunicationErrors, 1) case ErrorConnection: atomic.AddUint64(&h.atomicConnectionErrors, 1) case ErrorConsensus: atomic.AddUint64(&h.atomicConsensusErrors, 1) case ErrorInternal: atomic.AddUint64(&h.atomicInternalErrors, 1) default: atomic.AddUint64(&h.atomicNormalErrors, 1) } } Sia-1.3.0/modules/host/errors_test.go000066400000000000000000000207721313565667000176020ustar00rootroot00000000000000package host import ( "bufio" "errors" "os" "path/filepath" "strings" "testing" "github.com/NebulousLabs/Sia/modules" ) // countFileLines is a helper function that will count the number of lines in a // file, based on the number of '\n' characters. countFileLines scans the file // line by line with a bufio.Scanner rather than loading it into memory all at // once. // // countFileLines will ignore all lines containing the string '[DEBUG]'. func countFileLines(filepath string) (uint64, error) { file, err := os.Open(filepath) if err != nil { return 0, err } defer file.Close() scanner := bufio.NewScanner(file) lines := uint64(0) for scanner.Scan() { line := scanner.Text() if !strings.Contains(line, "[DEBUG]") { lines++ } } return lines, nil } // TestComposeErrors checks that composeErrors is correctly composing errors // and handling edge cases. func TestComposeErrors(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() trials := []struct { inputErrors []error nilReturn bool expectedComposedError string }{ { nil, true, "", }, { make([]error, 0), true, "", }, { []error{errors.New("single error")}, false, "single error", }, { []error{ errors.New("first error"), errors.New("second error"), }, false, "first error; second error", }, { []error{ errors.New("first error"), errors.New("second error"), errors.New("third error"), }, false, "first error; second error; third error", }, { []error{ nil, errors.New("second error"), errors.New("third error"), }, false, "second error; third error", }, { []error{ errors.New("first error"), nil, nil, }, false, "first error", }, { []error{ nil, nil, nil, }, true, "", }, } for _, trial := range trials { err := composeErrors(trial.inputErrors...) 
if trial.nilReturn { if err != nil { t.Error("composeError failed a test, expecting nil, got", err) } } else { if err == nil { t.Error("not expecting a nil error when doing composition") } if err.Error() != trial.expectedComposedError { t.Error("composeError failed a test, expecting", trial.expectedComposedError, "got", err.Error()) } } } } // TestExtendErr checks that extendErr works as described - preserving the // error type within the package and adding a string. Also returning nil if the // input error is nil. func TestExtendErr(t *testing.T) { // Try extending a nil error. var err error err2 := extendErr("extend: ", err) if err2 != nil { t.Error("providing a nil error to extendErr does not return nil") } // Try extending a normal error. err = errors.New("extend me") err2 = extendErr("extend: ", err) if err2.Error() != "extend: extend me" { t.Error("normal error not extended correctly") } // Try extending ErrorCommunication. err = ErrorCommunication("err") err2 = extendErr("extend: ", err) if err2.Error() != "communication error: extend: err" { t.Error("extending ErrorCommunication did not occur correctly:", err2.Error()) } if _, ok := err2.(ErrorCommunication); !ok { t.Error("extended error did not preserve error type") } // Try extending ErrorConnection. err = ErrorConnection("err") err2 = extendErr("extend: ", err) if err2.Error() != "connection error: extend: err" { t.Error("extending ErrorConnection did not occur correctly:", err2.Error()) } switch err2.(type) { case ErrorConnection: default: t.Error("extended error did not preserve error type") } // Try extending ErrorConsensus. err = ErrorConsensus("err") err2 = extendErr("extend: ", err) if err2.Error() != "consensus error: extend: err" { t.Error("extending ErrorConsensus did not occur correctly:", err2.Error()) } switch err2.(type) { case ErrorConsensus: default: t.Error("extended error did not preserve error type") } // Try extending ErrorInternal. err = ErrorInternal("err") err2 = extendErr("extend: ", err) if err2.Error() != "internal error: extend: err" { t.Error("extending ErrorInternal did not occur correctly:", err2.Error()) } switch err2.(type) { case ErrorInternal: default: t.Error("extended error did not preserve error type") } } // TestManagedLogError will check that errors are being logged correctly based // on the logAllLimit, the probabilities, and the logFewLimit. func TestManagedLogError(t *testing.T) { if testing.Short() { t.SkipNow() } ht, err := newHostTester("TestManagedLogError") if err != nil { t.Fatal(err) } defer ht.Close() logFilepath := filepath.Join(ht.persistDir, modules.HostDir, logFile) // Count the number of lines in the log file. baseLines, err := countFileLines(logFilepath) if err != nil { t.Fatal(err) } // Log 'logAllLimit' for ErrorCommunication. for i := uint64(0); i < logAllLimit; i++ { ht.host.managedLogError(ErrorCommunication("comm error")) } logLines, err := countFileLines(logFilepath) if err != nil { t.Fatal(err) } if logLines != baseLines+logAllLimit { t.Error("does not seem that all communication errors were logged") } baseLines = logLines // Log 'logAllLimit' for ErrorConnection. for i := uint64(0); i < logAllLimit; i++ { ht.host.managedLogError(ErrorConnection("conn error")) } logLines, err = countFileLines(logFilepath) if err != nil { t.Fatal(err) } if logLines != baseLines+logAllLimit { t.Error("does not seem that all connection errors were logged") } baseLines = logLines // Log 'logAllLimit' for ErrorConsensus. 
for i := uint64(0); i < logAllLimit; i++ { ht.host.managedLogError(ErrorConsensus("consensus error")) } logLines, err = countFileLines(logFilepath) if err != nil { t.Fatal(err) } if logLines != baseLines+logAllLimit { t.Error("does not seem that all consensus errors were logged") } baseLines = logLines // Log 'logAllLimit' for ErrorInternal. for i := uint64(0); i < logAllLimit; i++ { ht.host.managedLogError(ErrorInternal("internal error")) } logLines, err = countFileLines(logFilepath) if err != nil { t.Fatal(err) } if logLines != baseLines+logAllLimit { t.Error("does not seem that all internal errors were logged") } baseLines = logLines // Log 'logAllLimit' for normal errors. for i := uint64(0); i < logAllLimit; i++ { ht.host.managedLogError(errors.New("normal error")) } logLines, err = countFileLines(logFilepath) if err != nil { t.Fatal(err) } if logLines != baseLines+logAllLimit { t.Error("does not seem that all normal errors were logged", logLines, baseLines, logAllLimit) } baseLines = logLines // Log enough ErrorInternal errors to bring ErrorInternal close, but not // all the way, to the 'logFewLimit'. remaining := logFewLimit - logAllLimit logsNeeded := remaining * errorInternalProbability for i := uint64(0); i < logsNeeded/3; i++ { ht.host.managedLogError(ErrorInternal("internal err")) } logLines, err = countFileLines(logFilepath) if err != nil { t.Fatal(err) } if logLines < baseLines+remaining/6 || logLines > baseLines+remaining { t.Error("probabilistic logging is not logging with the correct probability:", logLines, baseLines, remaining) } // Log enough ErrorInternal errors to bring it all the way to // 'logFewLimit'. for i := uint64(0); i < logsNeeded*5; i++ { ht.host.managedLogError(ErrorInternal("internal err")) } logLines, err = countFileLines(logFilepath) if err != nil { t.Fatal(err) } if logLines < baseLines+remaining || logLines > baseLines+logsNeeded*2 { t.Error("probabilistic logging is not clamping correctly:", baseLines, logLines, logsNeeded) } baseLines = logLines // Log enough ErrorCommunication errors to bring ErrorCommunication close, but not // all the way, to the 'logFewLimit'. remaining = logFewLimit - logAllLimit logsNeeded = remaining * errorCommunicationProbability for i := uint64(0); i < logsNeeded/3; i++ { ht.host.managedLogError(ErrorCommunication("comm err")) } logLines, err = countFileLines(logFilepath) if err != nil { t.Fatal(err) } if logLines < baseLines+remaining/6 || logLines > baseLines+remaining { t.Error("probabilistic logging is not logging with the correct probability:", baseLines, logLines, logsNeeded, remaining) } // Log enough ErrorCommunication errors to bring it all the way to // 'logFewLimit'. for i := uint64(0); i < logsNeeded*5; i++ { ht.host.managedLogError(ErrorCommunication("comm err")) } logLines, err = countFileLines(logFilepath) if err != nil { t.Fatal(err) } if logLines < baseLines+remaining || logLines > baseLines+logsNeeded*2 { t.Error("probabilistic logging is not clamping correctly:", baseLines, logLines, logsNeeded, remaining) } } Sia-1.3.0/modules/host/host.go000066400000000000000000000325261313565667000162030ustar00rootroot00000000000000// Package host is an implementation of the host module, and is responsible for // participating in the storage ecosystem, turning available disk space and // internet bandwidth into profit for the user. package host // TODO: what happens if the renter submits the revision early, before the // final revision. Will the host mark the contract as complete? 
// TODO: Host and renter are reporting errors where the renter is not adding // enough fees to the file contract. // TODO: Test the safety of the builder, it should be okay to have multiple // builders open for up to 600 seconds, which means multiple blocks could be // received in that time period. Should also check what happens if a parent // gets confirmed on the blockchain before the builder is finished. // TODO: Double check that any network connection has a finite deadline - // handling action items properly requires that the locks held on the // obligations eventually be released. There's also some more advanced // implementation that needs to happen with the storage obligation locks to // make sure that someone who wants a lock is able to get it eventually. // TODO: Add contract compensation from form contract to the storage obligation // financial metrics, and to the host's tracking. // TODO: merge the network interfaces stuff, don't forget to include the // 'announced' variable as one of the outputs. // TODO: 'announced' doesn't tell you if the announcement made it to the // blockchain. // TODO: Need to make sure that the revision exchange for the renter and the // host is being handled correctly. For the host, it's not so difficult. The // host need only send the most recent revision every time. But, the host // should not sign a revision unless the renter has explicitly signed such that // the 'WholeTransaction' fields cover only the revision and that the // signatures for the revision don't depend on anything else. The renter needs // to verify the same when checking on a file contract revision from the host. // If the host has submitted a file contract revision where the signatures have // signed the whole file contract, there is an issue. // TODO: there is a mistake in the file contract revision rpc, the host, if it // does not have the right file contract id, should be returning an error there // to the renter (and not just to its calling function without informing the // renter what's up). // TODO: Need to make sure that the correct height is being used when adding // sectors to the storage manager - in some places right now WindowStart is // being used but really it's WindowEnd that should be in use. // TODO: The host needs some way to blacklist file contracts that are being // abusive by repeatedly getting free download batches. // TODO: clean up all of the magic numbers in the host. // TODO: revamp the finances for the storage obligations. // TODO: host_test.go has commented out tests. // TODO: network_test.go has commented out tests. // TODO: persist_test.go has commented out tests. // TODO: update_test.go has commented out tests. import ( "errors" "fmt" "net" "path/filepath" "sync" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/host/contractmanager" "github.com/NebulousLabs/Sia/persist" siasync "github.com/NebulousLabs/Sia/sync" "github.com/NebulousLabs/Sia/types" ) const ( // Names of the various persistent files in the host. dbFilename = modules.HostDir + ".db" logFile = modules.HostDir + ".log" settingsFile = modules.HostDir + ".json" ) var ( // dbMetadata is a header that gets put into the database to identify a // version and indicate that the database holds host information. 
dbMetadata = persist.Metadata{ Header: "Sia Host DB", Version: "0.5.2", } // persistMetadata is the header that gets written to the persist file, and is // used to recognize other persist files. persistMetadata = persist.Metadata{ Header: "Sia Host", Version: "1.2.0", } // errHostClosed gets returned when a call is rejected due to the host // having been closed. errHostClosed = errors.New("call is disabled because the host is closed") // Nil dependency errors. errNilCS = errors.New("host cannot use a nil state") errNilTpool = errors.New("host cannot use a nil transaction pool") errNilWallet = errors.New("host cannot use a nil wallet") ) // A Host contains all the fields necessary for storing files for clients and // performing the storage proofs on the received files. type Host struct { // RPC Metrics - atomic variables need to be placed at the top to preserve // compatibility with 32bit systems. These values are not persistent. atomicDownloadCalls uint64 atomicErroredCalls uint64 atomicFormContractCalls uint64 atomicRenewCalls uint64 atomicReviseCalls uint64 atomicRecentRevisionCalls uint64 atomicSettingsCalls uint64 atomicUnrecognizedCalls uint64 // Error management. There are a few different types of errors returned by // the host. These errors are intentionally not persistent, so that the // logging limits of each error type will be reset each time the host is // reset. These values are not persistent. atomicCommunicationErrors uint64 atomicConnectionErrors uint64 atomicConsensusErrors uint64 atomicInternalErrors uint64 atomicNormalErrors uint64 // Dependencies. cs modules.ConsensusSet tpool modules.TransactionPool wallet modules.Wallet dependencies modules.StorageManager // Host ACID fields - these fields need to be updated in serial, ACID // transactions. announced bool announceConfirmed bool blockHeight types.BlockHeight publicKey types.SiaPublicKey secretKey crypto.SecretKey recentChange modules.ConsensusChangeID unlockHash types.UnlockHash // A wallet address that can receive coins. // Host transient fields - these fields are either determined at startup or // otherwise are not critical to always be correct. autoAddress modules.NetAddress // Determined using automatic tooling in network.go financialMetrics modules.HostFinancialMetrics settings modules.HostInternalSettings revisionNumber uint64 workingStatus modules.HostWorkingStatus connectabilityStatus modules.HostConnectabilityStatus // A map of storage obligations that are currently being modified. Locks on // storage obligations can be long-running, and each storage obligation can // be locked separately. lockedStorageObligations map[types.FileContractID]*siasync.TryMutex // Utilities. db *persist.BoltDatabase listener net.Listener log *persist.Logger mu sync.RWMutex persistDir string port string tg siasync.ThreadGroup } // checkUnlockHash will check that the host has an unlock hash. If the host // does not have an unlock hash, an attempt will be made to get an unlock hash // from the wallet. That may fail due to the wallet being locked, in which case // an error is returned. func (h *Host) checkUnlockHash() error { if h.unlockHash == (types.UnlockHash{}) { uc, err := h.wallet.NextAddress() if err != nil { return err } // Set the unlock hash and save the host. Saving is important, because // the host will be using this unlock hash to establish identity, and // losing it will mean silently losing part of the host identity. 
h.unlockHash = uc.UnlockHash() err = h.saveSync() if err != nil { return err } } return nil } // newHost returns an initialized Host, taking a set of dependencies as input. // By making the dependencies an argument of the 'new' call, the host can be // mocked such that the dependencies can return unexpected errors or unique // behaviors during testing, enabling easier testing of the failure modes of // the Host. func newHost(dependencies dependencies, cs modules.ConsensusSet, tpool modules.TransactionPool, wallet modules.Wallet, listenerAddress string, persistDir string) (*Host, error) { // Check that all the dependencies were provided. if cs == nil { return nil, errNilCS } if tpool == nil { return nil, errNilTpool } if wallet == nil { return nil, errNilWallet } // Create the host object. h := &Host{ cs: cs, tpool: tpool, wallet: wallet, dependencies: dependencies, lockedStorageObligations: make(map[types.FileContractID]*siasync.TryMutex), persistDir: persistDir, } // Call stop in the event of a partial startup. var err error defer func() { if err != nil { err = composeErrors(h.tg.Stop(), err) } }() // Create the persist directory if it does not yet exist. err = dependencies.mkdirAll(h.persistDir, 0700) if err != nil { return nil, err } // Initialize the logger, and set up the stop call that will close the // logger. h.log, err = dependencies.newLogger(filepath.Join(h.persistDir, logFile)) if err != nil { return nil, err } h.tg.AfterStop(func() { err = h.log.Close() if err != nil { // State of the logger is uncertain, a Println will have to // suffice. fmt.Println("Error when closing the logger:", err) } }) // Add the storage manager to the host, and set up the stop call that will // close the storage manager. h.StorageManager, err = contractmanager.New(filepath.Join(persistDir, "contractmanager")) if err != nil { h.log.Println("Could not open the storage manager:", err) return nil, err } h.tg.AfterStop(func() { err = h.StorageManager.Close() if err != nil { h.log.Println("Could not close storage manager:", err) } }) // Load the prior persistence structures, and configure the host to save // before shutting down. err = h.load() if err != nil { return nil, err } h.tg.AfterStop(func() { err = h.saveSync() if err != nil { h.log.Println("Could not save host upon shutdown:", err) } }) // Initialize the networking. err = h.initNetworking(listenerAddress) if err != nil { h.log.Println("Could not initialize host networking:", err) return nil, err } return h, nil } // New returns an initialized Host. func New(cs modules.ConsensusSet, tpool modules.TransactionPool, wallet modules.Wallet, address string, persistDir string) (*Host, error) { return newHost(productionDependencies{}, cs, tpool, wallet, address, persistDir) } // Close shuts down the host. func (h *Host) Close() error { return h.tg.Stop() } // ExternalSettings returns the host's external settings. These values cannot be // set by the user (host is configured through InternalSettings), and are the // values that get displayed to other hosts on the network. func (h *Host) ExternalSettings() modules.HostExternalSettings { h.mu.RLock() defer h.mu.RUnlock() err := h.tg.Add() if err != nil { build.Critical("Call to ExternalSettings after close") } defer h.tg.Done() return h.externalSettings() } // WorkingStatus returns the working state of the host, where working is // defined as having received more than workingStatusThreshold settings calls // over the period of workingStatusFrequency. 
func (h *Host) WorkingStatus() modules.HostWorkingStatus { h.mu.RLock() defer h.mu.RUnlock() return h.workingStatus } // ConnectabilityStatus returns the connectability state of the host, whether // the host can connect to itself on its configured netaddress. func (h *Host) ConnectabilityStatus() modules.HostConnectabilityStatus { h.mu.RLock() defer h.mu.RUnlock() return h.connectabilityStatus } // FinancialMetrics returns information about the financial commitments, // rewards, and activities of the host. func (h *Host) FinancialMetrics() modules.HostFinancialMetrics { h.mu.RLock() defer h.mu.RUnlock() err := h.tg.Add() if err != nil { build.Critical("Call to FinancialMetrics after close") } defer h.tg.Done() return h.financialMetrics } // PublicKey returns the public key of the host that is used to facilitate // relationships between the host and renter. func (h *Host) PublicKey() types.SiaPublicKey { h.mu.RLock() defer h.mu.RUnlock() return h.publicKey } // SetInternalSettings updates the host's internal HostInternalSettings object. func (h *Host) SetInternalSettings(settings modules.HostInternalSettings) error { h.mu.Lock() defer h.mu.Unlock() err := h.tg.Add() if err != nil { return err } defer h.tg.Done() // The host should not be accepting file contracts if it does not have an // unlock hash. if settings.AcceptingContracts { err := h.checkUnlockHash() if err != nil { return errors.New("internal settings not updated, no unlock hash: " + err.Error()) } } if settings.NetAddress != "" { err := settings.NetAddress.IsValid() if err != nil { return errors.New("internal settings not updated, invalid NetAddress: " + err.Error()) } } // Check if the net address for the host has changed. If it has, and it's // not equal to the auto address, then the host is going to need to make // another blockchain announcement. if h.settings.NetAddress != settings.NetAddress && settings.NetAddress != h.autoAddress { h.announced = false } h.settings = settings h.revisionNumber++ err = h.saveSync() if err != nil { return errors.New("internal settings updated, but failed saving to disk: " + err.Error()) } return nil } // InternalSettings returns the settings of a host. func (h *Host) InternalSettings() modules.HostInternalSettings { h.mu.RLock() defer h.mu.RUnlock() err := h.tg.Add() if err != nil { return modules.HostInternalSettings{} } defer h.tg.Done() return h.settings } Sia-1.3.0/modules/host/host_errors_test.go000066400000000000000000000137001313565667000206300ustar00rootroot00000000000000package host import ( "net" "os" "path/filepath" "strings" "testing" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" ) // dependencyErrMkdirAll is a dependency set that returns an error when MkdirAll // is called. type dependencyErrMkdirAll struct { productionDependencies } func (dependencyErrMkdirAll) mkdirAll(string, os.FileMode) error { return mockErrMkdirAll } // TestHostFailedMkdirAll initializes the host using a call to MkdirAll that // will fail. func TestHostFailedMkdirAll(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() ht, err := blankHostTester("TestHostFailedMkdirAll") if err != nil { t.Fatal(err) } defer ht.Close() err = ht.host.Close() if err != nil { t.Fatal(err) } ht.host, err = newHost(dependencyErrMkdirAll{}, ht.cs, ht.tpool, ht.wallet, "localhost:0", filepath.Join(ht.persistDir, modules.HostDir)) if err != mockErrMkdirAll { t.Fatal(err) } // Set ht.host to something non-nil - nil was returned because startup was // incomplete. 
If ht.host is nil at the end of the function, the ht.Close() // operation will fail. ht.host, err = newHost(productionDependencies{}, ht.cs, ht.tpool, ht.wallet, "localhost:0", filepath.Join(ht.persistDir, modules.HostDir)) if err != nil { t.Fatal(err) } } // dependencyErrNewLogger is a dependency set that returns an error when // NewLogger is called. type dependencyErrNewLogger struct { productionDependencies } func (dependencyErrNewLogger) newLogger(string) (*persist.Logger, error) { return nil, mockErrNewLogger } // TestHostFailedNewLogger initializes the host using a call to NewLogger that // will fail. func TestHostFailedNewLogger(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() ht, err := blankHostTester("TestHostFailedNewLogger") if err != nil { t.Fatal(err) } defer ht.Close() err = ht.host.Close() if err != nil { t.Fatal(err) } ht.host, err = newHost(dependencyErrNewLogger{}, ht.cs, ht.tpool, ht.wallet, "localhost:0", filepath.Join(ht.persistDir, modules.HostDir)) if err != mockErrNewLogger { t.Fatal(err) } // Set ht.host to something non-nil - nil was returned because startup was // incomplete. If ht.host is nil at the end of the function, the ht.Close() // operation will fail. ht.host, err = newHost(productionDependencies{}, ht.cs, ht.tpool, ht.wallet, "localhost:0", filepath.Join(ht.persistDir, modules.HostDir)) if err != nil { t.Fatal(err) } } // dependencyErrOpenDatabase is a dependency that returns an error when // OpenDatabase is called. type dependencyErrOpenDatabase struct { productionDependencies } func (dependencyErrOpenDatabase) openDatabase(persist.Metadata, string) (*persist.BoltDatabase, error) { return nil, mockErrOpenDatabase } // TestHostFailedOpenDatabase initializes the host using a call to OpenDatabase // that has been mocked to fail. func TestHostFailedOpenDatabase(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() ht, err := blankHostTester("TestHostFailedOpenDatabase") if err != nil { t.Fatal(err) } defer ht.Close() err = ht.host.Close() if err != nil { t.Fatal(err) } ht.host, err = newHost(dependencyErrOpenDatabase{}, ht.cs, ht.tpool, ht.wallet, "localhost:0", filepath.Join(ht.persistDir, modules.HostDir)) if !strings.Contains(err.Error(), "simulated OpenDatabase failure") { t.Fatal(err) } // Set ht.host to something non-nil - nil was returned because startup was // incomplete. If ht.host is nil at the end of the function, the ht.Close() // operation will fail. ht.host, err = newHost(productionDependencies{}, ht.cs, ht.tpool, ht.wallet, "localhost:0", filepath.Join(ht.persistDir, modules.HostDir)) if err != nil { t.Fatal(err) } } // dependencyErrLoadFile is a dependency that returns an error when // LoadFile is called. type dependencyErrLoadFile struct { productionDependencies } func (dependencyErrLoadFile) loadFile(persist.Metadata, interface{}, string) error { return mockErrLoadFile } // TestHostFailedLoadFile initializes the host using a call to LoadFile that // has been mocked to fail. func TestHostFailedLoadFile(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() ht, err := blankHostTester("TestHostFailedLoadFile") if err != nil { t.Fatal(err) } defer ht.Close() err = ht.host.Close() if err != nil { t.Fatal(err) } ht.host, err = newHost(dependencyErrLoadFile{}, ht.cs, ht.tpool, ht.wallet, "localhost:0", filepath.Join(ht.persistDir, modules.HostDir)) if err != mockErrLoadFile { t.Fatal(err) } // Set ht.host to something non-nil - nil was returned because startup was // incomplete. 
If ht.host is nil at the end of the function, the ht.Close() // operation will fail. ht.host, err = newHost(productionDependencies{}, ht.cs, ht.tpool, ht.wallet, "localhost:0", filepath.Join(ht.persistDir, modules.HostDir)) if err != nil { t.Fatal(err) } } // dependencyErrListen is a dependency that returns an error when Listen is // called. type dependencyErrListen struct { productionDependencies } func (dependencyErrListen) listen(string, string) (net.Listener, error) { return nil, mockErrListen } // TestHostFailedListen initializes the host using a call to Listen that // has been mocked to fail. func TestHostFailedListen(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() ht, err := blankHostTester("TestHostFailedListen") if err != nil { t.Fatal(err) } defer ht.Close() err = ht.host.Close() if err != nil { t.Fatal(err) } ht.host, err = newHost(dependencyErrListen{}, ht.cs, ht.tpool, ht.wallet, "localhost:0", filepath.Join(ht.persistDir, modules.HostDir)) if err != mockErrListen { t.Fatal(err) } // Set ht.host to something non-nil - nil was returned because startup was // incomplete. If ht.host is nil at the end of the function, the ht.Close() // operation will fail. ht.host, err = newHost(productionDependencies{}, ht.cs, ht.tpool, ht.wallet, "localhost:0", filepath.Join(ht.persistDir, modules.HostDir)) if err != nil { t.Fatal(err) } } Sia-1.3.0/modules/host/host_test.go000066400000000000000000000400401313565667000172310ustar00rootroot00000000000000package host import ( // "errors" "os" "path/filepath" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/consensus" "github.com/NebulousLabs/Sia/modules/gateway" "github.com/NebulousLabs/Sia/modules/miner" // "github.com/NebulousLabs/Sia/modules/renter" "github.com/NebulousLabs/Sia/modules/transactionpool" "github.com/NebulousLabs/Sia/modules/wallet" siasync "github.com/NebulousLabs/Sia/sync" "github.com/NebulousLabs/Sia/types" ) // A hostTester is the helper object for host testing, including helper modules // and methods for controlling synchronization. type hostTester struct { cs modules.ConsensusSet gateway modules.Gateway miner modules.TestMiner // renter modules.Renter renting bool tpool modules.TransactionPool wallet modules.Wallet walletKey crypto.TwofishKey host *Host persistDir string } /* // initRenting prepares the host tester for uploads and downloads by announcing // the host to the network and performing other preparatory tasks. // initRenting takes a while because the renter needs to process the host // announcement, requiring asynchronous network communication between the // renter and host. func (ht *hostTester) initRenting() error { if ht.renting { return nil } // Because the renting test takes a long time, it will fail if // testing.Short. if testing.Short() { return errors.New("cannot call initRenting in short tests") } // Announce the host. err := ht.host.Announce() if err != nil { return err } // Mine a block to get the announcement into the blockchain. _, err = ht.miner.AddBlock() if err != nil { return err } // Wait for the renter to see the host announcement. 
for i := 0; i < 50; i++ { time.Sleep(time.Millisecond * 100) if len(ht.renter.ActiveHosts()) != 0 { break } } if len(ht.renter.ActiveHosts()) == 0 { return errors.New("could not start renting in the host tester") } ht.renting = true return nil } */ // initWallet creates a wallet key, initializes the host wallet, unlocks it, // and then stores the key in the host tester. func (ht *hostTester) initWallet() error { // Create the keys for the wallet and unlock it. key := crypto.GenerateTwofishKey() ht.walletKey = key _, err := ht.wallet.Encrypt(key) if err != nil { return err } err = ht.wallet.Unlock(key) if err != nil { return err } return nil } // blankHostTester creates a host tester where the modules are created but no // extra initialization has been done, for example no blocks have been mined // and the wallet keys have not been created. func blankHostTester(name string) (*hostTester, error) { return blankMockHostTester(productionDependencies{}, name) } // blankMockHostTester creates a host tester where the modules are created but no // extra initialization has been done, for example no blocks have been mined // and the wallet keys have not been created. func blankMockHostTester(d dependencies, name string) (*hostTester, error) { testdir := build.TempDir(modules.HostDir, name) // Create the modules. g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { return nil, err } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { return nil, err } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { return nil, err } w, err := wallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { return nil, err } m, err := miner.New(cs, tp, w, filepath.Join(testdir, modules.MinerDir)) if err != nil { return nil, err } h, err := newHost(d, cs, tp, w, "localhost:0", filepath.Join(testdir, modules.HostDir)) if err != nil { return nil, err } /* r, err := renter.New(cs, w, tp, filepath.Join(testdir, modules.RenterDir)) if err != nil { return nil, err } */ // Assemble all objects into a hostTester ht := &hostTester{ cs: cs, gateway: g, miner: m, // renter: r, tpool: tp, wallet: w, host: h, persistDir: testdir, } return ht, nil } // newHostTester creates a host tester with an initialized wallet and money in // that wallet. func newHostTester(name string) (*hostTester, error) { return newMockHostTester(productionDependencies{}, name) } // newMockHostTester creates a host tester with an initialized wallet and money // in that wallet, using the dependencies provided. func newMockHostTester(d dependencies, name string) (*hostTester, error) { // Create a blank host tester. ht, err := blankMockHostTester(d, name) if err != nil { return nil, err } // Initialize the wallet and mine blocks until the wallet has money. err = ht.initWallet() if err != nil { return nil, err } for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ { _, err = ht.miner.AddBlock() if err != nil { return nil, err } } // Create two storage folders for the host, one the minimum size and one // twice the minimum size.
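// The differing sizes are deliberate: with sector size S
// (modules.SectorSize), the folders created below hold 64*S and 128*S
// bytes respectively, which lets tests exercise the host's handling of
// heterogeneous storage folders.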
storageFolderOne := filepath.Join(ht.persistDir, "hostTesterStorageFolderOne") err = os.Mkdir(storageFolderOne, 0700) if err != nil { return nil, err } err = ht.host.AddStorageFolder(storageFolderOne, modules.SectorSize*64) if err != nil { return nil, err } storageFolderTwo := filepath.Join(ht.persistDir, "hostTesterStorageFolderTwo") err = os.Mkdir(storageFolderTwo, 0700) if err != nil { return nil, err } err = ht.host.AddStorageFolder(storageFolderTwo, modules.SectorSize*64*2) if err != nil { return nil, err } return ht, nil } // Close safely closes the hostTester. It panics if err != nil because there // isn't a good way to errcheck when deferring a close. func (ht *hostTester) Close() error { errs := []error{ ht.host.Close(), ht.miner.Close(), ht.tpool.Close(), ht.cs.Close(), ht.gateway.Close(), } if err := build.JoinErrors(errs, "; "); err != nil { panic(err) } return nil } // TestHostInitialization checks that the host initializes to sensible default // values. func TestHostInitialization(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // Create a blank host tester and check that the height is zero. bht, err := blankHostTester("TestHostInitialization") if err != nil { t.Fatal(err) } defer bht.Close() if bht.host.blockHeight != 0 { t.Error("host initialized to the wrong block height") } // Initialize the wallet so that a block can be mined, then mine a block // and check that it sets the host height to 1. err = bht.initWallet() if err != nil { t.Fatal(err) } _, err = bht.miner.AddBlock() if err != nil { t.Fatal(err) } if bht.host.blockHeight != 1 { t.Fatal("block height did not increase correctly after first block mined:", bht.host.blockHeight, 1) } } // TestHostMultiClose checks that the host returns an error if Close is called // multiple times on the host. func TestHostMultiClose(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() ht, err := newHostTester("TestHostMultiClose") if err != nil { t.Fatal(err) } defer ht.Close() err = ht.host.Close() if err != nil { t.Fatal(err) } err = ht.host.Close() if err != siasync.ErrStopped { t.Fatal(err) } err = ht.host.Close() if err != siasync.ErrStopped { t.Fatal(err) } // Set ht.host to something non-nil - nil was returned because startup was // incomplete. If ht.host is nil at the end of the function, the ht.Close() // operation will fail. ht.host, err = newHost(productionDependencies{}, ht.cs, ht.tpool, ht.wallet, "localhost:0", filepath.Join(ht.persistDir, modules.HostDir)) if err != nil { t.Fatal(err) } } // TestNilValues tries initializing the host with nil values. func TestNilValues(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() ht, err := blankHostTester("TestNilValues") if err != nil { t.Fatal(err) } defer ht.Close() hostDir := filepath.Join(ht.persistDir, modules.HostDir) _, err = New(nil, ht.tpool, ht.wallet, "localhost:0", hostDir) if err != errNilCS { t.Fatal("could not trigger errNilCS") } _, err = New(ht.cs, nil, ht.wallet, "localhost:0", hostDir) if err != errNilTpool { t.Fatal("could not trigger errNilTpool") } _, err = New(ht.cs, ht.tpool, nil, "localhost:0", hostDir) if err != errNilWallet { t.Fatal("could not trigger errNilWallet") } } // TestSetAndGetInternalSettings checks that the functions for interacting with // the host's internal settings object are working as expected.
func TestSetAndGetInternalSettings(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() ht, err := newHostTester("TestSetAndGetInternalSettings") if err != nil { t.Fatal(err) } defer ht.Close() // Check the default settings get returned at first call. settings := ht.host.InternalSettings() if settings.AcceptingContracts != false { t.Error("settings retrieval did not return default value") } if settings.MaxDuration != defaultMaxDuration { t.Error("settings retrieval did not return default value") } if settings.MaxDownloadBatchSize != uint64(defaultMaxDownloadBatchSize) { t.Error("settings retrieval did not return default value") } if settings.MaxReviseBatchSize != uint64(defaultMaxReviseBatchSize) { t.Error("settings retrieval did not return default value") } if settings.NetAddress != "" { t.Error("settings retrieval did not return default value") } if settings.WindowSize != defaultWindowSize { t.Error("settings retrieval did not return default value") } if !settings.Collateral.Equals(defaultCollateral) { t.Error("settings retrieval did not return default value") } if !settings.CollateralBudget.Equals(defaultCollateralBudget) { t.Error("settings retrieval did not return default value") } if !settings.MaxCollateral.Equals(defaultMaxCollateral) { t.Error("settings retrieval did not return default value") } if !settings.MinContractPrice.Equals(defaultContractPrice) { t.Error("settings retrieval did not return default value") } if !settings.MinDownloadBandwidthPrice.Equals(defaultDownloadBandwidthPrice) { t.Error("settings retrieval did not return default value") } if !settings.MinStoragePrice.Equals(defaultStoragePrice) { t.Error("settings retrieval did not return default value") } if !settings.MinUploadBandwidthPrice.Equals(defaultUploadBandwidthPrice) { t.Error("settings retrieval did not return default value") } // Check that calling SetInternalSettings with valid settings updates the settings. settings.AcceptingContracts = true settings.NetAddress = "foo.com:123" err = ht.host.SetInternalSettings(settings) if err != nil { t.Fatal(err) } settings = ht.host.InternalSettings() if settings.AcceptingContracts != true { t.Fatal("SetInternalSettings failed to update settings") } if settings.NetAddress != "foo.com:123" { t.Fatal("SetInternalSettings failed to update settings") } // Check that calling SetInternalSettings with invalid settings does not update the settings. settings.NetAddress = "invalid" err = ht.host.SetInternalSettings(settings) if err == nil { t.Fatal("expected SetInternalSettings to error with invalid settings") } settings = ht.host.InternalSettings() if settings.NetAddress != "foo.com:123" { t.Fatal("SetInternalSettings should not modify the settings if the new settings are invalid") } // Reload the host and verify that the altered settings persisted. err = ht.host.Close() if err != nil { t.Fatal(err) } rebootHost, err := New(ht.cs, ht.tpool, ht.wallet, "localhost:0", filepath.Join(ht.persistDir, modules.HostDir)) if err != nil { t.Fatal(err) } rebootSettings := rebootHost.InternalSettings() if rebootSettings.AcceptingContracts != settings.AcceptingContracts { t.Error("settings retrieval did not return updated value") } if rebootSettings.NetAddress != settings.NetAddress { t.Error("settings retrieval did not return updated value") } // Set ht.host to 'rebootHost' so that the 'ht.Close()' method will close // everything cleanly. 
ht.host = rebootHost } /* // TestSetAndGetSettings checks that the functions for interacting with the // hosts settings object are working as expected. func TestSetAndGetSettings(t *testing.T) { if testing.Short() { t.SkipNow() } ht, err := newHostTester("TestSetAndGetSettings") if err != nil { t.Fatal(err) } defer ht.Close() // Check the default settings get returned at first call. settings := ht.host.Settings() if settings.MaxDuration != defaultMaxDuration { t.Error("settings retrieval did not return default value") } if settings.WindowSize != defaultWindowSize { t.Error("settings retrieval did not return default value") } if settings.Price.Cmp(defaultPrice) != 0 { t.Error("settings retrieval did not return default value") } if settings.Collateral.Cmp(defaultCollateral) != 0 { t.Error("settings retrieval did not return default value") } // Submit updated settings and check that the changes stuck. settings.TotalStorage += 15 settings.MaxDuration += 16 settings.WindowSize += 17 settings.Price = settings.Price.Add(types.NewCurrency64(18)) settings.Collateral = settings.Collateral.Add(types.NewCurrency64(19)) err = ht.host.SetSettings(settings) if err != nil { t.Fatal(err) } newSettings := ht.host.Settings() if settings.MaxDuration != newSettings.MaxDuration { t.Error("settings retrieval did not return updated value") } if settings.WindowSize != newSettings.WindowSize { t.Error("settings retrieval did not return updated value") } if settings.Price.Cmp(newSettings.Price) != 0 { t.Error("settings retrieval did not return updated value") } if settings.Collateral.Cmp(newSettings.Collateral) != 0 { t.Error("settings retrieval did not return updated value") } // Reload the host and verify that the altered settings persisted. err = ht.host.Close() if err != nil { t.Fatal(err) } rebootHost, err := New(ht.cs, ht.tpool, ht.wallet, "localhost:0", filepath.Join(ht.persistDir, modules.HostDir)) if err != nil { t.Fatal(err) } rebootSettings := rebootHost.Settings() if settings.TotalStorage != rebootSettings.TotalStorage { t.Error("settings retrieval did not return updated value") } if settings.MaxDuration != rebootSettings.MaxDuration { t.Error("settings retrieval did not return updated value") } if settings.WindowSize != rebootSettings.WindowSize { t.Error("settings retrieval did not return updated value") } if settings.Price.Cmp(rebootSettings.Price) != 0 { t.Error("settings retrieval did not return updated value") } if settings.Collateral.Cmp(rebootSettings.Collateral) != 0 { t.Error("settings retrieval did not return updated value") } } // TestPersistentSettings checks that settings persist between instances of the // host. func TestPersistentSettings(t *testing.T) { if testing.Short() { t.SkipNow() } ht, err := newHostTester("TestSetPersistentSettings") if err != nil { t.Fatal(err) } defer ht.Close() // Submit updated settings. settings := ht.host.Settings() settings.TotalStorage += 25 settings.MaxDuration += 36 settings.WindowSize += 47 settings.Price = settings.Price.Add(types.NewCurrency64(38)) settings.Collateral = settings.Collateral.Add(types.NewCurrency64(99)) err = ht.host.SetSettings(settings) if err != nil { t.Fatal(err) } // Reboot the host and verify that the new settings stuck. 
err = ht.host.Close() // host saves upon closing if err != nil { t.Fatal(err) } h, err := New(ht.cs, ht.tpool, ht.wallet, "localhost:0", filepath.Join(ht.persistDir, modules.HostDir)) if err != nil { t.Fatal(err) } newSettings := h.Settings() if settings.TotalStorage != newSettings.TotalStorage { t.Error("settings retrieval did not return updated value:", settings.TotalStorage, "vs", newSettings.TotalStorage) } if settings.MaxDuration != newSettings.MaxDuration { t.Error("settings retrieval did not return updated value") } if settings.WindowSize != newSettings.WindowSize { t.Error("settings retrieval did not return updated value") } if settings.Price.Cmp(newSettings.Price) != 0 { t.Error("settings retrieval did not return updated value") } if settings.Collateral.Cmp(newSettings.Collateral) != 0 { t.Error("settings retrieval did not return updated value") } } */ Sia-1.3.0/modules/host/negotiate.go000066400000000000000000000300401313565667000171730ustar00rootroot00000000000000package host import ( "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) var ( // errBadContractOutputCounts is returned if the presented file contract // revision has the wrong number of outputs for either the valid or the // missed proof outputs. errBadContractOutputCounts = ErrorCommunication("rejected for having an unexpected number of outputs") // errBadContractParent is returned when a file contract revision is // presented which has a parent id that doesn't match the file contract // which is supposed to be getting revised. errBadContractParent = ErrorCommunication("could not find contract's parent") // errBadFileMerkleRoot is returned if the renter incorrectly updates the // file merkle root during a file contract revision. errBadFileMerkleRoot = ErrorCommunication("rejected for bad file merkle root") // errBadFileSize is returned if the renter incorrectly changes the file // size during a file contract revision. errBadFileSize = ErrorCommunication("rejected for bad file size") // errBadModificationIndex is returned if the renter requests a change on a // sector root that is not in the file contract. errBadModificationIndex = ErrorCommunication("renter has made a modification that points to a nonexistent sector") // errBadParentID is returned if the renter provides the wrong parent id // during a file contract revision. errBadParentID = ErrorCommunication("rejected for bad parent id") // errBadPayoutUnlockHashes is returned if the renter incorrectly sets the // payout unlock hashes during contract formation. errBadPayoutUnlockHashes = ErrorCommunication("rejected for bad unlock hashes in the payout") // errBadRevisionNumber is returned if the renter does not increase the // revision number during a file contract revision. errBadRevisionNumber = ErrorCommunication("rejected for bad revision number") // errBadSectorSize is returned if the renter provides a sector to be // inserted that is the wrong size. errBadSectorSize = ErrorCommunication("renter has provided an incorrectly sized sector") // errBadUnlockConditions is returned if the renter does not provide the // right unlock conditions in the payment revision. errBadUnlockConditions = ErrorCommunication("rejected for bad unlock conditions") // errBadUnlockHash is returned if the renter incorrectly updates the // unlock hash during a file contract revision.
errBadUnlockHash = ErrorCommunication("rejected for bad new unlock hash") // errBadWindowEnd is returned if the renter incorrectly changes the window // end during a file contract revision. errBadWindowEnd = ErrorCommunication("rejected for bad new window end") // errBadWindowStart is returned if the renter incorrectly updates the // window start during a file contract revision. errBadWindowStart = ErrorCommunication("rejected for bad new window start") // errEarlyWindow is returned if the file contract provided by the renter // has a storage proof window that is starting too near in the future. errEarlyWindow = ErrorCommunication("rejected for a window that starts too soon") // errEmptyObject is returned if the renter sends an empty or nil object // unexpectedly. errEmptyObject = ErrorCommunication("renter has unexpectedly sent an empty/nil object") // errHighRenterMissedOutput is returned if the renter deducts an // insufficient amount from the renter missed outputs during a file // contract revision. errHighRenterMissedOutput = ErrorCommunication("rejected for high paying renter missed output") // errHighRenterValidOutput is returned if the renter deducts an // insufficient amount from the renter valid outputs during a file contract // revision. errHighRenterValidOutput = ErrorCommunication("rejected for high paying renter valid output") // errIllegalOffsetAndLength is returned if the renter tries to perform a // modify operation that uses a troublesome combination of offset and // length. errIllegalOffsetAndLength = ErrorCommunication("renter is trying to do a modify with an illegal offset and length") // errLargeSector is returned if the renter sends a RevisionAction that has // data which creates a sector that is larger than what the host uses. errLargeSector = ErrorCommunication("renter has sent a sector that exceeds the host's sector size") // errLateRevision is returned if the renter is attempting to revise a // revision after the revision deadline. The host needs time to submit the // final revision to the blockchain to guarantee payment, and therefore // will not accept revisions once the window start is too close. errLateRevision = ErrorCommunication("renter is requesting revision after the revision deadline") // errLongDuration is returned if the renter proposes a file contract with // an expiration that is too far into the future according to the host's // settings. errLongDuration = ErrorCommunication("renter proposed a file contract with a too-long duration") // errLowTransactionFees is returned if the renter provides a transaction // that the host does not feel is able to make it onto the blockchain. errLowTransactionFees = ErrorCommunication("rejected for including too few transaction fees") // errLowHostMissedOutput is returned if the renter incorrectly updates the // host missed proof output during a file contract revision. errLowHostMissedOutput = ErrorCommunication("rejected for low paying host missed output") // errLowHostValidOutput is returned if the renter incorrectly updates the // host valid proof output during a file contract revision. errLowHostValidOutput = ErrorCommunication("rejected for low paying host valid output") // errLowVoidOutput is returned if the renter has not allocated enough // funds to the void output.
errLowVoidOutput = ErrorCommunication("rejected for low value void output") // errMismatchedHostPayouts is returned if the renter incorrectly sets the // host valid and missed payouts to different values during contract // formation. errMismatchedHostPayouts = ErrorCommunication("rejected because host valid and missed payouts are not the same value") // errSmallWindow is returned if the renter suggests a storage proof window // that is too small. errSmallWindow = ErrorCommunication("rejected for small window size") // errUnknownModification is returned if the host receives a modification // action from the renter that it does not understand. errUnknownModification = ErrorCommunication("renter is attempting an action that the host does not understand") ) // createRevisionSignature creates the host's signature for a file contract // revision. The renter should have already provided the signature. // createRevisionSignature will check to make sure that the renter's // signature is valid. func createRevisionSignature(fcr types.FileContractRevision, renterSig types.TransactionSignature, secretKey crypto.SecretKey, blockHeight types.BlockHeight) (types.Transaction, error) { hostSig := types.TransactionSignature{ ParentID: crypto.Hash(fcr.ParentID), PublicKeyIndex: 1, CoveredFields: types.CoveredFields{ FileContractRevisions: []uint64{0}, }, } txn := types.Transaction{ FileContractRevisions: []types.FileContractRevision{fcr}, TransactionSignatures: []types.TransactionSignature{renterSig, hostSig}, } sigHash := txn.SigHash(1) encodedSig := crypto.SignHash(sigHash, secretKey) txn.TransactionSignatures[1].Signature = encodedSig[:] err := modules.VerifyFileContractRevisionTransactionSignatures(fcr, txn.TransactionSignatures, blockHeight) if err != nil { return types.Transaction{}, err } return txn, nil } // managedFinalizeContract will take a file contract, add the host's // collateral, and then try submitting the file contract to the transaction // pool. If there is no error, the completed transaction set will be returned // to the caller. func (h *Host) managedFinalizeContract(builder modules.TransactionBuilder, renterPK crypto.PublicKey, renterSignatures []types.TransactionSignature, renterRevisionSignature types.TransactionSignature, initialSectorRoots []crypto.Hash, hostCollateral, hostInitialRevenue, hostInitialRisk types.Currency) ([]types.TransactionSignature, types.TransactionSignature, types.FileContractID, error) { for _, sig := range renterSignatures { builder.AddTransactionSignature(sig) } fullTxnSet, err := builder.Sign(true) if err != nil { builder.Drop() return nil, types.TransactionSignature{}, types.FileContractID{}, err } // Verify that the signature for the revision from the renter is correct.
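// For reference, the renter-side revision signature being verified here is
// assumed to be produced over the same transaction layout that
// createRevisionSignature uses, with the renter at public key index 0. A
// minimal sketch (renterSecretKey is hypothetical, not a name from this
// codebase):
//
//	renterSig := types.TransactionSignature{
//		ParentID:       crypto.Hash(fcr.ParentID),
//		PublicKeyIndex: 0,
//		CoveredFields:  types.CoveredFields{FileContractRevisions: []uint64{0}},
//	}
//	txn := types.Transaction{
//		FileContractRevisions: []types.FileContractRevision{fcr},
//		TransactionSignatures: []types.TransactionSignature{renterSig},
//	}
//	encodedSig := crypto.SignHash(txn.SigHash(0), renterSecretKey)
//	txn.TransactionSignatures[0].Signature = encodedSig[:]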
h.mu.RLock() blockHeight := h.blockHeight hostSPK := h.publicKey hostSK := h.secretKey h.mu.RUnlock() contractTxn := fullTxnSet[len(fullTxnSet)-1] fc := contractTxn.FileContracts[0] noOpRevision := types.FileContractRevision{ ParentID: contractTxn.FileContractID(0), UnlockConditions: types.UnlockConditions{ PublicKeys: []types.SiaPublicKey{ types.Ed25519PublicKey(renterPK), hostSPK, }, SignaturesRequired: 2, }, NewRevisionNumber: fc.RevisionNumber + 1, NewFileSize: fc.FileSize, NewFileMerkleRoot: fc.FileMerkleRoot, NewWindowStart: fc.WindowStart, NewWindowEnd: fc.WindowEnd, NewValidProofOutputs: fc.ValidProofOutputs, NewMissedProofOutputs: fc.MissedProofOutputs, NewUnlockHash: fc.UnlockHash, } // createRevisionSignature will also perform validation on the result, // returning an error if the renter provided an incorrect signature. revisionTransaction, err := createRevisionSignature(noOpRevision, renterRevisionSignature, hostSK, blockHeight) if err != nil { return nil, types.TransactionSignature{}, types.FileContractID{}, err } // Create and add the storage obligation for this file contract. fullTxn, _ := builder.View() so := storageObligation{ SectorRoots: initialSectorRoots, ContractCost: h.settings.MinContractPrice, LockedCollateral: hostCollateral, PotentialStorageRevenue: hostInitialRevenue, RiskedCollateral: hostInitialRisk, OriginTransactionSet: fullTxnSet, RevisionTransactionSet: []types.Transaction{revisionTransaction}, } // Get a lock on the storage obligation. lockErr := h.managedTryLockStorageObligation(so.id()) if lockErr != nil { build.Critical("failed to get a lock on a brand new storage obligation") return nil, types.TransactionSignature{}, types.FileContractID{}, lockErr } defer func() { if err != nil { h.managedUnlockStorageObligation(so.id()) } }() // managedAddStorageObligation will submit the transaction to the // transaction pool, and will only do so if there was not some error in // creating the storage obligation. If the transaction pool returns a // consensus conflict, wait 15 seconds and try again. err = func() error { // Try adding the storage obligation. If there's an error, wait a few // seconds and try again. Eventually time out. It should be noted that // the storage obligation locking is both crappy and incomplete, and // that I'm not sure how this timeout plays with the overall host // timeouts. // // The storage obligation locks should occur at the highest level, not // just when the actual modification is happening. i := 0 for { err = h.managedAddStorageObligation(so) if err == nil { return nil } if err != nil && i > 4 { h.log.Println(err) builder.Drop() return err } i++ if build.Release == "standard" { time.Sleep(time.Second * 15) } } }() if err != nil { return nil, types.TransactionSignature{}, types.FileContractID{}, err } // Get the host's transaction signatures from the builder.
var hostTxnSignatures []types.TransactionSignature _, _, _, txnSigIndices := builder.ViewAdded() for _, sigIndex := range txnSigIndices { hostTxnSignatures = append(hostTxnSignatures, fullTxn.TransactionSignatures[sigIndex]) } return hostTxnSignatures, revisionTransaction.TransactionSignatures[1], so.id(), nil } Sia-1.3.0/modules/host/negotiatedownload.go000066400000000000000000000254761313565667000207440ustar00rootroot00000000000000package host import ( "fmt" "net" "time" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) var ( // errLargeDownloadBatch is returned if the renter requests a download // batch that exceeds the maximum batch size that the host will // accommodate. errLargeDownloadBatch = ErrorCommunication("download request exceeded maximum batch size") // errRequestOutOfBounds is returned when a download request is made which // asks for elements of a sector which do not exist. errRequestOutOfBounds = ErrorCommunication("download request has invalid sector bounds") ) // managedDownloadIteration is responsible for managing a single iteration of // the download loop for RPCDownload. func (h *Host) managedDownloadIteration(conn net.Conn, so *storageObligation) error { // Exchange settings with the renter. err := h.managedRPCSettings(conn) if err != nil { return extendErr("RPCSettings failed: ", err) } // Extend the deadline for the download. conn.SetDeadline(time.Now().Add(modules.NegotiateDownloadTime)) // The renter will either accept or reject the host's settings. err = modules.ReadNegotiationAcceptance(conn) if err == modules.ErrStopResponse { return err // managedRPCDownload will catch this and exit gracefully } else if err != nil { return extendErr("renter rejected host settings: ", ErrorCommunication(err.Error())) } // Grab a set of variables that will be useful later in the function. h.mu.RLock() blockHeight := h.blockHeight secretKey := h.secretKey settings := h.settings h.mu.RUnlock() // Read the download requests, followed by the file contract revision that // pays for them. var requests []modules.DownloadAction var paymentRevision types.FileContractRevision err = encoding.ReadObject(conn, &requests, modules.NegotiateMaxDownloadActionRequestSize) if err != nil { return extendErr("failed to read download requests:", ErrorConnection(err.Error())) } err = encoding.ReadObject(conn, &paymentRevision, modules.NegotiateMaxFileContractRevisionSize) if err != nil { return extendErr("failed to read payment revision:", ErrorConnection(err.Error())) } // Verify that the request is acceptable, and then fetch all of the data // for the renter. existingRevision := so.RevisionTransactionSet[len(so.RevisionTransactionSet)-1].FileContractRevisions[0] var payload [][]byte err = func() error { // Check that the length of each file is in-bounds, and that the total // size being requested is acceptable. var totalSize uint64 for _, request := range requests { if request.Length > modules.SectorSize || request.Offset+request.Length > modules.SectorSize { return extendErr("download iteration request failed: ", errRequestOutOfBounds) } totalSize += request.Length } if totalSize > settings.MaxDownloadBatchSize { return extendErr("download iteration batch failed: ", errLargeDownloadBatch) } // Verify that the correct amount of money has been moved from the // renter's contract funds to the host's contract funds. 
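// For intuition, with illustrative numbers (not defaults): if
// MinDownloadBandwidthPrice is 25 hastings per byte and the batch requests
// totalSize = 4096 bytes, then expectedTransfer = 25 * 4096 = 102400
// hastings, and verifyPaymentRevision requires at least that much to have
// moved from the renter's valid proof output to the host's.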
expectedTransfer := settings.MinDownloadBandwidthPrice.Mul64(totalSize) err = verifyPaymentRevision(existingRevision, paymentRevision, blockHeight, expectedTransfer) if err != nil { return extendErr("payment verification failed: ", err) } // Load the sectors and build the data payload. for _, request := range requests { sectorData, err := h.ReadSector(request.MerkleRoot) if err != nil { return extendErr("failed to load sector: ", ErrorInternal(err.Error())) } payload = append(payload, sectorData[request.Offset:request.Offset+request.Length]) } return nil }() if err != nil { modules.WriteNegotiationRejection(conn, err) // Error not reported to preserve type in extendErr return extendErr("download request rejected: ", err) } // Revision is acceptable, write acceptance. err = modules.WriteNegotiationAcceptance(conn) if err != nil { return extendErr("failed to write acceptance for renter revision: ", ErrorConnection(err.Error())) } // Renter will send a transaction signature for the file contract revision. var renterSignature types.TransactionSignature err = encoding.ReadObject(conn, &renterSignature, modules.NegotiateMaxTransactionSignatureSize) if err != nil { return extendErr("failed to read renter signature: ", ErrorConnection(err.Error())) } txn, err := createRevisionSignature(paymentRevision, renterSignature, secretKey, blockHeight) // Update the storage obligation. paymentTransfer := existingRevision.NewValidProofOutputs[0].Value.Sub(paymentRevision.NewValidProofOutputs[0].Value) so.PotentialDownloadRevenue = so.PotentialDownloadRevenue.Add(paymentTransfer) so.RevisionTransactionSet = []types.Transaction{{ FileContractRevisions: []types.FileContractRevision{paymentRevision}, TransactionSignatures: []types.TransactionSignature{renterSignature, txn.TransactionSignatures[1]}, }} err = h.modifyStorageObligation(*so, nil, nil, nil) if err != nil { return extendErr("failed to modify storage obligation: ", ErrorInternal(modules.WriteNegotiationRejection(conn, err).Error())) } // Write acceptance to the renter - the data request can be fulfilled by // the host, the payment is satisfactory, signature is correct. Then send // the host signature and all of the data. err = modules.WriteNegotiationAcceptance(conn) if err != nil { return extendErr("failed to write acceptance following obligation modification: ", ErrorConnection(err.Error())) } err = encoding.WriteObject(conn, txn.TransactionSignatures[1]) if err != nil { return extendErr("failed to write signature: ", ErrorConnection(err.Error())) } err = encoding.WriteObject(conn, payload) if err != nil { return extendErr("failed to write payload: ", ErrorConnection(err.Error())) } return nil } // verifyPaymentRevision verifies that the revision being provided to pay for // the data has transferred the expected amount of money from the renter to the // host. func verifyPaymentRevision(existingRevision, paymentRevision types.FileContractRevision, blockHeight types.BlockHeight, expectedTransfer types.Currency) error { // Check that the revision is well-formed. if len(paymentRevision.NewValidProofOutputs) != 2 || len(paymentRevision.NewMissedProofOutputs) != 3 { return errBadContractOutputCounts } // Check that the time to finalize and submit the file contract revision // has not already passed. if existingRevision.NewWindowStart-revisionSubmissionBuffer <= blockHeight { return errLateRevision } // Determine the amount that was transferred from the renter. 
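// Bookkeeping reminder for the checks below: valid/missed proof output
// index 0 is the renter's payout, index 1 is the host's payout, and missed
// output index 2 is the void. The transfer is accounted as:
//
//	fromRenter = existingRevision.NewValidProofOutputs[0].Value - paymentRevision.NewValidProofOutputs[0].Value
//	toHost     = paymentRevision.NewValidProofOutputs[1].Value - existingRevision.NewValidProofOutputs[1].Value
//
// and the revision is rejected unless fromRenter >= expectedTransfer and
// toHost == fromRenter.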
if paymentRevision.NewValidProofOutputs[0].Value.Cmp(existingRevision.NewValidProofOutputs[0].Value) > 0 { return extendErr("renter increased its valid proof output: ", errHighRenterValidOutput) } fromRenter := existingRevision.NewValidProofOutputs[0].Value.Sub(paymentRevision.NewValidProofOutputs[0].Value) // Verify that enough money was transferred. if fromRenter.Cmp(expectedTransfer) < 0 { s := fmt.Sprintf("expected at least %v to be exchanged, but %v was exchanged: ", expectedTransfer, fromRenter) return extendErr(s, errHighRenterValidOutput) } // Determine the amount of money that was transferred to the host. if existingRevision.NewValidProofOutputs[1].Value.Cmp(paymentRevision.NewValidProofOutputs[1].Value) > 0 { return extendErr("host valid proof output was decreased: ", errLowHostValidOutput) } toHost := paymentRevision.NewValidProofOutputs[1].Value.Sub(existingRevision.NewValidProofOutputs[1].Value) // Verify that enough money was transferred. if !toHost.Equals(fromRenter) { s := fmt.Sprintf("expected exactly %v to be transferred to the host, but %v was transferred: ", fromRenter, toHost) return extendErr(s, errLowHostValidOutput) } // If the renter's valid proof output is larger than the renter's missed // proof output, the renter has incentive to see the host fail. Make sure // that this incentive is not present. if paymentRevision.NewValidProofOutputs[0].Value.Cmp(paymentRevision.NewMissedProofOutputs[0].Value) > 0 { return extendErr("renter has incentive to see host fail: ", errHighRenterMissedOutput) } // Check that the host is not going to be posting collateral. if paymentRevision.NewMissedProofOutputs[1].Value.Cmp(existingRevision.NewMissedProofOutputs[1].Value) < 0 { collateral := existingRevision.NewMissedProofOutputs[1].Value.Sub(paymentRevision.NewMissedProofOutputs[1].Value) s := fmt.Sprintf("host not expecting to post any collateral, but contract has host posting %v collateral: ", collateral) return extendErr(s, errLowHostMissedOutput) } // Check that the revision count has increased. if paymentRevision.NewRevisionNumber <= existingRevision.NewRevisionNumber { return errBadRevisionNumber } // Check that all of the non-volatile fields are the same. if paymentRevision.ParentID != existingRevision.ParentID { return errBadParentID } if paymentRevision.UnlockConditions.UnlockHash() != existingRevision.UnlockConditions.UnlockHash() { return errBadUnlockConditions } if paymentRevision.NewFileSize != existingRevision.NewFileSize { return errBadFileSize } if paymentRevision.NewFileMerkleRoot != existingRevision.NewFileMerkleRoot { return errBadFileMerkleRoot } if paymentRevision.NewWindowStart != existingRevision.NewWindowStart { return errBadWindowStart } if paymentRevision.NewWindowEnd != existingRevision.NewWindowEnd { return errBadWindowEnd } if paymentRevision.NewUnlockHash != existingRevision.NewUnlockHash { return errBadUnlockHash } if !paymentRevision.NewMissedProofOutputs[1].Value.Equals(existingRevision.NewMissedProofOutputs[1].Value) { return errLowHostMissedOutput } return nil } // managedRPCDownload is responsible for handling an RPC request from the // renter to download data. func (h *Host) managedRPCDownload(conn net.Conn) error { // Get the start time to limit the length of the whole connection. startTime := time.Now() // Perform the file contract revision exchange, giving the renter the most // recent file contract revision and getting the storage obligation that // will be used to pay for the data. 
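// The per-iteration wire sequence, as implemented by
// managedDownloadIteration above, is: host settings -> renter
// accept/reject -> download requests + payment revision -> host
// accept/reject -> renter signature -> host acceptance, host signature,
// and payload. The loop below repeats this until iteratedConnectionTime
// elapses or the renter sends a stop response.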
_, so, err := h.managedRPCRecentRevision(conn) if err != nil { return extendErr("failed RPCRecentRevision during RPCDownload: ", err) } // The storage obligation is returned with a lock on it. Defer a call to // unlock the storage obligation. defer func() { h.managedUnlockStorageObligation(so.id()) }() // Perform a loop that will allow downloads to happen until the maximum // time for a single connection has been reached. for time.Now().Before(startTime.Add(iteratedConnectionTime)) { err := h.managedDownloadIteration(conn, &so) if err == modules.ErrStopResponse { // The renter has indicated that it has finished downloading the // data, therefore there is no error. Return nil. return nil } else if err != nil { return extendErr("download iteration failed: ", err) } } return nil } Sia-1.3.0/modules/host/negotiateformcontract.go000066400000000000000000000315411313565667000216240ustar00rootroot00000000000000package host import ( "net" "time" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) var ( // errCollateralBudgetExceeded is returned if the host does not have enough // room in the collateral budget to accept a particular file contract. errCollateralBudgetExceeded = ErrorInternal("host has reached its collateral budget and cannot accept the file contract") // errMaxCollateralReached is returned if a file contract is provided which // would require the host to supply more collateral than the host allows // per file contract. errMaxCollateralReached = ErrorInternal("file contract proposal expects the host to pay more than the maximum allowed collateral") ) // contractCollateral returns the amount of collateral that the host is // expected to add to the file contract based on the payout of the file // contract and based on the host settings. func contractCollateral(settings modules.HostInternalSettings, fc types.FileContract) types.Currency { return fc.ValidProofOutputs[1].Value.Sub(settings.MinContractPrice) } // managedAddCollateral adds the host's collateral to the file contract // transaction set, returning the new inputs and outputs that get added to the // transaction, as well as any new parents that get added to the transaction // set. The builder that is used to add the collateral is also returned, // because the new transaction has not yet been signed. func (h *Host) managedAddCollateral(settings modules.HostInternalSettings, txnSet []types.Transaction) (builder modules.TransactionBuilder, newParents []types.Transaction, newInputs []types.SiacoinInput, newOutputs []types.SiacoinOutput, err error) { txn := txnSet[len(txnSet)-1] parents := txnSet[:len(txnSet)-1] fc := txn.FileContracts[0] hostPortion := contractCollateral(settings, fc) builder = h.wallet.RegisterTransaction(txn, parents) err = builder.FundSiacoins(hostPortion) if err != nil { builder.Drop() return nil, nil, nil, nil, extendErr("could not add collateral: ", ErrorInternal(err.Error())) } // Return which inputs and outputs have been added by the collateral call. 
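// ViewAdded reports the indices of everything this builder has added, not
// the values themselves; the loops below resolve those indices against a
// fresh View of the in-progress transaction to materialize the parents,
// inputs, and outputs that must be relayed to the renter.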
newParentIndices, newInputIndices, newOutputIndices, _ := builder.ViewAdded() updatedTxn, updatedParents := builder.View() for _, parentIndex := range newParentIndices { newParents = append(newParents, updatedParents[parentIndex]) } for _, inputIndex := range newInputIndices { newInputs = append(newInputs, updatedTxn.SiacoinInputs[inputIndex]) } for _, outputIndex := range newOutputIndices { newOutputs = append(newOutputs, updatedTxn.SiacoinOutputs[outputIndex]) } return builder, newParents, newInputs, newOutputs, nil } // managedRPCFormContract accepts a file contract from a renter, checks the // file contract for compliance with the host settings, and then commits to the // file contract, creating a storage obligation and submitting the contract to // the blockchain. func (h *Host) managedRPCFormContract(conn net.Conn) error { // Send the host settings to the renter. err := h.managedRPCSettings(conn) if err != nil { return extendErr("failed RPCSettings: ", err) } // If the host is not accepting contracts, the connection can be closed. // The renter has been given enough information in the host settings to // understand that the connection is going to be closed. h.mu.RLock() settings := h.settings h.mu.RUnlock() if !settings.AcceptingContracts { h.log.Debugln("Turning down contract because the host is not accepting contracts.") return nil } // Extend the deadline to meet the rest of file contract negotiation. conn.SetDeadline(time.Now().Add(modules.NegotiateFileContractTime)) // The renter will either accept or reject the host's settings. err = modules.ReadNegotiationAcceptance(conn) if err != nil { return extendErr("renter did not accept settings: ", ErrorCommunication(err.Error())) } // If the renter sends an acceptance of the settings, it will be followed // by an unsigned transaction containing funding from the renter and a file // contract which matches what the final file contract should look like. // After the file contract, the renter will send a public key which is the // renter's public key in the unlock conditions that protect the file // contract from revision. var txnSet []types.Transaction var renterPK crypto.PublicKey err = encoding.ReadObject(conn, &txnSet, modules.NegotiateMaxFileContractSetLen) if err != nil { return extendErr("could not read renter transaction set: ", ErrorConnection(err.Error())) } err = encoding.ReadObject(conn, &renterPK, modules.NegotiateMaxSiaPubkeySize) if err != nil { return extendErr("could not read renter public key: ", ErrorConnection(err.Error())) } // The host verifies that the file contract coming over the wire is // acceptable. err = h.managedVerifyNewContract(txnSet, renterPK) if err != nil { // The incoming file contract is not acceptable to the host, indicate // why to the renter. modules.WriteNegotiationRejection(conn, err) // Error ignored to preserve type in extendErr return extendErr("contract verification failed: ", err) } // The host adds collateral to the transaction. txnBuilder, newParents, newInputs, newOutputs, err := h.managedAddCollateral(settings, txnSet) if err != nil { modules.WriteNegotiationRejection(conn, err) // Error ignored to preserve type in extendErr return extendErr("failed to add collateral: ", err) } // The host indicates acceptance, and then sends any new parent // transactions, inputs and outputs that were added to the transaction. 
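// The renter side is assumed to mirror these writes with a matching read
// sequence. A minimal sketch (the maxLen bound is illustrative, not a
// named constant from this codebase):
//
//	if err := modules.ReadNegotiationAcceptance(conn); err != nil {
//		return err
//	}
//	var newParents []types.Transaction
//	var newInputs []types.SiacoinInput
//	var newOutputs []types.SiacoinOutput
//	maxLen := uint64(1 << 20)
//	if err := encoding.ReadObject(conn, &newParents, maxLen); err != nil {
//		return err
//	}
//	if err := encoding.ReadObject(conn, &newInputs, maxLen); err != nil {
//		return err
//	}
//	if err := encoding.ReadObject(conn, &newOutputs, maxLen); err != nil {
//		return err
//	}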
err = modules.WriteNegotiationAcceptance(conn) if err != nil { return extendErr("accepting verified contract failed: ", ErrorConnection(err.Error())) } err = encoding.WriteObject(conn, newParents) if err != nil { return extendErr("failed to write new parents: ", ErrorConnection(err.Error())) } err = encoding.WriteObject(conn, newInputs) if err != nil { return extendErr("failed to write new inputs: ", ErrorConnection(err.Error())) } err = encoding.WriteObject(conn, newOutputs) if err != nil { return extendErr("failed to write new outputs: ", ErrorConnection(err.Error())) } // The renter will now send a negotiation response, followed by transaction // signatures for the file contract transaction in the case of acceptance. // The transaction signatures will be followed by another transaction // signature, to sign a no-op file contract revision. err = modules.ReadNegotiationAcceptance(conn) if err != nil { return extendErr("renter did not accept updated transactions: ", ErrorCommunication(err.Error())) } var renterTxnSignatures []types.TransactionSignature var renterRevisionSignature types.TransactionSignature err = encoding.ReadObject(conn, &renterTxnSignatures, modules.NegotiateMaxTransactionSignaturesSize) if err != nil { return extendErr("could not read renter transaction signatures: ", ErrorConnection(err.Error())) } err = encoding.ReadObject(conn, &renterRevisionSignature, modules.NegotiateMaxTransactionSignatureSize) if err != nil { return extendErr("could not read renter revision signatures: ", ErrorConnection(err.Error())) } // The host adds the renter transaction signatures, then signs the // transaction and submits it to the blockchain, creating a storage // obligation in the process. The host's part is done before anything is // written to the renter, but to give the renter confidence, the host will // send the signatures so that the renter can immediately have the // completed file contract. // // During finalization, the signature for the revision is also checked, and // signatures for the revision transaction are created. h.mu.RLock() hostCollateral := contractCollateral(h.settings, txnSet[len(txnSet)-1].FileContracts[0]) h.mu.RUnlock() hostTxnSignatures, hostRevisionSignature, newSOID, err := h.managedFinalizeContract(txnBuilder, renterPK, renterTxnSignatures, renterRevisionSignature, nil, hostCollateral, types.ZeroCurrency, types.ZeroCurrency) if err != nil { // The incoming file contract is not acceptable to the host, indicate // why to the renter. modules.WriteNegotiationRejection(conn, err) // Error ignored to preserve type in extendErr return extendErr("contract finalization failed: ", err) } defer h.managedUnlockStorageObligation(newSOID) err = modules.WriteNegotiationAcceptance(conn) if err != nil { return extendErr("failed to write acceptance after contract finalization: ", ErrorConnection(err.Error())) } // The host sends the transaction signatures to the renter, followed by the // revision signature. Negotiation is complete. err = encoding.WriteObject(conn, hostTxnSignatures) if err != nil { return extendErr("failed to write host transaction signatures: ", ErrorConnection(err.Error())) } err = encoding.WriteObject(conn, hostRevisionSignature) if err != nil { return extendErr("failed to write host revision signatures: ", ErrorConnection(err.Error())) } return nil } // managedVerifyNewContract checks that an incoming file contract matches the host's // expectations for a valid contract. 
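// In order, the checks below cover: a non-empty transaction set containing
// a file contract; zero file size and a zero merkle root; window timing
// (revisionSubmissionBuffer, WindowSize, MaxDuration); valid/missed proof
// output counts and payout unlock hashes; matching host valid and missed
// payouts; a payout covering at least the contract price; collateral
// within MaxCollateral and the collateral budget; the expected 2-of-2
// unlock hash; and sufficient transaction fees.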
func (h *Host) managedVerifyNewContract(txnSet []types.Transaction, renterPK crypto.PublicKey) error { // Check that the transaction set is not empty. if len(txnSet) < 1 { return extendErr("zero-length transaction set: ", errEmptyObject) } // Check that there is a file contract in the txnSet. if len(txnSet[len(txnSet)-1].FileContracts) < 1 { return extendErr("transaction without file contract: ", errEmptyObject) } h.mu.RLock() blockHeight := h.blockHeight lockedStorageCollateral := h.financialMetrics.LockedStorageCollateral publicKey := h.publicKey settings := h.settings unlockHash := h.unlockHash h.mu.RUnlock() fc := txnSet[len(txnSet)-1].FileContracts[0] // A new file contract should have a file size of zero. if fc.FileSize != 0 { return errBadFileSize } if fc.FileMerkleRoot != (crypto.Hash{}) { return errBadFileMerkleRoot } // WindowStart must be at least revisionSubmissionBuffer blocks into the // future. if fc.WindowStart <= blockHeight+revisionSubmissionBuffer { h.log.Debugf("A renter tried to form a contract that had a window start which was too soon. The contract started at %v, the current height is %v, the revisionSubmissionBuffer is %v, and the comparison was %v <= %v\n", fc.WindowStart, blockHeight, revisionSubmissionBuffer, fc.WindowStart, blockHeight+revisionSubmissionBuffer) return errEarlyWindow } // WindowEnd must be at least settings.WindowSize blocks after // WindowStart. if fc.WindowEnd < fc.WindowStart+settings.WindowSize { return errSmallWindow } // WindowStart must not be more than settings.MaxDuration blocks into the // future. if fc.WindowStart > blockHeight+settings.MaxDuration { return errLongDuration } // ValidProofOutputs should have 2 outputs (renter + host) and missed // outputs should have 3 (renter + host + void). if len(fc.ValidProofOutputs) != 2 || len(fc.MissedProofOutputs) != 3 { return errBadContractOutputCounts } // The unlock hashes of the valid and missed proof outputs for the host // must match the host's unlock hash. The third missed output should point // to the void. if fc.ValidProofOutputs[1].UnlockHash != unlockHash || fc.MissedProofOutputs[1].UnlockHash != unlockHash || fc.MissedProofOutputs[2].UnlockHash != (types.UnlockHash{}) { return errBadPayoutUnlockHashes } // Check that the payouts for the valid proof outputs and the missed proof // outputs are the same - this is important because no data has been added // to the file contract yet. if !fc.ValidProofOutputs[1].Value.Equals(fc.MissedProofOutputs[1].Value) { return errMismatchedHostPayouts } // Check that there's enough payout for the host to cover at least the // contract price. This will prevent negative currency panics when working // with the collateral. if fc.ValidProofOutputs[1].Value.Cmp(settings.MinContractPrice) < 0 { return errLowHostValidOutput } // Check that the collateral does not exceed the maximum amount of // collateral allowed. expectedCollateral := contractCollateral(settings, fc) if expectedCollateral.Cmp(settings.MaxCollateral) > 0 { return errMaxCollateralReached } // Check that the host has enough room in the collateral budget to add this // collateral. if lockedStorageCollateral.Add(expectedCollateral).Cmp(settings.CollateralBudget) > 0 { return errCollateralBudgetExceeded } // The unlock hash for the file contract must match the unlock hash that // the host knows how to spend.
expectedUH := types.UnlockConditions{ PublicKeys: []types.SiaPublicKey{ types.Ed25519PublicKey(renterPK), publicKey, }, SignaturesRequired: 2, }.UnlockHash() if fc.UnlockHash != expectedUH { return errBadUnlockHash } // Check that the transaction set has enough fees on it to get into the // blockchain. setFee := modules.CalculateFee(txnSet) minFee, _ := h.tpool.FeeEstimation() if setFee.Cmp(minFee) < 0 { return errLowTransactionFees } return nil } Sia-1.3.0/modules/host/negotiaterecentrevision.go000066400000000000000000000143311313565667000221600ustar00rootroot00000000000000package host import ( "errors" "net" "time" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" "github.com/NebulousLabs/bolt" ) var ( // errRevisionWrongPublicKeyCount is returned when a stored file contract // revision does not have enough public keys - such a situation should // never happen, and is a critical / developer error. errRevisionWrongPublicKeyCount = errors.New("wrong number of public keys in the unlock conditions of the file contract revision") ) // managedVerifyChallengeResponse will verify that the renter's response to the // challenge provided by the host is correct. In the process, the storage // obligation and file contract revision will be loaded and returned. // // The storage obligation is returned under a storage obligation lock. func (h *Host) managedVerifyChallengeResponse(fcid types.FileContractID, challenge crypto.Hash, challengeResponse crypto.Signature) (so storageObligation, recentRevision types.FileContractRevision, revisionSigs []types.TransactionSignature, err error) { // Grab a lock before it is possible to perform any operations on the // storage obligation. Defer a call to unlock in the event of an error. If // there is no error, the storage obligation will be returned with a lock. err = h.managedTryLockStorageObligation(fcid) if err != nil { err = extendErr("could not get "+fcid.String()+" lock: ", ErrorInternal(err.Error())) return storageObligation{}, types.FileContractRevision{}, nil, err } defer func() { if err != nil { h.managedUnlockStorageObligation(fcid) } }() // Fetch the storage obligation, which has the revision, which has the // renter's public key. h.mu.RLock() defer h.mu.RUnlock() err = h.db.View(func(tx *bolt.Tx) error { so, err = getStorageObligation(tx, fcid) return err }) if err != nil { err = extendErr("could not fetch "+fcid.String()+": ", ErrorInternal(err.Error())) return storageObligation{}, types.FileContractRevision{}, nil, err } // Pull out the file contract revision and the revision's signatures from // the transaction. revisionTxn := so.RevisionTransactionSet[len(so.RevisionTransactionSet)-1] recentRevision = revisionTxn.FileContractRevisions[0] for _, sig := range revisionTxn.TransactionSignatures { // Checking for just the parent id is sufficient, an over-signed file // contract is invalid. if sig.ParentID == crypto.Hash(fcid) { revisionSigs = append(revisionSigs, sig) } } // Verify that the challenge response matches the public key. var renterPK crypto.PublicKey // Sanity check - there should be two public keys. if len(recentRevision.UnlockConditions.PublicKeys) != 2 { // The error has to be set here so that the deferred error check will // unlock the storage obligation.
h.log.Critical("wrong public key count in file contract revision") err = errRevisionWrongPublicKeyCount err = extendErr("wrong public key count for "+fcid.String()+": ", ErrorInternal(err.Error())) return storageObligation{}, types.FileContractRevision{}, nil, err } copy(renterPK[:], recentRevision.UnlockConditions.PublicKeys[0].Key) err = crypto.VerifyHash(challenge, renterPK, challengeResponse) if err != nil { err = extendErr("bad signature from renter: ", ErrorCommunication(err.Error())) return storageObligation{}, types.FileContractRevision{}, nil, err } return so, recentRevision, revisionSigs, nil } // managedRPCRecentRevision sends the most recent known file contract // revision, including signatures, to the renter, for the file contract with // the id given by the renter. // // The storage obligation is returned under a storage obligation lock. func (h *Host) managedRPCRecentRevision(conn net.Conn) (types.FileContractID, storageObligation, error) { // Set the negotiation deadline. conn.SetDeadline(time.Now().Add(modules.NegotiateRecentRevisionTime)) // Receive the file contract id from the renter. var fcid types.FileContractID err := encoding.ReadObject(conn, &fcid, uint64(len(fcid))) if err != nil { return types.FileContractID{}, storageObligation{}, extendErr("could not read file contract id: ", ErrorConnection(err.Error())) } // Send a challenge to the renter to verify that the renter has write // access to the revision being opened. var challenge crypto.Hash fastrand.Read(challenge[16:]) err = encoding.WriteObject(conn, challenge) if err != nil { return types.FileContractID{}, storageObligation{}, extendErr("cound not write challenge: ", ErrorConnection(err.Error())) } // Read the signed response from the renter. var challengeResponse crypto.Signature err = encoding.ReadObject(conn, &challengeResponse, uint64(len(challengeResponse))) if err != nil { return types.FileContractID{}, storageObligation{}, extendErr("could not read challenge response: ", ErrorConnection(err.Error())) } // Verify the response. In the process, fetch the related storage // obligation, file contract revision, and transaction signatures. so, recentRevision, revisionSigs, err := h.managedVerifyChallengeResponse(fcid, challenge, challengeResponse) if err != nil { modules.WriteNegotiationRejection(conn, err) // Error not reported to preserve error type in extendErr. return types.FileContractID{}, storageObligation{}, extendErr("challenge failed: ", err) } // Defer a call to unlock the storage obligation in the event of an error. defer func() { if err != nil { h.managedUnlockStorageObligation(fcid) } }() // Send the file contract revision and the corresponding signatures to the // renter. 
err = modules.WriteNegotiationAcceptance(conn) if err != nil { err = extendErr("failed to write challenge acceptance: ", ErrorConnection(err.Error())) return types.FileContractID{}, storageObligation{}, err } err = encoding.WriteObject(conn, recentRevision) if err != nil { err = extendErr("failed to write recent revision: ", ErrorConnection(err.Error())) return types.FileContractID{}, storageObligation{}, err } err = encoding.WriteObject(conn, revisionSigs) if err != nil { err = extendErr("failed to write recent revision signatures: ", ErrorConnection(err.Error())) return types.FileContractID{}, storageObligation{}, err } return fcid, so, nil } Sia-1.3.0/modules/host/negotiaterenewcontract.go000066400000000000000000000314411313565667000220000ustar00rootroot00000000000000package host import ( "errors" "net" "time" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) var ( // errRenewDoesNotExtend is returned if a file contract renewal is // presented which does not extend the existing file contract. errRenewDoesNotExtend = errors.New("file contract renewal does not extend the existing file contract") ) // renewBaseCollateral returns the base collateral on the storage in the file // contract, using the host's external settings and the starting file contract. func renewBaseCollateral(so storageObligation, settings modules.HostExternalSettings, fc types.FileContract) types.Currency { if fc.WindowEnd <= so.proofDeadline() { return types.NewCurrency64(0) } timeExtension := fc.WindowEnd - so.proofDeadline() return settings.Collateral.Mul64(fc.FileSize).Mul64(uint64(timeExtension)) } // renewBasePrice returns the base cost of the storage in the file contract, // using the host external settings and the starting file contract. func renewBasePrice(so storageObligation, settings modules.HostExternalSettings, fc types.FileContract) types.Currency { if fc.WindowEnd <= so.proofDeadline() { return types.NewCurrency64(0) } timeExtension := fc.WindowEnd - so.proofDeadline() return settings.StoragePrice.Mul64(fc.FileSize).Mul64(uint64(timeExtension)) } // renewContractCollateral returns the amount of collateral that the host is // expected to add to the file contract based on the file contract and host // settings. func renewContractCollateral(so storageObligation, settings modules.HostExternalSettings, fc types.FileContract) types.Currency { return fc.ValidProofOutputs[1].Value.Sub(settings.ContractPrice).Sub(renewBasePrice(so, settings, fc)) } // managedAddRenewCollateral adds the host's collateral to the renewed file // contract. func (h *Host) managedAddRenewCollateral(so storageObligation, settings modules.HostExternalSettings, txnSet []types.Transaction) (builder modules.TransactionBuilder, newParents []types.Transaction, newInputs []types.SiacoinInput, newOutputs []types.SiacoinOutput, err error) { txn := txnSet[len(txnSet)-1] parents := txnSet[:len(txnSet)-1] fc := txn.FileContracts[0] hostPortion := renewContractCollateral(so, settings, fc) builder = h.wallet.RegisterTransaction(txn, parents) err = builder.FundSiacoins(hostPortion) if err != nil { builder.Drop() return nil, nil, nil, nil, extendErr("could not add collateral: ", ErrorInternal(err.Error())) } // Return which inputs and outputs have been added by the collateral call. 
newParentIndices, newInputIndices, newOutputIndices, _ := builder.ViewAdded() updatedTxn, updatedParents := builder.View() for _, parentIndex := range newParentIndices { newParents = append(newParents, updatedParents[parentIndex]) } for _, inputIndex := range newInputIndices { newInputs = append(newInputs, updatedTxn.SiacoinInputs[inputIndex]) } for _, outputIndex := range newOutputIndices { newOutputs = append(newOutputs, updatedTxn.SiacoinOutputs[outputIndex]) } return builder, newParents, newInputs, newOutputs, nil } // managedRPCRenewContract accepts a request to renew a file contract. func (h *Host) managedRPCRenewContract(conn net.Conn) error { // Perform the recent revision protocol to get the file contract being // revised. _, so, err := h.managedRPCRecentRevision(conn) if err != nil { return extendErr("RPCRecentRevision failed: ", err) } // The storage obligation is received with a lock. Defer a call to unlock // the storage obligation. defer func() { h.managedUnlockStorageObligation(so.id()) }() // Perform the host settings exchange with the renter. err = h.managedRPCSettings(conn) if err != nil { return extendErr("RPCSettings failed: ", err) } // Set the renewal deadline. conn.SetDeadline(time.Now().Add(modules.NegotiateRenewContractTime)) // The renter will either accept or reject the host's settings. err = modules.ReadNegotiationAcceptance(conn) if err != nil { return extendErr("renter rejected the host settings: ", ErrorCommunication(err.Error())) } // If the renter sends an acceptance of the settings, it will be followed // by an unsigned transaction containing funding from the renter and a file // contract which matches what the final file contract should look like. // After the file contract, the renter will send a public key which is the // renter's public key in the unlock conditions that protect the file // contract from revision. var txnSet []types.Transaction var renterPK crypto.PublicKey err = encoding.ReadObject(conn, &txnSet, modules.NegotiateMaxFileContractSetLen) if err != nil { return extendErr("unable to read transaction set: ", ErrorConnection(err.Error())) } err = encoding.ReadObject(conn, &renterPK, modules.NegotiateMaxSiaPubkeySize) if err != nil { return extendErr("unable to read renter public key: ", ErrorConnection(err.Error())) } h.mu.RLock() settings := h.externalSettings() h.mu.RUnlock() // Verify that the transaction coming over the wire is a proper renewal. err = h.managedVerifyRenewedContract(so, txnSet, renterPK) if err != nil { modules.WriteNegotiationRejection(conn, err) // Error is ignored to preserve type for extendErr return extendErr("verification of renewal failed: ", err) } txnBuilder, newParents, newInputs, newOutputs, err := h.managedAddRenewCollateral(so, settings, txnSet) if err != nil { modules.WriteNegotiationRejection(conn, err) // Error is ignored to preserve type for extendErr return extendErr("failed to add collateral: ", err) } // The host indicates acceptance, then sends the new parents, inputs, and // outputs to the transaction.
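//
// A condensed view of the remainder of the renewal handshake, derived from
// the code below (illustrative only):
//
//	host -> renter: acceptance, new parents, new inputs, new outputs
//	renter -> host: acceptance, renter txn signatures, renter revision signature
//	host -> renter: acceptance, host txn signatures, host revision signature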
err = modules.WriteNegotiationAcceptance(conn) if err != nil { return extendErr("failed to write acceptance: ", ErrorConnection(err.Error())) } err = encoding.WriteObject(conn, newParents) if err != nil { return extendErr("failed to write new parents: ", ErrorConnection(err.Error())) } err = encoding.WriteObject(conn, newInputs) if err != nil { return extendErr("failed to write new inputs: ", ErrorConnection(err.Error())) } err = encoding.WriteObject(conn, newOutputs) if err != nil { return extendErr("failed to write new outputs: ", ErrorConnection(err.Error())) } // The renter will send a negotiation response, followed by transaction // signatures for the file contract transaction in the case of acceptance. // The transaction signatures will be followed by another transaction // signature to sign the no-op file contract revision associated with the // new file contract. err = modules.ReadNegotiationAcceptance(conn) if err != nil { return extendErr("renter rejected collateral extension: ", ErrorCommunication(err.Error())) } var renterTxnSignatures []types.TransactionSignature var renterRevisionSignature types.TransactionSignature err = encoding.ReadObject(conn, &renterTxnSignatures, modules.NegotiateMaxTransactionSignatureSize) if err != nil { return extendErr("failed to read renter transaction signatures: ", ErrorConnection(err.Error())) } err = encoding.ReadObject(conn, &renterRevisionSignature, modules.NegotiateMaxTransactionSignatureSize) if err != nil { return extendErr("failed to read renter revision signatures: ", ErrorConnection(err.Error())) } // The host adds the renter transaction signatures, then signs the // transaction and submits it to the blockchain, creating a storage // obligation in the process. The host's part is now complete and the // contract is finalized, but to give confidence to the renter the host // will send the signatures so that the renter can immediately have the // completed file contract. // // During finalization the signatures sent by the renter are all checked. h.mu.RLock() fc := txnSet[len(txnSet)-1].FileContracts[0] renewCollateral := renewContractCollateral(so, settings, fc) renewRevenue := renewBasePrice(so, settings, fc) renewRisk := renewBaseCollateral(so, settings, fc) h.mu.RUnlock() hostTxnSignatures, hostRevisionSignature, newSOID, err := h.managedFinalizeContract(txnBuilder, renterPK, renterTxnSignatures, renterRevisionSignature, so.SectorRoots, renewCollateral, renewRevenue, renewRisk) if err != nil { modules.WriteNegotiationRejection(conn, err) // Error is ignored to preserve type for extendErr return extendErr("failed to finalize contract: ", err) } defer h.managedUnlockStorageObligation(newSOID) err = modules.WriteNegotiationAcceptance(conn) if err != nil { return extendErr("failed to write acceptance: ", ErrorConnection(err.Error())) } // The host sends the transaction signatures to the renter, followed by the // revision signature. Negotiation is complete. err = encoding.WriteObject(conn, hostTxnSignatures) if err != nil { return extendErr("failed to write transaction signatures: ", ErrorConnection(err.Error())) } err = encoding.WriteObject(conn, hostRevisionSignature) if err != nil { return extendErr("failed to write revision signature: ", ErrorConnection(err.Error())) } return nil } // managedVerifyRenewedContract checks that the contract renewal matches the // previous contract and makes all of the appropriate payments. 
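//
// In order, the checks below cover: the basic shape of the transaction set,
// file size and Merkle root continuity with the old contract, proof window
// placement, output counts and payout addresses, collateral limits against
// the host's budget, minimum valid and missed payouts, the expected 2-of-2
// unlock hash, and finally the transaction fee.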
func (h *Host) managedVerifyRenewedContract(so storageObligation, txnSet []types.Transaction, renterPK crypto.PublicKey) error { // Check that the transaction set is not empty. if len(txnSet) < 1 { return extendErr("zero-length transaction set: ", errEmptyObject) } // Check that the transaction set has a file contract. if len(txnSet[len(txnSet)-1].FileContracts) < 1 { return extendErr("transaction without file contract: ", errEmptyObject) } h.mu.RLock() blockHeight := h.blockHeight externalSettings := h.externalSettings() internalSettings := h.settings lockedStorageCollateral := h.financialMetrics.LockedStorageCollateral publicKey := h.publicKey unlockHash := h.unlockHash h.mu.RUnlock() fc := txnSet[len(txnSet)-1].FileContracts[0] // The file size and merkle root must match the file size and merkle root // from the previous file contract. if fc.FileSize != so.fileSize() { return errBadFileSize } if fc.FileMerkleRoot != so.merkleRoot() { return errBadFileMerkleRoot } // The WindowStart must be at least revisionSubmissionBuffer blocks into // the future. if fc.WindowStart <= blockHeight+revisionSubmissionBuffer { return errEarlyWindow } // WindowEnd must be at least settings.WindowSize blocks after WindowStart. if fc.WindowEnd < fc.WindowStart+externalSettings.WindowSize { return errSmallWindow } // ValidProofOutputs should have 2 outputs (renter + host) and missed // outputs should have 3 (renter + host + void). if len(fc.ValidProofOutputs) != 2 || len(fc.MissedProofOutputs) != 3 { return errBadContractOutputCounts } // The unlock hashes of the valid and missed proof outputs for the host // must match the host's unlock hash. The third missed output should point // to the void. if fc.ValidProofOutputs[1].UnlockHash != unlockHash || fc.MissedProofOutputs[1].UnlockHash != unlockHash || fc.MissedProofOutputs[2].UnlockHash != (types.UnlockHash{}) { return errBadPayoutUnlockHashes } // Check that the collateral does not exceed the maximum amount of // collateral allowed. expectedCollateral := renewContractCollateral(so, externalSettings, fc) if expectedCollateral.Cmp(externalSettings.MaxCollateral) > 0 { return errMaxCollateralReached } // Check that the host has enough room in the collateral budget to add this // collateral. if lockedStorageCollateral.Add(expectedCollateral).Cmp(internalSettings.CollateralBudget) > 0 { return errCollateralBudgetExceeded } // Check that the missed proof outputs contain enough money, and that the // void output contains enough money. basePrice := renewBasePrice(so, externalSettings, fc) baseCollateral := renewBaseCollateral(so, externalSettings, fc) if fc.ValidProofOutputs[1].Value.Cmp(basePrice.Add(baseCollateral)) < 0 { return errLowHostValidOutput } expectedHostMissedOutput := fc.ValidProofOutputs[1].Value.Sub(basePrice).Sub(baseCollateral) if fc.MissedProofOutputs[1].Value.Cmp(expectedHostMissedOutput) < 0 { return errLowHostMissedOutput } // Check that the void output has the correct value. expectedVoidOutput := basePrice.Add(baseCollateral) if fc.MissedProofOutputs[2].Value.Cmp(expectedVoidOutput) > 0 { return errLowVoidOutput } // The unlock hash for the file contract must match the unlock hash that // the host knows how to spend. expectedUH := types.UnlockConditions{ PublicKeys: []types.SiaPublicKey{ types.Ed25519PublicKey(renterPK), publicKey, }, SignaturesRequired: 2, }.UnlockHash() if fc.UnlockHash != expectedUH { return errBadUnlockHash } // Check that the transaction set has enough fees on it to get into the // blockchain.
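// (As a hypothetical illustration, assuming fees are denominated per byte:
// if FeeEstimation reports a minimum of 10 hastings per byte and the renewal
// set only carries an average of 8 hastings per byte, the Cmp below is
// negative and the renewal is rejected with errLowTransactionFees.)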
setFee := modules.CalculateFee(txnSet) minFee, _ := h.tpool.FeeEstimation() if setFee.Cmp(minFee) < 0 { return errLowTransactionFees } return nil } Sia-1.3.0/modules/host/negotiaterevisecontract.go000066400000000000000000000331261313565667000221570ustar00rootroot00000000000000package host import ( "fmt" "net" "time" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // managedRevisionIteration handles one iteration of the revision loop. As a // performance optimization, multiple iterations of revisions are allowed to be // made over the same connection. func (h *Host) managedRevisionIteration(conn net.Conn, so *storageObligation, finalIter bool) error { // Send the settings to the renter. The host will keep going even if it is // not accepting contracts, because in this case the contract already // exists. err := h.managedRPCSettings(conn) if err != nil { return extendErr("RPCSettings failed: ", err) } // Set the negotiation deadline. conn.SetDeadline(time.Now().Add(modules.NegotiateFileContractRevisionTime)) // The renter will either accept or reject the settings + revision // transaction. It may also return a stop response to indicate that it // wishes to terminate the revision loop. err = modules.ReadNegotiationAcceptance(conn) if err == modules.ErrStopResponse { return err // managedRPCReviseContract will catch this and exit gracefully } else if err != nil { return extendErr("renter rejected host settings: ", ErrorCommunication(err.Error())) } // Read some variables from the host for use later in the function. h.mu.RLock() settings := h.settings secretKey := h.secretKey blockHeight := h.blockHeight h.mu.RUnlock() // The renter is going to send its intended modifications, followed by the // file contract revision that pays for them. var modifications []modules.RevisionAction var revision types.FileContractRevision err = encoding.ReadObject(conn, &modifications, settings.MaxReviseBatchSize) if err != nil { return extendErr("unable to read revision modifications: ", ErrorConnection(err.Error())) } err = encoding.ReadObject(conn, &revision, modules.NegotiateMaxFileContractRevisionSize) if err != nil { return extendErr("unable to read proposed revision: ", ErrorConnection(err.Error())) } // First read all of the modifications. Then make the modifications, but // with the ability to reverse them. Then verify the file contract revision // correctly accounts for the changes. var bandwidthRevenue types.Currency // Upload bandwidth. var storageRevenue types.Currency var newCollateral types.Currency var sectorsRemoved []crypto.Hash var sectorsGained []crypto.Hash var gainedSectorData [][]byte err = func() error { for _, modification := range modifications { // Check that the index points to an existing sector root. If the type // is ActionInsert, we permit inserting at the end. if modification.Type == modules.ActionInsert { if modification.SectorIndex > uint64(len(so.SectorRoots)) { return errBadModificationIndex } } else if modification.SectorIndex >= uint64(len(so.SectorRoots)) { return errBadModificationIndex } // Check that the data sent for the sector is not too large. if uint64(len(modification.Data)) > modules.SectorSize { return errLargeSector } switch modification.Type { case modules.ActionDelete: // There is no financial information to change, it is enough to // remove the sector. 
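// For example (illustrative only): deleting index 1 from roots [r0 r1 r2]
// appends r1 to sectorsRemoved and splices so.SectorRoots down to [r0 r2],
// which is exactly what the two appends below perform in place.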
sectorsRemoved = append(sectorsRemoved, so.SectorRoots[modification.SectorIndex]) so.SectorRoots = append(so.SectorRoots[0:modification.SectorIndex], so.SectorRoots[modification.SectorIndex+1:]...) case modules.ActionInsert: // Check that the sector size is correct. if uint64(len(modification.Data)) != modules.SectorSize { return errBadSectorSize } // Update finances. blocksRemaining := so.proofDeadline() - blockHeight blockBytesCurrency := types.NewCurrency64(uint64(blocksRemaining)).Mul64(modules.SectorSize) bandwidthRevenue = bandwidthRevenue.Add(settings.MinUploadBandwidthPrice.Mul64(modules.SectorSize)) storageRevenue = storageRevenue.Add(settings.MinStoragePrice.Mul(blockBytesCurrency)) newCollateral = newCollateral.Add(settings.Collateral.Mul(blockBytesCurrency)) // Insert the sector into the root list. newRoot := crypto.MerkleRoot(modification.Data) sectorsGained = append(sectorsGained, newRoot) gainedSectorData = append(gainedSectorData, modification.Data) so.SectorRoots = append(so.SectorRoots[:modification.SectorIndex], append([]crypto.Hash{newRoot}, so.SectorRoots[modification.SectorIndex:]...)...) case modules.ActionModify: // Check that the offset and length are okay. Length is already // known to be appropriately small, but the offset needs to be // checked for being appropriately small as well otherwise there is // a risk of overflow. if modification.Offset > modules.SectorSize || modification.Offset+uint64(len(modification.Data)) > modules.SectorSize { return errIllegalOffsetAndLength } // Get the data for the new sector. sector, err := h.ReadSector(so.SectorRoots[modification.SectorIndex]) if err != nil { return extendErr("could not read sector: ", ErrorInternal(err.Error())) } copy(sector[modification.Offset:], modification.Data) // Update finances. bandwidthRevenue = bandwidthRevenue.Add(settings.MinUploadBandwidthPrice.Mul64(uint64(len(modification.Data)))) // Update the sectors removed and gained to indicate that the old // sector has been replaced with a new sector. newRoot := crypto.MerkleRoot(sector) sectorsRemoved = append(sectorsRemoved, so.SectorRoots[modification.SectorIndex]) sectorsGained = append(sectorsGained, newRoot) gainedSectorData = append(gainedSectorData, sector) so.SectorRoots[modification.SectorIndex] = newRoot default: return errUnknownModification } } newRevenue := storageRevenue.Add(bandwidthRevenue) return extendErr("unable to verify updated contract: ", verifyRevision(*so, revision, blockHeight, newRevenue, newCollateral)) }() if err != nil { modules.WriteNegotiationRejection(conn, err) // Error is ignored so that the error type can be preserved in extendErr. return extendErr("rejected proposed modifications: ", err) } // Revision is acceptable, write an acceptance string. err = modules.WriteNegotiationAcceptance(conn) if err != nil { return extendErr("could not accept revision modifications: ", ErrorConnection(err.Error())) } // Renter will send a transaction signature for the file contract revision. var renterSig types.TransactionSignature err = encoding.ReadObject(conn, &renterSig, modules.NegotiateMaxTransactionSignatureSize) if err != nil { return extendErr("could not read renter transaction signature: ", ErrorConnection(err.Error())) } // Verify that the signature is valid and get the host's signature. txn, err := createRevisionSignature(revision, renterSig, secretKey, blockHeight) if err != nil { modules.WriteNegotiationRejection(conn, err) // Error is ignored so that the error type can be preserved in extendErr. 
return extendErr("could not create revision signature: ", err) } so.PotentialStorageRevenue = so.PotentialStorageRevenue.Add(storageRevenue) so.RiskedCollateral = so.RiskedCollateral.Add(newCollateral) so.PotentialUploadRevenue = so.PotentialUploadRevenue.Add(bandwidthRevenue) so.RevisionTransactionSet = []types.Transaction{txn} h.mu.Lock() err = h.modifyStorageObligation(*so, sectorsRemoved, sectorsGained, gainedSectorData) h.mu.Unlock() if err != nil { modules.WriteNegotiationRejection(conn, err) // Error is ignored so that the error type can be preserved in extendErr. return extendErr("could not modify storage obligation: ", ErrorInternal(err.Error())) } // Host will now send acceptance and its signature to the renter. This // iteration is complete. If the finalIter flag is set, StopResponse will // be sent instead. This indicates to the renter that the host wishes to // terminate the revision loop. if finalIter { err = modules.WriteNegotiationStop(conn) } else { err = modules.WriteNegotiationAcceptance(conn) } if err != nil { return extendErr("iteration signal failed to send: ", ErrorConnection(err.Error())) } err = encoding.WriteObject(conn, txn.TransactionSignatures[1]) if err != nil { return extendErr("failed to write revision signatures: ", ErrorConnection(err.Error())) } return nil } // managedRPCReviseContract accepts a request to revise an existing contract. // Revisions can add sectors, delete sectors, and modify existing sectors. func (h *Host) managedRPCReviseContract(conn net.Conn) error { // Set a preliminary deadline for receiving the storage obligation. startTime := time.Now() // Perform the file contract revision exchange, giving the renter the most // recent file contract revision and getting the storage obligation that // will be used to pay for the data. _, so, err := h.managedRPCRecentRevision(conn) if err != nil { return extendErr("RPCRecentRevision failed: ", err) } // The storage obligation is received with a lock on it. Defer a call to // unlock the storage obligation. defer func() { h.managedUnlockStorageObligation(so.id()) }() // Begin the revision loop. The host will process revisions until a // timeout is reached, or until the renter sends a StopResponse. for timeoutReached := false; !timeoutReached; { timeoutReached = time.Since(startTime) > iteratedConnectionTime err := h.managedRevisionIteration(conn, &so, timeoutReached) if err == modules.ErrStopResponse { return nil } else if err != nil { return extendErr("revision iteration failed: ", err) } } return nil } // verifyRevision checks that the revision pays the host correctly, and that // the revision does not attempt any malicious or unexpected changes. func verifyRevision(so storageObligation, revision types.FileContractRevision, blockHeight types.BlockHeight, expectedExchange, expectedCollateral types.Currency) error { // Check that the revision is well-formed. if len(revision.NewValidProofOutputs) != 2 || len(revision.NewMissedProofOutputs) != 3 { return errBadContractOutputCounts } // Check that the time to finalize and submit the file contract revision // has not already passed. if so.expiration()-revisionSubmissionBuffer <= blockHeight { return errLateRevision } oldFCR := so.RevisionTransactionSet[len(so.RevisionTransactionSet)-1].FileContractRevisions[0] // Check that all non-volatile fields are the same. 
if oldFCR.ParentID != revision.ParentID { return errBadContractParent } if oldFCR.UnlockConditions.UnlockHash() != revision.UnlockConditions.UnlockHash() { return errBadUnlockConditions } if oldFCR.NewRevisionNumber >= revision.NewRevisionNumber { return errBadRevisionNumber } if revision.NewFileSize != uint64(len(so.SectorRoots))*modules.SectorSize { return errBadFileSize } if oldFCR.NewWindowStart != revision.NewWindowStart { return errBadWindowStart } if oldFCR.NewWindowEnd != revision.NewWindowEnd { return errBadWindowEnd } if oldFCR.NewUnlockHash != revision.NewUnlockHash { return errBadUnlockHash } // Determine the amount that was transferred from the renter. if revision.NewValidProofOutputs[0].Value.Cmp(oldFCR.NewValidProofOutputs[0].Value) > 0 { return extendErr("renter increased its valid proof output: ", errHighRenterValidOutput) } fromRenter := oldFCR.NewValidProofOutputs[0].Value.Sub(revision.NewValidProofOutputs[0].Value) // Verify that enough money was transferred. if fromRenter.Cmp(expectedExchange) < 0 { s := fmt.Sprintf("expected at least %v to be exchanged, but %v was exchanged: ", expectedExchange, fromRenter) return extendErr(s, errHighRenterValidOutput) } // Determine the amount of money that was transferred to the host. if oldFCR.NewValidProofOutputs[1].Value.Cmp(revision.NewValidProofOutputs[1].Value) > 0 { return extendErr("host valid proof output was decreased: ", errLowHostValidOutput) } toHost := revision.NewValidProofOutputs[1].Value.Sub(oldFCR.NewValidProofOutputs[1].Value) // Verify that enough money was transferred. if !toHost.Equals(fromRenter) { s := fmt.Sprintf("expected exactly %v to be transferred to the host, but %v was transferred: ", fromRenter, toHost) return extendErr(s, errLowHostValidOutput) } // If the renter's valid proof output is larger than the renter's missed // proof output, the renter has incentive to see the host fail. Make sure // that this incentive is not present. if revision.NewValidProofOutputs[0].Value.Cmp(revision.NewMissedProofOutputs[0].Value) > 0 { return extendErr("renter has incentive to see host fail: ", errHighRenterMissedOutput) } // Check that the host is not going to be posting more collateral than is // expected. If the new missed output is greater than the old one, the host // is actually posting negative collateral, which is fine. if revision.NewMissedProofOutputs[1].Value.Cmp(oldFCR.NewMissedProofOutputs[1].Value) <= 0 { collateral := oldFCR.NewMissedProofOutputs[1].Value.Sub(revision.NewMissedProofOutputs[1].Value) if collateral.Cmp(expectedCollateral) > 0 { s := fmt.Sprintf("host expected to post at most %v collateral, but contract has host posting %v: ", expectedCollateral, collateral) return extendErr(s, errLowHostMissedOutput) } } // Check that the revision count has increased. if revision.NewRevisionNumber <= oldFCR.NewRevisionNumber { return errBadRevisionNumber } // The Merkle root is checked last because it is the most expensive check. log2SectorSize := uint64(0) for 1<<log2SectorSize < (modules.SectorSize / crypto.SegmentSize) { log2SectorSize++ } ct := crypto.NewCachedTree(log2SectorSize) for _, root := range so.SectorRoots { ct.Push(root) } expectedMerkleRoot := ct.Root() if revision.NewFileMerkleRoot != expectedMerkleRoot { return errBadFileMerkleRoot } return nil } // threadedTrackWorkingStatus periodically checks if the host is receiving // settings calls, and updates the host's working status accordingly. func (h *Host) threadedTrackWorkingStatus(closeChan chan struct{}) { defer close(closeChan) // Before entering the longer loop, try a greedy, faster attempt to verify // that the host is working. prevSettingsCalls := atomic.LoadUint64(&h.atomicSettingsCalls) select { case <-h.tg.StopChan(): return case <-time.After(workingStatusFirstCheck): } settingsCalls := atomic.LoadUint64(&h.atomicSettingsCalls) // sanity check if prevSettingsCalls > settingsCalls { build.Severe("the host's settings calls decremented") } h.mu.Lock() if settingsCalls-prevSettingsCalls >= workingStatusThreshold { h.workingStatus = modules.HostWorkingStatusWorking } // First check is quick, don't set to 'not working' if host has not been // contacted enough times.
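// (As a hypothetical illustration: if workingStatusThreshold were 3, then
// three or more settings RPCs between checks would mark the host as working;
// on this first pass a quiet host simply stays in its current state rather
// than being marked not working.)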
h.mu.Unlock() for { prevSettingsCalls = atomic.LoadUint64(&h.atomicSettingsCalls) select { case <-h.tg.StopChan(): return case <-time.After(workingStatusFrequency): } settingsCalls = atomic.LoadUint64(&h.atomicSettingsCalls) // sanity check if prevSettingsCalls > settingsCalls { build.Severe("the host's settings calls decremented") continue } h.mu.Lock() if settingsCalls-prevSettingsCalls >= workingStatusThreshold { h.workingStatus = modules.HostWorkingStatusWorking } else { h.workingStatus = modules.HostWorkingStatusNotWorking } h.mu.Unlock() } } // threadedTrackConnectabilityStatus periodically checks if the host is // connectable at its netaddress. func (h *Host) threadedTrackConnectabilityStatus(closeChan chan struct{}) { defer close(closeChan) // Wait briefly before checking the first time. This gives time for any port // forwarding to complete. select { case <-h.tg.StopChan(): return case <-time.After(connectabilityCheckFirstWait): } for { h.mu.RLock() autoAddr := h.autoAddress userAddr := h.settings.NetAddress h.mu.RUnlock() activeAddr := autoAddr if userAddr != "" { activeAddr = userAddr } dialer := &net.Dialer{ Cancel: h.tg.StopChan(), Timeout: connectabilityCheckTimeout, } conn, err := dialer.Dial("tcp", string(activeAddr)) var status modules.HostConnectabilityStatus if err != nil { status = modules.HostConnectabilityStatusNotConnectable } else { conn.Close() status = modules.HostConnectabilityStatusConnectable } h.mu.Lock() h.connectabilityStatus = status h.mu.Unlock() select { case <-h.tg.StopChan(): return case <-time.After(connectabilityCheckFrequency): } } } // initNetworking performs actions like port forwarding, and gets the // host established on the network. func (h *Host) initNetworking(address string) (err error) { // Create the listener and set up the close procedures. h.listener, err = h.dependencies.listen("tcp", address) if err != nil { return err } // Automatically close the listener when h.tg.Stop() is called. threadedListenerClosedChan := make(chan struct{}) h.tg.OnStop(func() { err := h.listener.Close() if err != nil { h.log.Println("WARN: closing the listener failed:", err) } // Wait until the threadedListener has returned to continue shutdown. <-threadedListenerClosedChan }) // Set the initial working state of the host h.workingStatus = modules.HostWorkingStatusChecking // Set the initial connectability state of the host h.connectabilityStatus = modules.HostConnectabilityStatusChecking // Set the port. _, port, err := net.SplitHostPort(h.listener.Addr().String()) if err != nil { return err } h.port = port if build.Release == "testing" { // Set the autoAddress to localhost for testing builds only. h.autoAddress = modules.NetAddress(net.JoinHostPort("localhost", h.port)) } // Non-blocking, perform port forwarding and create the hostname discovery // thread. go func() { // Add this function to the threadgroup, so that the logger will not // disappear before port closing can be registered to the threadgroup // OnStop functions. err := h.tg.Add() if err != nil { // If this goroutine is not run before shutdown starts, this // code block is reachable. return } defer h.tg.Done() err = h.managedForwardPort(port) if err != nil { h.log.Println("ERROR: failed to forward port:", err) } else { // Clear the port that was forwarded at startup.
h.tg.OnStop(func() { err := h.managedClearPort() if err != nil { h.log.Println("ERROR: failed to clear port:", err) } }) } threadedUpdateHostnameClosedChan := make(chan struct{}) go h.threadedUpdateHostname(threadedUpdateHostnameClosedChan) h.tg.OnStop(func() { <-threadedUpdateHostnameClosedChan }) threadedTrackWorkingStatusClosedChan := make(chan struct{}) go h.threadedTrackWorkingStatus(threadedTrackWorkingStatusClosedChan) h.tg.OnStop(func() { <-threadedTrackWorkingStatusClosedChan }) threadedTrackConnectabilityStatusClosedChan := make(chan struct{}) go h.threadedTrackConnectabilityStatus(threadedTrackConnectabilityStatusClosedChan) h.tg.OnStop(func() { <-threadedTrackConnectabilityStatusClosedChan }) }() // Launch the listener. go h.threadedListen(threadedListenerClosedChan) return nil } // threadedHandleConn handles an incoming connection to the host, typically an // RPC. func (h *Host) threadedHandleConn(conn net.Conn) { err := h.tg.Add() if err != nil { return } defer h.tg.Done() // Close the conn on host.Close or when the method terminates, whichever comes // first. connCloseChan := make(chan struct{}) defer close(connCloseChan) go func() { select { case <-h.tg.StopChan(): case <-connCloseChan: } conn.Close() }() // Set an initial duration that is generous, but finite. RPCs can extend // this if desired. err = conn.SetDeadline(time.Now().Add(5 * time.Minute)) if err != nil { h.log.Println("WARN: could not set deadline on connection:", err) return } // Read a specifier indicating which action is being called. var id types.Specifier if err := encoding.ReadObject(conn, &id, 16); err != nil { atomic.AddUint64(&h.atomicUnrecognizedCalls, 1) h.log.Debugf("WARN: incoming conn %v was malformed: %v", conn.RemoteAddr(), err) return } switch id { case modules.RPCDownload: atomic.AddUint64(&h.atomicDownloadCalls, 1) err = extendErr("incoming RPCDownload failed: ", h.managedRPCDownload(conn)) case modules.RPCRenewContract: atomic.AddUint64(&h.atomicRenewCalls, 1) err = extendErr("incoming RPCRenewContract failed: ", h.managedRPCRenewContract(conn)) case modules.RPCFormContract: atomic.AddUint64(&h.atomicFormContractCalls, 1) err = extendErr("incoming RPCFormContract failed: ", h.managedRPCFormContract(conn)) case modules.RPCReviseContract: atomic.AddUint64(&h.atomicReviseCalls, 1) err = extendErr("incoming RPCReviseContract failed: ", h.managedRPCReviseContract(conn)) case modules.RPCRecentRevision: atomic.AddUint64(&h.atomicRecentRevisionCalls, 1) var so storageObligation _, so, err = h.managedRPCRecentRevision(conn) err = extendErr("incoming RPCRecentRevision failed: ", err) if err == nil { // The unlock can be called immediately, as no action is taken with // the storage obligation that gets returned. h.managedUnlockStorageObligation(so.id()) } case modules.RPCSettings: atomic.AddUint64(&h.atomicSettingsCalls, 1) err = extendErr("incoming RPCSettings failed: ", h.managedRPCSettings(conn)) case rpcSettingsDeprecated: h.log.Debugln("Received deprecated settings call") default: h.log.Debugf("WARN: incoming conn %v requested unknown RPC \"%v\"", conn.RemoteAddr(), id) atomic.AddUint64(&h.atomicUnrecognizedCalls, 1) } if err != nil { atomic.AddUint64(&h.atomicErroredCalls, 1) err = extendErr("error with "+conn.RemoteAddr().String()+": ", err) h.managedLogError(err) } } // threadedListen listens for incoming RPCs and spawns an appropriate handler for each. func (h *Host) threadedListen(closeChan chan struct{}) { defer close(closeChan) // Receive connections until an error is returned by the listener.
When an // error is returned, there will be no more calls to receive. for { // Block until there is a connection to handle. conn, err := h.listener.Accept() if err != nil { return } go h.threadedHandleConn(conn) // Soft-sleep to ratelimit the number of incoming connections. select { case <-h.tg.StopChan(): case <-time.After(rpcRatelimit): } } } // NetAddress returns the address at which the host can be reached. func (h *Host) NetAddress() modules.NetAddress { h.mu.RLock() defer h.mu.RUnlock() if h.settings.NetAddress != "" { return h.settings.NetAddress } return h.autoAddress } // NetworkMetrics returns information about the types of rpc calls that have // been made to the host. func (h *Host) NetworkMetrics() modules.HostNetworkMetrics { h.mu.RLock() defer h.mu.RUnlock() return modules.HostNetworkMetrics{ DownloadCalls: atomic.LoadUint64(&h.atomicDownloadCalls), ErrorCalls: atomic.LoadUint64(&h.atomicErroredCalls), FormContractCalls: atomic.LoadUint64(&h.atomicFormContractCalls), RenewCalls: atomic.LoadUint64(&h.atomicRenewCalls), ReviseCalls: atomic.LoadUint64(&h.atomicReviseCalls), SettingsCalls: atomic.LoadUint64(&h.atomicSettingsCalls), UnrecognizedCalls: atomic.LoadUint64(&h.atomicUnrecognizedCalls), } } Sia-1.3.0/modules/host/network_test.go000066400000000000000000000104601313565667000177500ustar00rootroot00000000000000package host import ( "sync/atomic" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" ) // blockingPortForward is a dependency set that causes the host port forward // call at startup to block for several seconds, simulating the amount of // blocking that can occur in production. // // blockingPortForward will also cause managedClearPort to always return an // error. type blockingPortForward struct { productionDependencies } // disrupt will cause the port forward call to block for several seconds, but // still complete normally. disrupt will also cause managedClearPort to return // an error. func (blockingPortForward) disrupt(s string) bool { // Return an error when clearing the port. if s == "managedClearPort return error" { return true } // Block during port forwarding. if s == "managedForwardPort" { time.Sleep(time.Second * 3) } return false } // TestPortForwardBlocking checks that the host does not accidentally call a // write on a closed logger due to a long-running port forward call. func TestPortForwardBlocking(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() ht, err := newMockHostTester(blockingPortForward{}, "TestPortForwardBlocking") if err != nil { t.Fatal(err) } // The close operation would previously fail here because of improper // thread control regarding upnp and shutdown. err = ht.Close() if err != nil { t.Fatal(err) } // The trailing sleep is needed to catch the previously existing error // where the host was not shutting down correctly. Currently, the extra // sleep does nothing, but in the regression a logging panic would occur. time.Sleep(time.Second * 4) } // TestHostWorkingStatus checks that the host properly updates its working // state. func TestHostWorkingStatus(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } t.Parallel() ht, err := newHostTester(t.Name()) if err != nil { t.Fatal(err) } defer ht.Close() // TODO: this causes an NDF (non-deterministic failure), because it relies // on the host tester starting up // and fully returning faster than the first check, which isn't always the // case. This check is disabled for now, but can be fixed by using the // disrupt() pattern.
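// A sketch of that disrupt() fix (hypothetical, in the style of the
// blockingPortForward dependency above): inject a dependency whose
// disrupt("trackWorkingStatusFirstCheck") blocks until the test has observed
// the initial HostWorkingStatusChecking state, then unblocks so the host can
// proceed with its first real check.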
// if ht.host.WorkingStatus() != modules.HostWorkingStatusChecking { // t.Fatal("expected working state to initially be modules.HostWorkingStatusChecking") // } for i := 0; i < 5; i++ { // Simulate some settings calls, and see if the host picks up on it. atomic.AddUint64(&ht.host.atomicSettingsCalls, workingStatusThreshold+1) success := false for start := time.Now(); time.Since(start) < 30*time.Second; time.Sleep(time.Millisecond * 10) { if ht.host.WorkingStatus() == modules.HostWorkingStatusWorking { success = true break } } if !success { t.Fatal("expected working state to flip to HostWorkingStatusWorking after incrementing settings calls") } // make no settings calls, host should flip back to NotWorking success = false for start := time.Now(); time.Since(start) < 30*time.Second; time.Sleep(time.Millisecond * 10) { if ht.host.WorkingStatus() == modules.HostWorkingStatusNotWorking { success = true break } } if !success { t.Fatal("expected working state to flip to HostWorkingStatusNotWorking if no settings calls occur") } } } // TestHostConnectabilityStatus checks that the host properly updates its // connectability state. func TestHostConnectabilityStatus(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() ht, err := newHostTester(t.Name()) if err != nil { t.Fatal(err) } defer ht.Close() // TODO: this causes an NDF (non-deterministic failure), because it relies // on the host tester starting up // and fully returning faster than the first check, which isn't always the // case. This check is disabled for now, but can be fixed by using the // disrupt() pattern. // if ht.host.ConnectabilityStatus() != modules.HostConnectabilityStatusChecking { // t.Fatal("expected connectability state to initially be HostConnectabilityStatusChecking") // } success := false for start := time.Now(); time.Since(start) < 30*time.Second; time.Sleep(time.Millisecond * 10) { if ht.host.ConnectabilityStatus() == modules.HostConnectabilityStatusConnectable { success = true break } } if !success { t.Fatal("expected connectability state to flip to HostConnectabilityStatusConnectable") } } Sia-1.3.0/modules/host/persist.go000066400000000000000000000140311313565667000167070ustar00rootroot00000000000000package host import ( "encoding/json" "os" "path/filepath" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) // persistence is the data that is kept when the host is restarted. type persistence struct { // Consensus Tracking. BlockHeight types.BlockHeight `json:"blockheight"` RecentChange modules.ConsensusChangeID `json:"recentchange"` // Host Identity. Announced bool `json:"announced"` AutoAddress modules.NetAddress `json:"autoaddress"` FinancialMetrics modules.HostFinancialMetrics `json:"financialmetrics"` PublicKey types.SiaPublicKey `json:"publickey"` RevisionNumber uint64 `json:"revisionnumber"` SecretKey crypto.SecretKey `json:"secretkey"` Settings modules.HostInternalSettings `json:"settings"` UnlockHash types.UnlockHash `json:"unlockhash"` } // persistData returns the data in the Host that will be saved to disk. func (h *Host) persistData() persistence { return persistence{ // Consensus Tracking. BlockHeight: h.blockHeight, RecentChange: h.recentChange, // Host Identity.
Announced: h.announced, AutoAddress: h.autoAddress, FinancialMetrics: h.financialMetrics, PublicKey: h.publicKey, RevisionNumber: h.revisionNumber, SecretKey: h.secretKey, Settings: h.settings, UnlockHash: h.unlockHash, } } // establishDefaults configures the default settings for the host, overwriting // any existing settings. func (h *Host) establishDefaults() error { // Configure the settings object. h.settings = modules.HostInternalSettings{ MaxDownloadBatchSize: uint64(defaultMaxDownloadBatchSize), MaxDuration: defaultMaxDuration, MaxReviseBatchSize: uint64(defaultMaxReviseBatchSize), WindowSize: defaultWindowSize, Collateral: defaultCollateral, CollateralBudget: defaultCollateralBudget, MaxCollateral: defaultMaxCollateral, MinStoragePrice: defaultStoragePrice, MinContractPrice: defaultContractPrice, MinDownloadBandwidthPrice: defaultDownloadBandwidthPrice, MinUploadBandwidthPrice: defaultUploadBandwidthPrice, } // Generate signing key, for revising contracts. sk, pk := crypto.GenerateKeyPair() h.secretKey = sk h.publicKey = types.Ed25519PublicKey(pk) // Subscribe to the consensus set. err := h.initConsensusSubscription() if err != nil { return err } return nil } // loadPersistObject will take a persist object and copy the data into the // host. func (h *Host) loadPersistObject(p *persistence) { // Copy over consensus tracking. h.blockHeight = p.BlockHeight h.recentChange = p.RecentChange // Copy over host identity. h.announced = p.Announced h.autoAddress = p.AutoAddress if err := p.AutoAddress.IsValid(); err != nil { h.log.Printf("WARN: AutoAddress '%v' loaded from persist is invalid: %v", p.AutoAddress, err) h.autoAddress = "" } h.financialMetrics = p.FinancialMetrics h.publicKey = p.PublicKey h.revisionNumber = p.RevisionNumber h.secretKey = p.SecretKey h.settings = p.Settings if err := p.Settings.NetAddress.IsValid(); err != nil { h.log.Printf("WARN: NetAddress '%v' loaded from persist is invalid: %v", p.Settings.NetAddress, err) h.settings.NetAddress = "" } h.unlockHash = p.UnlockHash } // initDB will check that the database has been initialized and if not, will // initialize the database. func (h *Host) initDB() (err error) { // Open the host's database and set up the stop function to close it. h.db, err = h.dependencies.openDatabase(dbMetadata, filepath.Join(h.persistDir, dbFilename)) if err != nil { return err } h.tg.AfterStop(func() { err = h.db.Close() if err != nil { h.log.Println("Could not close the database:", err) } }) return h.db.Update(func(tx *bolt.Tx) error { // The storage obligation bucket does not exist, which means the // database needs to be initialized. Create the database buckets. buckets := [][]byte{ bucketActionItems, bucketStorageObligations, } for _, bucket := range buckets { _, err := tx.CreateBucketIfNotExists(bucket) if err != nil { return err } } return nil }) } // load loads the Hosts's persistent data from disk. func (h *Host) load() error { // Initialize the host database. err := h.initDB() if err != nil { err = build.ExtendErr("Could not initialize database:", err) h.log.Println(err) return err } // Load the old persistence object from disk. Simple task if the version is // the most recent version, but older versions need to be updated to the // more recent structures. p := new(persistence) err = h.dependencies.loadFile(persistMetadata, p, filepath.Join(h.persistDir, settingsFile)) if err == nil { // Copy in the persistence. h.loadPersistObject(p) } else if os.IsNotExist(err) { // There is no host.json file, set up sane defaults. 
return h.establishDefaults() } else if err == persist.ErrBadVersion { // Attempt an upgrade from V112 to V120. err = h.upgradeFromV112ToV120() if err != nil { return err } } else if err != nil { return err } // Get the contract count by observing all of the incomplete storage // obligations in the database. h.financialMetrics.ContractCount = 0 err = h.db.View(func(tx *bolt.Tx) error { cursor := tx.Bucket(bucketStorageObligations).Cursor() for k, v := cursor.First(); k != nil; k, v = cursor.Next() { var so storageObligation err := json.Unmarshal(v, &so) if err != nil { return err } if so.ObligationStatus == obligationUnresolved { h.financialMetrics.ContractCount++ } } return nil }) if err != nil { return err } return h.initConsensusSubscription() } // saveSync stores all of the persist data to disk and then syncs to disk. func (h *Host) saveSync() error { return persist.SaveJSON(persistMetadata, h.persistData(), filepath.Join(h.persistDir, settingsFile)) } Sia-1.3.0/modules/host/persist_compat_1.0.0_test.go000066400000000000000000000041721313565667000220320ustar00rootroot00000000000000package host import ( "path/filepath" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // TestHostPersistCompat100 tests breaking changes in the host persist struct // resulting from spelling errors. The test occurs by loading // hostpersist_compat_1.0.0.json, a v0.6.0 host persistence file that has been // pulled from the wild and adapted to have all non-zero values in its fields // for the purposes of testing. func TestHostPersistCompat100(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() ht, err := newHostTester("TestHostPersistCompat100") if err != nil { t.Fatal(err) } defer ht.Close() // Close the host and then swap out the persist file for the one that is // being used for testing. ht.host.Close() source := filepath.Join("testdata", "v100Host.tar.gz") err = build.ExtractTarGz(source, filepath.Join(ht.host.persistDir)) if err != nil { t.Log(filepath.Abs(source)) t.Fatal(err) } h, err := New(ht.cs, ht.tpool, ht.wallet, "localhost:0", filepath.Join(ht.persistDir, modules.HostDir)) if err != nil { t.Fatal(err) } // Check that, after loading the compatibility file, all of the values are // still correct. The file that was transplanted had no zero-value fields. ht.host.mu.Lock() if h.financialMetrics.PotentialStorageRevenue.IsZero() { t.Error("potential storage revenue not loaded correctly") } if h.settings.MinContractPrice.IsZero() { t.Error("min contract price not loaded correctly") } if h.settings.MinDownloadBandwidthPrice.IsZero() { t.Error("min download bandwidth price not loaded correctly") } if h.settings.MinStoragePrice.IsZero() { t.Error("min storage price not loaded correctly") } if h.settings.MinUploadBandwidthPrice.IsZero() { t.Error("min upload bandwidth price not loaded correctly") } if h.revisionNumber == 0 { t.Error("revision number loaded incorrectly") } if h.unlockHash == (types.UnlockHash{}) { t.Error("unlock hash loaded incorrectly") } ht.host.mu.Unlock() // Set ht.host to 'h' so that the 'ht.Close()' method will close everything // cleanly. 
ht.host = h } Sia-1.3.0/modules/host/persist_compat_1.2.0.go000066400000000000000000000401621313565667000207740ustar00rootroot00000000000000package host import ( "encoding/hex" "encoding/json" "errors" "io/ioutil" "os" "path/filepath" "sync" "github.com/NebulousLabs/bolt" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" ) const ( // contractManagerStorageFolderGranularity is a mirror of the storage // folder granularity constant in the contract manager. The two values need // to remain equal; however, it is unlikely that it will ever change from // 64. contractManagerStorageFolderGranularity = 64 // The directory names and filenames of legacy storage manager files. v112StorageManagerDir = "storagemanager" v112StorageManagerDBFilename = "storagemanager.db" v112StorageManagerPersistFilename = "storagemanager.json" ) var ( // minimumStorageFolderSize specifies the minimum storage folder size // accepted by the new contract manager. // // NOTE: This number needs to be kept in sync with the actual minimum // storage folder size of the contract manager, but it is unlikely that // synchronization would be lost. minimumStorageFolderSize = contractManagerStorageFolderGranularity * modules.SectorSize // v112PersistMetadata is the header of the v112 host persist file. v112PersistMetadata = persist.Metadata{ Header: "Sia Host", Version: "0.5", } // v112StorageManagerBucketSectorUsage is the name of the bucket that // contains all of the sector usage information in the v1.0.0 storage // manager. v112StorageManagerBucketSectorUsage = []byte("BucketSectorUsage") // v112StorageManagerDBMetadata contains the legacy metadata for the v1.0.0 // storage manager database. The version is v0.6.0, as that is the last // time that compatibility was broken with the storage manager persist. v112StorageManagerDBMetadata = persist.Metadata{ Header: "Sia Storage Manager DB", Version: "0.6.0", } // v112StorageManagerMetadata contains the legacy metadata for the v1.0.0 // storage manager persistence. The version is v0.6.0, as that is the last time // that compatibility was broken with the storage manager persist. v112StorageManagerMetadata = persist.Metadata{ Header: "Sia Storage Manager", Version: "0.6.0", } ) type ( // v112StorageManagerPersist contains the legacy fields necessary to load the // v1.0.0 storage manager persistence. v112StorageManagerPersist struct { SectorSalt crypto.Hash StorageFolders []*v112StorageManagerStorageFolder } // v112StorageManagerSector defines a sector held by the v1.0.0 storage // manager, which includes the data itself as well as all of the associated // metadata. v112StorageManagerSector struct { Count int Data []byte Key []byte Root crypto.Hash } // v112StorageManagerSectorUsage defines the sectorUsage struct for the // v1.0.0 storage manager, the data loaded from the sector database. v112StorageManagerSectorUsage struct { Corrupted bool Expiry []types.BlockHeight StorageFolder []byte } // v112StorageManagerStorageFolder contains the legacy fields necessary to load // the v1.0.0 storage manager persistence. v112StorageManagerStorageFolder struct { Path string Size uint64 SizeRemaining uint64 UID []byte } ) // loadCompatV100 loads fields that have changed names or otherwise broken // compatibility with previous versions, enabling users to upgrade without // unexpected loss of data.
// // COMPAT v1.0.0 // // A spelling error in pre-1.0 versions means that, if this is the first time // running after an upgrade, the misspelled field needs to be transferred over. func (h *Host) loadCompatV100(p *persistence) error { var compatPersistence struct { FinancialMetrics struct { PotentialStorageRevenue types.Currency `json:"potentialerevenue"` } Settings struct { MinContractPrice types.Currency `json:"contractprice"` MinDownloadBandwidthPrice types.Currency `json:"minimumdownloadbandwidthprice"` MinStoragePrice types.Currency `json:"storageprice"` MinUploadBandwidthPrice types.Currency `json:"minimumuploadbandwidthprice"` } } err := h.dependencies.loadFile(v112PersistMetadata, &compatPersistence, filepath.Join(h.persistDir, settingsFile)) if err != nil { return err } // Load the compat values, but only if the compat values are non-zero and // the real values are zero. if !compatPersistence.FinancialMetrics.PotentialStorageRevenue.IsZero() && p.FinancialMetrics.PotentialStorageRevenue.IsZero() { h.financialMetrics.PotentialStorageRevenue = compatPersistence.FinancialMetrics.PotentialStorageRevenue } if !compatPersistence.Settings.MinContractPrice.IsZero() && p.Settings.MinContractPrice.IsZero() { h.settings.MinContractPrice = compatPersistence.Settings.MinContractPrice } if !compatPersistence.Settings.MinDownloadBandwidthPrice.IsZero() && p.Settings.MinDownloadBandwidthPrice.IsZero() { h.settings.MinDownloadBandwidthPrice = compatPersistence.Settings.MinDownloadBandwidthPrice } if !compatPersistence.Settings.MinStoragePrice.IsZero() && p.Settings.MinStoragePrice.IsZero() { h.settings.MinStoragePrice = compatPersistence.Settings.MinStoragePrice } if !compatPersistence.Settings.MinUploadBandwidthPrice.IsZero() && p.Settings.MinUploadBandwidthPrice.IsZero() { h.settings.MinUploadBandwidthPrice = compatPersistence.Settings.MinUploadBandwidthPrice } return nil } // readAndDeleteV112Sectors reads some sectors from the v1.0.0 storage // manager, deleting them from disk and returning. This clears up disk space // for the new contract manager, though puts the data at risk of loss in the // event of a power interruption. Risk window is small, amount of data at risk // is small, so this is acceptable. func (h *Host) readAndDeleteV112Sectors(oldPersist *v112StorageManagerPersist, oldDB *persist.BoltDatabase, numToFetch int) (sectors []v112StorageManagerSector, err error) { err = oldDB.Update(func(tx *bolt.Tx) error { // Read at most contractManagerStorageFolderGranularity sectors per // storage folder. sectorsPerStorageFolder := make(map[string]int) bucket := tx.Bucket(v112StorageManagerBucketSectorUsage) i := 0 c := bucket.Cursor() for sectorKey, sectorUsageBytes := c.First(); sectorUsageBytes != nil && i < numToFetch; sectorKey, sectorUsageBytes = c.Next() { var usage v112StorageManagerSectorUsage err := json.Unmarshal(sectorUsageBytes, &usage) if err != nil { continue } // Don't read more than contractManagerStorageFolderGranularity // sectors per storage folder. readSoFar := sectorsPerStorageFolder[string(usage.StorageFolder)] if readSoFar >= contractManagerStorageFolderGranularity { continue } sectorsPerStorageFolder[string(usage.StorageFolder)]++ // Read the sector from disk. 
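// The legacy on-disk layout, as inferred from the path construction below:
// each sector lives in its own file, named after its database key, inside a
// directory named after the hex-encoded UID of its storage folder, i.e.
// <persistDir>/storagemanager/<hex folder UID>/<sector key>.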
sectorFilename := filepath.Join(h.persistDir, v112StorageManagerDir, hex.EncodeToString(usage.StorageFolder), string(sectorKey)) sectorData, err := ioutil.ReadFile(sectorFilename) if err != nil { h.log.Println("Unable to read a sector from the legacy storage manager during host upgrade:", err) } // Delete the sector from disk. err = os.Remove(sectorFilename) if err != nil { h.log.Println("unable to remove sector from the legacy storage manager, be sure to remove manually:", err) } sector := v112StorageManagerSector{ Count: len(usage.Expiry), Data: sectorData, Key: sectorKey, Root: crypto.MerkleRoot(sectorData), } sectors = append(sectors, sector) i++ } // Delete the usage data from the storage manager db for each of the // sectors. for _, sector := range sectors { err := bucket.Delete(sector.Key) if err != nil { h.log.Println("Unable to delete a sector from the bucket, the sector could not be found:", err) } } return nil }) return sectors, err } // upgradeFromV112ToV120 is an upgrade layer that migrates the host from // the old storage manager to the new contract manager. This particular upgrade // only handles migrating the sectors. func (h *Host) upgradeFromV112ToV120() error { h.log.Println("Attempting an upgrade for the host from v1.0.0 to v1.2.0") // Sanity check - the upgrade will not work if the contract manager has not // been loaded yet. if h.StorageManager == nil { return errors.New("cannot perform host upgrade - the contract manager must not be nil") } // Fetch the old set of storage folders, and create analogous storage // folders in the contract manager. But create them to have sizes of zero, // and grow them contractManagerStorageFolderGranularity (64) sectors at a // time. This is to make sure the user does // not run out of disk space during the upgrade. oldPersist := new(v112StorageManagerPersist) err := persist.LoadJSON(v112StorageManagerMetadata, oldPersist, filepath.Join(h.persistDir, v112StorageManagerDir, v112StorageManagerPersistFilename)) if err != nil { return build.ExtendErr("unable to load the legacy storage manager persist", err) } // Open the old storagemanager database. oldDB, err := persist.OpenDatabase(v112StorageManagerDBMetadata, filepath.Join(h.persistDir, v112StorageManagerDir, v112StorageManagerDBFilename)) if err != nil { return build.ExtendErr("unable to open the legacy storage manager database", err) } // Create a map from old storage folders to their capacity. smFolderCapacities := make(map[string]uint64) for _, smFolder := range oldPersist.StorageFolders { smFolderCapacities[smFolder.Path] = smFolder.Size } // Fetch the set of storage folders already in the current contract // manager. When replacing existing storage folders in the storage manager, // duplicates will be avoided. Duplicates would otherwise be likely in the // event of a power outage during the upgrade. currentPaths := make(map[string]struct{}) currentStorageFolders := h.StorageFolders() for _, sf := range currentStorageFolders { currentPaths[sf.Path] = struct{}{} } // Count the number of storage folders that need to be created in the // contract manager. var newFoldersNeeded int for _, sf := range oldPersist.StorageFolders { _, exists := currentPaths[sf.Path] if !exists { newFoldersNeeded++ } } // Pre-emptively read some sectors from the storage manager. This will // clear up space on disk to make room for the contract manager folders. // // NOTE: The sectorData returned for the sectors may be 'nil' if there // were disk I/O errors.
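// (As a rough sizing illustration, assuming 4 MiB sectors, the production
// value of modules.SectorSize: with two folders still to create, this
// prefetches up to 64*2 = 128 sectors, freeing roughly 512 MiB of disk space
// before AddStorageFolder is called.)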
sectors, err := h.readAndDeleteV112Sectors(oldPersist, oldDB, contractManagerStorageFolderGranularity*newFoldersNeeded) if err != nil { h.log.Println("Error reading sectors from legacy storage manager:", err) } // Iterate through each storage folder and create analogous storage folders // in the new contract manager. These storage folders may already exist in // the new contract manager. for _, sf := range oldPersist.StorageFolders { // Nothing to do if the contract manager already has this storage // folder (an unusual situation, though). _, exists := currentPaths[sf.Path] if exists { continue } // Create a storage folder in the contract manager for the // corresponding storage folder in the storage manager. err := h.AddStorageFolder(sf.Path, minimumStorageFolderSize) if err != nil { h.log.Println("Unable to create a storage folder in the contract manager:", err) continue } } // Add all of the preloaded sectors to the contract manager. var wg sync.WaitGroup for _, sector := range sectors { for i := 0; i < sector.Count; i++ { if uint64(len(sector.Data)) == modules.SectorSize { wg.Add(1) go func(sector v112StorageManagerSector) { err := h.AddSector(sector.Root, sector.Data) if err != nil { err = build.ExtendErr("Unable to add legacy sector to the upgraded contract manager:", err) h.log.Println(err) } wg.Done() }(sector) } } } wg.Wait() // Read sectors from the storage manager database until all of the sectors // have been read. for { // Determine whether any of the storage folders need to be grown. var canGrow int cmFolders := h.StorageFolders() for _, cmFolder := range cmFolders { finalCapacity := smFolderCapacities[cmFolder.Path] if cmFolder.Capacity < finalCapacity-(modules.SectorSize*contractManagerStorageFolderGranularity) { canGrow++ } } // Read some sectors from the storage manager. // // NOTE: The sectorData returned for the sectors may be 'nil' if there // were disk I/O errors. sectors, err := h.readAndDeleteV112Sectors(oldPersist, oldDB, contractManagerStorageFolderGranularity*canGrow) if err != nil { h.log.Println("Error reading sectors from legacy storage manager:", err) continue } // Break condition - if no sectors were read, the migration is // complete. if len(sectors) == 0 { break } // Grow the storage folders that are able to be grown. for _, cmFolder := range cmFolders { finalCapacity := smFolderCapacities[cmFolder.Path] if cmFolder.Capacity < finalCapacity-(modules.SectorSize*contractManagerStorageFolderGranularity) { err := h.ResizeStorageFolder(cmFolder.Index, cmFolder.Capacity+(modules.SectorSize*contractManagerStorageFolderGranularity), false) if err != nil { err = build.ExtendErr("unable to resize storage folder during host upgrade:", err) h.log.Println(err) continue } } } // Add the sectors to the contract manager. var wg sync.WaitGroup for _, sector := range sectors { for i := 0; i < sector.Count; i++ { if uint64(len(sector.Data)) == modules.SectorSize { wg.Add(1) go func(sector v112StorageManagerSector) { err := h.AddSector(sector.Root, sector.Data) if err != nil { err = build.ExtendErr("Unable to add legacy sector to the upgraded contract manager:", err) h.log.Println(err) } wg.Done() }(sector) } } } wg.Wait() } // Save the desired storage folder sizes before closing out the old persist. cmFolders := h.StorageFolders() // Clean up the old storage manager before growing the storage folders. // An interruption during the growing phase should result in storage folders // that are whatever size they were left off at.
err = oldDB.Close() if err != nil { h.log.Println("Unable to close old database during v1.2.0 compat upgrade", err) } // Try loading the persist again. p := new(persistence) err = h.dependencies.loadFile(v112PersistMetadata, p, filepath.Join(h.persistDir, settingsFile)) if err != nil { return build.ExtendErr("upgrade appears complete, but having difficulties reloading host after upgrade", err) } h.loadPersistObject(p) // Apply the v100 compat upgrade in case the host is loading from a // version between v1.0.0 and v1.1.2. err = h.loadCompatV100(p) if err != nil { return build.ExtendErr("upgrade appears complete, but having trouble reloading:", err) } // Save the updated persist so that the upgrade is not triggered again. err = h.saveSync() if err != nil { return build.ExtendErr("upgrade appears complete, but final save has failed (upgrade likely successful)", err) } // Delete the storage manager files. Note that this must happen after the // complete upgrade, including a finishing call to saveSync(). for _, sf := range oldPersist.StorageFolders { err = os.Remove(filepath.Join(h.persistDir, v112StorageManagerDir, hex.EncodeToString(sf.UID))) if err != nil { h.log.Println("Unable to remove legacy contract manager files:", err) } } err = os.Remove(filepath.Join(h.persistDir, v112StorageManagerDir, v112StorageManagerPersistFilename)) if err != nil { h.log.Println("Unable to remove legacy persist files:", err) } oldDB.Close() err = os.Remove(filepath.Join(h.persistDir, v112StorageManagerDir, v112StorageManagerDBFilename)) if err != nil { h.log.Println("Unable to remove legacy database:", err) } // Resize any remaining folders to their full size. for _, cmFolder := range cmFolders { finalCapacity := smFolderCapacities[cmFolder.Path] finalCapacity -= finalCapacity % (modules.SectorSize * contractManagerStorageFolderGranularity) if cmFolder.Capacity < finalCapacity { err := h.ResizeStorageFolder(cmFolder.Index, finalCapacity, false) if err != nil { err = build.ExtendErr("unable to resize storage folder during host upgrade", err) h.log.Println(err) continue } } } return nil } Sia-1.3.0/modules/host/persist_compat_1.2.0_test.go000066400000000000000000000103771313565667000220400ustar00rootroot00000000000000package host import ( "os" "path/filepath" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/consensus" "github.com/NebulousLabs/Sia/modules/gateway" "github.com/NebulousLabs/Sia/modules/transactionpool" "github.com/NebulousLabs/Sia/modules/wallet" "github.com/NebulousLabs/Sia/persist" ) const ( // v112Host names the archived legacy host that can be // used to test upgrades. v112Host = "v112Host.tar.gz" ) // loadExistingHostWithNewDeps will create all of the dependencies for a host, then load // the host on top of the given directory. func loadExistingHostWithNewDeps(modulesDir, hostDir string) (modules.Host, error) { testdir := build.TempDir(modules.HostDir, modulesDir) // Create the host dependencies. g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { return nil, err } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { return nil, err } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { return nil, err } w, err := wallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { return nil, err } // Create the host.
h, err := newHost(productionDependencies{}, cs, tp, w, "localhost:0", hostDir) if err != nil { return nil, err } return h, nil } // TestV112StorageManagerUpgrade creates a host with a legacy storage manager, // and then attempts to upgrade the storage manager. func TestV112StorageManagerUpgrade(t *testing.T) { if testing.Short() { t.SkipNow() } // Copy the testdata legacy storage manager to the temp directory. source := filepath.Join("testdata", v112Host) legacyHost := build.TempDir(modules.HostDir, t.Name(), modules.HostDir) err := build.ExtractTarGz(source, legacyHost) if err != nil { t.Fatal(err) } // Patch the storagemanager.json to point to the new storage folder // location. smPersist := new(v112StorageManagerPersist) err = persist.LoadJSON(v112StorageManagerMetadata, smPersist, filepath.Join(legacyHost, v112StorageManagerDir, v112StorageManagerPersistFilename)) if err != nil { t.Fatal(err) } smPersist.StorageFolders[0].Path = filepath.Join(legacyHost, "storageFolderOne") smPersist.StorageFolders[1].Path = filepath.Join(legacyHost, "storageFolderTwo") err = persist.SaveJSON(v112StorageManagerMetadata, smPersist, filepath.Join(legacyHost, v112StorageManagerDir, v112StorageManagerPersistFilename)) if err != nil { t.Fatal(err) } oldCapacity := smPersist.StorageFolders[0].Size + smPersist.StorageFolders[1].Size oldCapacityRemaining := smPersist.StorageFolders[0].SizeRemaining + smPersist.StorageFolders[1].SizeRemaining oldUsed := oldCapacity - oldCapacityRemaining // Create the symlinks that point to the storage folders. err = os.Symlink(filepath.Join(legacyHost, "storageFolderOne"), filepath.Join(legacyHost, v112StorageManagerDir, "66")) if err != nil { t.Fatal(err) } err = os.Symlink(filepath.Join(legacyHost, "storageFolderTwo"), filepath.Join(legacyHost, v112StorageManagerDir, "04")) if err != nil { t.Fatal(err) } // Patching complete. Proceed to create the host and verify that the // upgrade went smoothly. host, err := loadExistingHostWithNewDeps(filepath.Join(t.Name(), "newDeps"), legacyHost) if err != nil { t.Fatal(err) } storageFolders := host.StorageFolders() if len(storageFolders) != 2 { t.Fatal("Storage manager upgrade was unsuccessful.") } // The amount of data reported should match the previous amount of data // that was stored.
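// A sketch of the accounting used below: capacity and remaining
// capacity are summed across both folders, usage is derived as the
// difference, and all three aggregates must survive the upgrade
// unchanged. The error messages divide by modules.SectorSize so that
// mismatches are reported in sectors rather than bytes.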
capacity := storageFolders[0].Capacity + storageFolders[1].Capacity capacityRemaining := storageFolders[0].CapacityRemaining + storageFolders[1].CapacityRemaining capacityUsed := capacity - capacityRemaining if capacity != oldCapacity { t.Error("new storage folders don't have the same size as the old storage folders") } if capacityRemaining != oldCapacityRemaining { t.Error("capacity remaining statistics do not match up", capacityRemaining/modules.SectorSize, oldCapacityRemaining/modules.SectorSize) } if oldUsed != capacityUsed { t.Error("storage folders have different usage values", capacityUsed/modules.SectorSize, oldUsed/modules.SectorSize) } } Sia-1.3.0/modules/host/persist_test.go000066400000000000000000000040421313565667000177470ustar00rootroot00000000000000package host import ( "path/filepath" "testing" "github.com/NebulousLabs/Sia/modules" ) // TestHostContractCountPersistence checks that the host persists its contract // counts correctly. func TestHostContractCountPersistence(t *testing.T) { if testing.Short() { t.SkipNow() } ht, err := newHostTester(t.Name()) if err != nil { t.Fatal(err) } // add a storage obligation, which should increment contract count so, err := ht.newTesterStorageObligation() if err != nil { t.Fatal(err) } ht.host.managedLockStorageObligation(so.id()) err = ht.host.managedAddStorageObligation(so) if err != nil { t.Fatal(err) } ht.host.managedUnlockStorageObligation(so.id()) // should have 1 contract now if ht.host.financialMetrics.ContractCount != 1 { t.Fatal("expected one contract, got", ht.host.financialMetrics.ContractCount) } // reload the host err = ht.host.Close() if err != nil { t.Fatal(err) } ht.host, err = New(ht.cs, ht.tpool, ht.wallet, "localhost:0", filepath.Join(ht.persistDir, modules.HostDir)) if err != nil { t.Fatal(err) } // contract count should still be 1 if ht.host.financialMetrics.ContractCount != 1 { t.Fatal("expected one contract, got", ht.host.financialMetrics.ContractCount) } } // TestHostAddressPersistence checks that the host persists any updates to the // address upon restart. func TestHostAddressPersistence(t *testing.T) { if testing.Short() { t.SkipNow() } ht, err := newHostTester(t.Name()) if err != nil { t.Fatal(err) } // Set the address of the host. settings := ht.host.InternalSettings() settings.NetAddress = "foo.com:234" err = ht.host.SetInternalSettings(settings) if err != nil { t.Fatal(err) } // Reboot the host. err = ht.host.Close() if err != nil { t.Fatal(err) } ht.host, err = New(ht.cs, ht.tpool, ht.wallet, "localhost:0", filepath.Join(ht.persistDir, modules.HostDir)) if err != nil { t.Fatal(err) } // Verify that the address persisted. if ht.host.settings.NetAddress != "foo.com:234" { t.Error("User-set address does not seem to be persisting.") } } Sia-1.3.0/modules/host/storageobligations.go000066400000000000000000001204701313565667000211220ustar00rootroot00000000000000package host // storageobligations.go is responsible for managing the storage obligations // within the host - making sure that any file contracts, transaction // dependencies, file contract revisions, and storage proofs are making it into // the blockchain in a reasonable time. // // NOTE: Currently, the code partially supports changing the storage proof // window in file contract revisions; however, the action item code will not // handle it correctly.
Until the action item code is improved (to also handle // byzantine situations where the renter submits prior revisions), the host // should not support changing the storage proof window, especially to one // further in the future. // TODO: Need to queue the action item for checking on the submission status of // the file contract revision. Also need to make sure that multiple actions are // being taken if needed. // TODO: Make sure that the origin transaction set is not submitted to the // transaction pool before addSO is called - if it is, there will be a // duplicate transaction error, and then the storage obligation will return an // error, which is bad. Well, or perhaps we just need better logic // handling. // TODO: Need to make sure that 'revision confirmed' is actually looking only // at the most recent revision (I think it is...) // TODO: Make sure that not too many action items are being created. import ( "encoding/binary" "encoding/json" "errors" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) const ( obligationUnresolved storageObligationStatus = iota // Indicates that an uninitialized value was used. obligationRejected // Indicates that the obligation never got started, no revenue gained or lost. obligationSucceeded // Indicates that the obligation was completed, revenues were gained. obligationFailed // Indicates that the obligation failed, revenues and collateral were lost. ) var ( // errDuplicateStorageObligation is returned when the storage obligation // database already has a storage obligation with the provided file // contract. This error should only happen in the event of a developer // mistake. errDuplicateStorageObligation = errors.New("storage obligation has a file contract which conflicts with an existing storage obligation") // errInsaneFileContractOutputCounts is returned when a file contract has // the wrong number of outputs for either the valid or missed payouts. errInsaneFileContractOutputCounts = errors.New("file contract has incorrect number of outputs for the valid or missed payouts") // errInsaneFileContractRevisionOutputCounts is returned when a file // contract revision has the wrong number of outputs for either the valid // or missed payouts. errInsaneFileContractRevisionOutputCounts = errors.New("file contract revision has incorrect number of outputs for the valid or missed payouts") // errInsaneOriginSetFileContract is returned if the final transaction of // the origin transaction set of a storage obligation does not have a file // contract in the final transaction - there should be a file contract // associated with every storage obligation. errInsaneOriginSetFileContract = errors.New("origin transaction set of storage obligation should have one file contract in the final transaction") // errInsaneOriginSetSize is returned if the origin transaction set of a // storage obligation is empty - there should be a file contract associated // with every storage obligation. errInsaneOriginSetSize = errors.New("origin transaction set of storage obligation is size zero") // errInsaneRevisionSetRevisionCount is returned if the final transaction // in the revision transaction set of a storage obligation has more or less // than one file contract revision.
errInsaneRevisionSetRevisionCount = errors.New("revision transaction set of storage obligation should have one file contract revision in the final transaction") // errInsaneStorageObligationRevision is returned if there is an attempted // storage obligation revision which does not have sensical inputs. errInsaneStorageObligationRevision = errors.New("revision to storage obligation does not make sense") // errInsaneStorageObligationRevisionData is returned if there is an // attempted storage obligation revision which does not have sensical // inputs. errInsaneStorageObligationRevisionData = errors.New("revision to storage obligation has insane data") // errObligationUnlocked is returned when a storage obligation is being // removed from lock, but is already unlocked. errObligationUnlocked = errors.New("storage obligation is unlocked, and should not be getting unlocked") // errNoBuffer is returned if there is an attempted storage obligation that // needs to have the storage proof submitted in less than // revisionSubmissionBuffer blocks. errNoBuffer = errors.New("file contract rejected because storage proof window is too close") // errNoStorageObligation is returned if the requested storage obligation // is not found in the database. errNoStorageObligation = errors.New("storage obligation not found in database") ) type storageObligationStatus uint64 // storageObligation contains all of the metadata related to a file contract // and the storage contained by the file contract. type storageObligation struct { // Storage obligations are broken up into ordered atomic sectors that are // exactly 4MiB each. By saving the roots of each sector, storage proofs // and modifications to the data can be made inexpensively by making use of // the merkletree.CachedTree. Sectors can be appended, modified, or deleted // and the host can recompute the Merkle root of the whole file without // much computational or I/O expense. SectorRoots []crypto.Hash // Variables about the file contract that enforces the storage obligation. // The origin and revision transactions are stored as sets, where each set // contains potentially unconfirmed transactions. ContractCost types.Currency LockedCollateral types.Currency PotentialDownloadRevenue types.Currency PotentialStorageRevenue types.Currency PotentialUploadRevenue types.Currency RiskedCollateral types.Currency TransactionFeesAdded types.Currency // The negotiation height specifies the block height at which the file // contract was negotiated. If the origin transaction set is not accepted // onto the blockchain quickly enough, the contract is pruned from the // host. The origin and revision transaction set contain the contracts + // revisions as well as all parent transactions. The parents are necessary // because after a restart the transaction pool may be emptied out. NegotiationHeight types.BlockHeight OriginTransactionSet []types.Transaction RevisionTransactionSet []types.Transaction // Variables indicating whether the critical transactions in a storage // obligation have been confirmed on the blockchain. OriginConfirmed bool RevisionConstructed bool RevisionConfirmed bool ProofConstructed bool ProofConfirmed bool ObligationStatus storageObligationStatus } // getStorageObligation fetches a storage obligation from the database tx.
func getStorageObligation(tx *bolt.Tx, soid types.FileContractID) (so storageObligation, err error) { soBytes := tx.Bucket(bucketStorageObligations).Get(soid[:]) if soBytes == nil { return storageObligation{}, errNoStorageObligation } err = json.Unmarshal(soBytes, &so) if err != nil { return storageObligation{}, err } return so, nil } // putStorageObligation places a storage obligation into the database, // overwriting the existing storage obligation if there is one. func putStorageObligation(tx *bolt.Tx, so storageObligation) error { soBytes, err := json.Marshal(so) if err != nil { return err } soid := so.id() return tx.Bucket(bucketStorageObligations).Put(soid[:], soBytes) } // expiration returns the height at which the storage obligation expires. func (so storageObligation) expiration() types.BlockHeight { if len(so.RevisionTransactionSet) > 0 { return so.RevisionTransactionSet[len(so.RevisionTransactionSet)-1].FileContractRevisions[0].NewWindowStart } return so.OriginTransactionSet[len(so.OriginTransactionSet)-1].FileContracts[0].WindowStart } // fileSize returns the size of the data protected by the obligation. func (so storageObligation) fileSize() uint64 { if len(so.RevisionTransactionSet) > 0 { return so.RevisionTransactionSet[len(so.RevisionTransactionSet)-1].FileContractRevisions[0].NewFileSize } return so.OriginTransactionSet[len(so.OriginTransactionSet)-1].FileContracts[0].FileSize } // id returns the id of the storage obligation, which is defined by the file // contract id of the file contract that governs the storage contract. func (so storageObligation) id() types.FileContractID { return so.OriginTransactionSet[len(so.OriginTransactionSet)-1].FileContractID(0) } // isSane checks that required assumptions about the storage obligation are // correct. func (so storageObligation) isSane() error { // There should be an origin transaction set. if len(so.OriginTransactionSet) == 0 { build.Critical("origin transaction set is empty") return errInsaneOriginSetSize } // The final transaction of the origin transaction set should have one file // contract. final := len(so.OriginTransactionSet) - 1 fcCount := len(so.OriginTransactionSet[final].FileContracts) if fcCount != 1 { build.Critical("wrong number of file contracts associated with storage obligation:", fcCount) return errInsaneOriginSetFileContract } // The file contract in the final transaction of the origin transaction set // should have two valid proof outputs and two missed proof outputs. lenVPOs := len(so.OriginTransactionSet[final].FileContracts[0].ValidProofOutputs) lenMPOs := len(so.OriginTransactionSet[final].FileContracts[0].MissedProofOutputs) if lenVPOs != 2 || lenMPOs != 2 { build.Critical("file contract has wrong number of VPOs and MPOs, expecting 2 each:", lenVPOs, lenMPOs) return errInsaneFileContractOutputCounts } // If there is a revision transaction set, there should be one file // contract revision in the final transaction. if len(so.RevisionTransactionSet) > 0 { final = len(so.RevisionTransactionSet) - 1 fcrCount := len(so.RevisionTransactionSet[final].FileContractRevisions) if fcrCount != 1 { build.Critical("wrong number of file contract revisions in final transaction of revision transaction set:", fcrCount) return errInsaneRevisionSetRevisionCount } // The file contract revision in the final transaction of the revision // transaction set should have two valid proof outputs and two missed // proof outputs.
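// (Two outputs are expected because, by convention, the first payout
// returns funds to the renter and the second compensates the host; a
// contract or revision with any other output count cannot be
// interpreted by the host's payment logic.)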
lenVPOs = len(so.RevisionTransactionSet[final].FileContractRevisions[0].NewValidProofOutputs) lenMPOs = len(so.RevisionTransactionSet[final].FileContractRevisions[0].NewMissedProofOutputs) if lenVPOs != 2 || lenMPOs != 2 { build.Critical("file contract has wrong number of VPOs and MPOs, expecting 2 each:", lenVPOs, lenMPOs) return errInsaneFileContractRevisionOutputCounts } } return nil } // merkleRoot returns the file merkle root of a storage obligation. func (so storageObligation) merkleRoot() crypto.Hash { if len(so.RevisionTransactionSet) > 0 { return so.RevisionTransactionSet[len(so.RevisionTransactionSet)-1].FileContractRevisions[0].NewFileMerkleRoot } return so.OriginTransactionSet[len(so.OriginTransactionSet)-1].FileContracts[0].FileMerkleRoot } // payouts returns the set of valid payouts and missed payouts that represent // the latest revision for the storage obligation. func (so storageObligation) payouts() (valid []types.SiacoinOutput, missed []types.SiacoinOutput) { valid = make([]types.SiacoinOutput, 2) missed = make([]types.SiacoinOutput, 2) if len(so.RevisionTransactionSet) > 0 { copy(valid, so.RevisionTransactionSet[len(so.RevisionTransactionSet)-1].FileContractRevisions[0].NewValidProofOutputs) copy(missed, so.RevisionTransactionSet[len(so.RevisionTransactionSet)-1].FileContractRevisions[0].NewMissedProofOutputs) return } copy(valid, so.OriginTransactionSet[len(so.OriginTransactionSet)-1].FileContracts[0].ValidProofOutputs) copy(missed, so.OriginTransactionSet[len(so.OriginTransactionSet)-1].FileContracts[0].MissedProofOutputs) return } // proofDeadline returns the height by which the storage proof must be // submitted. func (so storageObligation) proofDeadline() types.BlockHeight { if len(so.RevisionTransactionSet) > 0 { return so.RevisionTransactionSet[len(so.RevisionTransactionSet)-1].FileContractRevisions[0].NewWindowEnd } return so.OriginTransactionSet[len(so.OriginTransactionSet)-1].FileContracts[0].WindowEnd } // value returns the value of fulfilling the storage obligation to the host. func (so storageObligation) value() types.Currency { return so.ContractCost.Add(so.PotentialDownloadRevenue).Add(so.PotentialStorageRevenue).Add(so.PotentialUploadRevenue).Add(so.RiskedCollateral) } // queueActionItem adds an action item to the host at the input height so that // the host knows to perform maintenance on the associated storage obligation // when that height is reached. func (h *Host) queueActionItem(height types.BlockHeight, id types.FileContractID) error { // Sanity check - action item should be at a higher height than the current // block height. if height <= h.blockHeight { h.log.Critical("action item queued improperly") } return h.db.Update(func(tx *bolt.Tx) error { // Translate the height into a byte slice. heightBytes := make([]byte, 8) binary.BigEndian.PutUint64(heightBytes, uint64(height)) // Get the list of action items already at this height and extend it. bai := tx.Bucket(bucketActionItems) existingItems := bai.Get(heightBytes) var extendedItems = make([]byte, len(existingItems), len(existingItems)+len(id[:])) copy(extendedItems, existingItems) extendedItems = append(extendedItems, id[:]...) return bai.Put(heightBytes, extendedItems) }) } // managedAddStorageObligation adds a storage obligation to the host. Because // this operation can return errors, the transactions should not be submitted to // the blockchain until after this function has indicated success.
All of the // sectors that are present in the storage obligation should already be on disk, // which means that addStorageObligation should be exclusively called when // creating a new, empty file contract or when renewing an existing file // contract. func (h *Host) managedAddStorageObligation(so storageObligation) error { var soid types.FileContractID err := func() error { h.mu.Lock() defer h.mu.Unlock() // Sanity check - obligation should be under lock while being added. soid = so.id() _, exists := h.lockedStorageObligations[soid] if !exists { h.log.Critical("addStorageObligation called with an obligation that is not locked") } // Sanity check - there needs to be enough time left on the file contract // for the host to safely submit the file contract revision. if h.blockHeight+revisionSubmissionBuffer >= so.expiration() { h.log.Critical("submission window was not verified before trying to submit a storage obligation") return errNoBuffer } // Sanity check - the resubmission timeout needs to be smaller than the // storage proof window. if so.expiration()+resubmissionTimeout >= so.proofDeadline() { h.log.Critical("host is misconfigured - the storage proof window needs to be long enough to resubmit if needed") return errors.New("storage proof window is not large enough to safely resubmit the storage proof") } // Add the storage obligation information to the database. err := h.db.Update(func(tx *bolt.Tx) error { // Sanity check - a storage obligation using the same file contract id // should not already exist. This situation can happen if the // transaction pool ejects a file contract and then a new one is // created. Though the file contract will have the same terms, some // other conditions might cause problems. The check for duplicate file // contract ids should happen during the negotiation phase, and not // during the 'addStorageObligation' phase. bso := tx.Bucket(bucketStorageObligations) // If the storage obligation already has sectors, it means that the // file contract is being renewed, and that the sectors should be // re-added with a new expiration height. If there is an error at any // point, all of the sectors should be removed. if len(so.SectorRoots) != 0 { err := h.AddSectorBatch(so.SectorRoots) if err != nil { return err } } // Add the storage obligation to the database. soBytes, err := json.Marshal(so) if err != nil { return err } return bso.Put(soid[:], soBytes) }) if err != nil { return err } // Update the host financial metrics with regards to this storage // obligation. h.financialMetrics.ContractCount++ h.financialMetrics.PotentialContractCompensation = h.financialMetrics.PotentialContractCompensation.Add(so.ContractCost) h.financialMetrics.LockedStorageCollateral = h.financialMetrics.LockedStorageCollateral.Add(so.LockedCollateral) h.financialMetrics.PotentialStorageRevenue = h.financialMetrics.PotentialStorageRevenue.Add(so.PotentialStorageRevenue) h.financialMetrics.PotentialDownloadBandwidthRevenue = h.financialMetrics.PotentialDownloadBandwidthRevenue.Add(so.PotentialDownloadRevenue) h.financialMetrics.PotentialUploadBandwidthRevenue = h.financialMetrics.PotentialUploadBandwidthRevenue.Add(so.PotentialUploadRevenue) h.financialMetrics.RiskedStorageCollateral = h.financialMetrics.RiskedStorageCollateral.Add(so.RiskedCollateral) h.financialMetrics.TransactionFeeExpenses = h.financialMetrics.TransactionFeeExpenses.Add(so.TransactionFeesAdded) return nil }() if err != nil { return err } // Check that the transaction is fully valid and submit it to the // transaction pool.
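// (AcceptTransactionSet doubles as the validity check here: the
// transaction pool verifies the whole set against the current consensus
// set before accepting it, so an error at this point means the file
// contract is unlikely to ever be confirmed on the blockchain.)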
err = h.tpool.AcceptTransactionSet(so.OriginTransactionSet) if err != nil { h.log.Println("Failed to add storage obligation, transaction set was not accepted:", err) return err } // Queue the action items. h.mu.Lock() defer h.mu.Unlock() // The file contract was already submitted to the blockchain, need to check // after the resubmission timeout that it was submitted successfully. err1 := h.queueActionItem(h.blockHeight+resubmissionTimeout, soid) err2 := h.queueActionItem(h.blockHeight+resubmissionTimeout*2, soid) // Paranoia // Queue an action item to submit the file contract revision - if there is // never a file contract revision, the handling of this action item will be // a no-op. err3 := h.queueActionItem(so.expiration()-revisionSubmissionBuffer, soid) err4 := h.queueActionItem(so.expiration()-revisionSubmissionBuffer+resubmissionTimeout, soid) // Paranoia // The storage proof should be submitted. err5 := h.queueActionItem(so.expiration()+resubmissionTimeout, soid) err6 := h.queueActionItem(so.expiration()+resubmissionTimeout*2, soid) // Paranoia err = composeErrors(err1, err2, err3, err4, err5, err6) if err != nil { h.log.Println("Error with transaction set, redacting obligation, id", so.id()) return composeErrors(err, h.removeStorageObligation(so, obligationRejected)) } return nil } // modifyStorageObligation will take an updated storage obligation along with a // list of sector changes and update the database to account for all of it. The // sector modifications are only used to update the sector database; they will // not be used to modify the storage obligation (most importantly, this means // that sectorRoots needs to be updated by the calling function). Virtual // sectors will be removed the number of times that they are listed; to remove // multiple instances of the same virtual sector, the virtual sector will need // to appear in 'sectorsRemoved' multiple times. Same with 'sectorsGained'. func (h *Host) modifyStorageObligation(so storageObligation, sectorsRemoved []crypto.Hash, sectorsGained []crypto.Hash, gainedSectorData [][]byte) error { // Sanity check - obligation should be under lock while being modified. soid := so.id() _, exists := h.lockedStorageObligations[soid] if !exists { h.log.Critical("modifyStorageObligation called with an obligation that is not locked") } // Sanity check - there needs to be enough time to submit the file contract // revision to the blockchain. if so.expiration()-revisionSubmissionBuffer <= h.blockHeight { return errNoBuffer } // Sanity check - sectorsGained and gainedSectorData need to have the same length. if len(sectorsGained) != len(gainedSectorData) { h.log.Critical("modifying a revision with garbage sector data", len(sectorsGained), len(gainedSectorData)) return errInsaneStorageObligationRevision } // Sanity check - all of the sector data should be modules.SectorSize. for _, data := range gainedSectorData { if uint64(len(data)) != modules.SectorSize { h.log.Critical("modifying a revision with garbage sector sizes", len(data)) return errInsaneStorageObligationRevision } } // Note, for safe error handling, the operation order should be: add // sectors, update database, remove sectors. If the adding or update fails, // the added sectors should be removed and the storage obligation should be // considered invalid.
If the removing fails, this is okay; it's ignored // and left to consistency checks and user actions to fix (will reduce host // capacity, but will not inhibit the host's ability to submit storage // proofs) var i int var err error for i = range sectorsGained { err = h.AddSector(sectorsGained[i], gainedSectorData[i]) if err != nil { break } } if err != nil { // Because there was an error, all of the sectors that got added need // to be reverted. for j := 0; j < i; j++ { // Error is not checked because there's nothing useful that can be // done about an error. _ = h.RemoveSector(sectorsGained[j]) } return err } // Update the database to contain the new storage obligation. var oldSO storageObligation err = h.db.Update(func(tx *bolt.Tx) error { // Get the old storage obligation as a reference to know how to update // the host financial stats. oldSO, err = getStorageObligation(tx, soid) if err != nil { return err } // Store the new storage obligation to replace the old one. return putStorageObligation(tx, so) }) if err != nil { // Because there was an error, all of the sectors that got added need // to be reverted. for i := range sectorsGained { // Error is not checked because there's nothing useful that can be // done about an error. _ = h.RemoveSector(sectorsGained[i]) } return err } // Call removeSector for all of the sectors that have been removed. for k := range sectorsRemoved { // Error is not checked because there's nothing useful that can be // done about an error. Failing to remove a sector is not a terrible // place to be, especially if the host can run consistency checks. _ = h.RemoveSector(sectorsRemoved[k]) } // Update the financial information for the storage obligation - remove the // old values. h.financialMetrics.PotentialContractCompensation = h.financialMetrics.PotentialContractCompensation.Sub(oldSO.ContractCost) h.financialMetrics.LockedStorageCollateral = h.financialMetrics.LockedStorageCollateral.Sub(oldSO.LockedCollateral) h.financialMetrics.PotentialStorageRevenue = h.financialMetrics.PotentialStorageRevenue.Sub(oldSO.PotentialStorageRevenue) h.financialMetrics.PotentialDownloadBandwidthRevenue = h.financialMetrics.PotentialDownloadBandwidthRevenue.Sub(oldSO.PotentialDownloadRevenue) h.financialMetrics.PotentialUploadBandwidthRevenue = h.financialMetrics.PotentialUploadBandwidthRevenue.Sub(oldSO.PotentialUploadRevenue) h.financialMetrics.RiskedStorageCollateral = h.financialMetrics.RiskedStorageCollateral.Sub(oldSO.RiskedCollateral) h.financialMetrics.TransactionFeeExpenses = h.financialMetrics.TransactionFeeExpenses.Sub(oldSO.TransactionFeesAdded) // Update the financial information for the storage obligation - apply the // new values.
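// A sketch of the invariant maintained by the subtract/add pair around
// this comment: for every affected metrics field f,
//
//	h.financialMetrics.f = h.financialMetrics.f - oldSO.f + so.f
//
// so the aggregate metrics stay correct regardless of which fields the
// revision actually changed.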
h.financialMetrics.PotentialContractCompensation = h.financialMetrics.PotentialContractCompensation.Add(so.ContractCost) h.financialMetrics.LockedStorageCollateral = h.financialMetrics.LockedStorageCollateral.Add(so.LockedCollateral) h.financialMetrics.PotentialStorageRevenue = h.financialMetrics.PotentialStorageRevenue.Add(so.PotentialStorageRevenue) h.financialMetrics.PotentialDownloadBandwidthRevenue = h.financialMetrics.PotentialDownloadBandwidthRevenue.Add(so.PotentialDownloadRevenue) h.financialMetrics.PotentialUploadBandwidthRevenue = h.financialMetrics.PotentialUploadBandwidthRevenue.Add(so.PotentialUploadRevenue) h.financialMetrics.RiskedStorageCollateral = h.financialMetrics.RiskedStorageCollateral.Add(so.RiskedCollateral) h.financialMetrics.TransactionFeeExpenses = h.financialMetrics.TransactionFeeExpenses.Add(so.TransactionFeesAdded) return nil } // removeStorageObligation will remove a storage obligation from the host, // either due to failure or success. func (h *Host) removeStorageObligation(so storageObligation, sos storageObligationStatus) error { // Error is not checked, we want to call remove on every sector even if // there are problems - disk health information will be updated. _ = h.RemoveSectorBatch(so.SectorRoots) // Update the host revenue metrics based on the status of the obligation. if sos == obligationUnresolved { h.log.Critical("storage obligation 'unresolved' during call to removeStorageObligation, id", so.id()) } if sos == obligationRejected { if h.financialMetrics.TransactionFeeExpenses.Cmp(so.TransactionFeesAdded) >= 0 { h.financialMetrics.TransactionFeeExpenses = h.financialMetrics.TransactionFeeExpenses.Sub(so.TransactionFeesAdded) // Remove the obligation statistics as potential risk and income. h.log.Printf("Rejecting storage obligation expiring at block %v, current height is %v. Potential revenue is %v.\n", so.expiration(), h.blockHeight, h.financialMetrics.PotentialContractCompensation.Add(h.financialMetrics.PotentialStorageRevenue).Add(h.financialMetrics.PotentialDownloadBandwidthRevenue).Add(h.financialMetrics.PotentialUploadBandwidthRevenue)) h.financialMetrics.PotentialContractCompensation = h.financialMetrics.PotentialContractCompensation.Sub(so.ContractCost) h.financialMetrics.LockedStorageCollateral = h.financialMetrics.LockedStorageCollateral.Sub(so.LockedCollateral) h.financialMetrics.PotentialStorageRevenue = h.financialMetrics.PotentialStorageRevenue.Sub(so.PotentialStorageRevenue) h.financialMetrics.PotentialDownloadBandwidthRevenue = h.financialMetrics.PotentialDownloadBandwidthRevenue.Sub(so.PotentialDownloadRevenue) h.financialMetrics.PotentialUploadBandwidthRevenue = h.financialMetrics.PotentialUploadBandwidthRevenue.Sub(so.PotentialUploadRevenue) h.financialMetrics.RiskedStorageCollateral = h.financialMetrics.RiskedStorageCollateral.Sub(so.RiskedCollateral) } } if sos == obligationSucceeded { // Remove the obligation statistics as potential risk and income. h.log.Printf("Successfully submitted a storage proof. 
Revenue is %v.\n", h.financialMetrics.PotentialContractCompensation.Add(h.financialMetrics.PotentialStorageRevenue).Add(h.financialMetrics.PotentialDownloadBandwidthRevenue).Add(h.financialMetrics.PotentialUploadBandwidthRevenue)) h.financialMetrics.PotentialContractCompensation = h.financialMetrics.PotentialContractCompensation.Sub(so.ContractCost) h.financialMetrics.LockedStorageCollateral = h.financialMetrics.LockedStorageCollateral.Sub(so.LockedCollateral) h.financialMetrics.PotentialStorageRevenue = h.financialMetrics.PotentialStorageRevenue.Sub(so.PotentialStorageRevenue) h.financialMetrics.PotentialDownloadBandwidthRevenue = h.financialMetrics.PotentialDownloadBandwidthRevenue.Sub(so.PotentialDownloadRevenue) h.financialMetrics.PotentialUploadBandwidthRevenue = h.financialMetrics.PotentialUploadBandwidthRevenue.Sub(so.PotentialUploadRevenue) h.financialMetrics.RiskedStorageCollateral = h.financialMetrics.RiskedStorageCollateral.Sub(so.RiskedCollateral) // Add the obligation statistics as actual income. h.financialMetrics.ContractCompensation = h.financialMetrics.ContractCompensation.Add(so.ContractCost) h.financialMetrics.StorageRevenue = h.financialMetrics.StorageRevenue.Add(so.PotentialStorageRevenue) h.financialMetrics.DownloadBandwidthRevenue = h.financialMetrics.DownloadBandwidthRevenue.Add(so.PotentialDownloadRevenue) h.financialMetrics.UploadBandwidthRevenue = h.financialMetrics.UploadBandwidthRevenue.Add(so.PotentialUploadRevenue) } if sos == obligationFailed { // Remove the obligation statistics as potential risk and income. h.log.Printf("Missed storage proof. Revenue would have been %v.\n", h.financialMetrics.PotentialContractCompensation.Add(h.financialMetrics.PotentialStorageRevenue).Add(h.financialMetrics.PotentialDownloadBandwidthRevenue).Add(h.financialMetrics.PotentialUploadBandwidthRevenue)) h.financialMetrics.PotentialContractCompensation = h.financialMetrics.PotentialContractCompensation.Sub(so.ContractCost) h.financialMetrics.LockedStorageCollateral = h.financialMetrics.LockedStorageCollateral.Sub(so.LockedCollateral) h.financialMetrics.PotentialStorageRevenue = h.financialMetrics.PotentialStorageRevenue.Sub(so.PotentialStorageRevenue) h.financialMetrics.PotentialDownloadBandwidthRevenue = h.financialMetrics.PotentialDownloadBandwidthRevenue.Sub(so.PotentialDownloadRevenue) h.financialMetrics.PotentialUploadBandwidthRevenue = h.financialMetrics.PotentialUploadBandwidthRevenue.Sub(so.PotentialUploadRevenue) h.financialMetrics.RiskedStorageCollateral = h.financialMetrics.RiskedStorageCollateral.Sub(so.RiskedCollateral) // Add the obligation statistics as loss. h.financialMetrics.LostStorageCollateral = h.financialMetrics.LostStorageCollateral.Add(so.RiskedCollateral) h.financialMetrics.LostRevenue = h.financialMetrics.LostRevenue.Add(so.ContractCost).Add(so.PotentialStorageRevenue).Add(so.PotentialDownloadRevenue).Add(so.PotentialUploadRevenue) } // Update the storage obligation to be finalized but still in-database. The // obligation status is updated so that the user can see how the obligation // ended up, and the sector roots are removed because they are large // objects with little purpose once storage proofs are no longer needed. h.financialMetrics.ContractCount-- so.ObligationStatus = sos so.SectorRoots = nil return h.db.Update(func(tx *bolt.Tx) error { return putStorageObligation(tx, so) }) } // threadedHandleActionItem will look at a storage obligation and determine // which action is necessary for the storage obligation to succeed. 
func (h *Host) threadedHandleActionItem(soid types.FileContractID) { err := h.tg.Add() if err != nil { return } defer h.tg.Done() // Lock the storage obligation in question. h.managedLockStorageObligation(soid) defer func() { h.managedUnlockStorageObligation(soid) }() // Fetch the storage obligation associated with the storage obligation id. var so storageObligation h.mu.RLock() blockHeight := h.blockHeight err = h.db.View(func(tx *bolt.Tx) error { so, err = getStorageObligation(tx, soid) return err }) h.mu.RUnlock() if err != nil { h.log.Println("Could not get storage obligation:", err) return } // Check whether the storage obligation has already been completed. if so.ObligationStatus != obligationUnresolved { // Storage obligation has already been completed, skip action item. return } // Check whether the file contract has been seen. If not, resubmit and // queue another action item. Check for death. (signature should have a // kill height) if !so.OriginConfirmed { // Submit the transaction set again, try to get the transaction // confirmed. err := h.tpool.AcceptTransactionSet(so.OriginTransactionSet) if err != nil { h.log.Debugln("Could not get origin transaction set accepted", err) // Check if the transaction is invalid with the current consensus set. // If so, the transaction is highly unlikely to ever be confirmed, and // the storage obligation should be removed. This check should come // after logging the error so that the function can quit. // // TODO: If the host or tpool is behind consensus, might be difficult // to have certainty about the issue. If some but not all of the // parents are confirmed, might be some difficulty. _, t := err.(modules.ConsensusConflict) if t { h.log.Println("Consensus conflict on the origin transaction set, id", so.id()) h.mu.Lock() err = h.removeStorageObligation(so, obligationRejected) h.mu.Unlock() if err != nil { h.log.Println("Error removing storage obligation:", err) } return } } // Queue another action item to check the status of the transaction. h.mu.Lock() err = h.queueActionItem(h.blockHeight+resubmissionTimeout, so.id()) h.mu.Unlock() if err != nil { h.log.Println("Error queuing action item:", err) } } // Check if the file contract revision is ready for submission. Check for death. if !so.RevisionConfirmed && len(so.RevisionTransactionSet) > 0 && blockHeight >= so.expiration()-revisionSubmissionBuffer { // Sanity check - there should be a file contract revision. rtsLen := len(so.RevisionTransactionSet) if rtsLen < 1 || len(so.RevisionTransactionSet[rtsLen-1].FileContractRevisions) != 1 { h.log.Critical("transaction revision marked as unconfirmed, yet there is no transaction revision") return } // Check if the revision has failed to submit correctly. if blockHeight > so.expiration() { // TODO: Check this error. // // TODO: this is not quite right, because a previous revision may // be confirmed, and the origin transaction may be confirmed, which // would confuse the revenue stuff a bit. Might happen frequently // due to the dynamic fee pool. h.log.Println("Full time has elapsed, but the revision transaction could not be submitted to consensus, id", so.id()) h.mu.Lock() h.removeStorageObligation(so, obligationRejected) h.mu.Unlock() return } // Queue another action item to check the status of the transaction. h.mu.Lock() err := h.queueActionItem(blockHeight+resubmissionTimeout, so.id()) h.mu.Unlock() if err != nil { h.log.Println("Error queuing action item:", err) } // Add a miner fee to the transaction and submit it to the blockchain.
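// A sketch of the fee arithmetic below (numbers hypothetical): with a
// recommended fee f in hastings per byte and an estimated transaction
// size of len(encoding.MarshalAll(set)) + 300 bytes,
//
//	requiredFee := f.Mul64(txnSize)
//
// and the submission is abandoned up front whenever half the
// obligation's value is below the recommended per-byte fee.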
revisionTxnIndex := len(so.RevisionTransactionSet) - 1 revisionParents := so.RevisionTransactionSet[:revisionTxnIndex] revisionTxn := so.RevisionTransactionSet[revisionTxnIndex] builder := h.wallet.RegisterTransaction(revisionTxn, revisionParents) _, feeRecommendation := h.tpool.FeeEstimation() if so.value().Div64(2).Cmp(feeRecommendation) < 0 { // There's no sense submitting the revision if the fee is more than // half of the anticipated revenue - fee market went up // unexpectedly, and the money that the renter paid to cover the // fees is no longer enough. return } txnSize := uint64(len(encoding.MarshalAll(so.RevisionTransactionSet)) + 300) requiredFee := feeRecommendation.Mul64(txnSize) err = builder.FundSiacoins(requiredFee) if err != nil { h.log.Println("Error funding transaction fees", err) } builder.AddMinerFee(requiredFee) feeAddedRevisionTransactionSet, err := builder.Sign(true) if err != nil { h.log.Println("Error signing transaction", err) } err = h.tpool.AcceptTransactionSet(feeAddedRevisionTransactionSet) if err != nil { h.log.Println("Error submitting transaction to transaction pool", err) } so.TransactionFeesAdded = so.TransactionFeesAdded.Add(requiredFee) } // Check whether a storage proof is ready to be provided, and whether it // has been accepted. Check for death. if !so.ProofConfirmed && blockHeight >= so.expiration()+resubmissionTimeout { h.log.Debugln("Host is attempting a storage proof for", so.id()) // If the window has closed, the host has failed and the obligation can // be removed. if so.proofDeadline() < blockHeight || len(so.SectorRoots) == 0 { h.log.Debugln("storage proof not confirmed by deadline, id", so.id()) h.mu.Lock() err := h.removeStorageObligation(so, obligationFailed) h.mu.Unlock() if err != nil { h.log.Println("Error removing storage obligation:", err) } return } // Get the index of the segment, and the index of the sector containing // the segment. segmentIndex, err := h.cs.StorageProofSegment(so.id()) if err != nil { h.log.Debugln("Host got an error when fetching a storage proof segment:", err) return } sectorIndex := segmentIndex / (modules.SectorSize / crypto.SegmentSize) // Pull the corresponding sector into memory. sectorRoot := so.SectorRoots[sectorIndex] sectorBytes, err := h.ReadSector(sectorRoot) if err != nil { h.log.Debugln(err) return } // Build the storage proof for just the sector. sectorSegment := segmentIndex % (modules.SectorSize / crypto.SegmentSize) base, cachedHashSet := crypto.MerkleProof(sectorBytes, sectorSegment) // Using the sector, build a cached root. log2SectorSize := uint64(0) for 1<<log2SectorSize < (modules.SectorSize / crypto.SegmentSize) { log2SectorSize++ } ct := crypto.NewCachedTree(log2SectorSize) err = ct.SetIndex(segmentIndex) if err != nil { h.log.Critical("unable to set the storage proof index on the cached tree:", err) } for _, root := range so.SectorRoots { ct.Push(root) } hashSet := ct.Prove(base, cachedHashSet) sp := types.StorageProof{ ParentID: so.id(), HashSet: hashSet, } copy(sp.Segment[:], base) // Create and fund a transaction containing the storage proof. builder := h.wallet.StartTransaction() _, feeRecommendation := h.tpool.FeeEstimation() if so.value().Cmp(feeRecommendation) < 0 { // There's no sense submitting the storage proof if the fee is more // than the anticipated revenue. h.log.Debugln("Host not submitting storage proof due to a value that does not sufficiently exceed the fee cost") return } txnSize := uint64(len(encoding.Marshal(sp)) + 300) requiredFee := feeRecommendation.Mul64(txnSize) err = builder.FundSiacoins(requiredFee) if err != nil { h.log.Println("Error funding storage proof transaction fees", err) return } builder.AddMinerFee(requiredFee) builder.AddStorageProof(sp) storageProofSet, err := builder.Sign(true) if err != nil { h.log.Println("Error signing storage proof transaction", err) return } err = h.tpool.AcceptTransactionSet(storageProofSet) if err != nil { h.log.Println("Error submitting storage proof transaction to transaction pool", err) return } so.TransactionFeesAdded = so.TransactionFeesAdded.Add(requiredFee) // Queue another action item to check whether the storage proof got // confirmed. h.mu.Lock() err = h.queueActionItem(h.blockHeight+resubmissionTimeout, so.id()) h.mu.Unlock() if err != nil { h.log.Println("Error queuing action item:", err) } } // Save the storage obligation to account for any fee changes. err = h.db.Update(func(tx *bolt.Tx) error { soBytes, err := json.Marshal(so) if err != nil { return err } return tx.Bucket(bucketStorageObligations).Put(soid[:], soBytes) }) if err != nil { h.log.Println("Error updating the storage obligations", err) } // Check if all items have succeeded with the required confirmations. Report // success, delete the obligation. if so.ProofConfirmed && blockHeight >= so.proofDeadline() { h.log.Println("file contract complete, id", so.id()) h.mu.Lock() h.removeStorageObligation(so, obligationSucceeded) h.mu.Unlock() } } // StorageObligations fetches the set of storage obligations in the host and // returns metadata on them.
func (h *Host) StorageObligations() (sos []modules.StorageObligation) { h.mu.RLock() defer h.mu.RUnlock() err := h.db.View(func(tx *bolt.Tx) error { b := tx.Bucket(bucketStorageObligations) err := b.ForEach(func(idBytes, soBytes []byte) error { var so storageObligation err := json.Unmarshal(soBytes, &so) if err != nil { return build.ExtendErr("unable to unmarshal storage obligation:", err) } mso := modules.StorageObligation{ NegotiationHeight: so.NegotiationHeight, OriginConfirmed: so.OriginConfirmed, RevisionConstructed: so.RevisionConstructed, RevisionConfirmed: so.RevisionConfirmed, ProofConstructed: so.ProofConstructed, ProofConfirmed: so.ProofConfirmed, ObligationStatus: uint64(so.ObligationStatus), } sos = append(sos, mso) return nil }) if err != nil { return build.ExtendErr("ForEach failed to get next storage obligation:", err) } return nil }) if err != nil { h.log.Println(build.ExtendErr("database failed to provide storage obligations:", err)) } return sos } Sia-1.3.0/modules/host/storageobligations_smoke_test.go000066400000000000000000000563671313565667000233720ustar00rootroot00000000000000package host // storageobligations_smoke_test.go performs smoke testing on the storage // obligation management. This includes adding valid storage obligations, and // waiting until they expire, to see if the failure modes are all handled // correctly. import ( "testing" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" "github.com/NebulousLabs/bolt" ) // randSector creates a random sector, returning the sector along with the // Merkle root of the sector. func randSector() (crypto.Hash, []byte) { sectorData := fastrand.Bytes(int(modules.SectorSize)) sectorRoot := crypto.MerkleRoot(sectorData) return sectorRoot, sectorData } // newTesterStorageObligation uses the wallet to create and fund a file // contract that will form the foundation of a storage obligation. func (ht *hostTester) newTesterStorageObligation() (storageObligation, error) { // Create the file contract that will be used in the obligation. builder := ht.wallet.StartTransaction() // Fund the file contract with a payout. The payout needs to be big enough // that the expected revenue is larger than the fee that the host may end // up paying. payout := types.SiacoinPrecision.Mul64(1e3) err := builder.FundSiacoins(payout) if err != nil { return storageObligation{}, err } // Add the file contract that consumes the funds. _ = builder.AddFileContract(types.FileContract{ // Because this file contract needs to be able to accept file contract // revisions, the expiration is put more than // 'revisionSubmissionBuffer' blocks into the future. WindowStart: ht.host.blockHeight + revisionSubmissionBuffer + 2, WindowEnd: ht.host.blockHeight + revisionSubmissionBuffer + defaultWindowSize + 2, Payout: payout, ValidProofOutputs: []types.SiacoinOutput{ { Value: types.PostTax(ht.host.blockHeight, payout), }, { Value: types.ZeroCurrency, }, }, MissedProofOutputs: []types.SiacoinOutput{ { Value: types.PostTax(ht.host.blockHeight, payout), }, { Value: types.ZeroCurrency, }, }, UnlockHash: (types.UnlockConditions{}).UnlockHash(), RevisionNumber: 0, }) // Sign the transaction. tSet, err := builder.Sign(true) if err != nil { return storageObligation{}, err } // Assemble and return the storage obligation. so := storageObligation{ OriginTransactionSet: tSet, // TODO: There are no tracking values, because no fees were added.
} return so, nil } // TestBlankStorageObligation checks that the host correctly manages a blank // storage obligation. func TestBlankStorageObligation(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() ht, err := newHostTester("TestBlankStorageObligation") if err != nil { t.Fatal(err) } defer ht.Close() // The number of contracts reported by the host should be zero. fm := ht.host.FinancialMetrics() if fm.ContractCount != 0 { t.Error("host does not start with 0 contracts:", fm.ContractCount) } // Start by adding a storage obligation to the host. To emulate conditions // of a renter creating the first contract, the storage obligation has no // data, but does have money. so, err := ht.newTesterStorageObligation() if err != nil { t.Fatal(err) } ht.host.managedLockStorageObligation(so.id()) err = ht.host.managedAddStorageObligation(so) if err != nil { t.Fatal(err) } ht.host.managedUnlockStorageObligation(so.id()) // Storage obligation should not be marked as having the transaction // confirmed on the blockchain. if so.OriginConfirmed { t.Fatal("storage obligation should not yet be marked as confirmed, confirmation is on the way") } fm = ht.host.FinancialMetrics() if fm.ContractCount != 1 { t.Error("host should have 1 contract:", fm.ContractCount) } // Mine a block to confirm the transaction containing the storage // obligation. _, err = ht.miner.AddBlock() if err != nil { t.Fatal(err) } err = ht.host.tg.Flush() if err != nil { t.Fatal(err) } // Load the storage obligation from the database, see if it updated // correctly. err = ht.host.db.View(func(tx *bolt.Tx) error { so, err = getStorageObligation(tx, so.id()) if err != nil { return err } return nil }) if err != nil { t.Fatal(err) } if !so.OriginConfirmed { t.Fatal("origin transaction for storage obligation was not confirmed after a block was mined") } // Mine until the host would be submitting a storage proof. Check that the // host has cleared out the storage proof - the consensus code makes it // impossible to submit a storage proof for an empty file contract, so the // host should fail and give up by deleting the storage obligation. for i := types.BlockHeight(0); i <= revisionSubmissionBuffer*2+1; i++ { _, err := ht.miner.AddBlock() if err != nil { t.Fatal(err) } err = ht.host.tg.Flush() if err != nil { t.Fatal(err) } } err = ht.host.db.View(func(tx *bolt.Tx) error { so, err = getStorageObligation(tx, so.id()) if err != nil { return err } return nil }) if err != nil { t.Fatal(err) } fm = ht.host.FinancialMetrics() if fm.ContractCount != 0 { t.Error("host should have 0 contracts, the contracts were all completed:", fm.ContractCount) } } // TestSingleSectorStorageObligationStack checks that the host correctly manages // a storage obligation with a single sector; the revision is created in the // same block as the file contract. func TestSingleSectorStorageObligationStack(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() ht, err := newHostTester("TestSingleSectorStorageObligationStack") if err != nil { t.Fatal(err) } defer ht.Close() // Start by adding a storage obligation to the host. To emulate conditions // of a renter creating the first contract, the storage obligation has no // data, but does have money.
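// (Note the locking protocol used below: managedAddStorageObligation
// expects the obligation to already be held under lock by the caller,
// so the test brackets the call with managedLockStorageObligation and
// managedUnlockStorageObligation, just as production callers must.)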
so, err := ht.newTesterStorageObligation() if err != nil { t.Fatal(err) } ht.host.managedLockStorageObligation(so.id()) err = ht.host.managedAddStorageObligation(so) if err != nil { t.Fatal(err) } ht.host.managedUnlockStorageObligation(so.id()) // Storage obligation should not be marked as having the transaction // confirmed on the blockchain. if so.OriginConfirmed { t.Fatal("storage obligation should not yet be marked as confirmed, confirmation is on the way") } // Add a file contract revision, moving over a small amount of money to pay // for the file contract. sectorRoot, sectorData := randSector() so.SectorRoots = []crypto.Hash{sectorRoot} sectorCost := types.SiacoinPrecision.Mul64(550) so.PotentialStorageRevenue = so.PotentialStorageRevenue.Add(sectorCost) ht.host.financialMetrics.PotentialStorageRevenue = ht.host.financialMetrics.PotentialStorageRevenue.Add(sectorCost) validPayouts, missedPayouts := so.payouts() validPayouts[0].Value = validPayouts[0].Value.Sub(sectorCost) validPayouts[1].Value = validPayouts[1].Value.Add(sectorCost) missedPayouts[0].Value = missedPayouts[0].Value.Sub(sectorCost) missedPayouts[1].Value = missedPayouts[1].Value.Add(sectorCost) revisionSet := []types.Transaction{{ FileContractRevisions: []types.FileContractRevision{{ ParentID: so.id(), UnlockConditions: types.UnlockConditions{}, NewRevisionNumber: 1, NewFileSize: uint64(len(sectorData)), NewFileMerkleRoot: sectorRoot, NewWindowStart: so.expiration(), NewWindowEnd: so.proofDeadline(), NewValidProofOutputs: validPayouts, NewMissedProofOutputs: missedPayouts, NewUnlockHash: types.UnlockConditions{}.UnlockHash(), }}, }} ht.host.managedLockStorageObligation(so.id()) err = ht.host.modifyStorageObligation(so, nil, []crypto.Hash{sectorRoot}, [][]byte{sectorData}) if err != nil { t.Fatal(err) } ht.host.managedUnlockStorageObligation(so.id()) // Submit the revision set to the transaction pool. err = ht.tpool.AcceptTransactionSet(revisionSet) if err != nil { t.Fatal(err) } // Mine a block to confirm the transactions containing the file contract // and the file contract revision. _, err = ht.miner.AddBlock() if err != nil { t.Fatal(err) } // Load the storage obligation from the database, see if it updated // correctly. err = ht.host.db.View(func(tx *bolt.Tx) error { so, err = getStorageObligation(tx, so.id()) if err != nil { return err } return nil }) if err != nil { t.Fatal(err) } if !so.OriginConfirmed { t.Fatal("origin transaction for storage obligation was not confirmed after a block was mined") } if !so.RevisionConfirmed { t.Fatal("revision transaction for storage obligation was not confirmed after a block was mined") } // Mine until the host submits a storage proof. for i := ht.host.blockHeight; i <= so.expiration()+resubmissionTimeout; i++ { _, err := ht.miner.AddBlock() if err != nil { t.Fatal(err) } } // Flush the host - flush will block until the host has submitted the // storage proof to the transaction pool. err = ht.host.tg.Flush() if err != nil { t.Fatal(err) } // Mine another block, to get the storage proof from the transaction pool // into the blockchain. _, err = ht.miner.AddBlock() if err != nil { t.Fatal(err) } // Grab the storage proof and inspect the contents. 
err = ht.host.db.View(func(tx *bolt.Tx) error { so, err = getStorageObligation(tx, so.id()) if err != nil { return err } return nil }) if err != nil { t.Fatal(err) } if !so.OriginConfirmed { t.Fatal("origin transaction for storage obligation was not confirmed after a block was mined") } if !so.RevisionConfirmed { t.Fatal("revision transaction for storage obligation was not confirmed after a block was mined") } if !so.ProofConfirmed { t.Fatal("storage obligation is not saying that the storage proof was confirmed on the blockchain") } // Mine blocks until the storage proof has enough confirmations that the // host will finalize the obligation. for i := 0; i <= int(defaultWindowSize); i++ { _, err := ht.miner.AddBlock() if err != nil { t.Fatal(err) } } err = ht.host.db.View(func(tx *bolt.Tx) error { so, err = getStorageObligation(tx, so.id()) if err != nil { return err } if so.SectorRoots != nil { t.Error("sector roots were not cleared when the host finalized the obligation") } if so.ObligationStatus != obligationSucceeded { t.Error("obligation is not being reported as successful:", so.ObligationStatus) } return nil }) if err != nil { t.Fatal(err) } if !ht.host.financialMetrics.StorageRevenue.Equals(sectorCost) { t.Fatal("the host should be reporting revenue after a successful storage proof") } } // TestMultiSectorStorageObligationStack checks that the host correctly manages // a storage obligation with multiple sectors; the first revision is created in // the same block as the file contract. // // Unlike the SingleSector test, the multi sector test attempts to spread file // contract revisions over multiple blocks. func TestMultiSectorStorageObligationStack(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() ht, err := newHostTester("TestMultiSectorStorageObligationStack") if err != nil { t.Fatal(err) } defer ht.Close() // Start by adding a storage obligation to the host. To emulate conditions // of a renter creating the first contract, the storage obligation has no // data, but does have money. so, err := ht.newTesterStorageObligation() if err != nil { t.Fatal(err) } ht.host.managedLockStorageObligation(so.id()) err = ht.host.managedAddStorageObligation(so) if err != nil { t.Fatal(err) } ht.host.managedUnlockStorageObligation(so.id()) // Storage obligation should not be marked as having the transaction // confirmed on the blockchain. if so.OriginConfirmed { t.Fatal("storage obligation should not yet be marked as confirmed, confirmation is on the way") } // Deviation from SingleSector test - mine a block here to confirm the // storage obligation before a file contract revision is created. _, err = ht.miner.AddBlock() if err != nil { t.Fatal(err) } // Load the storage obligation from the database, see if it updated // correctly. err = ht.host.db.View(func(tx *bolt.Tx) error { so, err = getStorageObligation(tx, so.id()) if err != nil { return err } return nil }) if err != nil { t.Fatal(err) } if !so.OriginConfirmed { t.Fatal("origin transaction for storage obligation was not confirmed after a block was mined") } // Add a file contract revision, moving over a small amount of money to pay // for the file contract.
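// A sketch of the payout arithmetic used below: sectorCost is moved
// from output 0 to output 1 in both the valid and missed payout sets,
//
//	validPayouts[0].Value = validPayouts[0].Value.Sub(sectorCost)
//	validPayouts[1].Value = validPayouts[1].Value.Add(sectorCost)
//
// which, given the renter-then-host output convention, amounts to the
// renter paying the host for storing the sector.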

	// Add a file contract revision, moving over a small amount of money to pay
	// for the file contract.
	sectorRoot, sectorData := randSector()
	so.SectorRoots = []crypto.Hash{sectorRoot}
	sectorCost := types.SiacoinPrecision.Mul64(550)
	so.PotentialStorageRevenue = so.PotentialStorageRevenue.Add(sectorCost)
	ht.host.financialMetrics.PotentialStorageRevenue = ht.host.financialMetrics.PotentialStorageRevenue.Add(sectorCost)
	validPayouts, missedPayouts := so.payouts()
	validPayouts[0].Value = validPayouts[0].Value.Sub(sectorCost)
	validPayouts[1].Value = validPayouts[1].Value.Add(sectorCost)
	missedPayouts[0].Value = missedPayouts[0].Value.Sub(sectorCost)
	missedPayouts[1].Value = missedPayouts[1].Value.Add(sectorCost)
	revisionSet := []types.Transaction{{
		FileContractRevisions: []types.FileContractRevision{{
			ParentID:              so.id(),
			UnlockConditions:      types.UnlockConditions{},
			NewRevisionNumber:     1,
			NewFileSize:           uint64(len(sectorData)),
			NewFileMerkleRoot:     sectorRoot,
			NewWindowStart:        so.expiration(),
			NewWindowEnd:          so.proofDeadline(),
			NewValidProofOutputs:  validPayouts,
			NewMissedProofOutputs: missedPayouts,
			NewUnlockHash:         types.UnlockConditions{}.UnlockHash(),
		}},
	}}
	ht.host.managedLockStorageObligation(so.id())
	err = ht.host.modifyStorageObligation(so, nil, []crypto.Hash{sectorRoot}, [][]byte{sectorData})
	if err != nil {
		t.Fatal(err)
	}
	ht.host.managedUnlockStorageObligation(so.id())

	// Submit the revision set to the transaction pool.
	err = ht.tpool.AcceptTransactionSet(revisionSet)
	if err != nil {
		t.Fatal(err)
	}

	// Create a second file contract revision, which is going to be submitted
	// to the transaction pool after the first revision. Though in practice
	// this should never happen, we want to check that the transaction pool is
	// correctly handling multiple file contract revisions being submitted in
	// the same block cycle. This test will additionally tell us whether or
	// not the host can correctly handle building storage proofs for files
	// with multiple sectors.
	sectorRoot2, sectorData2 := randSector()
	so.SectorRoots = []crypto.Hash{sectorRoot, sectorRoot2}
	sectorCost2 := types.SiacoinPrecision.Mul64(650)
	so.PotentialStorageRevenue = so.PotentialStorageRevenue.Add(sectorCost2)
	ht.host.financialMetrics.PotentialStorageRevenue = ht.host.financialMetrics.PotentialStorageRevenue.Add(sectorCost2)
	validPayouts, missedPayouts = so.payouts()
	validPayouts[0].Value = validPayouts[0].Value.Sub(sectorCost2)
	validPayouts[1].Value = validPayouts[1].Value.Add(sectorCost2)
	missedPayouts[0].Value = missedPayouts[0].Value.Sub(sectorCost2)
	missedPayouts[1].Value = missedPayouts[1].Value.Add(sectorCost2)
	combinedSectors := append(sectorData, sectorData2...)
	combinedRoot := crypto.MerkleRoot(combinedSectors)
	revisionSet2 := []types.Transaction{{
		FileContractRevisions: []types.FileContractRevision{{
			ParentID:              so.id(),
			UnlockConditions:      types.UnlockConditions{},
			NewRevisionNumber:     2,
			NewFileSize:           uint64(len(sectorData) + len(sectorData2)),
			NewFileMerkleRoot:     combinedRoot,
			NewWindowStart:        so.expiration(),
			NewWindowEnd:          so.proofDeadline(),
			NewValidProofOutputs:  validPayouts,
			NewMissedProofOutputs: missedPayouts,
			NewUnlockHash:         types.UnlockConditions{}.UnlockHash(),
		}},
	}}
	ht.host.managedLockStorageObligation(so.id())
	err = ht.host.modifyStorageObligation(so, nil, []crypto.Hash{sectorRoot2}, [][]byte{sectorData2})
	if err != nil {
		t.Fatal(err)
	}
	ht.host.managedUnlockStorageObligation(so.id())

	// Submit the revision set to the transaction pool.
	err = ht.tpool.AcceptTransactionSet(revisionSet2)
	if err != nil {
		t.Fatal(err)
	}

	// Mine a block to confirm the transactions containing the file contract
	// and the file contract revision.
	_, err = ht.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}

	// Load the storage obligation from the database, see if it updated
	// correctly.
	err = ht.host.db.View(func(tx *bolt.Tx) error {
		so, err = getStorageObligation(tx, so.id())
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	if !so.OriginConfirmed {
		t.Fatal("origin transaction for storage obligation was not confirmed after a block was mined")
	}
	if !so.RevisionConfirmed {
		t.Fatal("revision transaction for storage obligation was not confirmed after a block was mined")
	}

	// Mine until the host submits a storage proof.
	for i := ht.host.blockHeight; i <= so.expiration()+resubmissionTimeout; i++ {
		_, err := ht.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
	}
	// Flush the host - flush will block until the host has submitted the
	// storage proof to the transaction pool.
	err = ht.host.tg.Flush()
	if err != nil {
		t.Fatal(err)
	}
	// Mine another block, to get the storage proof from the transaction pool
	// into the blockchain.
	_, err = ht.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}
	err = ht.host.db.View(func(tx *bolt.Tx) error {
		so, err = getStorageObligation(tx, so.id())
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	if !so.OriginConfirmed {
		t.Fatal("origin transaction for storage obligation was not confirmed after a block was mined")
	}
	if !so.RevisionConfirmed {
		t.Fatal("revision transaction for storage obligation was not confirmed after a block was mined")
	}
	if !so.ProofConfirmed {
		t.Fatal("storage obligation is not saying that the storage proof was confirmed on the blockchain")
	}

	// Mine blocks until the storage proof has enough confirmations that the
	// host will delete the file entirely.
	for i := 0; i <= int(defaultWindowSize); i++ {
		_, err := ht.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
	}
	err = ht.host.db.View(func(tx *bolt.Tx) error {
		so, err = getStorageObligation(tx, so.id())
		if err != nil {
			return err
		}
		if so.SectorRoots != nil {
			t.Error("sector roots were not cleared out when the storage proof was finalized")
		}
		if so.ObligationStatus != obligationSucceeded {
			t.Error("storage obligation was not reported as a success")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	if !ht.host.financialMetrics.StorageRevenue.Equals(sectorCost.Add(sectorCost2)) {
		t.Fatal("the host should be reporting revenue after a successful storage proof")
	}
}

// TestAutoRevisionSubmission checks that the host correctly submits a file
// contract revision to the consensus set.
func TestAutoRevisionSubmission(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	ht, err := newHostTester("TestAutoRevisionSubmission")
	if err != nil {
		t.Fatal(err)
	}
	defer ht.Close()

	// Start by adding a storage obligation to the host. To emulate conditions
	// of a renter creating the first contract, the storage obligation has no
	// data, but does have money.
	so, err := ht.newTesterStorageObligation()
	if err != nil {
		t.Fatal(err)
	}
	ht.host.managedLockStorageObligation(so.id())
	err = ht.host.managedAddStorageObligation(so)
	if err != nil {
		t.Fatal(err)
	}
	ht.host.managedUnlockStorageObligation(so.id())

	// Storage obligation should not be marked as having the transaction
	// confirmed on the blockchain.
	if so.OriginConfirmed {
		t.Fatal("storage obligation should not yet be marked as confirmed, confirmation is on the way")
	}

	// Add a file contract revision, moving over a small amount of money to pay
	// for the file contract.
	sectorRoot, sectorData := randSector()
	so.SectorRoots = []crypto.Hash{sectorRoot}
	sectorCost := types.SiacoinPrecision.Mul64(550)
	so.PotentialStorageRevenue = so.PotentialStorageRevenue.Add(sectorCost)
	ht.host.financialMetrics.PotentialStorageRevenue = ht.host.financialMetrics.PotentialStorageRevenue.Add(sectorCost)
	validPayouts, missedPayouts := so.payouts()
	validPayouts[0].Value = validPayouts[0].Value.Sub(sectorCost)
	validPayouts[1].Value = validPayouts[1].Value.Add(sectorCost)
	missedPayouts[0].Value = missedPayouts[0].Value.Sub(sectorCost)
	missedPayouts[1].Value = missedPayouts[1].Value.Add(sectorCost)
	revisionSet := []types.Transaction{{
		FileContractRevisions: []types.FileContractRevision{{
			ParentID:              so.id(),
			UnlockConditions:      types.UnlockConditions{},
			NewRevisionNumber:     1,
			NewFileSize:           uint64(len(sectorData)),
			NewFileMerkleRoot:     sectorRoot,
			NewWindowStart:        so.expiration(),
			NewWindowEnd:          so.proofDeadline(),
			NewValidProofOutputs:  validPayouts,
			NewMissedProofOutputs: missedPayouts,
			NewUnlockHash:         types.UnlockConditions{}.UnlockHash(),
		}},
	}}
	so.RevisionTransactionSet = revisionSet
	ht.host.managedLockStorageObligation(so.id())
	err = ht.host.modifyStorageObligation(so, nil, []crypto.Hash{sectorRoot}, [][]byte{sectorData})
	if err != nil {
		t.Fatal(err)
	}
	ht.host.managedUnlockStorageObligation(so.id())
	err = ht.host.tg.Flush()
	if err != nil {
		t.Fatal(err)
	}
	// Unlike the other tests, this test does not submit the file contract
	// revision to the transaction pool for the host; the host is expected to
	// do it automatically.

	// Mine until the host submits a storage proof.
	for i := types.BlockHeight(0); i <= revisionSubmissionBuffer+2+resubmissionTimeout; i++ {
		_, err := ht.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
		err = ht.host.tg.Flush()
		if err != nil {
			t.Fatal(err)
		}
	}
	// Flush the host - flush will block until the host has submitted the
	// storage proof to the transaction pool.
	err = ht.host.tg.Flush()
	if err != nil {
		t.Fatal(err)
	}
	// Mine another block, to get the storage proof from the transaction pool
	// into the blockchain.
	_, err = ht.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}
	err = ht.host.tg.Flush()
	if err != nil {
		t.Fatal(err)
	}

	err = ht.host.db.View(func(tx *bolt.Tx) error {
		so, err = getStorageObligation(tx, so.id())
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	if !so.OriginConfirmed {
		t.Fatal("origin transaction for storage obligation was not confirmed after blocks were mined")
	}
	if !so.RevisionConfirmed {
		t.Fatal("revision transaction for storage obligation was not confirmed after blocks were mined")
	}
	if !so.ProofConfirmed {
		t.Fatal("storage obligation is not saying that the storage proof was confirmed on the blockchain")
	}

	// Mine blocks until the storage proof has enough confirmations that the
	// host will delete the file entirely.
	for i := 0; i <= int(defaultWindowSize); i++ {
		_, err := ht.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
		err = ht.host.tg.Flush()
		if err != nil {
			t.Fatal(err)
		}
	}
	err = ht.host.db.View(func(tx *bolt.Tx) error {
		so, err = getStorageObligation(tx, so.id())
		if err != nil {
			return err
		}
		if so.SectorRoots != nil {
			t.Error("sector roots were not cleared out when the storage proof was finalized")
		}
		if so.ObligationStatus != obligationSucceeded {
			t.Error("storage obligation was not reported as a success")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	if !ht.host.financialMetrics.StorageRevenue.Equals(sectorCost) {
		t.Fatal("the host should be reporting revenue after a successful storage proof")
	}
}
Sia-1.3.0/modules/host/storageobligations_test.go000066400000000000000000000042061313565667000221570ustar00rootroot00000000000000
package host

import (
	"testing"

	"github.com/NebulousLabs/Sia/types"
)

// TestStorageObligationID checks that the id function of the storage
// obligation returns the correct value for the obligation id.
func TestStorageObligationID(t *testing.T) {
	t.Parallel()
	// Try a transaction set with just a file contract.
	so1 := &storageObligation{
		OriginTransactionSet: []types.Transaction{{
			FileContracts: []types.FileContract{{
				ValidProofOutputs: []types.SiacoinOutput{
					{
						UnlockHash: types.UnlockHash{2, 1, 3},
						Value:      types.NewCurrency64(35),
					},
					{
						UnlockHash: types.UnlockHash{0, 1, 3},
						Value:      types.NewCurrency64(25),
					},
				},
				MissedProofOutputs: []types.SiacoinOutput{
					{
						UnlockHash: types.UnlockHash{110, 1, 3},
						Value:      types.NewCurrency64(3325),
					},
					{
						UnlockHash: types.UnlockHash{110, 1, 3},
						Value:      types.NewCurrency64(8325),
					},
				},
			}},
		}},
	}
	if so1.id() != so1.OriginTransactionSet[0].FileContractID(0) {
		t.Error("id function of storage obligation is not correct")
	}

	// Try a file contract that includes file contract dependencies.
	so2 := &storageObligation{
		OriginTransactionSet: []types.Transaction{
			{
				SiacoinOutputs: []types.SiacoinOutput{{
					UnlockHash: types.UnlockHash{1, 3, 2},
					Value:      types.NewCurrency64(5),
				}},
			},
			{
				FileContracts: []types.FileContract{{
					ValidProofOutputs: []types.SiacoinOutput{
						{
							UnlockHash: types.UnlockHash{8, 11, 4},
							Value:      types.NewCurrency64(85),
						},
						{
							UnlockHash: types.UnlockHash{8, 11, 14},
							Value:      types.NewCurrency64(859),
						},
					},
					MissedProofOutputs: []types.SiacoinOutput{
						{
							UnlockHash: types.UnlockHash{8, 113, 4},
							Value:      types.NewCurrency64(853),
						},
						{
							UnlockHash: types.UnlockHash{8, 119, 14},
							Value:      types.NewCurrency64(9859),
						},
					},
				}},
			},
		},
	}
	if so2.id() != so2.OriginTransactionSet[1].FileContractID(0) {
		t.Error("id function of storage obligation incorrect for file contracts with dependencies")
	}
}
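
// For reference, a plausible sketch of the id method exercised by this test
// (the real implementation lives in storageobligations.go, which is not
// reproduced here, so treat this as an assumption rather than the canonical
// source): the obligation id is the id of file contract 0 in the final
// transaction of the origin transaction set, which is what both assertions
// above pin down.
//
//	func (so *storageObligation) id() types.FileContractID {
//		txns := so.OriginTransactionSet
//		return txns[len(txns)-1].FileContractID(0)
//	}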
Sia-1.3.0/modules/host/storageobligationslock.go000066400000000000000000000042611313565667000217720ustar00rootroot00000000000000
package host

import (
	"errors"

	"github.com/NebulousLabs/Sia/sync"
	"github.com/NebulousLabs/Sia/types"
)

var (
	// errObligationLocked is returned if the file contract being requested is
	// currently locked. The lock can be in place if there is a storage proof
	// being submitted, if there is another renter altering the contract, or if
	// there have been network connections which have not resolved yet.
	errObligationLocked = errors.New("the requested file contract is currently locked")

	// errObligationUnlocked is returned if the file contract being requested
	// is currently unlocked, but is expected to be locked.
	errObligationUnlocked = errors.New("the requested file contract is currently unlocked")
)

// managedLockStorageObligation puts a storage obligation under lock in the
// host.
func (h *Host) managedLockStorageObligation(soid types.FileContractID) {
	// Check if a lock has been created for this storage obligation. If not,
	// create one. The map must be accessed under lock, but the request for the
	// storage lock must not be made under lock.
	h.mu.Lock()
	tl, exists := h.lockedStorageObligations[soid]
	if !exists {
		tl = new(sync.TryMutex)
		h.lockedStorageObligations[soid] = tl
	}
	h.mu.Unlock()

	tl.Lock()
}

// managedTryLockStorageObligation attempts to put a storage obligation under
// lock, returning an error if the lock cannot be obtained.
func (h *Host) managedTryLockStorageObligation(soid types.FileContractID) error {
	// Check if a lock has been created for this storage obligation. If not,
	// create one. The map must be accessed under lock, but the request for the
	// storage lock must not be made under lock.
	h.mu.Lock()
	tl, exists := h.lockedStorageObligations[soid]
	if !exists {
		tl = new(sync.TryMutex)
		h.lockedStorageObligations[soid] = tl
	}
	h.mu.Unlock()

	if tl.TryLockTimed(obligationLockTimeout) {
		return nil
	}
	return errObligationLocked
}

// managedUnlockStorageObligation takes a storage obligation out from under
// lock in the host.
func (h *Host) managedUnlockStorageObligation(soid types.FileContractID) {
	// Fetch the lock for this storage obligation; it should already exist.
	// The map must be accessed under lock, but the storage lock itself must
	// not be released under lock. A missing entry means an unlock without a
	// matching lock, which is a developer error.
	h.mu.Lock()
	tl, exists := h.lockedStorageObligations[soid]
	if !exists {
		h.mu.Unlock()
		h.log.Critical(errObligationUnlocked)
		return
	}
	h.mu.Unlock()

	tl.Unlock()
}
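
// Usage sketch (illustrative, not part of the original file): callers are
// expected to bracket any mutation of a storage obligation between the
// managed lock calls above, as the tests in this package do:
//
//	h.managedLockStorageObligation(so.id())
//	err := h.modifyStorageObligation(so, nil, sectorRoots, sectorData)
//	h.managedUnlockStorageObligation(so.id())
//
// managedTryLockStorageObligation is the time-limited variant: rather than
// blocking indefinitely, it gives up after obligationLockTimeout and returns
// errObligationLocked.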
Sia-1.3.0/modules/host/storageobligationslock_test.go000066400000000000000000000054341313565667000230340ustar00rootroot00000000000000
package host

import (
	"testing"
	"time"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/types"
)

// TestObligationLocks checks that the storage obligation locking functions
// properly block and error out for various use cases.
func TestObligationLocks(t *testing.T) {
	if testing.Short() || !build.VLONG {
		t.SkipNow()
	}
	t.Parallel()
	ht, err := blankHostTester("TestObligationLocks")
	if err != nil {
		t.Fatal(err)
	}
	defer ht.Close()

	// Simple lock and unlock.
	ob1 := types.FileContractID{1}
	ht.host.managedLockStorageObligation(ob1)
	ht.host.managedUnlockStorageObligation(ob1)

	// Simple lock and unlock, with trylock.
	err = ht.host.managedTryLockStorageObligation(ob1)
	if err != nil {
		t.Fatal("unable to get lock despite not having a lock in place")
	}
	ht.host.managedUnlockStorageObligation(ob1)

	// Threaded lock and unlock.
	blockSuccessful := false
	ht.host.managedLockStorageObligation(ob1)
	go func() {
		time.Sleep(obligationLockTimeout * 2)
		blockSuccessful = true
		ht.host.managedUnlockStorageObligation(ob1)
	}()
	ht.host.managedLockStorageObligation(ob1)
	if !blockSuccessful {
		t.Error("two threads were able to simultaneously grab an obligation lock")
	}
	ht.host.managedUnlockStorageObligation(ob1)

	// Attempted lock and unlock - failed.
	ht.host.managedLockStorageObligation(ob1)
	go func() {
		time.Sleep(obligationLockTimeout * 2)
		ht.host.managedUnlockStorageObligation(ob1)
	}()
	err = ht.host.managedTryLockStorageObligation(ob1)
	if err != errObligationLocked {
		t.Fatal("storage obligation was able to get a lock, despite already being locked")
	}

	// Attempted lock and unlock - succeeded.
	ht.host.managedLockStorageObligation(ob1)
	go func() {
		time.Sleep(obligationLockTimeout / 2)
		ht.host.managedUnlockStorageObligation(ob1)
	}()
	err = ht.host.managedTryLockStorageObligation(ob1)
	if err != nil {
		t.Fatal("storage obligation unable to get lock, despite having enough time")
	}
	ht.host.managedUnlockStorageObligation(ob1)

	// Multiple locks and unlocks happening together.
	ob2 := types.FileContractID{2}
	ob3 := types.FileContractID{3}
	ht.host.managedLockStorageObligation(ob1)
	ht.host.managedLockStorageObligation(ob2)
	ht.host.managedLockStorageObligation(ob3)
	ht.host.managedUnlockStorageObligation(ob3)
	ht.host.managedUnlockStorageObligation(ob2)
	err = ht.host.managedTryLockStorageObligation(ob2)
	if err != nil {
		t.Fatal("unable to get lock despite not having a lock in place")
	}
	err = ht.host.managedTryLockStorageObligation(ob3)
	if err != nil {
		t.Fatal("unable to get lock despite not having a lock in place")
	}
	err = ht.host.managedTryLockStorageObligation(ob1)
	if err != errObligationLocked {
		t.Fatal("storage obligation was able to get a lock, despite already being locked")
	}
	ht.host.managedUnlockStorageObligation(ob1)
}
Sia-1.3.0/modules/host/testdata/000077500000000000000000000000001313565667000165015ustar00rootroot00000000000000
Sia-1.3.0/modules/host/testdata/v100Host.tar.gz000066400000000000000000000033641313565667000211620ustar00rootroot00000000000000[binary gzip data omitted]
Sia-1.3.0/modules/host/testdata/v110Host.tar.gz000066400000000000000000000033771313565667000211650ustar00rootroot00000000000000[binary gzip data omitted]
Lk.?{O.F$_EZ1ɘ{hl-Zj*Hs *V|#\''n0"-8Otb$s#nuy'﫳VD:NvEG_9-N|FޙDB2Nh -=T,K,:US07:aMXI9թIۃ lA_*'YV.ijך^`}/0[`c ,}“,sf0kUpkLֹX`JCDPouT2`,pe01G YܼXG@$0ݫT:Byj /<Px5V&B{ H;:FCNEhroFm,jVN ]xH;}Q" n*TN 6(Ylq ڣ8_ѿb}F}sNaCZdvߵoMSLG16 :)AzHdn' DE(hyjQ H?8FV[!P j򙓺LWn#_LXv 0 8ˌcԥb;#ڈSkcD`}V]pqb$D2}:Eo=yCHyetȃ4u\,yQܓaKōa''vY[::hDߜ!&$Cyي]|Om-~Dt?`FvB3aۗηOnݩ¢* Zݠmİk ռT5p@@XkRj'/Ix}-N솦LGl* )b?B3XD ^漺Ó\,fLU6uǿ!1~7p4ϭo[,識75XI#h}amo % t:!]'M>_f? \$m8 X?YwQb;jzMݟ/9xr5c. VIJA䊸RFN ;\J, V6abdhډ߭F}!D1߲煜;9#GH.f *[ n# P]{Р]ٳFv88kB=?;Cn$uQyWT22~ܟ3Gt4< ɔ:VC9`2D4rSuD٢wa#k,"3i `FHPTM-U'w3pew?ڀ՟OMp|3"Dnx_ hkSn0rF,9m=PsEޑz}NRTﮱSGc&rO95xPR"*`qhhNtt%j#xCQ?"fn[D jVugi<{{kh/5 rb5Xnga?8 baNL}"EI ŋMI}YoQ|mY DfeQ|K`+fcżyr<;ɱȳE"ϩJ:fOm]䉜Vr~kNLœfaHg{O+ACGTE#wQ<"A9-Ɉ漾e1&au`s *~^!6QpUE Hv؆n!Oش#rl$˅TK~S‚qÚD>,M}RzF!9,nd DY:C+.=١Wd%hV{) G&}<1S&q(NTES N(KWY@@ҩfM:cN|7:bnK1bNB>(ؽ-!.{%j$3vv|dIauqZfpG S8hr.:W1\i$?=;G'K8\I)F\d #>@Gv f9{jWqJ:aS2my";\4.[Pi{ޥ€,!bبEkZ* ;yctHҷ}x,NȐl=;v7 ~hz|q+hܚZ8ɉpc=$h@>4 Ԍ_D]%N-'?=4Nq*<ctҡiq\@&WJH6FZ/[m*tĄ{~=Z~@'_v36-w^x>$_)+u[[FґM;kj6fcUy+5'ȪڰFxh,-/ t*ȊKy j+d< 7 G@r`]7EIqO輻]΅= 6p61; P޳srPkYg)8 W7W=6'.2VXե 7Y7ΔK:RZc]TA `щhb %"_y2Qk)bjn5-HYA\c?BcBvؓo&x^^ޗ\0/zq}Aj;Hކ ueIs?a<œsxnɐH k iXo t754$^bWwsBG࿫r^CyZTT&:M!:d 5 ؔ%Y5sć|b1`n6 bOb6"m-T<h/byP/S1jƕ>1YD 36pڹn6"=u~~|< @ ||0ӒZ_['5_:0x7^0\d)"+1L79< eB$?پÂvU,XŲ`Pp=9N<.){A CP{폘CZb%&ەR͉vtڵ&S*s*,NV;UXJnS3N?eِ+ ) ~$ҍ]-6PCU%p%ɖLU5bi<};*!]TKQmƶ.c3 2)-;u:oDv8eCfTRAPO"=]K_7u",+*myQvR veLVW&e5c)#>>{x"az6*bg(w6WY*aF]7а) GcU5߅_'B,\x`?UCV;rڡ\=:T^dOԣ4Gcr  [;1ڳj#\u6Ց\.<F1R.>6v(hun7"AH$!!ͼ*![G2/ḽp)XtygK D^#OoSU‚ 'ԝOSbUzfBRE"`n>t&k{$؟#hޢJ?Wex^ 2,su*lM7vCqF=/:FWT5倸ñvIx0IküLR Y{Cz[CLpE ]6;E8)8ؕsެSق UnV3@Nl?`졕nESUz2Sm?D^Ǡ,z, O];Д:M]aTA[j2Js_VF1e`x?E"*bC-.[Aۀ6i5ew-U \٠-O] X^"*VL\S=|S2U]FutңMDW<&fIT $ېO]H52,ki8gB51ԍ09/hf}Q/*zC8駒zl; g\|mHi1<^r+-mkԙnGijCjJ{g"t _ӭV4+l(š4oh$.Htxg -n]B]?:<$ˆSG#t;<ĺ)3 =ԍDR8 =u*3Ғ hƯБ]̽Aabo7 }#MPM* 69:7xQy1HܝJ >"H횆`HS&n- 6vKj u1XzZfP=3|Qt<(Kn4G?&j|\o6:=+7!;4f̡$V0*ԏ;|yH6'faP Do#Zw :+\56gu>*,j\/A,?hR )!yi(`-ѿGp .K_1%Ÿw^BviE9$d̲|Ԙ}rx_r;5ƿewI/@)4XfӕDs?ǚJ%\ ]?M(Si$ Lŋ٠o_xC6/#a4y Y1:Nk |{١)Z ^Hcl\p۶m۶m۶m۶mv**vj4\D1QY~5XztZKHWW^gB%Fz;l~jǥ%{c_WZ@p&ansmc췘 <PoflzabnecN l߼ݓ 5CꗿZԇiP8H0]}ֱ3O%RȀ dh8 5il W[CFK=e^thqo&M5}3p SR3 ǸnucXӾ@ k.r)A9GHK_}/&EqMmkbF`iց줴zcm4ӸD jKA(йc7ajqGƈMȴnJ7 \Ї2q A9g+kރuURų',AMnEndӵ;+kx%=c:)+[Ca?b,`!U_0د+UjNinu>R*>/+X+Lie 68UJvk1ש^vNsJ{m)Y%)- U_E )f%gZ b`ƫ~E 41w<"DZX})튻뮇S[vz.9b:s|yTiD}jGɤCN:b'uAu6z꒛ prqE˽F)v)@Q! MÂY&jVL?kHTHފ)oj&mędM=GYI<9H|q}  ìsF= p yy++f%(r# Ÿzq['luylĊY>)qVMmr&X6"Q |~=h %EI_~iΜTK} .}MBiT)ZlhX~Xyq]Xh2ģ C4&iI4iI"%ێ2'vOFG;|I mcK>ى{^-+}Zg6N ¤"eWs&J#3-#Ph|A#G!/~}fPmX#b8όOhr6JkmXLqPu&'yJO9|z IX!pk/ԆcIsM ? X^] i~30jR>[fżOh2|HNb)Aw_EW6} $PD@(%$t[sb.rzFnTl;6c Qe:MKciW6y0Em`TPFOȔ֧\ت]\ƀz91I[`LjgW&mmzk鈾+GB%I*dvޗg{4rCECO6 FD"|F9eg)VxN m#{A(6(ԥPJН`$*'|6}(75d'O-dĩF=+#Уǻts_*b|}]$kiտ \|`2De{ؗSu\ ^s-BPO[x!L3}˄bK*>JKJhT=E;4߁S+`%=3@-ᙵghTZJWJJlvcRj//'yh8, Vۅ[iҹ|52t?xwH0&wɻ\*z!ywRLDlC8ovլ;t@(?6Y%CŰ#蒓-=dYy,c_nYس]` Ti\3חC+?+&0KRaR{ZǾ!ݳMG͓E*^\p~>!rdWKهzȨBR1@ر#^sY4bj]maypPD'L.V.{O ٮ)[92[|l[Wz 㚃ςg%Kg.b]_ڜֲĺ,}\ɫR8&SoKC8NR%l 2l͖}6<y1Vb(`0ޚ;NUq VFT|Z#y5.eicywf}Z1[.zH MƠsyM Q4aXwR(ϡ|7E/ה8x&nn(чk=26h{K#*^<_i95#O8xrhY dЋºt2Oɶa3 Bq8Voψtq<958VaM1_/M\B!$"AA)ȝc%^{1H jG>̚ B!B1[t#3A⭺!(qIs6LaL4B g rG'Iϊ#3ajk^BH".,bI(12 Fp8b^C6Iqe!pK6cT^{U 0blA6@J{w&m'R+G`~Z7R}u&<-RX LR7j^Ig0xYjUfF݋҆]^ wU*t) VbЛWUZ_IeE}8<&1Oi.-Kbr#G)Z\34ڠ0ΐ2Ī2s-"li I w/u濿"\xhR≫nܡ:YzY%pxۗ5S9E}@j>;vI&f~&aURyvx.'WF>&PwQ SWT$. 
ދ}NH۠Ƥ sS:jDʑsE\o}}K\XaB^5@༨/bU`*gQ@+u=`g\5aI}(Q~HYhBHwS[*1 /tWجY,U坩3[Sok ሗ8\g{I=Ňܺ-) ;q{̷al{h|A5tK=f1z")yU[ k/1kWr WĽlPEbO8v(*u՝^)69.T)Dċ Hl(bjT TA0}"̘3PKBFEywC[OY1CI%O9fV#c?\|Q&p@kʢQI*T+KN K^L'C$o&Pj`?7z?CnEPb?K O#v&<j/aRY-;u }BLJ6vR3 L"ll(';t]-<{_r,!)-b|oz'(ocL~{=9Z^ e0ZHYGS J6v L+o(KlfVK@em>|n)_"f&$ i({B9S(QVrZKD~h3hΚjv'̭U\3WBoTMjFc~gw2sAf֪rIƳ+|ީ2m CN;h_S΅aPZcX6 W?jQj1kt^ZL|٩\lcIlZ${ \|EPp '|QFqKx%SѶoAT:*bLS^$+aa=@a i<^F{9tjijY;z!0I!.}YX@ب;o1 >$fQb\W\ ]yG0☞LRwL)MnSsm"V*=A o*JwcӲzYlq{^߼9-e[zTsbJo&>« gV2u5>Udv_۸$D@i@nAwMxD)+͉D#• @~q DoJ`//Q )UO2̫ ^.q@1ݨy?HQi, *ȅg`-7g8vqD11D;p#_1V ܟptJ N[y= .uPYPtYNdB:=~%M9 X9Yc:ƽ }^;fnmwhh@7SR oUKLovL6;WZlo/< ^tE"SY 1b2iMcAcrX$L'TۜG B=}}7Nkxӱc(ӛ<)_a Μ*L]]j\=3|kxK">(ɆlOw>?=ɚZFQW\YnnqW';Vs \hY(hȷU=*D -f(j"ɮl=,8x-ݠh"'el"]>6"'$bC.=Y˪/B4} Y9AyPQa&-̱ ՊݕɫP / &g*$5\r^} AK)ި gE^32tԌHԊo7/1>YB9yV_FY{0B*@Kq }M%BƍkĊ]uG +[Z^F@٪Y^GWT(@˹FL2_oIh'KQUrSy*A[kȩG½ѢF33zZLY>pv;\VC&{ =^k':gmxsRsæRe쎒D%8e';֛~.kԷ3&6inAJ,:&ЮygOS yA'wTRIGӄ?i>N n5䬦++=il7mk;dvJv9rO.hf^.`~$B6爄KfBK^|jhuEKWQM`ūGA\Җ]fO5+ o,7 ҁ}FmiYC. 2e&olH%$csqy C9LNC3d@I2Ztjq`পS6Zp iX^~|U3ѡ6]HIG/]I:ݿɑ;֣s;el1|'.Xфv V3L53oЇf-a>![K4%=C!1Ig~@6vܡ=mkYx!6M2[Y i~J%$Zn {]0O:c e]# ׯLi ʱ(G0)Od"h( NraGX-L+^U4cfK,M7SE`{. 0xDW8!Ƹh?j H~`]ƅ(YE!B9?Ia\ŗ>Ʀ+2˴0a8LQ;iHWƭ<ٛ`<0g Z8}ۈEeCHltg(AUWQ֔CZ_Vy&4mudMcљC8e v?Ra_c_`!M_`egƻi=Nʥ|PKs)q5P-tmj)Ohc3kk[.ѧC%ez;ar)g)T}L n`))+p]ӋUvʷIaa4{] C />ՎE~:zzh{V?}*z *{m ﹄{F1߫}$w"$wÏ ;e68+0U%Su"eb{$d#Tl*NjR>73̼OcGI"ʅe\1[mO<;-KͲ9"[vGAI@Ie'66'j*b5է@8~wJŐ5ሜ{G#e[V_ SQ G~xиHgR?XdE,sB8yB/}8,n5.cdAu3維+߰2e2KƼ/b WgWh)"0d06-a0?O'.];ʃ?alMBXy-(}i jX8Wi2JȔGl 0o=b[BGC^f ^v'lBD b\BA٣]j;X& -O.k4>EAQ&!NJb~MYOp2C{gxQK+־RͮЀh:Vcȹ9_a;a7@x!9]e0>s^l]v'3|vO&g: TZ=ހ>0lR_3G"hz1U NpfDDG1KmD8xe{[CrqdM#Șmr 6YB5P֛"(6O,p%b1ϭu-@*2P`ZŚc;t~;T .%"! oĤ;A]LHݷC) O9oI?o:m)]@*t~#4WRH˒|nKoĔNpëMxV<1o5CM"S3Ǫvb(Fm(?(3G)l$ wFʮE9e1*[B;b%/yĘҋW Աe0CTo+F.:D<,mDMXd5{(vG;uf}B;k8)=Ӗ\p̶ZSp )UYPT]|-pW Θ"]=dkWp!wEH2NW8}Q`}H 4{s\qD[ ?kzBz:.yX>Wc9 #3gQd!7 ~Ԍ!JK4}Π9tӅH6ϑ:foHҮ˺ʎg} 3V p?m;  0Z۬*-MMit}m:_c5>sAkEW8 Kx!{âϠk @pS2| chƇn]_M:iG1q 5ERuB#qzi`-oh j~C/(,~P҂tGΔ?i_D$֙L,ԏ&{=8v2+MHI4ٕQN ]0F<洃416`s0up 4 #!Rb4c 3J0X;dD2CGdM8Ni,509seiLbe$~_) ߳ jgDPaS ɮ{p=t#".g,υB>!GWŚֱe6I]8mm7@/j&W?OHR?l%Qc- E'h|D?b` uk `*L$ ڞ!#8 I4!mȼ.s:0mCS1PLu)gA}?|Jp'jf րS_q-º/ַQ< Tb++ʽVϝU|RH[trP㚽nI'A%Wc ;#y1V\*tgs/iO^?辽y:=1%;I{OF?C;=5<_4DKKUJz:*"`yt7H,n[ЦRpEE8^5ȦX w^;A\aft-n4ޤa7#bS,Y%a$Y*}way3p'>Cgn2>؊~B,;ҌUݤ;\Pі{s#hrڑfPᆱ'Q_RBmTPGJXb E`Cc& .Bdtg<\Q4* añNnqXs]'俥s; J͟0xo.q8KyՔsY0S*VyrQ#ݵk ]EW(Eղޡ`GX%f\:|qtw]8*|Z9i wޔں^eZK+TI )k_T1[Kߊ>nMą>J1Gg2=w.H#@ + gfWpM BDTUY.zPg#g6'd5502"/NR II\ЮF77K(24XXUprkZi<?Jf^UIDF dx^5MP[Dk=Ln4b~?·z DSՠ.K j-yHi%\qʦ`0O2GT,JynǫsZu]}}2|_;AoK3jYnC|Qj0:O"H٩Pd0F;aAX\E$>jeǁ|TPst8ڠ3) lѻ{Yl~ ؞ eW̒Ѷb^rFed]ald)  KJLK#!w0veb_*T,t{6I ԅZMjrKK~ A Q \rE0V?h ¿9,S4=jawc2 hJR@BP ,w;Ζo#\KNB8c>#kJUOT! <U#0=8RMЈi(9R7앂a-DiY],/[ M!S9p^ezAl ;c-TI_XG7{b\!*yAo*/9y. @K_S|ҚU5 0V/~ XP#+x3}0f]Q|sFtz$.?>AܓOb9[VRYѸ~FRfѭp,]TioTGiB3[8BYjO`R#a@l\ ǥntͱ v(32|9\RCTmȕ(69+_J >DI<< zeo9<89h"9XZ {}\sb,B xM6wD(ǽA67; pUZ t Eݬy%N>r)mT, 9+LM뀮?IR{ZEӱwH  c6'/ 8%TxgQΑT z6[JM!rXpF͔)N 4_*1Wc*}Z5H?+9dDDJwz[J5 M7qUz6=fhPAoܠ~R Mz^p1;(]? 
>oǟUG%pW/x)VlYs<2*`Mڭ=|̢vSQ݂1BL9w`*H3~i{}"uqZX$:4NUjT#OmCi:/ɍ%POFuKMf/[$)'UcO19RwT#toD(wpN}#2WudgׄrܛeSοS ,ԘCBA|ZPb]ƪp{64~r*3 Ѳ42g{'~p|y*)RHk^9`{—ܫNTu d6Բ\=%9#)=)P| [Bܲz1 uVmL6t#UW܂nW # fK"qLWo*w E F!Z?c2Ǒ  }6Y5UPp(* u2µTQ1r r^:caoc`#I[7SJ ylJkwg-BW61BLo{0t4m\\@4edEC'.[pG `i@: a kL!W1fƚ]JKe1( kW@[v~"JS1' >{xk'&N(ϰŽɜX#@a BH:pB?erNm<{%RŐaAM#5­z޳"=|>sa$jh%V12u\ZhVOxRZ>ߜ{I>)I!tLYGƻ0%-S1=Gmihjqb2pװoЕ툵O@dPͭ=@:N> { ;-H#zaoCO_tؤ=% }9_綡$y/qIewHDA``i<Y^ t75 ق6h&N|?:XeC}*yRf!:`0+q{3x2G f4ol`) ¬J؉UWn;_ ײ˨?&!7LZ|'o.V7IvU4^{Z{Lޔh BXp_ 7TBj-锑(ꖚ)DUIbp̐ Ds]&PK/G\&=ir)X {ggи[0 d)/9Ok+e As&>@7-@Eݏ39ȪCZuOMLuھ~}6-ySn3y:}#-c_< X]4WadȾ$GeX5! Xζ`{;]}n`E;e+t;jf!9ѳ.pd6Agy++&lԪV)7w3ˣ$6pY#G~9/Sؒ4xjbe-߳qXCMn4FRrK;Z zL6nãͯ3ܦץ_L=`5Q`+~eܨ%0i8oz'fTpB:jg פIO]ƪ"R3P,Pޣ^=O"6-~H4rTO#F^;֘S̩f!v_i @j_N3V1 KC]U,,!*QȾ?`BňEq*a)E*>JJ`þ]'qf`yfz#/υ$wDMZ3QBzEX6Ǖ` V A @a=Lgt4 C/ztZm>=w>%Xׇ T0'saJ =)ۡj|8i ىc./C؊tӳ"&p8~E b`cE*zoԝ)(JFv4 T_W^ңIH'CqӞCM/˂Ƞ[!6#^{j~m4 MLyv~7Yz tYA T1&L."%nK7݋;6_ >F)3Pz~@nzeL{ޖj5vʐ(R=ԳBEr~z[m&h}dy@dzݘV+`Zb4yu3WR=?rg.8ZsD0P&Q'J̍_WհБƀCYJjH: M@TvmR1'Hp~϶Gr1bI(ERf8b9g=mfH~Լ^* D$Z5LJ7+>,t{`N)R>z/!7 eR<7RND^~Z.S aF|oJ)5ؚA$Z,.][@)LLrƇ"$JʝB aͽtX+mnTzvwFULLLD ,a|[kuq[+hO%l5PݓӿO&TniL 97P~(˜ ي;=YZy(;e<1̢Yu&D`1z> 5GE u!ǺI=Q孧\!g!(- ܷ t]|okQ0=./MDk|F$+E]FĴ>MįRav+\nӳ7 /Z]. (Pd2ܳV__Όx8ZC9vqr! l.{a,Zhi[lg@,ɂ&ۺ̙\ #{C%9:Y\3$O&B 9 hZ$9r|MoNwּ @ry)bS%ܶ't4"fDfe^b*9sT_jRZ'f"iWmFLWq34PqjL2T9Gɝ iU}{UP3.yTCgզ 5,6W =BC:]O'AZYb?@c=ӿx@zt fvJ~(%᛾ir&c+iS|X k.9!MIu d# gJIlQ϶M!!wn'_ @I9܏V}tMmJRSרb߭ 4C2!>ӕ8O g,76}'VUkm,ʎDrVkV-0un.^~fT㓅rtJYQ xqx=+>mKߐӼ#߅zg`bʧ{9qc*w֑k ksX4ꊠi%ϫ&9jPKtkeܵu=ce$T!pJ+?1D4 _ *+l:  i^`,8"DB ?jw.qomQ*)kOլ Fl}f9v ٽm;.'}Am]i4vX 8B?9/aHч%CoSHrlB h­Q ،!(ZK4GgZNO|il_N^?&ܞhTKBI + lYcM@Oˆ JdՑGĠ|DKp,<42YB tꞋ$򬝔1#b>l#2zSTo]Dd!Bl;Uj4(Xٛq&st\N"k܏ڵ\aY]{Eplx :6 pcb mL 1bk'uzv}z`wXt3\"-#s0]Y$:1! O8${ sh k b(Kq&b_tpWvD 1'd$+z'k20A#q ~o<]\bK;:d~!5Q݃$q!Pp\cD!nR_s9QHf7ƞiĐrO+Y5|TAy]jyZm{r9Cq$gU>|0s#rD- ܷ&\MF/uU(P\(S[p $.{x,yvZ Qe;HL1ǾR(2;S]Cֺ21}hnIDUҟUasHX FU=AӴŸ́^c 2R|?nPV5f:t^}~+ .MjI푃^wc5W9w;BO'OZZNso`愳$?ܜHerރNJmux$䣗ӸwP6.חxs\N;G`8ָaw 7CI{lz@#qE`?Y:WC ٶ_(< Hq a𫳨a` {A5ORޕd"A*mPd)KG*Ax?,rT .+}3; ^g,r=Nڱ, xEɈk޼DP $R9L'Ei 쒆VZa_&G/ 9~ A;-L"#Q%(ڇ1# iDI(ű{ [Dwve0gt9pY? m〙L%P|=W.!FEayPSů6ʕ2.{;[#pyąP~3*P"ȡ3'zΉi\,)&gE뻎G5i^s_v@HB2ӎ[ iЋrXI]! gf'X{ C :HjfE'kJrG:Mbs J :Aɤek XGϭ"Ai5荂(u_8(1w!Q07;XSL$] l޳m^!Ȇ@.0v6$(Sr'LňbO@Oq1k x"j1 ;%ˌo\V mxP%\!ffdA'_˴O2n @MS#B}DUS+%S\)\z$ zO7UMph aU!3u[J@eD~RLaZ+@!Be= f.!qܴ0G]jC"g9/(E/vNѩx]j;g_ c̔ O)rr=YX-L@eߖJIs{q;hm8ϸn%{JU$c2dkmR8uٛpj˟В$(p+Cbj?G{k9g6N12xf;VҐwl1h9IcuauW<+^[u7 ;ߺ)hM7T"ƀJ?Dz{ HEp۔r}$, Ù%zJ|&|uR  OʬĻ Cy;0?A Ow {-ia0I+&ɷ(0ĝx} 5߉ 2+-QeW[QZ`žFFZ`vi+̓tҩ3J T.{a|O]@>:0"ɗ{!%oqNibcLgKbe2vaiU󦆂?ރMP왦bdX7VU!>E& /H_-ЇpnŔY1؀ t=k@)MEl?#ۮk(>@Àii*WRփ(:5{NC{ߋ] *WМ6z.cmדӌ[GIÁ"L?lGR+k]AOQx  5XY e;c?Эʌw0{,8Q9~SK"%3&$٠gb휠#h'y7_"C$mR>87k?-Kޛ8m_׏mb|{ Y ?4men1_\fc}=>7F7Yc m m0rc!c}2EO |0~{%jRo$,1&*ht!rmL`|1œ_{oB6&Fot׈5U*wϭCWҾRӼ)0*ջ#);@u+yJ:=y aq[!xӧN^~uQp2<@U?bO٫0d-]8@Ȩajve8K>@BQ>/m4QwD]^IwrVM7 h-/I`%v#~ۥoAfBIh%20(jבG~0坑c)Ǻdك41ȳعߋ:5Tv:G6KV95c^&UJɞMlCΔd=$4۝.^WH;)ל MVL|U$nQ텧[Gtf0&Ǘh7nfNiq{^c ˢl,aQfd-ګTػ(]g٦q8%7&I <0SisKf!XPgOIDEt{i+sNHhMYu)1g%wt.,#v"񯍪xcX+aqF)\SB!>z迉6% aT G=VcV}SR#R,Ry]z#F|5 y_F\ʣ- z, yQ=;" ".M  )zk条@jCG&ŤqT4{ig9*;S~jq*OY$P%ExKz?lgs>?運teE8{M{ R%D˺=%í5$V4SЬ-Nx&׏)<8{ClP c8bGWXLҒm^hDvEdxJJWdٷ֮QD)8i2kCo_wpo`&Ϝ 4ޓMIȜUx D:E~b[Viay$}FW@pOizYzx1+W?zHJUt`jkp}ɯzDA=S$b/8seWCS Xy]Z:qqFmӋsO:)2^Jv"..ȉiYJ*#@aHINh:Sl҈$2!$= *BNhfn@ s>ķI$JϔձfF6@2*o9|uZf֥ƬvHǽF&LF/->{4A/Wz!< HUn> ܟĠu*Qv /fELVeG<6! 
sNJQGT[r ?uc ƟnʔjNkL A3Qakܷ)pnȎ0)OKB Ȗofu&\W֏_UѱpE-:M9Y1W0sB<.]-Σir[=l%})K6ASKZ))>zMwpA'3:WmBqi.UOm?rz);ozPBW03bI]¹*lJd9L š]) `1a5ͨvީ?[KUwrBCl :NCQ Kynޡ g?$&b--k}DA zf* ҫg fc$M OփTNV\'UmaO-o 8 1O"Mֈj|CŭVn_Z΂(fY8?i}H^%L FQaZņfFaj/!mG<  RnD5ky$8EcDc b*EWu*M+aO07~ԩz;NlPŮC\AZKF-?`@۰!%T=;C%{4" [ɝutq1#X@ #9 W zàAƚc8|j)n!rq\'D{͗m,$&vE9=M697E@]@b2FBvʠX+iLm5/J|-L0(I}Ig~ 2;j,2=+} {(4oX2DWUs#G;3Y6KK%R{?>k"aD$S_7g쥭e7ƛ7 H;=Adyδ[ /% 3Ԕa@ˬhKWyvLff/Giˣ. 'ZP/s)Dzp#Gӭˠ~Qu=(F{QSxtwD‡Hf&eK m.^\Ӧ<,Ľ18y.osft>s8$ǥC ?.4B€u)Cuc8ꂶGT][]wngBЯ U. Ӷ'^i%";9x?u]Κ|5oVy2s56>%h"Ғ_ُIyM%keI+z7wr0U0N3?yQg*b (*GI tb̸MwXP/=d ea:'$"!`v`k b4t'aY؂l71қ:j!v&qᤐxuVyg8ї2h@wj7{,^Qzp+tOEe"M ?%8hKa|k5tN&ܣ.k ۊ"y^"=;M t.sH[1"jB.ě "s~ lϙbҮY~-T#4^U"Lj羽xgVʼnKccs4m]|%a8nMt߾Qhmo^BA=Eoւsx;+HL61HosD.J84BNwogIOCv[>>lbDDž! km ֖&51;8c͔qQ e̢fx1TBx V x󃲍*pZz$7a˖!LT4z";JʬK=`k#Oqb*ה| x%b2ڊLggAB1"$rȓ> PA73m.<ӉjZEYK;Z2+#tU#Ǐ6|G|W,K*D˧%NPWD Bftj~N=dYCoǫMB4kK<5p (_ z4d ]`0G}R [w<ʖ~҆j~h(['z.UnѡI?SJq5E"QJ&},Ė  r(yhIcyp ؃0QPy\B&iݳZ5ea#I[v22\뽇?\ϛg/骝l=o v 1B12zQE_clsmoguo9bٺ gPr;ߜoWUt r>Z4+P <Վ~>HcI3{S/k;%K0L <I[N8=]["f7 vۿ(+COt<쒓 ?&gʷ:k6],YK>\Hѽ;5T2B7F] OWY`n#k ]?y0KGUm?yZpv[ن_@#~=բOcx^N[71yW 3ap'W7kg$)1rVqQ}%Fי&=FV+> @kunNG%UȲӽyOOTp-c3LU"i .d}FfYR""燠^> Ic.GIʰ/PD索^wRR4T6㸐sgMEG4cP?uJꊛXlaZTEo= Msr q,`vΉYfe/iT.鬮BH߭vm$$cXP~H_\6ɣ[b0 q,{& Ե#xАƼ )6 iëXag4YƲ0y}"ی""s'ò7T3>yKH%lvSvE"qydBۏ֕) yq>.z!GTQ8 v#y_wZ. HYl&o})?^]O25&uՌI esZ& ԝX Cj*Q\ 颌 IAM oQ Dfڼ x#dg5 z]ZZ# .8bE8XxlT7 x?OY2mO*3z bD%^~XWf668=cBFhԦ^ZT`՜4\o<@C䘐f$\o̐91h &LMin◁ GKKlV߻88 9`d L6S(7GfQ"Zp\HVG~l Hg$lv?Fw;gKTk _^XV {qOa*ateud׼\ʦbI:}lx]J :fDu^L2dY<(_'Sg:Zbf|o^NTmuy=~2Aݝl'ul0W^-EkimUkL<~Oe:%]c\bV+WI`&iwNӨʶr'yzn O X;MpU(lafL-H̓[XB#/PZ1;WR6me˧mio:SzNp%^!}^u(X:7PAtOѯ{;!ה+ #[!D6C;~&Ks4`VUM2WF9 >H`yU=.xػ^& Ƕm۶m۶m۾Ƕm۶m7Yӣ^a']O*UGcKtr\o nTb{ܔqWʚXTw3IO.i xg\uM_P~ RGL^IL/-IO YkKEc;;fr_3/_7.:EG@5y,Ka9H"E9Č3If3-HMM'Bf(A2%S##ŜL'u#.Rvܖ(H]?A~3J)!%$t7F/ X_%(;E_"dj'G5&,;61$mH5 ¦ܓ5xTm5QJ>ʘ ScN ҭ/LU )yl;MQ" ߏk>ÆaOAA c2i[uwhH0×_ziHŨo^S{ߵy:؂"I"%RR!h#.n_VL"-S }"a=Hx1:_0nH#S@/7iwW5͏ B=t\!?ͱW>Č&OA"/sCV-ũĬ4:2P(nZ RhCtL(| CП[uC3TA"Q%C>#k-lU5Fps.[O`O#@­ כVV4ۂ$c;5ݰ/A!D]>Ngey#_}ѰScnY|XF- 0YQ?.`Z[Bcn<7D0BOYѺLyNAb3 )̭1 SP)d#[T4(`AP!u9@i!=X(]!C;4=̆27O. ?8k¹ gxď,̴bkИFxESP$)imp.PW{ mlxF ;FkJ sm`~cM\-]aZdM9%X-,ؼ2ODT]6j@gnҩm,=ԡX_?]x*"~kq'q.>G0紡;n!L˨':uo) =86?$Y**d˜K`ml_"j:눧_Q>F/%)4W3eI<wԅf3~Ks5@gܯ/67kjpT -8fSA2}KizS `Y J3;gvf4 \'ڬ4HV2x1޹e<|j»4_,ܔPw?xKQfѰ.K"+L]KhW|0pPm*IcCNdznzn MFkz_tof2 (#ƎT U?dP1!0 ɅMGaSG&xCxET)PJPeY0Pmm9ĎͯI${Q!K?zSx1/YXg*(|x(] +Y$eZIk g` iKo(nS^hkS\2Wvބ2d9ŦY,)ܗ HøR>t9bvjћ7eAő,ם3x8!xG܎k'g&Vݤ՘| '@BUmmzse ; $2!z\_WmbχP$ `/jJ_tV SoƲ^xv.Μ!~W&tζrgB,$ ߸Ca2q?HNXD߻|eבjj7ꋐiU8پ&}GxqDy(Yfp44s gwo:mD UHc/t7uyt۴JQ&!I Ұ5KKվguJkhR.:_teiz"ߙ- 70؏pLQRFZw𪍔P/โ湸e6!Y ڽ@>t[_8{fꨠ{ʾoy0<}p$z]Qp!-i═PeXۧew+A:S#ڽ`nD\;x܈5 mLST]is-zZ9vuǒ76͸BjPT5%‰ObU{ UBXVM.+NJ{{w1SliziC{( jT<ߙ11TiۑVX/[]ռqG= MhŔۈ@›sM#jOuQg{2gL`@QU+\ɘL*qc6Ka#8M?@E%>sT![:irb(1tC`b"S +P;YqFlUfD@,;QfU77|6OksѢ!u*Ib#=6mfrFucʃPRK'R1g~[CTF}rTy]jcYeb*ZrE 1$dpDuNy7=(\;(&h ^Ac8oD^\tW8 8-X}e+Һ 4p0M**La4us9܅‰Q#cNP$Cr ڌ f&UZvD䌎q&2^[3_ SlW"]ĀJ8rIM,S0W' /ʃɉ(-b(kOpnJZ;b餲TL0rĴ`±#z5n9sk'5s,ib& ZW﬋[nUs ٺ{ ,5E4kpv̇h0d[L-P'Э/jZ.cf!] ۼflA8RK;0rrJf4|fSL|m[ :k;\KZ! 
#=u{o!~ebt` ZvMgvץhy wE{*w:x}XTYT7-Z> )@3:re0&m!5^QaB mu =ĽVGk -w3׹j!;;[P- mhYLFZ>D1Q3P7,ۢޓ5IO̻7<jJgW͘Jb:Md|\H Q%=5lٹ`$:M.9ߵF *Ne!ÉJ=A7O ϳz.i!2F(?ȷ .pYu磛s1F\XU#g%eu[==n#Лz{Raf,0ᮙBk!;WX| m~Ƙ~..)G"Y+n*lXr9;NO2Y*COBf,{tntrIوѶs\cAശg+zGw xnlN#x-UY:F†Cpt |5v1E`mhIMmN`ϩC0NDUДk?A~D&<)RSz؀'wZel,ۚ$tsIfҀӦŶtH{d$xD"q.4-";OaCݿ]6k/54dJ9ӀA,E\Ԕ984X?&rC AAɞy7-Ǣ)H@H2W$8NOݿ@8.@gzt/r|l.zjPPDAbiv{V{̉E4gw(JpgTd6gƀ7 >E|ڡds{Xɍ <hrPU{q3BS/·q7 'ikyl7}^l)< $?F3V~ 3g6hN1=ܲ}_CKɸRƍiLB=/Ӯ7GFNOe6Zi$f;^mDvݻUQlL5q[otS]xfwhq3N_+U4Q E0QĝFj_}̛gx8q(B3S\昭mesķm;ϬbNXX-R#Vc4s|'ޖ8R©Q/s=x$gcNRuiPDq9_cxK$dr,^ܜx$+92Jw ^OR4Luȫaz7I]XI5:բe k[,\?dDf2l{&;gLQc1aNѕ!!(g }w !:*/G=E5WaDux}FPh~+'GM,9zON~D*5h1|}>!ÿo Y(a<a5s_2X O#(W,m'(\ HdYjX :hs f|" ɠvc> _}kF7MrF_g^x S7a5=l{WYC--m{b C2LS/K 5fox97Xt@ rYEL/6T] (y!w {NCfs/'yT~{^wjGŕE[9|v 0]! |}npH?ξ ':i|o1$C^Y| <=Nw6@^9i^Ҁyz"NH= o&2B3odH#T{3-)qWp:YV_ ;EV#H/pn+P^ms>g#/`QBRՠ;ݫ$dQ@-|'NSFm025 Ci:"YT?~o^ud럏)p,eĥ & jJV,,bXnd c1ZUƨDV7Wk"yAA {dd`(q%Dy"Qv(ۙv>cܑΎ*5 7t7n[wYcG=88=}]e!u'ߤ- e=I _YI⃏$68@@Q KI㇧y#[&@Zݝȼ=uYG+ڂ]L5/J+]H*P=mhXkCu6MpQ*r6uN`qjz..rW[b>`@}W{Ccէ-]P(&󭔋{xtͧ PgڷaA1nACS)ƔM('kT٧BXRvMګxX ;<=l4“Gb7s㐁 "t("If?ACqi[2{ ~kz%i=ېIF. ,FGv{syan6sڡRp4I~e-nzzr) @ztmt aIR&gVazѾ[~y,\1X㗊䨟$w9>9ߔ 8.-grfD^8C ס½h;n6 w]όjţۜ\(h /"H&˿_ZPMكTC {"+zC)7zlm{.H)њB(Ahf򵹉'p5T^jPt Mh )[-MFgQ/Bi1ynf-ٜ%u4NAM25,E-CzdR<< 1Ӝv*~Ț,F%zdx"񉅒k]&_]SLUopJ;XH,IQ &@YGTo;mjuHAm\yַe/3W9-}khxoNX2Z+ `r^ 7b">~v,V6YC'|+wUPč>viab˄ɀ})P&PXdMж.!t 9lYyBnҴǵ^N9jN24J5_cksGVQ,_7˙7^x,+\H(CM$[]ق,>#)?UTF{6V-sh1i+Pկ門؜y tLcZh$oK @lj2ӵ?>z] 4>vP[PT kNH?n#1Ȳi^R /?Lʚ&ެϤAZagLsSO0!2|+;YVa*;{72@mu ,L̄ROß ӒNc$8jY칢X}xm´yqCkdDj ui)[۟1İ3GztNqql]wE/1$E#WՕvھ pf-efXO&K s{+lV1/P c'9#JyQ 8"cT(.f򜫶I!9pub٨o7Ku@L͜ݼnZ,)pPwk1L'5ҟ_{ѱ$E!bE D9zW\" 0OxI8U,wI#c g&Ծas76@hܜi+!^c/m=4@X;~R6*oƈD_Bم/n9I7gO7 A}g#}(4ɍ!Q~l@h*ᄮ=` RrؔMe;X5}.!3>.`e4Sag-`Zjn kdkvF?\ Jo})3w ڈN|jIV3xypkޔ (n(WzPCFnວ/ @>|Z% `CպD2Ѣw lbүTC /?7m3Qc.GTS&ihP {kO^NY@Wo|x:klK & oT؇%"}H4#mh#6+79kf|=QqkFss!Ljig\5G܁Lj{e%)T95?t50 9C!ğxkG?GΕhbuWwΘ?cAuD]sA1> /VCPTے|lyhCg=fN#eN[EwKw!~K<*()|<5SsHslE-s˖@.R]q%,ݧ_"ZJ~});-+'&)jipP61 v3XMCH(澖ZWTϧu2,$Ҵ4fReA=i_Q!)ڬ6-ws/y]!9*Ü" '.T"wkC̽Z{Fie]WKx ^*Bo 5'ը?qrJߌd*NU兎f7͖N?~2)Kn7^%xdoJAgN^b[= Q ;L_E@rb ]I#MqSB׿;|ϰeL{6E!B3HmvC%ΣǕqtk\Ƴ}`ڸ(juPYB~ТJib[gniV:|LhiY)لԸpÏt>fO ߏ{E%b6xp[N''okoSfuq9;POhZn$qREpOijaXES;=W04rpZp'8-%\1"cSPQxFs #aå_0 GԊuB){^eF][6/gm.O:Q"3ʒfMl:gAޞh+<-} 4 |tH A|4j6co* (4 cD8$^c EnXH2C)X3:ǘ_`x*˹ٲƎA]#zXR*cJ b1WYi6SC+q#ks5ܵ 1e𳧅Z$U_ x[sC&_V ]O[R"*y'3 8xpUMZ6:R0i>$YUXQIP9at( aB6"A寥l]o/7e,țGϬ@Md exF2W9mppl͜-l2c/4}W.ۀʤ(LI.mzeSq&/ S,-vav N(P5PNubHl11oHQdÀR35f,]GNeHCSq0 [@Z@ T|{1-+JRrvxf%bn-ÐOC'mt)ď A7X5F# Fz>~Ò=[5Fyԫ9ȝrPW[ȗQ20:o3ۄ;bwA ݱ_p` W[X|yWe5kګU6Ɏ6$z B jmwj3jڜLZJUJHdp=;XIVW!%,%_nYE*nfx>>{ՉކQ|[}LNSKDsp{uѾkJ|9=b".eD7[%GȾX´eŴ>2?܉Bx@p!f9J$F1i?3MzbnP]jZ ?$)(hu?{KyȽݜ-f;b&{65,h-j[r{oGηqPk/Pn N$C._o䓲GJ{mG]fg0w,3VT|4†X^e5^KФl7E){^xW*> D8AiUr[vυՀGU}5qi@Nd{%Ԑ^6zK,-J&F*,iyx\&M;5LhubקT*Wryg" CoFB2!n'?U,._&W%{@!C# ͮDU+úicwIopQcQӞ)5@{u#/l$qHjLSiPAܠcI\xG_"#$CڰDuJ<vFi0,>h\ FUQ{^YK6eb {o0 #dZ>EOٌF8rtiЭ'SC>Zx?LvnVv" ?g(R&cwѽEۖ=9bb[}J6<:t(};pp@emrq^UEZ*"Eխ=_,IT.n <;suYnFxfVg %&8 c+GF hf\yKp_NһҘwb) ojA+r?M,5|B(:^h~eg3%]ʒkηq],3[:}J62ބZe2\% f=1=)嵱dzyTՀ>K=~/Ve3^a^pcF{v'>YD_Lk< _>0ȕ*^&XD%dmK%qS[)棰Uï˖|a8QB3sb;!)i:Evo^$+ἳi2&F6mGYj:kT@X~ǁ;5v`@n~$l-#WXZnUpPPpG$g8 sסݎqLV򪴼6*f糉(LR _x)"fn~]1PHb#:}^LcVZx~Zb|s204ڢYK=_*~c30`0CۯX5@D馮Uxܵ.iayh:B~xi8iK\ϝcİG% Z{BƯq=pL3H]շ>fI3?͖a s6.eNRrUS. 
v#];EAcxcF\y*1(@2G|8#]wօ$HFb\T .M[]˸W=TD!@NVԱ6Tb9l:si:xsCŁ0`-@+LH@A<}Qi&,t]'1:YdB$%L,Uᆾ|LL꽰حMQJ^̓F$]ULŘ+ ҠOS@KB(qk#pzbt@=;U& ܥ6OExHhצ}fYz$${[!࢈qCP 'ĮޮߨaǴ8X5}Z!jS[jp-sUr2Su5!#X\*y/,s؟UR?Lucg6bV#D@yq0=7D )&1iF}jwc TyLv\ |[L11M* NmndGD>h+շ6n0h7س9} #eTqxOf=x}$T PV'lI֣AoQ t2!2aSGg:2d1帉d_X]zy7 \ m5\\37㖚Ҍ4\\Kաw_)>; o:94Kn,LYL`mx+Qamm(&&zd!qE`T 0L55e  sTEcu*Tii$z$j?rW`m:* k8Mp O?LF_تN 7/qM uK7py 3 *q#jp'_m=Uk' 9?rTQ^[]\Ò$PyRyyC{EzCKxQ6Yݬ4j X LcewnҭT7q3׃_F79}%e蜃agC“=GY6|}2x$>$ l47v ~b̡_y6&E\%kD:KL7be``=1f/Xȳ%Onji)>ؚ[ &w+ -X;vf$ubloCn7NxvKt`xi2 _D^*hdC X7pc4 rGFqS9.vEbD gB'g~$%#Y9̅4p~vJxH 92hJXI/d? cߤ %ʕp)i![֮} ǎnR-8xcEF=Tp0HON8y!ѮFOAgB9 'Q)K=>^McG=5*.•2D/]oϒٿnװ- ''A~W쑍2_i+lu/ĵ@sC7P%|zdsG$ѺsE#1|I]\NVϡ(%$v#Hjo_.(7_NWPqW0]__2##2vn}/tkIJH /{oCd7m+UmA;@7G AiKꤧ"=>{&y*iۓBjhQ+X@[G*_{٫);/!tܵBȬr>1;}s-Νgo$``&>Qj4g]? pB<|z ,bd*h<ІY"J 5's>xUhP\RboZ&[H In":$t; + ;)e%YN/]G-8ʕڬѲnDEEN(m&i Ҋa2S1t;^laY{,>YA1O;&Wޠ8l[r/!iO ѫpDxGg_# }Btamznl14p~6Zތ3QsDڑlI Bb")^C֪Թ'&EҞZNp}G!DviJ2.9,:`u>q/OC N1>VlH\2` RrT7䣥nĉ:$A.>xu/k#b(qGqsUlmk&UB'?cMH9d03x-5G6o3aey6 B|; . Cq\<<9yP8φ(3gXb Pس@q80UrV[zky䂢&q7\4F/#{]nA sV쫄{of Cr=(k DL ar#,>U6 ɑ]lZ]Z@zDsN: Q>5~7{!@5`+ {r?-tZ \} 6,Y-j=me{!$] DKnPq{ N^t蛅zNTڈ,I֝PM} y[sFw/wR 'uk=9j{?H{)r'[/IfHp$ܬ 2y]n˘u "2AO3N Jtgq[eϬShLC$vT[b;QXY<7B($yX e{FH]P k[$ 䴒D&Z[cz(L:-]ětlB؈(~⹴" #ۈȻkDM"~1["1`S&`OތiX1DÈuTJ˄BL "B3#X"Œ]qW>9`xlu>5:\јC(JX>;SrDU+-%y$BD1)U75cg s{0e;]3BkRx3!FbEaZת?Sk3 9ʖk nӸԍ/5ij/zʯ6uGHOλ\{ͩqJc(5@qLvgjr216UdŎ1 biW w7XG\=B\~59%]JA]%d\5<20M/ V5ge!B?4^gzDn5D(= ?dB]+}@8-jKeYxY`ŋ#q[$/8W} 2Ả:>€^6o}zD;eZe fuغ]p9̦JRٝ+,C "_d2؎3|}D! p uwKsv"pgM}bcs68QgEpN RAE+ (O;hoH#_qZqv槒Mu!@!6Ncm־JJc+ ~}wk筼T#LM3Nj!ӿ!_tQz}fƻ"Z >pS_?$~lqR{{|21c˹? IW\ z/Hvg!rj߆u"8J(/b[PN+]IYgЖ}ੀ10"1劝lK8^,XVe3ۚ}m0B_kʹpPvST>߂xì V;wɕqBc1#/2t&W:,o/2F6w@Ҩ VD۸&Xa1p(*њ2]774ͼbER;]se6ȕF$F*!`bӔ HE9Z #F={d+QiH^e>AaQ40 k &.ݤ'WȆ]\lT%,G9k?Yi{: +>| msb@렏l;Gt%iᚣn0jբSz|™ sH}72dW(YίϹ_1:M5PIsC1?~gL/gq# _ZGnEQ\S¤9/(5JV9sD#UT:r|9޾Q1H=v\g~Np?2-Yvy@%0luؙs Ongdix#MKjqONʬהj [{ 7o썐("f}VhOAĨ4|پr(!? 0aTL4nMWU2}=ci:q01Y} UIq:|5O67ݬoݐq#;dI]щq"n;` \ O B].1 2fʁQf3M_ˍE\I!A6 :AiaJ#gT*ejRoc($$Hlemhk` _$R;Nrrs(RCZf-͑*@$иsIAdLq^̥F궖H uh7"x/8 E!T͆rINz',g1P"qT ?yaǂT/f!|ow%AI1w *N̞kިÙ;qi<-s(DBpMI2z$^>T'^ pbк'Z2+C9?S.8ɾxW۝8RJZZgqL)5Vq֟\Ojqޝ1BEijnČzW<44䳱]+v2~5:-H6t?˙B>wwzSdh -Zѿ Wc2*c;OUtEW-{+)}'a$>{!j DPNpK۞4D)/\<#WlUznZ~.?Ӣ!cg\>1+Aٖ}ms=SWճ$ _[H~KMꨡȸ6XPDP<Je~V_ " Km晦KTDo j6̴jBXQ*0#1YR@lͤ4yLQt{VVBr{hOEc <=25bX[k<ֵ=GjB ^D +D^$S^)g%ƶKc?efb;{rZ 0) J#%O5cf 5x'2]9\hO@˖GL~Z0O a4P_ 8?>Jw?5˱E? 'Ac5g.?ٿ͡'@o&LU0=Cf9js@uGlPafys ܔ*ukW`>=STö1%]r""C>KR8 NH+Ps_$oh;G볽I3'Yh}df> q@_9zBݜ?wG{ qV"E;j"9q>]}H-Z.$6{s G㖅 2bX$ߠW1A7Rhq7}@ܕP]86`"k#@I44Cm_j9 auU,Ssl406Bt7K8ӊ*l` ?Y O'N+Zm[2"ƓqWcT@ 1&w'VۣI'\d;|9\v4",Hm5,Eu*txe\ 7~w9YQLi+7fmеN[C9WY kds\7 "Miymyhw|?: '2Ƅ@ceqŚ4P:5-`ApB'A2xExl8Ӛt'o2C:]9Tա5~<ǁ,ƭ4j v4p ~8pY8q(! 
`bs![L #ܸsyRӅ*KNor#Q9qc>}G݉f d)Tl/8[F˾٢ wXΦL ^߾^V+ߤq;),n!*m };˽/zzf'WdL~ 5ZTovbLOmJ'aT U4.1S/@BЉNPMZzow6Ȍ'R?Ilarف{柲ͳ.=IJ8SNcOJhHe%S]}Yq*j 耖L$|7J=_)U%RBYG`Q4כnZ5{I=Cڌ]0>W{Ԧڂ\¢Xig4hP, 2ÿz_9hgϘMͪl#$,-g_22:-ofHB(]dleC&`eVk=@݋m@]QUCRJ/`ƣ=innw4wאuyH@J^0 '`½l!=JU@9JNZЉ8 jZN4Y7A2S*,j%I3P+2ţ6Kvd/f60yP>_H#m`zu*&7ć|=> >ȕ>̧ȡ(t"O)ZQ_дQ))A#-Ru,d;W.6p/G| ĴdoYa"xK_LI-ȸ2*%f{54/2\폃ָxf^m8̘%Dz"3{1(bhѸ-MX gʐ Q9p#^Kdl Lo:wIfՃǰR?钅6JOGkrO71o|RqJf׿ż/g( HwEf!ZNGwX8pb痚p6 twMn*A# ZIUyAd?pL8Mϖ#Fn;"Uf.E7RFC~|-/)@X'63QĆU)'y[n7\q=!&:%eV.(Gqє3׷u=)ӱ+4+qwpqKheG0EaI1,!Pwa8DckNG{M8j z!v#wX6MiSm`H&n^B&[NqN0;ͭ D;]t gDܪQu.]22oƗ̓'MLLpzD|TX n~)dYfTp?FxA=p3Y4Ú@?C ͊ m_2;NT}pra ̕rYċr۱Օt>mt<~QI<_unLp `|a#LcO>0qQϽp9Y}MMBi @ )bZ<2^W,?M|i^T[s;MsInXD2K 85ٟG_Jo18J+f'啞cLJ96d+..p`$PA_p/ij}<6 +Ba:Qlde€Zrp55b%_Q =j U @(B)60|:1[C|Kk 4_&YPHAH5Z=) Ek fX$q[~[b/S>ǃK UNBoBz_4hwL7f&: מxS em78RrLnhw׼ԇjMsLҹ|zPmB]D"v鎎=Y⍼mZ? {,D-/K;`37R=%`vD}vmrǕa"=8|#JgOc`B q }^{WF@骬yoRjbV@סnchu,nj)tg.!?HI5W8Tx DZgjͷ|ҡNt/O}/jp l[^[BF~UGj,ÌפN1榠9X<6EMxљLbUC`SCMp1ǟ!Cs+&i ȘdӚS/r+eЗnGJMC @\FFIje; %!tQoBnUP->!ah  #dKp Ch Y(#WpDŽ~~Vln5* ,L!X)^]iHc;/U0A_^9%ڵȌu{GGe$ut ce9Ӈ'$A^4o(mDL[eT3+v٪'ZfK Z*8>(Loy,my"}9Hߟ M<pǻrs~Uhf8Xg0vR_)6;.pgZq=Rx4j-Tȗ*@ 2:co^@ޘXf$8zWؿ&IQS %V Ī}1fdS-$K宖3 džRt bK]Zk=m0)4bо h׻unf\Qbs6"%D_[|Ou;wHJ8sK8')pm%ZLi6}oED.Mllvާ4˄)~{2({G%a*5;5ܮbB"vɽ_N>*F{D,>Fp6݀PN?M]Y;5ߜu*Ik|=CȌI~UW}b8a=S*nΊW$Jag??Hs;גA+0ۗbgFx~jYdܡuD}&U?M?qmlk@ϋ`n 86K1_u,0c?u?Tt`TP; [G ߀x3KDa=n9P83{{m>ѫ  jCtwq0ooӏq:$;:qA͑b{ea+wĤ*.ʟtX_ڀ2,~K *0s1'ԝwׄшEiD4bLP '<}KR(nW:LV3qG 28z $6<TogZ`>VpkIdWpK* ]kjā1?c)ϼzKwL> 7Q+a~Zrcx|ɹc>Ȭ7 cއuCf- >-ҍ>^1 <~aOΣaF{ԩܰԹ`dB>7R='>l-TgWW!Y /^1M#:wBwxx&_+9dY4'ױTXϰ@*kU|Ur&f_ }8I"@QbH@iڏ}܃m~wJiy=Ó\‘ڞ` 4>V >z7޿)*s`bl9_唁 -eòJT r%IEbohG(rŨJ$MdRĞRw4oi&@ux#pL1[ ?*0X|AԈ FL(|fw=rR17qsD(3L3Ŀ&Ik AA8ox x"{UI2ѿC?>b؞f6 L񳍚YxQ{*Ox8&[Mo̦;Xh5a2A nʙUիp!]UrfB5AV02Q䶏y)Um:.*4R^`6)( v+;`Mr빽\kMYg>C`%҃šDw\_,!{.Ϭl!u JCͺ}tմɥwX:0pEdX =ɼ@,@tt÷P?̒*r B ?&coeJG..R7hAoMWBMr)+~f]>r%Bʞ8^)9|PX'PϴO&Vm+7#'} XphJd}(UG7CNo1)u*f)` Rgɑm4?pZ[*:rfRzJ, ӿ-yQ@ΗzZ9y3Oʫ=w J1R& }2@s̴Al6.Ne1D&s$y e_(Eca4YCߧ. 
xRs:7%F6=TP?c㩳Jy •Xz Ν'NU.'kxeS@w84[%Y׶J>5Bͨ(?(dȝe *N@ϭVe؅ W)MЊ~*j+jRRvUJLW`"˖8c l%)!Hg ;ܾpyiy b*66h+Qn14qؼ6BFyԩj#/J58 v 3$Jy%D4>IL-u1q qqwy)ۉҾen@UV,nE=}ThD曖BG"qYb{65 =X la_u3.Lzpc$*ʪu(jdyE&jvVF|3ϖ#arW}ɦ .?ywlO*]2q A'l$'6vb{>E8Hlpu23!!~Ԇ?~r'!mD : +]?Re@/R譪 \>ϔ8 XJh4,C^ h\1 b9M%ClLzl2E+uÌP)!?rӱ/jw8f> S`t{Ew*RTx6+p$u }WwUDIMgEȯ5J.m)&9J1Nj1F/ؖ=H;;10lRwܡ׵S%GK4D8" \-ϘY ~vU23wmO1i^8U+Aīv1-zďj [^@Y7T/`G2 "b{n dš)6d&zu}}F<zD;%K{ tN<y7r\UBU%rpmՑ TyOEsg$B?yò9'Ir{+EiAcFP﹃H,+ԚB.߳aE;H^sKno 4N$ qɝK-eY^9U As7C^16gM?M/ŋi0W_QK3 S,/l$lP̠`xJ;8G0?2^ЁbSR\Ea=!6 +IFI:("ek'MmGFr v+ej& @md+ZT>H(Q~" ^j͆r҆p3*_ablE?81p„!-챥Viq(FrVi j Wu]w E_xP49BwMK۶m۶mڶm۶m۶mnY#Ȭ ̆Cz1;6Q @xX ,]* !k%s !)]QgS0?`¶ʔ+t[v ).d2Я+[?LϚ M:N:$k!'@r3,ayro&.,)kCFq>7@73d/\ó"B% b'3*f>< l,Gp@0=Y[Ӹm3L/ΓpۈB6lyZo"Gj6@+_C uP)2ɧ<͹ܯR'J^v'ׅ[`xiApEbSJX"Es$_@>cBk&ƽqD Kb/U!ZٸUsTsHŏ=X%p5?2F;:D-65TI!PIPO1~U{l>ǀUgdy9qe|)nPR>Tr/N_Z+ [>Okԃժ"TD4͚J I(QQl}Uc x.ܧf޸nJ΍~®`;l \zđPn?>Mh tM+֠rɯ 9KL *kPqVsq<`_u UfLJ GȟG 8WQNߋ{y{?.:ۡگhx%AԞKH9|e,zQgV/fTwq0fy$ElAA%N9AY?&F&*l9D8\T?8LYUuPЁ áay tN@vkYWnL8I** sX!MgBIE̊:jwQL``)|׽E$PJGg}$ RZߥ%b5i݊r~@poEvG֋fAHGV-0_F]NS9qHz9.؟ĉ*VL9A -  [)pڻӏ=e[`CNW,5,ENc% f>UN<-щ(t5XʜdS4l-n4aS5N{:\{9L7F%b9I@aRS"-[>ϋ yWe .|@Wzڅxe#A,Tc# ovzDٍb[GHfTh^A@]=aɓrԊIS⥅?JOT\ vImNMH&A EV2PrP]uxUZbA G5 ԳUXPB6)s+2g0ۋ!h`y9c7RnjU]Hҋܣ$ Evj#nwoOسOދ\-|8R;l XՒ$"ʅWE ,9Wf޻~ˁi:o؊Ȣ'7=~ ̷Ԗtx斛149{l חʮ/TQ%L (đw+#z_jٟjJzVn'rz< ^HTSx 0룗8ZA.67 0־K9DZ@AZFgL9"Lқ z1(!nNN%vWȒrp [CF~ s jxI(?J('BinLĚ`6?đ=HQ5'bèM\ƸX'xYT˘c~Cg_$=1cA4;xjʓHG&OmהU3(%8e*Ya`8;៥w##\S_P:XDk wjF[-WPP¸N#|хAyR*(utDMV'j2=d-Udnp+^ȹDe(o4v-m.ĊpQ@J>ʧ+/юu L8DC$o/{$8yh鹲À3f%Cߤ΂q! E.e)”d6f!÷|ݍH~Lv<U3υnH`5.픇{-֓#u1wMx]w N9IBG2l?n47 p( |.E dAfsYSs_۬] z R)fqO;[ӓ>$Z)?ľt]/7I>a|E ii`k(^DLȭSQr={~w;֤Dl?ꎆ`Rcnr;4>3e"Jn (c&O2z01^oUDclFU2LbQhC#Hr0Bz"ӻKOv@8:r"Rl.d,$:>XzMlH-LnTܔϾM'v;Q(e>{Od!~2 ,(K*dӕH̛JԵNS8,"Sbe䃙kfP5X? 
(whDpչHaY ƒ>2Raޠά#B700 {}9"T:!E!-~@ ȉpƣg)xwSr$W""Oa*;zrJP9ELisB1w-Cl7Evc֗lCs9-}b_^cduvs; `PTjf[nAvR7`Q=Z-qĢ)-n@#S\U[J] ;]bJ^)edR ȎL` m>ՉxO&ZR(*sZ YV(TsxgD!K%;-!RtyF,*囁\IfXLJRcyNq^cMռ ]Pxu< g)sDTi,y߹02`ːmla/Nw&VQUstxY8wo-X >"9v GZzC%O6!H"!gn9Aލu_ֳ ˜SLQ  (``UB<νc 4PͿ3\E@ 9嚲5v{ FF`6Q-j;~y^cm5$2EidU#19ubw0J~.XF}Q:޸1T1?㦼`OL#rh/a #oi$7-GȢg導̧}p82ŊAo"zt'' iH6&H]!_9,x9Rk5SClC{)..SJMO=k/H š Vۨg/9⚘ ]xnꢘa˅FfwV5/J跃_pfp:@QJkd25X푈b󹗯a;?*S`5ByMGyΐ"4CJLJAW(;?2Nu|7b췭9J(X˦s?50ݤ>u -&?&aNSYEspZ+#f ҷ軳tniz-hٻ])$S=R!]݂k{[G+h#YC}gqÍJk+=#'ӔD<8/> AyZaYFx\KQA97#ciS^O*Q*dSS6FkI)l ˒nSp`frH̍b%)fr*-4d]Xݭb?o$>{chXw*BlE~stv'N 3m.`%VK4TU>Jib 12xSz yǧ~s#8Oϓ }UrVS8SY|LڄskC>|ivrn4%OR\ॲtGv (i\arjYES跤KM-B,l WG4D3d5&B 'RTLX|Ov*= ҹ02~a&L^t&oh.-><}uwBSRGL$kCО?o4n?Y?O%t?$a.f~7`w&FSAunS8ò_u\3&;b"q&Xo-W.;]乆,XKD\Mn #;lyތ@QpKwTj!WvV.7g0QrXh3fn-^;^)r$a >K&`{},ٳ+ѕ{m;XD3]O j2VA9e!0 0]bN\S(w\&R:Po1^cD0cgÊC|B䴤`[S.&0pG;nfp-ςqHU%u-`ՄALj^ '`-1ijj*?*j (84 -4W?ٽ4s]>+I&k, )9RO(!K; |VCv%YIK_ՏW$G\ˮ>y4 \׷ɗ N!ؖQ_=~$>Dofv_4A_G4lpڑӂO#|:gWw3^[TJ2Iլ7_ =%{%62=/:Z;/Sv8|5t7HJdvC2zOjH!0r#vc_<&TУmQ0t.MQFX :qSyYu|V}=谭56hZQL,+@c#m@/[`NIS`QSВanNxRrvԱX:P)Tp}a-S]ҵEi4D8E_Dv3BKhS:z@1,b4k3\oڞ.X9/wZɞI1ʥws {ڝkU}6 #lf’"k,tY!>PQ6LG>q2,<?z/"JL{rX"Pe{ngq'rÎj(W}ŐY)Ҟv> Ap33 L4L2ӒEopɬ`Լ9".)mbe߈q7D+( 8x2 r;zv=X_'ԧ<"9Q*;\$'08c #j>NvN4L7;2L: 6D0ʦόAcVC<[oʓv@f낽-PUiDɣe :`ܖt=.vȵ"065.sX`p%W2|u5cN]YUiV(곁gA'ҡElR9^MX >)ٯva(Hx֣nv!qELBrq4yxtHP1k:$&{2i&F%"~]urj;q,v$m<MC}^Lawi,tt_!/?݊[=OR-Zmkj~뽥*RG7ӱ{`\aKSJ& QK-ȯ17_Ym՘'inې [daߒCZ7YcMȤ_!?,^ejl϶{7?dz:0 w*#i[oH/EOҤ{^<`y-`\e'*}-n^n1{`glijDVX'%@Li~ f ikeLy2oAǵ> Hn:^eZ3GL`w3+F7UҚV Vk5a_EژH<75;>/k( Gt*EߚȱemY2yL0lV\:,%H?w&ߞd|1En5=JH_KˣϟFXaS1"Ŏ|*W6%w|XuHh' j?ncg deY_<.K>F2a%3j~̃6?sRWa0eZ<,nBmq;(Vl~E܉tC 7?NDϒ8Z-w4:J;E TDzp9tlΞfZo^{w)|b5d/cRج+)F>ҿ pkX[Yau%ϫ3 %Q6U-4ZXJHj/WO*Cr\uo[Y\!Ef?})dYx$@ R4e'/#r^ `8`rdwQɦι~*+uM'+C㔔[sKqX6MA' JLd]18zGtDAx T;si(k{UP wQ8#n8OsHCf[vfj::dr@"Ir¸T896z-Ka3ϣ]3O38L5pm.72/'%[蝹iGKx\V,)EnYw2SSQ`y"H= c'Ei`dj/vT#k4aH0Dxʐd@˒GvT14=D~8շ YOQG8 K#Ї1{ĄxQ8`1Fk~ tĖ q$J#ƃp Һ?wٙ}O tsCt S)幓41brtnq i!ɱ>Dz2%׊TQ⋃ַƷ)xc9Ή>0Y0m ⌠xvL4y(f4)*JP6#>_rcЌJ2+U(1\'aC(q$;$L*bS* ȡ#)Pt2(71bT -zm"ey-Q8y+E4 0U_a5֤ȴsD _|\fZ|΋ TE]<]5Emԕ9D ay `\P=RR_Ӌ`O_6v1^ebh0|Oi0l[&qcb?E8E{JyWXmJqLF6X)6iK vLШz&%ȁGy^(ƭ F>:$X䗺"I$\lJ@qH&N|(IDS+aWFP.d q8e^ m6; Q[(0(ҵʽS%$w㮁h /@3&u9TV{H8Hؤ'`;d0Sx9o%-Ƹ̿fI48߷OkŊ {B,HujsC|9*25  #}ʤy6,J.ͻO~[t "CY+ ҄!0C.yCSqjZmSn%BL҉U F<9_u^`l 5H# iFzn0b(I"?1k&c0|oPX@G8vZ*:0FF)wwŵ1'KP^V=T=A@H6ܯQ -kl v)}v}j(p[h'1Q8 dcnA|> 1\1H*۠ D ˢҞ̉a8\ 3CP cVaoAo>Rοy꟡ZCE,] _aОÐr pw帻2^;үE&QIȻ1_Esޡ亡Ty*e& zalFǢ=U Jky~[f[ ЗkG6z`$\Y0[2f#oGҫbXɲ^P ͨN#Ցl1ߴx[)wmmRr(̥+zsilԧ) l%wg'Sr_;.@~v@g/`yR_1qOArqAɡύگ4_rbLݨ}k^MjD aL,8A(J^ZТp'ORۥeTn7?c___#WGTƞmX$':CIyy1PdV8H[MJI+ &R zAqW2ԝ':*&&[kY/ݪ16S^eQmX KV>'N Wұ9m߭S91 5e;3PpP/-ɵrƥmi6fi[q!q ž21g".<_&$ԫ7Nӄ(}yjH e8  ǁ`JD6Kô0[PwEhO(g!+&-uyҥ${=/ll?5oR6LV:I'7Om"8[9HGb-!EᵲiK;du蠒wń‡1SX|"CɉNr4~ 4ĶOmO{O`%J㋲4Ғ?\xȍab0(-.uESԆjEҔW?ާkīm?7y7keP.UBb|Mc\"Je Sn|.1Bü r]Ic# 'H2,qZP.zwM(2Ӑ:9i:@to;z$B#LuYrȥC`Ƴr'o xOٱ%݇ j7M$gL"_v;}X=UbkGO秙Ce%N. 
" HVqK+2?bqc75o@~k>w}4<岅Jy뤱負=!&g)tkfx+Vʯn$A(PE%=?a6Ĩ<~* 7D2?S^ 5(a/C:$MKyo&b[\a2${w qu{8^#pe"c8uګ]0iPȮW}uhF(V6fR\ qRC+ALb&38QIN~Dlz"44VCE)+i0۫7FsRn/jag`ĮC)g zA4m]mlG!B D/IQimEYLzqҰZYdrrf<c1GLϪ$"[/~Xۤ0D؝i 5ׄnK#AʽCeA6!H'g]m&^tj=Pv(nrJ2hƻ}!ii-4~bhcF=vG`bȭ>{&wJ|z|pz&'n0hgl/Jk(a [uV^Σ hΤj𞜥$=~ 5V0!d9W]he!9z鶜SEԴOnFUD /g^A~PXo(K- Uq67H!lj([ Ͷ;Ǥi_{ƺ%إ̮DYnˏ`"AiIo};M?a/ r_s263 zm\u0*h Oo :Y8XQȏ'ǍFxkWmTgGuʭ}7@}򋟬GuD1WxDe"tOZ{5/sa[j]NHW,a4j۩l?jAmSl(q(g#>˘jXe25'5r @+^۠ڽF\kZTc<|xG6x!wա9ݞ#>bQICa&Q2 ^u8bLtlu5N007bNv\\>Gި(Ǿf^Zq@O^K"a&qb :B,ҖQƤg1e+0pv)VĖkG6dAH- UvU?*s[z]~Gj& 04~X@,!<(m1MZ?#XFhV JDTԻٺ0u4Z 8> >L4y9&йKg~ƣ͆V x_mzZQmȜiCxL 726$g'f97Mrm%],]2M$0z`S 餻pf|Bjp9*DU.zBCO2c/IM2P JI\9l- qh?߉sq%7ĥ5ez~{]PYiG}E4f>E8kZV๊Olkᚤ7K%H*t>ՔE zų -_ǻf:w4Ny, dzQo&lTh)WkPUwkR$Smcd@:89fH|+I ڰV(Zt!S ﮑ0 G6߈GnJՊF5 oKp!@%pe5N64[5i1& -SslP71? I!~_ mh7U626佽S" =\UHfyI:bs0_Qx}8`7urV8_C P)c"ՀijP aRXG@w^RV]{N4XX5F-}y) G]LiJwOHAlp(?ndž<֪L A,vxT* CG_aȵB3Wo}}z<LbvkW?\T) };\x7ًF6 }y ˈ@]ds++VN5 tr,zr .Ѭ1ud{-iF$,#?50qiNՙ#+ t תn9Z=Hp] 2֒gK3*ꦁTTNil,i[ɬnEכW {I>78X'}E*| Iprawٓ2osQ Ma n]lB= r60Ek)3ݫ@wcMZܨa }sdv7kr\XD=t9t <~i C[GZTy7 .}7Ú*= !Ę5JSTjnN*KZ4=|!d ǔZ@ Eo*8_r9'eY1J DsFFpON- z֞Ʃ^qJg׉ʚKS<ɀ:o#9z#2n,y]0-ށ\KS.L,&%z+ρ\P΅XACRWqׂB?B.{ ؉mZ0Ȗ ]GTs$G=WnAʤ(&Dyh{,#U l^+s$#|ùW*JLxa|LNV@Vލ</8VyEx5>Ztկ$REvUVΝc*nj\8/pnCXmQuC=DžsEKڡvƗ?#GA܈5nf3J"j,d͔q{~M {bv8|LW4o(z<0Q f*Rv>dKO`;W%6kb$@*Xve3€kԊDFؿ:. JyMaؙZHےYYspnpaWn]dS.I꼣\8֪;_y4Pdkձ⻑&.?}\Kb.߈<8n iT'mS߳' i}lS:eaAYJJC7.%}׻#/#TTg몎S,1sˊbKl433_ѺE\WR~4:Gf{۬#NC|Zotfc%e jT$6]t՛a*?udx-f}k;$z#8keBdk5StQ(3F% -% W4vj3jc }8A9M龳OHs`[6|eD, םt#=&n)'WN _Z'IeAKd{{:)=g}T1gj:Q;*977"I6(b'x6W0LpM@Ҏ)EKl10Thڷ}M.~FQM&y#LT}:3!j\H؆|wCYw5>RAD+k{?Yk5eXilLE)c$bB Se 5¡G$mC%?\igF/S^4d %;&LY~1]sc>`-KbS|%S`4ܟ"r(q]zM7ű`ZQY+(cGM^;Y슇e5~ ?$Ebh;v9'C*g Ԇu}=G pJ$ 0)Yq&K )&;[b<3<'cbrg?N?jtrFHAXzt Ŵ Z]X҅xpX6qԹ_'u(/+wCI[;G289A{…b\GV)m|u+ 2HYkxJodhS\*n,xLG Wc$(}I>Xֳ3{N`q=<χE#AI]V'C;9Lz /B'ˠjc5N$5rZ /o,9pwBD;cDS8v"_߹'qa]Ɯ(}4P ~ ]L FY;̹ #m1Pc(zWiApq怒}Z%tK2@pq!P|h÷q✴\wK7_90IԷ#R%B8Y0kWwL[)hh:qB1>v^K3r\o\.BҜ ދQZy5$ijCcM{3Gkv*Nc#eFFu W>Aihn|$ U~BUF̏?s@ugj} Ioc(C`i1Bf̡ 9D:gM/h =d|Ŧ9q͍X-'*im5)sp2qe&R˷'f~3#}koIBp'rH*ӝ\BwC#U\Jy~~2cyۊi[P_YloG,*׍AVߜ'dd"J۷G9ig0F4O!8&&S%dz}=W=}C \fe35xiы /nC_ݖkv&hfG+1tBxzRGDP9X`SvIm ΓΡ^\AoUrQdڋV(MyLy.̟uxF;Yѐ5=s@|8좜Zs/\j/Z57ug)ٛ.#61xW k(؈# W|Dhlv1,\hoC:-l1}_|03gpb,9Uqi?k-C/Z7ʕz+|I3XGŎ;(m+ݛb KLzBkRC x8Dm\.:;,{joYR+SkhǷA+>;b§֮fTvV<0),r: |i8x6`i)I6c稞S-T]h =QȈz34"ZaeBD qӶk!6Q܇6z:c'sN>+5PY mkije 8~C.IC7K++i6t^z|Cyn To8=KkC)}<)(EV/ZxҮH:’O?|sLc_`"`PF#ѻy)wFe$di apZջwpnp BC q |5tO-p&Ȍ9O_k&$v*6̬ m~l7V)LGZ*/X@jʢ]m*g;4>@c [԰(_gjֽ]#Gyk*%"f?z3pCe<U˵`%Rp ߳_ kZ;ɴKpfb_ѐ (H1%RL,?Ym(hSf#l]LcIQ3'N?I٥adkatM]mw4BOlڂm}IpSFC'x,{z]hQ1Y͓ʀВ5d Et1-$?! "i2o?!YvqV*V ptZۃh)Ȁ&rd~(ہ7D*3̓^@Vw%Sǐ+|dNuolWE%sP#@l.rsVXgvNB7PG89P(Qf2׶ |^ꩄ~ȻFK-~2$ C9#­Ob#e|챸϶ܢ:,X ;"g8o:6#;J53H81| K`?`lnIcou9~64.7̌M>K>IpO|to,6جhb,?G^w(.@VywFhK~2!INˌƣg+2OhJR%1O'UL&\qєhIHZQ6UqEKTܱ?]u [Dz[c1g ! Ms5? 43Je rS =Z3c _`oA^JVjTPvWKrI*xAb2S1{bW*qV8{Rj٭HDꯗ@ΩHG3o(t iOxn3]Q#8%^] *b:d"I+ Z9XdگcJXG8q'H4өӀxwlRT+  {͒$$|Cݤ2R)$PSCZB!3=?A˛k_amQ+ x̣r^J]AMd'[Ĺp>U=h'Mv~Ywoe\Mgf4֝6pTQ^:UшIjp>)ݵ"i-Ey[{E/ߺb8\?|L^?GU]UUA…VIKF?` _%WQ_8pVGE2> o.g;yQ@,Od_=6e&=n1V+\"p)h6=/ܬD"=N>K]k!/AKVrYZu0ucs7G#սKCpQ.TBAō{ʱX´7 e 7N'COƱtz@ChMXHtďVJ-{ ej0?1}1;CKg@@sahE2ZU93W4\B-~/͹ ,>w_kGch\pY̲u7)|E'e=2Jݵd8Uw hvJ$S#%T*)f Kh‹:o}ϗ)&ͰMk>ɻzɭwm2Fe?" ՟{6ViIW 7&H|AЍ`W C Nm45[x=.ڢS\ @! 
) ؆VTC+[PeHknWЁӥ8=ڑ،-ny6t49UhF S3gY]Y |LxG:+n?7D'h8ߓ?lmZb=X ӈk<q?4ˌec=tM<-{AF)O}_c͝K3e1~v;njW6iIi͐>&>!}IbHLWE(0$9;' [aghC.ۧXxb|Z5Y|zqjO*=?'s\S ; ʈI[^ _džsDx'GosYtîQ9-i훥3;%;ޚt>RcR$ұ6񿌑ZP:KR,ͨUDz9aȣHߤ9tWMg=YQ `@hZ+mJOX+|֪OP nߜ!\_ZuBjYhS0V,VƍL>)نu?ncl#Ro#ªn=pI1F.K\Z~4gx&Soq5k+'10[51*):^C)4[_6@˨98_BRj-F׾_D"LTz 1(vRiWt{@'p$rlu7H\># B1!ZGpnOM7aDfFfR _ SHjyiTolF1d^BJ q[5Q$~ QE;2fJʬ? hHĝH eWګOWe "{f&a>LCeq|GQK~g|ǵKawm@5 *뛿_5k>cE'nqzv# t}/&n%`a~R| ¥Я@tc+UW_LЋ.?"#_Q;D QCpqraF`] 1{(rvJ9x҉4 mn|1 o=Ҝ(tXM#Hry)R]L|&H _/ :v-af{&zcM*#-sZJO>ى(ѕr*V-9P'"TLE:ɢ?h!BŤRK 4:XqYpܞ";nuYp;B/PBJs`C쫌HcFpʤ¥ObֶON7Q ĥkMH^ˏ%Zu%CZAy3Ymb"-*I3:0^( ֘L+>4V qDSqMi#ݷ-XRc1Pj܈_Zx8{õ`%gXsB#,Np+RɡÿGk>*||W? Agӎ˼cc8)Phv,B]|lc饾`&jx 8vҝ8O;M-zϭ=s!Q(4̸pl1sˑY&A o\ٍʊ2˗DrB'vA;l/oOgh= ? Z͉Ol%xa>UُYxZc.^JEv"??J-Ϋ)%L%f3<>U<֭Ei 1LE% D|v^p7p a"B2A w@<D +K?X|ȘKv HW@Dp.1Q[O`B{V+|oB<m(E @EbrwSڕk'EIH-&KdCYnXZ#iTtӶD`$pbS,ЫҖ+/4l}܃i#+U>:_Ȃg̼x9$$'RHg"Oٹ"g$!Ѫf!W~..3,]1k;G w%c >R?p2y}kPY_bn^o';hg¥iոjxڧCwGT?B OQSNJ&4/s|V$w,srb*=~g üA\n oԙT,RzE҄x4)42G$iU.2s>e6lz3x[Q7#^0K`7&b!=_̾ɂn4,4^ Hc7׏F^?Q;!jw-d@_jL\ 1]\t`ԣ ҺAIZWytsɌw0Ĉ@.K[ӳ'D@d/Qu CS JHBSV9VKώF A2-/Id?!a|d$ Wy_~}+ ?i%ݎ&e"Sx ?%q6ȄTQD}忐&%B]qMĦDw3MGͳcs$`ئZxs |Fh Y :4Oxo iDV.2 4o3R+Ѭ5YBuN}R;v25SmhD! {,) ::_0YgFx)?n<|Q:)&xZ.2eFH9D(\gYn9}8:UN%hԠWU| `z$^0߮ cq:DQ)ԓ$gj_,V"0?+P̼VyL%P߫Ӱ=I%%Խ%O (ůS.DtHL+,&f0n)zto~Jtr翗20Bzjiǡ<2#|d>Q4vJ̼6@'&? 7OYIbqyWQQ2D:һ yNL$4E) Cx8-H( uvd^5nza.EkUXZp;V<Mt 6$-7mx_d->p<'%fƳ9"@8qih4+/Zz:/.IHĒq ??3[w6Qi !#ྡྷ(gfBy{2t2[?kt?yl/; m&d ˇIkL/R;cC]gwn>R ~x+ì'ߍ%C^ÞЅo)j>$k^RiuFZ!J3>O]T/O%2'DS$vEh;*&3 0A5MToJVѹluѢOÚ=ŊolvbPmJ,A?r9~6]chotF[=ɦnT?SNzsX쬬/`{n!_2/ 3AyC8q=1߷a,^Kԥ2{: _{11 e_M}!J+1V & нx0fnsO| R֐[z@9 LݛcXd^P췒fba$ְA [a`4-畱k(~&o|M;Qe{=UiEqH_Qtpv` Qb0 ~z7Ś_eQЧ|oE45_2&JXe.>灩dk\ EKo6GOi,t@S_.rGV-Y,SNwadL[C:c%'OoP0"Y5|Eb8\ݗB8B*rrox{ &i%X9u*R*2EuʯqPt.%c6Cg.h=ep.*{ƊH]vGՇ<3_+p?{Yec yVt+mmoR*$gBLzm 8ZJ-F1JQEI݊Yv"O?L=2vwrou>._3ƽj&#'Spqu'LVxa{ˆ7z!*ϠI TFĂ쇢N7 -/Wm8tKU-8 + tV36iQyMIx75!@ EdkĔREwy[bs tSN(Wg޸{y]Ero?ݠb wtD]EB8^bZĦ5|ąlҩwľ>e!j/juAeqI)~@U1ԺYa%0Cy{\73p }ڍ_IvD)b$0qYjeY }zzN o( _a[$5Wܭ$2:?y.o+qA5܄듧n@ӒeN-O6[INOp׼fuqu]c+ "upUuPm9b3.C@qo7"`yi9P$cG_asT9i73_U"7{,rZ¼@ %&P+>}՞( O-p=j{&t3TR<[5=Y#0?H8ǂVp,xG5ׅ}ǠJh1K o2Ou7xzbhNE# ['d\Fy߳4b&gQeO.Xb4`^[K93F %fH  1}”hH6PZI#vV_x )iS. Z+XuI:0i6ӔPx_b ƒ1jBCcHin)kJAų 7狷s>KQ;2?xW\~AۖEdKɽibg܆а.N8([.֓){(HYľ M}#ϡM#<)'.rX̼sd'W6߿DT p.c Js<80uG {Rhh)sX>s>YWFe&7+D&Ǻہaw^koQA>љR){Y&itBn#YMX ߠ51"0HClr'BvQc97[uFVFWA`ht33bn"9p tWG|,NE+hϩdm!*c.h0wFPAx!WXA 0\P d[XuV@IuelnيSkVjV|2g}>4ѿ8W|C. IA:"fI]<{ͰVA. #$ZV< |ޙm,xG@nf ;0. 
\r6YY0lt8zUದb10n2B\n[XW.8&`ncÝV# %Mb:>Jo)N$4=, !-Q6t"qZe:6]?j'Ҁ- .'/xX,8՛\TO/*!qG=0rǵAZh_ގF|<mӼCZ=(1}>,UE,T.A6qcYY!9"|~xߕPuwjS+e TdUAIgTFZT7|r>$FG9%қ?%ǰ޶C'+ IbXh_i¬B|ԒvB5!v0w?K ;[#Nd'AV̭k.AfZLum2)Gl&T*weq~Gȧ΅+=P͗Qm%!K\ɇ>JJuʰq`KN0>DNwhJݻCąxS'S€Sza{g?K轄<.h΄Кw0uW;ǦxXgBnIrЬ?G "=՘9qyr %+Dn*wTYS6t%Aj-$jf894,X܈\(48P;8l+NeE"Ö=Poso_G9P=05k[UMiJ@˟& uvTJLEKs8Gi~6ue@ UvKw)'~8a}+2% -yw RbdXöFPe|a^5 EF=,0(8Z׃H.%v3YIm8YA\HdfeBDswQ|ۤd mr"@V k4$y 6G$v'aG \ ]Fj2&.y4gTU&Fi~ ,\~A UX]1İ^ I 3{vX-|y,R ~m;Ʃ-U{N֡y1 % YZ9]> "#ϴ'H4/2?3كUKi*ظӿψv ˶ 99h>HO{)99єbV_(N MģHluM G -'h?J"!yv@LPSdO~ ֟exa0X9%Q(X[ oʜeJoa;wm'"٢f\hjy$@E>\0Us`  ZaAʊtg!gV[!HBWAAwFaaL_ ֊-jO{a* K+Nrba^S+=\}0ׇ+i0~'v闅JSGy>[OUl5 8< ]FФR}Xtab:)g؈JFK Ú]ԯ,$wTkJaQLalcAv(د8QTFQUYP !J ǰXo  L{=^T-z!gaiS3ef#fjJFw|d Hc bv[^Gr^S.mhU}{uw h~+emn WnY a?jG:`7j_ ց?,uǙP-:9,v8Ɩ"C:0h:pi-5GO#TBw %W$k?YlmDajS$T{X-lm۶m۶m:۶m۶m۶qۮ^EEjÊ9b}}[ש`kg~33[dJ> RI*zPMJ[BlUPS  Qi|Cv XnZXLdڎg:.9 ]~wXlf$_11s%}, B\5{TPB@PH")ŵO`b2Nڬ^@AN&fHP[9@n9hn n0e"Lc܀ .ٵU@sAφ8sMo2V(eUb$&t,Tcmڔn&/Q 5XcuDnB%g?R1M;fD] ,ħvC  BQrYyc}eS߸BoqM&1f1+$ŝ]L.z<'g:tj?]cgJ X2i.0+y^wXT7&i5MYÑZBXGMC 'S F|oeJeҡF;&)1Ng'3g$h h(~j8ZĤ@kW. Hyw1`Kig6hzP>ʕtPSj*UHlT?L~2*.RJ;9@A'tj> 55(v/RXeasɰ7km.D̑L1zä)Btv!zԾlO>diG ;Y 7ib1rD9GN" H)&[ $dqAJ cCDk%VB2Ox}A$RIx_$lOݚ}ʟ|stWezLiF~[ S*"+O) > koh  ?s z%܀VJO.?>y&/N+M76GL.mqyKYc2>)RלOH֥ܶ| ɐSŲ=QK_%43˝u'wn"D@ﹼ5ۚڀ)#*rxL9i/c{U~)GIZP\ "q8`ҟs9/< ($@lT%Qcahggf<J+U|F:n::aJXIa՟{#$nA莘?+4^AbbT\JW GsLBBvo!} bn9ǵi7'yP!L~j5ﵮdAyr|h.a @R3pQ\Dޯ%T*Ynl9ZT~m6MBK$1Gof̔ Ft%h/Dch∤a[MK`)F@I}{UL[r(z `p!뽎#͘۲Գ{nIM{:D/Lh)ˍS#Z=Y90D 8 yƫ>U[<zG#(;´V?kRhm~޲|?9V/krYcIDՒc+6~QzuMxgO޵knK 3PPaK\c $l7 jK]M=wTUr>Ed|L%Oy1O@h0Mhqqs$B&+?"/9=R8^ ZC1#N:G)[JK(į8!N.KGD|:h7q0-5(K64.23t($l}<2E9'VTVܵ?%L4.mB ŜEQF‹iHPM蟌`g*Z^'S%ee }w*!LΟ˧igmeJA'jgı xbKM(P=&6i 㷃k ;'>Ya \Y))'l}<{rn~ytй7>7`08fee^9!.%D8)mW;6]b /yn#uQoŐN/<>>3>؞7`\K Bw2͓DD 7C{HNYPnvu.%?u:6Q>KOj^& A A Aҥ "t,ܳ"uARbķ/ƛ{56&w$DE0xjVJG;>T('ejA/FTXh?4޻ ;*Jȟz)u{'Qt|}IC֒ްR!ԣ-.:EA*4)H9Fv[C ,&Ez 9#@$c Z7d;0gݩA*Rki n?x zG6f37;6W~K#}Qz"uFAIW")-lBNj%eTSU4іcg %} 8ap`\kJ>$oß<1[z]h-i@yUfu1jzd6m=lr-e97U  tOšbFyAq2l bN H}pe" V5Cк7Q y]WmgOXayӖ̂~ ($+)\ÔBG6p"-.6%9ͥA~@@NsH "]z2Zc:+ EbQ%.Jݙ{2X2En4`` QU6mV_"" ZhZi ]= A rBaeN2}vMܺJO#kHZ+̈́.c]3-$5XZߞB;(<@I.~'Y@Vm7Ռ2dk1@U?t[a,"T;:`7,z昝!\VJ/7/ٓYWyh'C. LJlzdۼA *1+9˳u!|l l:,)Tv&K_ΖfGi&prD37aS дΑ=3tqk nƺ8׀#NH[}rNV`BOH ]-NJY`-xk3/MMW[fn= eHyu'#!鄮5M-I`ꩥ kR'/PtdEW'>LTCn z!DV M12c`$ud5)pt!ݕw.M:6?bؓ~۞*Vi[C&0'g~4^gYk`qV u?bFf) 5z@~8޷8P]~J 9d5qDz ;se>1 XZ gF[c܀ Ti1>+1[$2_@Yy+qs. 4x/ J,J%c>Mm0\䇸nw EO [l<~J$QбEWw!#noănL*72d^_Ro\3@W "͒%Zډ20 2[:10l9y!("!o6'5E܎SA$fe" C>B7[ts0 8 `f=4cn^,`QMg8/:KjPԡ%E2ݐ% H) 1)}!] |y5 8ͥ1qGP\=K{gͼ.HbA)mZSvO?e|Fkf=OZ_[Y{-X(w̉>i2;(gnzq>tMO%n$f!%9IfF0Pk6(W ;+*zĘ*gtUc-lk]iP.v6>Jn72I6YOth˨*ᣛ[~T*#')=^#)dią]Z ;N3Ȯ8Q1n} ɔePB~їdiq몇xY\$)b!\^`Gxᾩ މٯ+W xɘ_%\I-b tœ ߃C=)B+ྱ+V)`xw]MP6,p-;-Tm(&ƨ/vwkGn=TX gZO6>Lޥ.Tbٵ? `lʪ7 ;5yvzh w,!K,L/=O?m5LI_RMY\ZQJmB,_! n\X=,.y[3+qɌbUtBkyՋP^XK W_{c6V<&a L#\"pHRd4 P.}g a< 'oֱz|:_Ԁر _~{ae% PqOAp:g kU S;Y4N4.z}.=6i0T{Mvu;N\͛L,_2uJz41j8Lj I@$U/2ٛ}y ?;V1~3Gj+Hl2W?+BF?0奃͉ s F> x`olя4&W9#\O/)Xڱ7\Z]pss]Q_"^& 3~gԖu^)Q}Ů 8M]8[YlF.%_|Ly1)U>Tܭ$2w}}'*ߖN[˪Y\Ⲛ|b cA/wI- nxfm;;22c4H ~q<)w,V)cjRy"Jc1TN' PI'5ɶw#?tFV^ <NJ=.^Z+K/'ah67bMN੮jp)1jޯwpK-%]VSp } }/˽bzS7|8bu:,]5<+^\ʸٮC7 =%߯O?da"=ˣWZ?(m˕5y1MUk[6(C*6!uY"ш=:$T矴6znZG'j8ϺcL&9 mC\A*h-kYBƿ H9\ٶ[zs2F i6+V}퉓).v3v@:&/n7/4P ⹈Bù3%R3y$/aC)b}%8[M+|Kp=ɵeHsqkD!HtԹoڢGʳP.AnF e4]oWJ |Nj72yRqPw?>bJK ;uY? 
7:JSoSiбߥ A0Wn1'I祦$?_@$!iU1q˲D5v$\1CFĊ%WZN&$^(IY,a#!Ir&(*{fnQݘpml`/nGIEµX[/ydV tkD6;uߧ{KUe',|Sw+|4; (C:)YUNMdvnvCm2]MF+vcES_4ɣQ?fW&1ڲ*{5 v~œ^)xpxQD%C/Rdd/S}wbfiߖ~)YQ-SW#4L-9YL# xݸ3~1*%oG K<'6$ yleHG(%jrΕj&y4"cj9() sWĿ_#c&L\/fD dkߡ"H2`ů`Ypt /~׬K! C?!{-tU/*a_xqklөsŝL)Q%!1r k!F> X!ZPKh8BF{*ESB-G)-iPzudQ<ǤX;4 tR>"̒Weg&#BXŠcmt ~MwgU}arU@YE86kɩ"I ŐV<]: ppm 签ƅ edYY\d=D̓ȓPf-4acI]l06U좼cD"Ԋ3b\ނ\2ZH2?twYu-Tnq4"ޥ{wB0OP90`ٜAP2:WjdD+Q=~J=ȁ!YZFjzx&Q :.%M] ڡ,䘼SrĩQi@Xz/ OKMM_G]68qǁH(mp<@m`*z+XDKf|^ 4?q1`>1Vt(򋌂}s-I9BJxf _\a١asBQM܂zL,x΀hFpj۱Ll$]\wqrFb6ʇ*<SUc-Y0.3Raʌ ϑ]  ,Y,'Ȗnc>*^mhOof"gKsr%Uiż9ῳ#֟&FwzFlxX7Lg7҅L8K^ZS^'%*4lw,Ժ 3yݟE͸gcu)dvfy` %d_`Ğalnrl4:ȡSs'(j$R2EZВ_nG> +5SUwt`G1k¦1R`As$y>Rpqs/p=SNDA+;=/~0W%) 'F .ߞNV0sFJT0}tkHFM![ؿN mK?R8F[6/K>6z(L$ < 6D/ֆz( zmdQD\6^~9iń(QhWU#3%~26蘒&Ϳ-xA.W xQܠIB(k/]IJW҇bؿogOL3K4VBbp,,B;ŀti_2Zߖ%nUZm͕ZIpouθmMA^ v,`}A 5~UmǍ}e7EZ?cVO~ͦp.2-}ǃm-"Gۂ s$}q^%`0W50=D@e1ײذ){89@V  KB!]jU7%E'ghRer,ITBMY \6`HC ړ狕BޤDW]0V4 TՄ-m I+XHY!$Y\HK k^,o N(a >C0++Şl"b./yM6NXj|I cỊ)ͦ`u98MMC ]&9FHkFg.GD13}D^k2<FF56V;S'9]Yئ^*ɛRS؅jpׅLs>z6_:m,_Eqz`eUCҁTWR^@1v27 S{) ph/iδqV$e`hë.t86(yWb]>m9gPX>|R/r$Am<3+gn*4Jv0F`uz1ⲗDVuz4q$$G}go'p&"S@B,Sjx7\o:ت΀]mb\$d7UR`yYj}0F W@r$[!7*[ntNAuYuNb9hAlo#x=3}pRm1PaTnԒ7yWWTlc)l+ "_‰b." U5>iݵ_x@cۧ&~~4qFbcg1TVKcx-̏apdϊ;{|8 nJχRl`T p6[f+Q4lff* \Db+:)?ҟͮE1c!?趔p|)NqXO4b3]åQ14G*+K}-:o]V:2^lk<tCH rEpZ0tUp9'׳SYmU/2<'Hj@)#~BfBS(2b1S/998O dFmf#sG^H2j+QKcH p4UELXl;*I )X1Fod\G T3SW  mv~-#>۔-QC. +Q `4 ܥB:I?*g_|zvNvBxʿy~)s Em]Û1qTCI:=Q%M6)2qYcbJ]qr)(6x^fLHN#PdZr4G>i2dm3hrq?'UuBu#8%_c?"C \orV`כ‘Hr4^d^EKyE \Pȣ)"?k{P ^d.o9 XRa#!r}k|U&xy;H`5&-zwնhRx`vsȶb@)K~l?s-KLZL;CN2OguJl#Rc~gl2,>z!!5?n3p d>eYA H͵ w%I @ Nj )\^x]-DI5( dII7{v$t4]Ns)^A[(.6Odl#­IZ+Qp/spբ@A!JTW#4wlcokJ[*BZI] QwX|86輥>9@E5OHZ yԄlXrp9= UQOڬ< 6(y(j6EҰV.CyV4ϫUQ >4++҅\0YP(H,e]v7AQVꤠcUh.9 uېVE #XXMDk~?]<(m0i,pTPC8r /vouCOf*~@Jo9n Hk$^ōΥ'"|W!~lpZLHq҅ՐoWWd&H͆?/`Veo"}+8K>+T:ڃ|\|)&}= arp <"r67{2ow.q^^|9}I!Dg櫸d\HeILONNS3g˷ g]o,+=RQ`#RvAl8G,z-877V[/uuП> [eg홑`mO !0 `7e[sn=LÍ+g[2XeI歰"8ftx*Rnc+5M~UX&EV4dCIlKkgZ3('g%TOJc!aw{!g~0: 8C nYUC>mo-={.ľ T69 Prc#hP;/#77_;?vlO]~ xw{2"$sW K݃[u3LчZ2 hW_S]ITڌ]];X7bNZ ^0c>ą( ҉{ݰp]tӣLG,w(JhE**6q Ioo)^:}BQ«0j=bHO{W 4Np:|^9ݚ KvL$~*m($v5y6,3Z\XjeNT* 3d"S %{:^Cb.0C`*ڞ=bEw Mӣn ؚ o:h o+oTq[U(:Qih66 v7g:i5@>s^$(l"-T`nD}jf= DLi=lOْEag ǼM&Gk%i=b\&uuم4u3X2mm,Dl12ӥʝ _8ԟ,Be%l Uлuϟt"/ʏF&MsRI(hœSm5+]yf'PcK0^l>f!ʒ kRF#_Bu9L$7Ј84t&Kc-JLJqM`}4 \Pa,Mzo5j2i+|;FO\L0o}jĹP.,ju>BV09(ӓw LcߦC'j-QQpHE  gH6m{}x3ӏ:akH|uWښ~k%./K8JlID!|72#P23W|3<5{T\f& 6֕3>0 #:g\V (1b e p9@ޏlOs9+}’i{)͚tY˾\?=􏷑ˆ ɯz)zD荃^Pjq]N59mQE AۋW!.4/ M'3G42H@TiyF.rF>oVtJ[`өz#@?7M7ח&3i100,^f^#)oк_ǫt5I;|bTSs$*^3nI{@NL?Z)tb1 =x݀j4;&Z@7^d0)ԝ*xBBU{{fZL\q0w5+ZOٺ)RGV{14?tA>άU[tևtըD.sf}R0U2Ygإ',P:v%I^Βl'{vBIߑMwSif{Ɵ5w指t.ilLԍ+-h2d z c^bDZc*yb_kIwt0?[+զoN[4_cA\Q/pďS3Y>]EїJry~La4avRR߶dm0zj<KbkS9sGj(Dfg* ҚŇQCTR<:W R}q_@E\:O{ ZKWq,Xf) 2lOT^tdO#@S2F}̭uS`^,+#rϊRo8 V v41&s`zY{K^O@G _֌KZ-<M<Ȕ6JUr?gd }nYK]E1%-0>xfN^ #+} |w|V 5t-%;)fdω<u4$>R&}o2 TF'F#aBzf(- B| "f26ʘ[e6dg\i bAٶ;ÕR&]>KVgpbA7K^U9$9[!SG7ZhT0XNMvp>}4`1Qzl)DΠ{i{LN`|g})KZK/͋z/HM@2cdV?T.]27 .S-?@&Nb-aT8.nTA-EBNE;K5VwXV?/Vvղ;LͶ}JKCưSF˻MRvVI@Z!Xstm=[w`J,EhTYXԕ:O~iۍV,>nR7Dƾ^Y0nϒO_`@KΚC-%n:fMC"J)P$bEWLpv,B }ɀۡQSxwbfl+>>> UG &<ٗaڛ+mhm`D Q򻐟A0` ,I$Q|*T)WDy=:nlt .$.GyY{%tR*5+@]oþ&Kw`}OLĆs3Zc+&##G[+YVsc_Ņ3K8{R!ǰ+#u.\v7Mc^Чgx(sDﯼr11dV;dS T]P EBU˦? 
gHxɥKsGæ8 ):JvP 芢]g >N߿(yQxj]$ܓ!Zǚ rZLR9蟜ƒS8HY,f10T=4sY/E؄&rQ1,f<$۾YB^h#s5)e*O9›bx<q8)-ba# :v}㕯/*srV4Cv4~*l6w!@ԩ T[_a!̂BcD`uwDGݎ uz!:N}0Jˁ􃻹рVxWN߃W]q."B|5"xiQ5G|5ʑI!3|3JLZRM-QL:9KfsuN(H |Wajnip7ܡ5&.G<=Zm(\C\5Rt2 Kp>i=nׁ#\ ksT$dI0@GKı7Np}OUxo Qxg;y1J78^*v9ܨa\-.|^$̎ĐzL+U59QZaIr)kf/9ql,bݪ_҇UzN-ɀtfu.@ڊ݇ۥXԟ-B #K_ '{O/^6]EV5]qDm._\0oE`^V-UΡ fO8A(ရOaVpoThV3`k´*$I6U+a&D/XԠL w1$CuRv_c"rjTCwfd78jD`nP W*QevҧZ;;.LtSdףyA! Cn )eU)~Y: \֓k4"9O%;-m|BPz3/oЛ#D-!/H9TBsMԞvWufMZ.A{5!*Z-ca@ 9`*}؉ۦhkXBtvPbrw Pq_qonb`nѴa 䮐 9uC3\-Xcn&lc_j+PD )u&a͆ gcKD$-K1Z]=ȫ&OtoMDݢ_6+Jt7M"eI^"Պ1q7ɯهa+.S閭<ݯen^:9Ncm2/LkgI>p@C}6^1]سˉs5apc>Athtd˾f\kNBB\t?r " 8 ;lΛi5m #~PtۀRz6/e9- 롷u,F J,:T<=Y{=LӺ1|˸ϿLIQ1k=UI$~vV"RNM׊@P>)7^$A|}M~/854X7iBx+q#RL/zٟEeERDGZ'X`lH^lD¨ـk)C`&72 ZFCm|TW>$NC%>O1|BTm{WyqcLjGu%YN5?m5U_q3 {5ERk2F&3H%<աRrV٧@ Hvݿy1ʿ3fAtj~ܯtݶ]7kmMqG~؄2a, oRv䆄\}(CIKl:|Hao%3o r풙oR%ł:ci_wi8Vg Oҹ3?r`^;{OޅroUB'23?fGe=m`v;Ed^y<S\d S.-'˘2V)6ΉVU5y Ǹ;oPlX͑4`)jĈaa!ԑ-hI}ܜy@\ȩvtgګ!Mn&&헳5su B @@N3h]"X3wnV25۵;DL]HJK1-Oitā #;MLѮҢׁ"fRMAJu%O$V$[!Q=LWC23ŸgLsEBJ$uJ˚:.ZQg>B0 :DRT`v/5V`OѩQ/g-Ypz`P [ΈHyV;]DlMK-'''%/8%`$7zkWMxCCnm؟A|/׎Kra `[5k0"}rn6j&ko984P$$#̹w@͛΂Qyp5^ @kXՏ|?HPpH~r^skZmd}pw<<;e5V-ӂJaw޺m_. VW.!K 4CNFWNQ=[*QSz18.qF?KݚpCJ[*qŐz"u`d,fܹ *˄^y Uq>ݎμ4~Lg {=[K'HbLyMTF0{\k}7rQfcP*%3Ì5 Tm%HY˜x\;J%H6ye]{R3#dY DZ.eox@â|.Bߒ1nJiZڳ0]jW% xl7e2%aWRfjQKInvn$f}MEB77FUaK,tgx.ѿ au:H]X&3񘩴 )Bp&hP R*b eQOF.}ٚ/q2ӪBH|ga[Jˤ4_%50I97c,tӘ0ԏPڤm6IsMQӋ2{II0CQx r3+g11@OLgmFsm 0|-z pt$6 vhF8^ǝ_j2P4GнG4*S;{D2K;委a;Rj[0㿾8URA&<`Le\1]br;86G+S׿g& |lG#2u Z绅̵aRKCSGcf6%=̂TT(@V,e]F#@ LB 5#o-TcHD$麄RdX(oecHs껸fQ/HB>K. wGX-]W$?uAvuF}nK3r>?i_ĈO;:%P3 ĵ#o"I__zBn!CqS9"?)_A3mK}I+VPPt;-җ6VQq+ ߍܽd2<yQ_vg؅UcلMvdAh5.s-d}84-\b[D'\=%+,ڎ+p#ͺM=Y[Q4.sb6(uy LC&3(s25}N`d|oibHڟЧ!g?ycypLaWzzYrML5t{-X']Pik{$FxHf{{88'|3|*?0y FbgnGы׾=Z ){;PqxEl$Yw&[5Obh ׭UFL1H|Wr ]}`R0<Қ]痱=>FxJ?aM.9c[6E9vjm(oM `}rGPe ~Mlp z!JR]jr4*:y)S눽.0)Ν$z%J0Ů6RHӔEhh ~Z+00>M=F'Yv CF|M,30X! ֓΁ܙ Vi<)6ZB95^X_?4a;Q7_ۼ[`d%QC1pX^z3x6eB"pca0)˅c2 F_"O> mKql͟,[ ϶v>I%Ji|wjw+)gsc9WG^g޲E :,-?ˁ$MPdEZ/>3J#a0.NWRz*߽45TO* oF[1NW.i0KyBڛ&և+n F{NL{.$b+yƠ) 퉊$)ê~ oۛΈ}e!{-<bf|~\}͜UPo,;߄aP 11C_jeӽĈ.4B1r^mRgbd7ԑ]ǡ[~$L5C3Ky. ;T`A]F{MC# pOwOeSsݨLuӓ%!~Z9^*Z/׬{ |3}\ ؞^0{Y$(L@%ilJg#CV).]$HEEwM~ 5MwH,nC n:?ZyQmC G'ژFWN 0yGa}>qJ;vOG3$ 'n >ʚV67`ݜv4TT%d{%-Y@LGCퟚ'X7w$б7O qb'm7uWqKTDEcz/KGQ:XJވ-h>Ճ'~GͲLsK7#X 2#/-Ssa.Ʃ5$@[*v`Bۮ[KFiޫm3ʠ1G&㴯{huHj.|}QFg;/E74fP([_BHQ\RܐQA󿑌/)ƂrQ=n f=WiOdU[=/'L.+o#II6%I-G d]E4x\=o?0zD.<(Q lV4HםzyՓ ʕki k=U:Ϣ,Vkqg"v)RY*mڵfF$%DFrfl=~&sFYMZKi(|u->]L C'9 Fl4Nu@9yakAY_ Œٝ/79(_ӝ=vΩY2> 4y"U9!\S쐹C s.F,#kY_(myR3Fʁf;=ᒠEӏ?) Ugಷ ; ,$%VфѼ3h%7"4L.HL){xb1jo{4fH]E]b=I^wأu0l?? Ṕ>tZo~V#0U!p}с*,~Eչ?E8V^Tsd/+=`i-dYս]=]Su caB4+U ^1^1:.Ry3˒K?K|X:W1FfȆRNd•#WcGAє7ӟ^-4J"դ fD2 oQwO҄6}]J X#&u6v.z5逿QqE=6"ٯX$/~2TeN٫A`dXM;|ɷ+]_)9ɎdFLHFbra$Rd hTnJ\s =Zx1օVKJ;F  0f/RƗfteS|Ƞ? 36`@H%ظ v`(CgUN)ʭAo7ν" "VSgf]kS}]ۦCR"' d@'?@5Y3vpku1 95ߪXbڌ ^ȗǦOPƁڭ C+>*6' ܍ tU} BKjW,2.ۀ,KjT:|y=#QTk˚ƿ;R }0`&a/ΛQcvxa k@ u&W+Xo?c֙%[$tnjAK#Uwn%Y sNnbO,0FrI0M*OU;ampaZ+reK>x8_DŽ+FA6i >s_f\0 lMc蝓PYVQ*[Vab0S3EI? ^{5 Q4Kui0*=Ν!,7ifvC7 Tn LyZg +#qMl}\;~-*(`#䆵# ̧Zu@CH7[ 13?`}eiya-"bXCF2[8*j<8^5Nk00hƟ 8-ʇ{G'x#f8J4T貽v+?FE>\ ~_U^&jmgPIA Wlu *Է2JUN!zS$-؝I4#C3!Mz8տanq%f^rXy+ѿ J}F޸@ bɿV lT=!ݾ]s-2b1`yWD,w\:i mٌ\5|fK<uCzչ"Mdot&o >I YGv .!цVI0+jq= PDHQ}e5%C*]kKf! 
'[*s:nẓpvZ c4+猈Y1(YIFC^^Dj:7u 6c}_d@)⥎BFsﹴg4*PyfiX.cih՗{CfÅE#,Lu7rQHAn' Q&.(Ƚh:XE .upO?g; nqMr#jr>\,$ON6 1]^@H`I/0%׵] Hݟ7km{e 8}]zV$)4|пP_o"ͷ:&߶zV`>u\yĐ8T][5l<3绋c=?k7 tɹ1NoMZTΫ3h ]Ƈ{ dy07[9"ot m\}LuyVEb!FʻʓaCAKD-/v{WL1F%-%_unfO('?.Hntװ3^)ee+cZ)0$Y,YD /1GRNK򇋔Z7m\CȓH^;myΝ'V}vR1ʽT.k-L+h50dÛޞmvH y0z6Y;E_\1P`My*T>#5ui]ToT5"hJ8Q*$0ƺȀWtFa[Wm%U7*x";+ bi?Qh|U*[-jxDcQ% o7B\AǞ ޶ε=x9x"n}7`4l"@!݅+jCwI+^b{>%q2صCx*_ul=^t18p0HjNh^QlCDH̝2dhBTwdF Z'Rt~ uux]lq(\6MP{P3z81.h -@i2M"hڔحҦ1I{UI®8rQd{YJJ􆾥'ls_f_0Ȑ*,mlINYSZLEO݉\svz?=/) ށR0 I{"ί̎o,^\׷-m0nw"3g.e#rF<`֪tdj:α|h-rk1Q<ʩ5:hcag \JHtۖ:l ˄ Un)W %fj\p3e|w4JЁ5؈hՋb n?P {՟9 uO9A4YU{k1~ˬ &@Bk-үw#6j?54ozm~H/X58H]6d?K`)v-(#B K WTcx'a.@$ps5F4xNyJ|9Dm/Ŝjh]'hGΤkEJ:7Si;"% JERBBeS m\`'Y򤗏sCe~{,eJØSZ̝iv`{}Tn*rʝuBDPGX:i.=u+ oH+5l!%޺cqW}*mP7S:ը^l 1a,5g켦$ Hǿ\0oj@&IL\uAcP!qSpt$ZS8c{|WF`zEa᥅ RDnak%M]Jңϯ+-\tD0|k`|RcľWi~M^x䴢&i.t=<(ʂREՖR4Z_T [ࢯH4dK5.pEWR-+D[D0jqg:,T$}Y,!Pw-'y@TP'-i ;mڐՄ<< .|@p &v7!]A_肑}cG$KWw3Fgb)Ʒ>;2w/}_UJ+bi v_-B;13Qݿ,0W6#SM jD<>86ι8E:R!/'h-q.,`[#,>"z- S4#UŦ>K>Uگb TZgRvYA&FFbh84ld㚼;22g!~aTwe;Trn餾lӍ\M7; V\ݐ/cRmЊ4挦ϬpIHo+dC#=6R_}HX6^Ga% H"y,1!}҄LqO\rf*TJce{٠3\ /.B$6I l0- -rd|AXw_|1[ҭvVTwzIf_LzhJN&Y_sP- }rpMJ >]錩7ʩ}ф#`?.^OwT'q?+I!L0g`DlTb(*x` Lg 5A!-I/bP%*S`J.?~5}׫ /SI{({+/5v3)\` gew]2ҺS 4YcB1 OnQvVEw |`~~]lZgs`|=}LM8mz5#&=P6!T/NkLj[ָtz+]l|2tB_{aN'?X:T9-c$[a]AI?+Hw-Қ>N VD!YxC% ]3~]R%(]BYf #Emk>6KߝRKu@2wMob[l}5=iUsU;35]3u)KIu/2$@ԥYĿ4CҸ:u\} KyjbnΈmfƳ֌!DX&i;߾s[0D|?żΣ Zd"%bn/Oj4 /--/~@1}s;sip-Z[]yE2#d0x.oUHoj ߧ]IVQ=]Goi;݁l8e`$Nh:zXgn]&ʋ}=rX|Sڋ:!T,Rf{-C{/s $cQI9`l1d3T1b| ʙɒ q{uL9ϑ+Fp>KGMPY OeuѴg!%X p+p}vBӵr2E/h0XdO2m*F׷=,\/M#n1@[ni"˜2qxYK~^ iipzD~Ї k@ -CYj Z#p6l@ s8Bk\6YfZx|; SseeIhyT(.խaAa(E}k}>/. }TJBQ/6ȱh;)akNXs sp(HVxNip3ё ހ 8 (.΋P7KyLA)X^dɰj'<$"K_G=grdTnҊ^YS`&kC #jD6%a%&ޟ;x (ux{H?A2ZIUVB'B<ȪmF4}WovP 5I^05[YX.>iME7m6gZ l&,\@aGO{5:a,ܰGaa*][ł hDѥT|Rb^GX煇Q4H/RI[P6Ү3ς{0X_ЙsH"?W@% z%68"6t`hQڒHaSWWds12Q0T0(R"}r\'l03q=wVM{A?gSnylҿB#n{.J8 fޔRWڈmjɽ.{KyML(+Op8 'oԐA'R' jjyįn"a ɷ Aef}C!Ev~\\[jXPnJ_%`w,Ɍ3ZP-ٸL9O;by?ߊy t8nݣQ\.5=QlZ{.S AO:#LӼRv󆺖d`?kKkp1L^7$;ph1k,!tگx"*)+U 4ܫMQQbVes^p Oh0䌤RcpBKkݻ' ` 6ߑs+t Jq 5xaCqT0E\ в'?eIj`/$ .\-nKP&RPP:&$Ek0?YA l hAXč*-cW"yPo&EJ<|A~LYBk|LJϞw.41x||#2RLxkޥF(T$YR\=ճ}ݷs8p|{eݔsmRccV$#5 QDHqٹ{ h$LDcd57x%eDXoʗɐpN\YNa/^F03MMW-崈=d[V272?Z?1AUu 姾Ҿ_2gB[ Qh7 ݆H[O flwT|tA9{ԑOخWUdSZ9fP^j)IH+ٚ8t]S(ST-q<٢+E$B!bmJ1iV[u+`"q7ԉƖF% mp4Cr%n,P@3-$hSK\ a1]y즙[u?R8Y??-5ry+)ٕye=:s1;ieC /#8aɗ=ѝ|)FCz{VëcmgeNTw^˝;'n] kmecֵW;@,ET&>hYuo  KP;s(UbߡufTp4%#մWktf06O.x]bhXoT"rT%:5WGVk)UQ* 3zװ(H'Aʖz-6T~hkH84gvQ"cz%Q ?2Ul~bK+SOںu)gW{K$po1|YaYtX'ch0")ڗ.[<6.FI1)אaeO)̰"moG 5ue=\eT!.q-${#f|AF4²gmR$vmEQQ2OP>3ʁБhowarҖSb /4~%vvc32$0̢5r"񧒴A UR|o#]}5> lKg/*:3e%-&xRxY4}B=9H{[GhЫ7*W "N%a"0;qc\t[43CbAW3Ե(\g*NghA/.Hrb*7InuRH,1!A$~Yr-<*=ߛ/~B3#m!gt1F̋iX &reݦyFfϔzV©,r- 5vvϡ5ౢ3/t"01&0+)sJ%V,LC.%q(/33}_ %3St=C+9D/ W)ú 5j2<#?Xlt>&žʱ;jwʼn;ΧBb=4L$6yd[iVr)g48 kPnx5$\b9y3P ?S|]& ^M*>1!Opt"ZیwRx㝷5G>"x.- hOs#s6k&׬?Θ7^h k*:xax0tKAYK8b2]]4;kdv"(񩗩o@ZO S'0H8L߆|=T+WXӚZ}X( Er>v"oYqFr0vaGCr҄l*vaEJq Z2VOj+%oqi; M?o֊&szhv˩s킨xƜلy\Ni.VZi}r]~ g2`3G-S:O^rȟ}Мui76oZ{(66u> DiA9՗sbX@d֣:Vfy !,`+p{2+ܟ%{y*شh#äx::ʲj%-7w>0:=ŃѬkF_lc`Z3(7[6*dlXkj{Q %x{IE1}A*bG'ڏDM>j ,3X+Pa_ETSы]MȘA;_ ?9ٍiHãuc-;.bIKuPGoK94UhvfA[gٰQ)%+ٜ0vܴ(0Y c3hT*# Lӫ Y>^:js3TQYHΤm㶠oIi 4i1O9/kLË1"͆|4G&0 58>&:5x> woIݶuD\f>Tj>ES 7{:[[l(">/D}J!m`]d12B/M }zZp^/ư6~ 6ZO!.t{3hE Z0խ.ky!Des ~ 7^CͦJar1 辢-/9!m(,!280rhM<[W9נҁO ![|y=_YN !a;3ւpLH) %ٟ5lWo'|^pȯP27}7 ک>;͸=kĽ%1*- =Î 9+YTe[T[ޣcψ+~_z/I@ŋt9 Kׄtn,*F(lN:qhs`<)s$fUN3$@mW_,87Lz:TվX`iE$V )`v{}F>Nݥ2rk[VU*K(PH$hIyӄyӈ t@AsʶD C9zּ'`ZԮ6Ṯ^3[f .=xGLyPjeXz_ֵ4kDV7wG膧~73F@1@ad6gxv*5nB/@|0y8ƅ;"-ey` &2v4*f.g! 
H>KL B2RoB~oYR{roeLE*ִkHlYz@+[]VUJVoWD<+8ePsF\f3(nfe@"xIt`q4YBm&6Brƿ|<6+-uÝ\v)K':"R$4͊󬥠wg_ &B^`a/ smJ]a Trh R5|/ΌyUNx16_]8D!5`+FȲ'NȤBV:ʔ]:>Vf@>HohEA\4;wBRdGh0XXEydRݤ5!8oBjGA>$Y,B _|YUH6Y>"RQJ+U>+uLab%+ ґ{o¹ W>5߇Zq^ % )FNˋ1t.N }*~7v-jb3іz6(558 T+-ط⃉)0 rB+b"EyC&zEW$PՋKc%_ў0Dvze]M@DJ|n<ШA@$:[x(8rW P?A0NG@?hiC ܢ78x5-OE3-ŗ A[ޟEè׀I%uv/v*EZZuDZk% QK|OS&Fͼ n=BB?!h ?KnvI.&f#c7*tt z\@x;nZ34ErݺE qLgT+!LqЄ) G2 ҞK[Y`Y{m+T sASC $8N`磮mmj r*ڰv߂ Zl1wǡs/%rl]3XJUcy>bG&*L!DXs} . =0)caVglMxŞ5[;68mSRRlv03`W60_'Oc%#c|nXSwc*\;8'Axe]ed_d _t[=4d :r!&=XmFrjOP-$'"d(}gR;ӌSƵD7hZ!R Q!w) WSGk_00N n-~vz񃏐zȺ <1 7ZgJIMiH}Dg^gcyfjL;xyƑOM^*/$Mt hgW7ǖuړ7{qΣ6A&ںV.vd_  ˜`n2cO -f?;5vElk`<7]5Xt’ @pFSy5n3YU2IzaP:#+:=$:+1,e7Jeޤ)["WЧN^qs!BB N1CsP=s :!cUlTcV)7֟T TUfya tNexES`}mT/K54T zr ^Il=Gezu_` ]cIG86 tVtʃ",jql6Ij͝P5)2gƠ(P75fu'D3S){pOXגȳh.J-aHYuB)ի#`t0v.,0nh)5$!IϺ 8]#cM\@{*./Y([8c3JD|%_fo+@[#%Ih4Y|?p.p7o<~ {I(g<)Y6m&82o}2bz#8P :s*OMz\X=Ex5ze8"Ƌ#Ոgs7@ w)*ν*%Eg-!35H\–.(3@+YBK "[JvGRݭ,Z[R":UDa ;/ng5KKg@45ϭ]`AouzQvΞgB*Dn`|!nGM4U8Ԛ7#<-!VٵZpLLݲJK%=~R1Ey7]0LG N 8VQCzC:3Hí21KP4 5N9n 7HlB*YdWt;-{|HWlڜRh`$jXb`aJid=l"p~FDQ>"Y*?b9:wTwHQ*#<;?/LeIrnr?.h@)Bq5(Ʋ[ qhN.DA *-|S˺pjQ2Tz'J͒Eۑ5BI_^5+Kmn,i`ֲW/mėWuOqWֺCXЂߚ:={4~($BiarTGVt]|+S[s_nN(PD6{vb@$i@zQH&O]dyͼnv$uD'VN kճi8d4 ~9=yA/?-" Ae NYܩrY,X8ScǻILF b;qWf::r0ϱe'퀹Y =U eoVW}^()#cטǷ\1a\>.ZA[Uݖk?qb(&y(8LѺQrvJ0I503өI@8 t)˅{d^N)&jzG*CA^  2 |G 9aNXЗ^$R6V%Hp柁 JY8鄆ڍi2ſJw/r wt&9KKCf<%W+7r]L7z .7`nx.vA#lEҕ5ͪb d0wqzQ_mA?a>M{tlڰ6gۡp|wM[S4tޥƾl!YF*5GK&@ GPsCʒp;~AkM`Vֱ-e%v.}yS ~ Xes5d+,)(sY%6陶oW<,Т% _S`!A(w&x9&.RIɟ=ԴzY&niH\gdPiWϊ xtQ"o.ɨ{D^Z_3W+HE2Zs;A:CfqԲeEo"heBqX0p; Gwۑ% b3 wu& !@af8N3D|b{R) ǿI_Ї_=I#{@BJ1b=$Xֈ?]:Iz'c_VYWV1+y[\8hޛ$OeE?~ yb<7D e?Fl0]lީ8} USuiվwjMNTQZ!ƅUm:Sc Y[8 GEDd2+OA~X2"zWO?1lqF .sVw_'7*{I#"!;(ky*]?HR~i';~-AVnj?2S`asM JP~tc|T癩WZW*0 aAGϸuRJʈ|?k}J^<3@4q M$r(~>.J RÖ:VMOm؉58GVS uH|ߚWuGH7@fA@r#b<4)sWsߖepQp*h(GA[jal7Y\0Lq!xXnqt_B[o5ҋv[a) 25ƕcEF؟3iA/(Aoz^l0Ks[1:m[¢'h$IAYiȺ3BQ哚9n|NyRCo1aLq(7R?SGmZ%@Hj{2cwhbp/SY1)jaz#u?JrL,ZNSK3cO(n]c$΢Q+Wi[D1^MfK ۝eF@Pm2%b{3ՆwɮLdKJN T,g1/J/isk"ulMVk4VQMda@ӮOm/F*ё oE^!IVoƊ6s-)Sٲi˸b^ ~+Ԭt`Ϣ4'kS55ܵRh_H0sS8f VI2 6O *R{rA0oqwޫ!pp c5'{+F>ں$pJ2geWsj#~e$sq2*ۙJk|9|נA>.O6$ST^+YT%O1EbV$n& mst;7TmXp'DƁ`e8CB}wi'}p=FY -|+Ng#;ٺ0a1TYъŸݥNmwHNyuW$>e#ZhP+|=") VHbNDnjFAwvjV1G㑚J;)>zE63Cv. 
(奏yL 3T 6wYJ4_F4I̤ԔkHXZ1K2 ,agXؚT«7zZ63Xw cH2oiaYr_E5D5xzXQ5߇ <Է6]yz(oЀ|.Sx+w28)  _՜XF X5 >u%{˴}ՙOb3aƥŔ"z+fGUYJ ἦ9 kDr֞{ m eMg[mVAxyӤI)R9|uEzW~ INq(H=xL*,UBz7I[P`ַ7ᬛpr%ԏwd"ِ﬐_#_!R*{QΦһw9"S@MB|Dd?cą4ҝqvC&ERyͻ8@CL:nڟ 751GZHYr@C!?wP'40#Aƫ!ӄmIGfm.>ⱸΥk|vuDiV,i|EQZ*~;W5+gJyϧg̈́"J9R<~[9EǿiF!d G͗d>+_AfGP MhgT]̀B!E.!^Xa?]"W6ڭFH^%N.y[Cyk~;W"&.]4k|IݖeV(0 1Ag(b`GYKA2!oG4Ghˍ5 >LPN&b&1XkIv8rQo+&k<nl!#f)haMoʃE7e7Z8i4tKSx qo# V+0?,&=V퀰؁?Idž^#9%r 0-gBVubN-BG'IWc |zW!Y{k0 {Nr|jt{3Q^ (ߤ*RC37q)&6i,%Tfvm-߱\u@ +p0n5ͼdW3LdZ ܇ #:0IbW֩}'~ < ]-筤lNz8Z " f"08].0S)& +9|4 OSOL8K4 T[qb8%MEpa V)e1} WV?A:%("u(?[]2=bD0t٥u9>"t:Q,% y F@@28}o<e\txo*rԺ:VAgCZ5+Ȓ?fekU@Y#R1jz:[r~_+GL0-O*y%]_3"liޠPgr33WT ѻ#Hր1Ekۤy5Q`L`w]-S5'os=H4.z>@uE1Jy8%;H*6\xp(\;?׎c@U}#UXgD 5B[?!\F2<RڂR1 ͡QY6iˆlةaۮ$Lž@_⣶#i5<ˆҟ+VU=͔BHzCkB^p+{+R+͜k`a&`.JԊy^H2 >ܣU\!=8*רkcc^vL6Q{)X큾dlY3hs gvǮ1%~|#.QVFrLj$X藿+ktmAb %{4 oɶxt" 猖#{uAC}'hqq~QEgG__Z%·&I: ($GKn:k okAH&vL+w}I-cJV8جwTl٣Ԡ*ky4__HϮ҉`YiWfŪJȏWrGI#%7sH|[_tHO#2v9*] VUA1n nl[KF(?˂~+A쌠UC,ŀ -Mx)3甜98-=29el⡡sH6]B'Y?xHABR*J_r7tWVd&cdX.b H2jګ393tVe0/LZq) 8z)d9U13Q`& {LY-"µ?F>׫ezo>Ƃ6Ðq폅/.؁rb^\~{֟5&\C]__աAenSpd8~ti#;Bo3t|牑~yt<9O{)%Cy!F)~7D@@$$Mқzi Lr[TΟ5I08rP2 ;1&@b7R89Zj ] -dҕ=|r(i\ rwEVy+zZ"ZQɠ"oRgvXت5|ߌ(\a!.4{HWH Uypq[Ss&F"MM6޺+cF^T\)p+!tN3_gQYh J&%ƚ.W쒒$ ޢOo6,u3rJbDЇw +g$iQq/祤ap*Tv:20c]czYD@}NcIf-&sx,$NVY:v I2:(68dl;d2E}~F.=*вQi^'ď gblr- rG#80Z02_LLY'6+Z]ZNj xMBEЅt!WNI b>{o]tudXdz27d˒۩BQi*y;ma [M񥮎iq(s _jDGR zB5,E┦vOԉ,> R9z0BQ(^5|тoͬu֑feI1-#uz?%dm sl;;BZEVI7d,#@p@G919)s3]Jֲ n6= G_Y^9Odvthתm-ϏI=>.a^T(|' Gl aHA%8em9^§CKs0ʖ+ooX\ݩ}yi'֧5Î~^SJ9&@&UpN[Ae5-M|T|^Qt/Z_x̙=eHd2Ӭ?kdbNj$Jl[x`H')0֟\k$d۶Dd01˨h  WKG1M[}g;> k U^1$Kx4w8[g{O QY$$! r 7[MHt@ՠN_ 0|"d(pňe:I5>?|X1[Wr"h7TzcU̖b]Ч$`'Tw!Z;1NG&˚e/)XA҃7GޤOW?V戀օ%g )$ޝ/àak\aL>Hr'1_zk=7ww99^9s)+C9E /O:K @unN0I|u^Y)m~We &bV}9E+hz BѶڌs;bT#HdO~Mq_]qI4{JW6[_0y> SeI1tZ\[`c cXF&=g3F1x+snt\iAֹu׈a4!?ms舆m`9X0]ˢ2!yY#픀As~eq^_/G 7p.%#Ő aM^ :wj(^ZٗQI2MMBfcRA9yB4>#{GJH-)9?.'{ &/WY&7gjMO/.٣Nu%*6a`ODlvnZw4@M1~)-7T/:N$7=}i_񳹾9l#|q 52)xNr"ؒ} $_6$;ޢFZGY0zѽVWғ|lU q;i !Jj/\!v#ls+ )Vun2o0!A63oI}se fSmO&7}`8#DiH.KMM/2iUMh?SYi]%V!5+'9t6d%9E_LA(^լuWY@K0H?Mݰd!#Rl9FKvhs[٫#3X3rYb}D). s)nM;8>޺krhEĺ-e>]2"EC 8˖T|uI/tjm{[:[,6#؍;كU+5"|w %y'aՐs&ΐaM7moӟGΏ~j$Z*;98h(?u."NxIVx%k6y Pd=9E챷c!v͘zټM|Dse#{Ã7#B\̅&N[$:OњF]#o ʸaֱ\577͓O=y|z=ު ̣pCehCW-} "aw#_阝CQ5ס>щщ}#eC!Uvr+UE30|6Sl@O9 7ҮB<R8kmMT7Pȃ9?ݘw8ڍM,̀]Vr0:;ȇ@lQd%S\xJ*)h'].bB-zp’0\jZeIql:˚ %'-]ʢ2_984J_`4\Y9豫z&M_W~ }=͊ofX(-zGw W~Sa:mOC@VH B'ôI~G #4OY󿒃-C!cPHQ "cZ7|;S!3j2(?5"hͬtxz1P-mރiY%a:pT>Y3) ڛ$]ʅ58P+řtDU v0s0Ń'._99*9HsYr&[{y_,ƞr+@j}8k6LB_8m(;M!9[vTދ2UbkE(OyBf8¤'0 pϧJnF 2ckrPVrWp 7ȡS ]@ 6 nKP'vn7IhtFvڼi/ƓǑX&E]q7؋|| UZv qPR0){7: [bWۅۼ+C.',O"+^M{Ω;to m/>x0=#SV^Um!iY`tAAm\eEMôI/MXD"GC"EuDX߽DLr¶s9fq66""1 Rbॄ|rQ O,ci#o+k]MJvc+p(V,&c`'.34QƠ>"-$sݟN0f|DE CV*K N; ^}oknVA<&S<0z 6Xu/et96\cGw[C'.5ϣ3%!rLo>]Jx]Â}ЕA= :9bf( M=Q/j%ή>]_wi쒹ue?NJc@Yف{Q2X<$vx:< MxTFw[ >{$ m۶mm۶m۶m۶m~w=g5'*㉪,_AӸG =RHOnTwbz0UNfJi#zN'!W#谿;p3Ň/1"еhP\fVoN(4%'WZ. BZ-4o؈'{{zQ7U0&کp5oZ cr#.0i`!N(7^{ڳ"1*^ ]᝺/~?t?'Ḧl~u+N!L!Ü:%:ޗ>Z }7Ͱ8)G)C ,}pUH$A=6EK~uSyky.Pa%Z"dD5aڷu2+I(R2O[jJ3+Bಞ`O9#lBS Ԡ@Ź-׷'3YRK<:7ͭAObGeR %;rIܞDW!dG-?k13̶Hd G?R# ߼h\=rdƖβV50JAy`vOu ճB;*bfm [ Q nK/P$p'5&b%Upc??SPmִ 0ݶu4jI+Lӆ١9^=e_{F-P'0Mu82i6pˣL;x]5_Z^@ jvah! qަs` xd*oaV$8wr:ثe1lRpr ll-=>5ks##/9h(T^u$ѵ,@m4PSf@XGoךbW6 {4J$p0mO,ՍNݦlP6! P'7#&sbX>-E:ȻgkH5! nJGfK=bX3_Qz`󒢝z Nc~_k.T>WXy 䦝m%{&筌^L2x cE;H'A+fO3p g$zg/{zWgI5upΔ6<>)Lnk ב` 1YͰj=ںn< hs%m^WdXQ{> H;.׼;D1&]q43VaUM>3b_4 ;VO=0B}l3RwW"-֐f0 w5$`GNrۡoGiӚ?v=l>4\|B<ugm%ǰ<}M7(:0ZUUiƪYJUȯIx!@V0 nf<N]ÿIfSfU?)K{~vx]~c` N8?>sHqPҥ͚@ hoIObP5Խh]]ls;{ď-}{:#F<pʵl~2AMhn. 
$8lYzcEMHȦ?U(?Xe?[AkU ѷӗFc0.5V0۳@\BXV9v&OY;u2k=] q02$7d=,6=z4zD]1LBYAo#.(eg&_oP хB<;2Y[򟡊 W4D(TG#*#]8|#cT j3У)UFBЏӠulJp@0yI-ir<}:퀂J޷1/SAW긞0j1]Pމ$6-_m%3'if`*LIpijp { gC?8 0q}OGPtf:`BO*&5.}nv[jQ2Xmˠ%t~J%쵖o y[kq_ (זLr3"reW]uxJv/n$N}@W`Cx&]}-$vxSOm߆ غ XxYXsW$G*-xFN"1-@H [$;z5'8aQ"} ~X[gq1d9+fK S-k=Fra$n֮"r\xsA:\G20GRb75=J'}x3)`3uz\5 2/aF o^*MD@Eƒv3Ƙgwn#xY˅^ HeS"ۙj6uէ-Ck^5\Q">zMXE>T[f,+j2alf=/zx4WWFW0$==\lʂ<eF1g#W={*.E I?+콎 .<4p@:b}Qk$ #+ *.kWℕsc Qg:<,aQ;D5~%e;5쀷vä u쳐#~RY_ lDoDv[2%dM v؍1_[am_g(`Mgש[0r/佀WPKDw݁:5[U*mX%E,ˍb -Ma/w Ҧ"Ațр%zm%!C)ǐT]~L5VmR:%W(((Oێv3W3kg|g]HC/hwjLF"rkQJbW礧w|ߣ2ҕ!G弭BQjǼ0.:!f' q;),5%q;~I%D!Lb-.L޻[>8L']XA){,\)IL CE8Ao (QsQRʅu[SV~_ OO&<@acW"k6&@Idq2 >C+P2 HXg ??ruks4144RL f*6* ;B9ydBG2`胄vp͈.er?zg?ndqz攱˹"\$-a)n*yDjQ8g/P*S] %VSU$ɑ;oo5:bUzPM!ZБpM̪ m;sw9~)[W54^} )x۷vSۊPͻ[,4xT nH* ֮e2aϜk=|bY@œD/#ж^ARXpGכf!{X2٧Zـ|ݦ(s=pr36%M7k4B ]4CrWX8q*KAb--NL#J,rr`!S'56"䩋NR(!' 1+q l)jCș/Md݄rrRr`Pd˻`Jfi1/re[ge,LWR9 UHV%s;,jk3_AN"4j7FȠs a蚅?h9l0h/5LU?AT^-Ծ0ٓ+A\)C+^8_Ku=GGxKܤSڽ"x F#WY" ‼&J5yX Uq:Ѧ[ARO?eT$Xl65K/qbm#l7ա߷ YFA2ܩ25I`2IU8NGG5GڽӨH>q$Y\- -_/g,dO~"g@fNIr y]mOԠ[dtܩSzB5)A+#IJ,h+F"6R%ux1ɋpң{hprSq1&;#b c%qLT5%?wwkQ~h#|Xl3].rw7~>У/$5Fk4%iͽ4kN{0QusZ`,PAL>mA6s?xXǩ Rl9-0waHh+>h3[ɗ 7¦ϐ-s)& YNa{+QZh~ \/G)^ tx㤭9PauJ}>WKϞJ:4#/@{P^HS[#|TG1+#ݮL˘J-QguPm%QFfvRpWK~ {c"=:;T FG9cfr61|"y վx|<9.IQ$:L)HIiZr\,,ؼvp\X}{'X[.i2UHPQ9 uo|nR*jZ1+vNZ?|ե+/35EI6iB.ILMG  csrՂd5:?ڳO0:l2 booC`ǜ5F5?Ki9V +wsNk:7B}C b 5Q 5`ŞMu'F)˕UCY׏\PxzD, L 0fDs$푧5^P1Zz\eXZ mZ0UA^YB^ӥ kڽ'i5(iZ^ʙK=nX=Wm_6VLeA5 R] T]&TMmMcY|M0?2h#.ms&24=6, w\xDʖ`&Wj]tKw *|;r02v@ІVbr`|_o!؟Bۆ$.h" ue7ȵg{!Wɚc|Oʸ/v,mxbG aǙc ='↻̘wz97a Jz0XV~tw';i뺝+YQ'CϜ]O_a¸FWp5M,vP{M$tu9~ 3x>]" ]*"W6۬WWg?4ׇZ.e3V+>]EK N>@;K/Ze؛K$W˫0AOu9#x3hSZ3ql363Ms+ܐwBXU2nӆL?wLqO@E +FM>4RF`miBݎv4]V[F). U~fq0cːV\ ä =꭫[`r.%htYb@ȓ}D=-b9G @\gbq9zbw]?+QErd 1 # V S8 1Sx/l"\o=GZ9WNi#U -h"9zYsb@d$L#`|4%o=%OkPue 䯗|1`Sp-7kZp]*$PxƘNk\{3C'PC9;+]jq`wRnꈆJ!\n+ک)&E%< LaEOc|hKD *40^cʹŁQ%sY]>UΔ!\´\jo b(Rx:D~ACejU(7Fo%K`Ɨ@~02ѨLl~8 <,6WH.׿U$"#҂NT`5hS7u;z$H:NG%P)mYBOb@<IZYt3li`x$doc$j4u7ЧQ.d1xV,LqJG^F{&1B` ?k[z}v o6mW$u a3j i: xi -&WܩɜZ5%]UG{ +-PۊԕegÕHxG-ۍ6Z F 8[H7&Ÿ ߑ v|li ޖD܎CZ-҇h 7d`R(Ly%ijTY&,Q"Ɠu`.V/o$.t 2<=!ESKbs)*x3 ?ٔʈ zaVyV?x4{/~-/: $5ZݞR^.Z; >eY ^|'}ΩOH3$LR>$ɶ-?;Rp&i-7}F8KE>8w7;eFUE80,Ż"1ee;,fdk.CVއVS1qn4j66Pt>aOœ񣈃{]9ITҪsdI(Zi !]_` vI7:wa߂V>J&pB8LI"n.eB360nlF!kuKY$XvMgakFzp7ulHz ΥS_ |T;Z5!q*'bsz+,bGSVDz&gH_XCdN@e1gIۙϐ%)tt?`w{T$ʕ4!sqt31jx 8.K5KgIS?.xNhS <ӥήa$b0뿓 i^l\TPqﷶW)plAj2JlrY_9')v0^c+Hi璐xAKxDszaf hG kfV~+Њի ?g8iaW& OC؝*35]aq[9bwbO|TA~b"עP >*?arB:>NtaPXAq?ue,P;jWYFҠF@-d²Lh٦8t$j;&*?%lsp2a\nrvR $xuu bT=qW}>'cp!=tTm cf?oSUd)~' AYş #V͈q ZT_rHqu)3MԒǞޠmfIJC^200ߠa<+i顔i5mh@bsWaҖ~s"t4KJqзVɲ^R.9<)_,S-K&RiwRF6Np^z\4vnRqY? ł>(Yi'&iRiU:!2UK0ymx1ߓlə}c]vB5]S>o7>ykC_RxKS*_!@6*EBK8dx_GBhh=0Ta75h2+6b(@y V:e~w; a yDw'Q5f-"V+A/~OX[0縤>T ,ٓ8gnNGpg*p̍ W0&(3ރp/[<~5dQ-kXq/r7Ifz>  3Beg^>,c/dzKBEKRIݼ +오q?'Cq8O.gMdǝ?qf P4;,}Žlvq[XSU4\ pŬX24wRcB575'}X~KF Y ~4 %B,i-g('΂;ldsDioUUD&hʏfVSI[Ea<{(XꚋI^pW,)'W%4&>g/8wred[(Dr 74 V[9O6,1$mg%Twb҉`*$G-y?~O]5 h4^5s:<}/I$Yt *$:$`1jI P9S6"{\ZN^GA2'qݾ"x ,_WsP>BbE7gBꃴh@:ifw1T qfw? 
Ӎ!ő%Ab@mm&fi'Vf)@mTHlC#aYeG˯ {۴;I\|m8.P{32r|l ·vHo"XJ)^y-ϬϏJ_ff*^ {/6n62[Sp%G44f+A-ku,Wށ F&ďycƒSq}4iF`&|WUMQU߂s1Lnir+PˍNdoZe`g)=vK4 䀉 ·*χ'oZjzX|!$/o$YxAz-cߑ% $2%r- ]N !|x.|7`gr@$cM7@QHLUa8gK@>IohFiޝVa#yg# bǼ-4~c3OBo^ hg'Na64 !ǂr 1Ə<)\my:dW0]4-rX%lևUo"F6o[+|%þp_e0mmQKPQ]d4ѥ\h1Gj'/!:3A?XOb-e޾P' XcQF_n12Ajfc|w uZ&$|l] X2mO!ŘRsD|A$W&~6*ƃ$OsllqfHN:览9Gư`)%.u\UHRM+ *HR"ѳo]MĞ1ZsTvB!?,#%OśsV:iAt6Y&Uqe-CĮg8[8m:2\jA8cyH[5XkנЛpS[IEՆ,ͼԊ* b?ڟQŒJ8m}tV( A!E.\VDG~I_|Ν( |G2,Qʖ<> eN[g:RpnzD&].,mE3ў<^CDcm1%4%  *q-%Y*"*6kH+=<^%#a3#?.p7)3x<+^EsFӖKgOP3K؍f:14fdrwqae+񪺢(_Jʦ?Ӝvpx=d&ig] R,ꝞSFAK=9ƹMP6-nGe.m2ʟJ/7߄rz 3b9fZ el%emrgP 8>|籿i l H+RC4|x ~Dަ>)\eD4@-L* M15oYFD%㿆.{6Q D=Md%#z| H=ǜw.9J@%K!gmoF|/'qWcw~dd+>fJ gp00.ˬ檮:r$# 2{m:/ศc+ #ޭb7]fB3j6WtRL{:]2q^' 8v#$lbnnV=(<`(4!$LBw\"Dzvv8*6܇útPg+ailY `@E#qzhQJRe"S2!`xIG*Ua|@] #VX> tP51~| '*senȞOsgm7H ._0B^ES󳈺!GNi9C|URND&o2Q TM7Mq|f=d[sK cђt6g4kA{m_:ƒ-J-gWK [c?o~XpKAy*͉Z0W?pioUJ5jd=/e(|}ѻl$,Rs'o)Kf&'Dp%# Uzc[E:P ]$nsnU5WZHBSoJހ2gU-',ڑʔLoG/> e/K`Zu@{WZo>aB&0L2ɖjd;cW%%;.nL-y^Uӕ|K]k }{ à:=tʩ!}F˽'X.kJeWŶ_/GJ#'z^:Գؠ3 $Ntעf]{.<,vM @l״dթL˲c#_9`%st-EmP6$/mpzU*vx^5\K:i g֏]l21` 9оx Bˌ9QÌ~/u&4 ?8X d+~胜\5B+. qf7A* !E+Gr<%}aܓ 8`k! 0ꅰ`:% VhU!3cZfn:qgd*/+q?'d̸=~<\obN_NEf$JӣHPm!݋|e4h]X^j[eա XY晬wCkc3FϜBBċvsaR,egE@c&Vn5nRWZrn|Xn1m2 OHMbeR^VhTB*ffJa<h-J$\:XnLT۲UTJԹycI.r|>4Q'[G׆)9K @IVR~H9>'6FDjAϛR +!Pg-ƠaDۜƕjpt+ᝋDUBQi aTtP-MȠ6][.`w 0ƮaOHQu'вOZ T.J~ʱNx?8n-A+ݞ]=F4qGx^YVBm3сySѱ=LZI>Ifh=/- -wێ0`uD5k k-%J񆂵@c আ^,K<仞{ȅn2wo!-r=9$ۄP*88,6/y=Ѓ\F=,Þ2QBsMU"bY ~Q]hZ䬮NK$ vUfAsqܟOٮX'Qr'u,Fڿ c95O7v| p9`&(>c3*! mgx_K:k6£al2 RRBYFp2cp.-,%~$v _{WY`rDԷa!_7Γ)!^j/ wNWO7iC`:;jc9vp@0_E+$[ xm8:cɈJ,n TGxN8Py_w"vz?*Zt Q&)8] NRFnWUu n_Nk(v׳ uAKFxIC\Ei dJܚ^̕:< >rPY|FkoOx:C!YrǀEE -s8"U&yxx aOv]sh&qTdbQY\}27E.8 KfNwxȧUuv*w7d䎈 Ti=j?JxރGF߇*AY:.ŷ—^@x}}B0m% \y}sh6NQ͸1,eٰk+:1oD{CJrtG*?)PC,DşJS}i")mGpA!V?@bJ=>bk6G2{J~Ϩ<EDO2\(_sҩ,+2m [M*S`Vi[J쌋?m=Xh8O|nHN.Exشz&R a;O'~BnBש1HiG)=f 8 Sc+9 *{?|a3RNLWCj$A36zjv6f/_k!܉E#> ${) u~b)q|;ld VBk'rmu%7dÀfGVWOYCȎx )t׀QZZ5kPOx]4nw N (wȍj3X@'!C}['rX|yry .8 ;cz?s]%w=[6{ AwM;3 s&1/lCo16"U)(!ET'$Z&)DF\W̞&Mfco6'魺XOРEd^?0 'i' *'ƒ!P*`M|]ֶIͲ+O - Sr % GC9rK!G۩AoPM RXe!A]|[Ņ,v%!zFX8Eɫ@L4o3r#-Bzm|^=![0%<*ז s I$'Mk_"g 5&*oVռ[& 9J-\"_9Q+$JDΔ2VyG`&3d6J>w7YvX=@s]gzvCqik-o3M}] _F3DOD,r}иhXqf#Ǣ6OQ2ܵh$C[xSU&idB D.~˷? { 3\9">-J.?EeW)FVV4,TQiZiڤ7i{=bwkV o\GѼf9ݢKx(Ψ$Pq<( N'G‡VЏ펗&UQRee.1} ]YZH&Zdه?4tcA?EQ%8TE{b.o4$@J3fΥ?Пch /g(EA%͘8VWUjϊ }О;ZeHWM"үA ll}[]Sf?qYeK%9,Ғ|X)BŶNiIqO=va8?W@ޱGQ5H/ԨG-Dnexܫ8fS Px@RQx4#)X3sB چ~X"%$?h֯v hZSXƐK8'ж8@p>"$ uZ4p3Xa]_):W_U3ۺC5+[lϓI v'߽pA,%0UG{m 3&7)֥߳ap(r5S)N)1Wݞ4̧a4WVjLk @w/oBĖObRtpO_r' 9~zH/_ZxDh_Cw\vF\jd;06+6nOɭoN󔄭M څG kT 8fʆ4pϡb}8RoTE1J%Ei*NRc.9(pyŎǣQ[j ϣ.- 6 c@ǥIsгFpP[v1p4h~$bV f&&.\A*8Tc2Z?Bn4fth4(U?`c1['-`ImR_L\}?W]I0>r"cfrii /֋-^ _hZ? 
`}V7-`lz]A`v5 PoN׷"IhSWh!k}OVc5*ЎhwvfտNSq*~ 3d _$Cx ,ZwJ ]?suLuG3ɶV#Irh^s5}pM eI} #!sjM&-࿪s" p@JQ4O񈮝-u>{ޙ^ilnETM|['7M9S.==-5nVݭ !'`2h"]X'%| 6bH_(4sf/Mfw,Ʋ\j ,nyDN:H b!'/_؏8u !!}g#˘^;R ,Π9 5,&5X-1˚e-O(Ea#DD<.(Ҟ܅# 0{"D@yܐj=D:-rQ?zeɂAFQi)}XrMp`zRtjX ĖEfQɮ&g` BT1$}B4˱]<:1*)[$\JXl4ӏmkr7Mt48Z|)Y\rL"BFD"~@UOX .'4cnyfgV/WMVw4Qwc#.Sh{CAE(FdE$n?>s(>,bMP+ǘ5> %W#?>lD˻KK'C0`*ۂ[͓hόaiLc@&#}Tk٦[NLmt'Y{x={q(D)lĈ!Z3$f̒GW~h4EdjO(љ7A pHjͧo.n  9p+&#a؟s6>EbV8pFbyOKō[m?%FK^ńbX>Z;3tJ_.S l%U7$ ' SĬ~ PΒՕ!P},0;,Fχ}^/h+0Eg`}05`I@ ēo_-Ziu4=&L[/o Ĝܢ(-r:}}yog#wr*`[f ɢMOi>7DZCtd@v<.ޑN{I,>[USΌ4pR",Ⱥԉ%  xOr[uZ, mPZ&ko!1(VzJZ_ )1ZAhA u2מFG;5 Htj/"xo/$nzev} ~ۛ\QM-s[ JpԓJ(EXDZ"|[͠Ԯ8z :&%0啁I06 by/5pz>k5<4Y5Oe(񋲷!RekLz, 0-|yw>*u۝<a6"lyܾZWz8<)T1^bq XmWf_L]Àr6oBP>9>X)Vl=XOfn8jӡp#fu哨&Q@-p}䉮U<V ;;V'`-뷼#q`~ 2nZ9*={fL l\=RNqzEl62=hKh.DT WWD0rE-%jwa>OU$tNqÄŊL,15 \(<8=>+m[.eҞȇ;N_;X<7D^ME 6~[gy@Ⰹ8 zB7#h'w -;2 F_US&TTlA]> %|A4$x<58"/>nzV F [Kvhg2m%M{SQUVŸ1f¸KC554~5y$TU h<4h?)n<#j#> .dG4msJʢv>3+;2 2ok em$ hn XfB[GĺCU(F22N TF26J4VlQh!u={ /Љ30zbѩܒmcFoivTq w1g= ) ˟4 p 2hWXM6n9-}?W0!oFﲂʽxִ\XSn1{g UYMKd!-f'GNs-V ؟$IJ,ně%}̩ͤR5#i3!J95ѼBKk["\%r's^\i-4Z1OWd+B4URbY^ j"ԉtMeX[!+ \+Hg oLKrGYy xRk|Cz8x*?^*b8IqM/&S`†Kkqʼnk}Cj>v!&t9''~Lzʾ[,Ԍ_bYu #m$c5x۟?6Os0ү{i w^eT{6,J {->(ʉTҏz 걩'x_2-(eQq40uO>f0h$nPp)"Fp!Y4Ll!_, Š)(ыxZ'<33K6nZ@Ɩ{R)!| ܗlj.V~}TVP Wk`}vZ%}j:Dbk Xj (¡@K +vgւ5:Nhw 0Tlm9tna6'@qA!$0^")>cģ + %'H7 KV bqs'LHY Bm 0Ԙ&D=[vƜۀ!{l:9oK Eu$K$7r[UF)|#Xupc^Cx"AY*mI?W][lK5CLVpPRނ3׸_'The.1q3tHuwMDr; -Efk `7P@oZk.v{! DؾuHVmkoO4|EklfQmA>Ca)K$='&O_-cނyswfJ۸ܷWQE!ilJ3q;,K1 a`8U(HWA6ԅ=Px?"ߣwxߒ{#cɣ2tK['xHA H;O~qlԆ5IRɏ@0}w6&m-ٶ4R=̑m E% /&9:G`~dqMBւ6k]yaƻHa{d7'!f0VP>G&!\V\4 K  rQmw%`SU%lc2 (&h{ƃ+Йh;#c^lZE*ٛRRZVPD@ie,_KjKs\GZmAbFlpZjμ6i(pF>SuT~:MKW \ԓ ]ߋ5$ G)$ؑ-լD,tȿR'4\EOƈ|R`Ð|14M|LswԫePeὓG97IXMa$=1XP谑-t"ATʍ\nGxf眉x1FX8v U(FcI\ @h9;PA9^[Ԣ2ZߧK!\cYN/%ƕW$"ng.F߅z($n0+?LJ!6לzHaJ>du'/2;K#q+}SЬ%BSZ VЩY97olE Т4Q$bY6xj0:P){!hB+9O2CO2,,Y^5o.DyUmy3P~;8XŒAj|elS~˖n"NՅKDq܅(ti4dtԲT6Nր‹>9Y0FK y:Lհish[a -Sqy)0JnDV#<m\=n& +I#No*=JSA;D.3O^#:i=0lz\C /b"Mqs: $ňξ YvF?4:Efmbqސ@MP".g*6Ngt7'}=6gJ~i2 þMTlBj6F}$QKJ3 XSBrdbDp[m2\SۑWЈ"߱>8D86x!/xh*aNv5o0ilpH Hi&DT)g>q(=PpUh>%uP,>8!| j42)X"Q1umaMD ":A:jh6f"ENsEt:~FD O1_`ѽm2^bQYU鑪PZKVnUKN"S,e6jw#s2DRQաl]/ phc{K~JV:`4b" @t_!ޠy5cƤG|i^=D*_2&𬧖S LV9Tkc75qq Tp:Ŗoocr?뎯ie[vRΟn b<~+ I-RH3*(K ;J/QgqzvLYك߽:?Y/vT|d/qv5 ^.T˼1iG6 bDMҵUJȒ́Iق^O3u?oTcE[^d@+ LiIP˄҄)lgSܠg/IBqՂÏLlbvx>,ъїԣ^udSޢ0=,z_^J`A6;謈CVЉaR 6Dz[~1|Ggk%G? 6vP 3ؤz%sY<ޣ[h31A@Sv?hd'`0C ;b.Vd_s{=FX"X ~ V=Y)ll,#x`GڨwbV-(sJd]z!1,!|-YrKJ#~I00J)~r#_W|Wtg7FAkʰɀ"8-Z P+.- i)'ɕn). K JʏJzd\[DpTw FnY*Xb`}%Ҭb;ĆD[uT,"ُ[{3kR&[P9;2+;킣jy|5NVz2"jDUT9It@PF^eD\%S{x9tq8ρ!ɫ\$(m`x|"isb3A'_ҌoP $U`+U6]:-Hoi|(-Kd M:BzյM5!uxxRd h@ wQU':]2`vT "2@ E|ͧuԤnCSHz`<8}Y$+~%z(}e`Q5YWKLR]6Oa"7RXxLf*ŋ[ 5qyYrmj"lՓC(uXa%IVYAa9.Ώ ? kN#mkat7Ef]ar1%xH`W 3$Yf >zyMeQљZ8aֽC0PWakg-v˫[Fl4e6ۨvXOMlrBwF }d+]&đ ld9#RV1D'Yzr&PoF_@RXtns%D @,:k/< jh׊C$gW=5yi2HL jKl'43r$Gfk.pV}W䑰Aϖƅ*Yv(x0R+Ul'jY< m\[C “XN5Hr*UG9;b;)jP*i~1k7:ӉDk/k,Mʉ[ѝBMR c_ 2>m$9~!+*& ܈ LHak _~'0"k>$PO !lwQ8+DSV/~Loٌsoe 0DA RnQ;_kO=9#` $1Ovlȥ6 #bB4*uUսaΡ D:<pGpxgCa*@-^{:mKDDmQ}rH{F^dѸFgn3[Y}H{xmFleϿG)|_MUZ/'?ˏTvx](2*ǾXͳYԻrsr{E[C\Ԛ˱%zu>0$0OBֈIvNC@Ƞ[b|btŷ#HMtP٨G0'YY|^Oځ` >XIVbSI( N8X@ oT~?5<{\|mE*7E9w | %,;[͇*UHDZ >6.O֨L栻&Akϩz^`|L|y:7>q-y֋}ƣbUc$ỦJ!dAK[5'7޸Mrʚxmkl yfo%5FtŸ6D'ʨvJ cp+đL>Q_^nA,z01lz&Q#NB] ?=/,ydؕ枷ɧ"vXUD(:Y3x ܸn6L`K=|sIp4bƗ1(h2DPL&i!?Zo{rvV 0ӷs5\QCp .u2:q0޶ α=xɸS;@<~}]gd5{f}5yOm u=d5C@:@C: }d8ZL c%L9r0Z>z*cg&FUɩ/9!"7:r (r&(g ta~z*c[I)e+&o͂^! 
'D7'{*2 sMw?pZ[>gv!rf&=-j T6Rbx,o>WW!'\Ipќ g*EUk;o3=scaiwOقIp;m6~8s9/yTmo3C?VNMPݒYɅ?` gFY%%6 \8kSK3{!BxQ7۟ + AɱGz\ 87@I8]T@Wd"E4$?=xpصlόEm)[;lDv- ={0v&s2CK`_8he0KK^$tJLCO t`5z 7%{@gUʉ`X9$G_\TRI+k+eoPDK[֯\TڋfD?#%˵;#nzZ|rn}-ekQs`gsNVfeDy,+r%y7{u\Fm#HmV@\vέ:l-NM1YI{Ic[_: ̢#ulc;s% n t5jV+4Kn[5(=oQ1:ZސX;xsמxtL{.>o^P B Cڅj2~Zji$G J* o%-Erf-K"WpL@ؠ*te!%tWgPx*ί[; [r HqﻼwS b dҒ? #C7b3god3QS!*|;F?&4bUˎ/'xveb|ݝw˚ Txn!wzA2M4 5W> P n-*n?n (ɽ><9̻8Pf,}9Wm-كwA}xΨhK Ϗ]\p:7{Lr#"`}ݐK-_2FE |vNyg}8;[DJ Eţ (ԁf* KD0& 8:!^xPQoܗt6 au[WH#aF:%,G?.J孜T|y9.!5;7 K5ƍ{$۩j&d)W-UY^Ъlg !;S`-,Ih5Lhum&_8ȕq^AۦH 8E,5e|9N %qyHrse?+7F!~|mqhF{'p*q0mκ%zyzG\dabBQUa7Hũ>J#dJrk=E$is,`/txN`j~橻Ts2|P'́s K1t'z ̖@ٶՒ}`kh"Ryk$8n&EoוtMj/Z@AhdoVne hjcpSTmB喱D =@ ]NMkXtCĤ/Fܙe7 HK1yieB1֯0^F P)x[鍟5L@ ~&{GS!K;m Z1a[atP/E/M \:v65ӻ<_e%۪ɫ,vM}M ]oSѹ e^yffN(I vfSrW$Pr<77km)J2^a[c+kyn#k͊AIs꩸gNIl]mCpU eshtҶ(HeMhnZ^o259}j_PK=}{1o)vxÚtvL`Y9bz1M^]Xi[2]L,Vu0XS][2%B6 79rцD#^2OPOպɦqAj`F*R|bm@mQ)nfK':iW"&0#ӴMJ ,go1M64g> G.$(Z;yc*Gˠ7.01醈&ţWqM9{m}J(Pm o&4YIOÀЌtR;)%XK7Oc#s"f۞7 P74O ^ <ٯ\5\H[,n6nq16đ$'+6EH{e?yМ`m$$JۤV,\Wza1jN \h5b2fz XGP>Mzohѿ }ݪ6ټ!SE9Gx3g~Cp_a”#EzPj0#ô'1Ss705o0JyIcVL0w<&*$B#.`P\`RMaԫK DZAٺk8;x}EG[%ۑZK2-d8vtNg !* ځ,/ZJq#Mt'i|04MA.Ki#_Z~TLM328+Hk,>ǢMbQCCpb!V!) K=BPzgϘ4^MlYEĞ&kQ#3kQŊ=kQ9xڣx o-) :)7w}AW~-V4Q2-U:E)8py3Xh׫gz/^5w6CCE޺{]%9*ä8)&>:q"X7EZ<2a,Kw.Y8mgpX]sGV%)Uìdǿ)3avɩC L=zU2j4r|UAta7j-s eNMu D;.x¥0av>^R"4fOv}Zmς)ftG!OsPNDnh*IlgzGPxݯ5{ 8X|9c։879=QsYQV\X鿄װ=,'blcaB4|)mU"},ړcG쟶u@~Gy4D8et.^JR5+Dm\پ6g9a$ FRrX 5} 6ޓL01ol'Gi~+6:  }Ϋ~4@ԪԀiͮv46Uy!*5] Ͽ;z됹yqO0c%X+p `8r:f\믞,׿3hIbe2Rqa7\@MYnj׫ǿzCPl(bPf+-*V+ <HFɠwXM9u]ڑtPƂ~wa)PJO8@ mdAo^h+ BT8L]lA]oY籕{ĨsB+; D+ׇi5ue^pbz4F9Dl=A*Ra դ$SB( sv^:em6g v+ǔR6!b>8@%OQjribph6Kwfω "/'N;Y|@,CW<@H#"7"j'?:PaEŠArng1 /;ɽc/\˜`U |M0NgG${TѮjO+FWm89&Jl:m[Eړ9*;?4W0rdn˶ؠI44Cmjs=&{ELxmx^Dn֙Mo %$/HSz 7M54)ٖJ ,\Lf1!bpV580m ""|0>mxqƁ^Pw1/,nTPYkS鮚qԦOYWNưԏ er }a\/VXktØ^-?/]t%ԾTX>fx=3 `Ro[tD4viA 5y 8;3#mwu_;hA' Bzegbm:*4Nwέگ&RcظIU35]ȏw%%0?2{JD|ҍlM}^+0F3(e&qݘ16c0]2BJ|0Ҧ7/yap&v<hLkNj%i jBK*~^.߳Q0₢gv=-+qMz_NŹA_KW\ vEW@9 1{"NY%(s :7B"#5vqTn{ iʙ pTenJl9,4qlBd(V/ B&Ee}u?[̘k G2[g(7>\P cLtPGmҬO7{egVQ B`K>ࢪ g(-*C6es̀ept^|YDŽ](b(uo@)~Dv㒅SC\/d *_'L_y4~w 2bhF9| D {KMze۬H(mH> HB$%icu>.HGzom 4&7å2!-E$4Kx3_%\u(@=0rT]fŒpؽhD?6YD [ڃb[ZnԶ :["JHVچF`[}4. )ӥXaՖ遗_ĵ_n, rڙt^?I1}MY!\ZI}s[t Z9!iCtZak :x*uAIjIϫҮvp5MQYk<,;PLSb/U^ߨIth|wt_ V.I,׌WЍ~gݬutkT9a"[W+S1ugfʣυYWg:&Kc[Yx)Yha$^~ωOBU,C>gweXAQ9{(=p~h t=l's i%;u9 ,&lH܊F DaPiG%N_눥62P_*btxA:{)Re̬(dT A'aR[޻,peٌy5bY\ IIWQV]} ?;K7!v 9SrU)꽖jeee[̡XۖMmK~NAAO4~JJ nr;AdYE8YNET8cf 3 S2fe"V+5̱YcܯamHSUݓd}WE,4bOs)!q'YI|K`,8)`4Y'w~-OJ_dVEJ*5 J0{ 2@Ռ<= $|= -j;\OZ)e KEκ`FUQ벀[_/q`㎣Θ3(Oٚ)F1'Kbr3_TUW_ Pkz{gnш'!TSAd4 LczJ/qIC4jʁ !0?D_Γ@!`eʮ${( \E:Pv~'X~/g(mfF.Z[pognx*Paj  G8HNĊ215A0<TI!aE{ ւhLz(+#i4 ctBo~mY5b1.7@WLjitZ<쏬3t[s>7β~7=glk ?Uqu8_=9/{;^?8:k[D 6+GL{$.l6?}ݙve{I`s`:48"n*az6OU\*EU#/_Unٟb|f!%v0o̻*ϴ/pշuS7=Bz>q+ ɘМP'U)J.*9݂_E.bJR+y-4hSP0,܄h@QM @ߨևLԈID9gW>=+ɽItv!LȿQP+C%{̗7چÞg[Ay,n+^jʸ4(C@]B|7'c|^) SiPԋހ=r"hm* 3" 0UMrV~3fh9h ~m XGh{4] >BDk Lw)6ȐkS'r~g"LQ'/"ew|TNK9̮3UT'[/ٹtlKś \. 
d|ˣ+^|q%RSQ 5ɴ:&Y,'3:֨^l6&EnO"회ՁJ@W&!ģPI MA[ƚc3N$k\xRnr`x͙}ÿ̹-3mWVxM=$'EB* ̧6}nbF?R*}w'n5Le;gM?pf d6A6c)a^OYNy݂*ܘ@ڎ+W*hNwVֶ8N<kXIe/^0eHAk$+xO0im.UJ+j վ|`nLWTP uPϕƃfݢ}5^Oў2@4Kze7$ \ֆQzx+༔&j飝]@csidܮ #3qb ~>>RaW+-kR`GߋYK~ͺ%w15h*a5 |Fs1qaEMP, ƭqZ6~8s>`Fݎ=/N#]FeyU vH(0(-q[hD*۝Pk(z-pCBO9`^rO>4YbΐIg#*VSXK==x^X+''m@'7ꔴaIK>GLs&Eζ֤d$sEavExJ@o|qtPIG߀ EgS*+c(׹X_V+9M(<O(a<0P17čx#Jn,ΔB3PKbBf,A7/;dJp꘶\\z:Y ZAJ>'*"ail ];ͨi"[lBF6T!cMT4骚]5+[)Z"k!:%ԎF"F$(EiA_&M;5Hz}zWD{M{ Rׅӈ\8Y\u 㠰nA [%M4b|`=6ʹ@ID~ӝq"cЛD5jX'pI^gPD'1jXz'\Өj7Fcn o flU'!xј+k #B*H'KBsR㠓"[=t]1ĭ`H( 62 ~A['‰bJ6ytZ]3|CXDt@@14a] I:2Md0dTv[[uC)Z"Stwə %;=k@ ѹd*Iנx3C}/SR5`7+a ( Cw峮9H蝫"uܖQ (ctPGKOljIYFfn4r"i~u?E],+}+Z6ϥ5~[{0C}ͼ $ΏvKI!'5H,m`B}l%zN|:' 1n9SQ1ѨH`s*ba5 lSs/Vr;~;ykƢ5T'.Ւ]p7dk2ÌbB#y+#i9h kq@g'X;S۫`_LQ͋;] ^Ycx1v%!U~0bn~!/U6,񣘥B¶!;ܢ6kN5?|Y&ukX`ԘȶFn%$p 0,5A -:la7qq:t3xkv0KOdL*n  qH6 2ݑV[ek9NFďˋn=C y?9Gv,/>s`_lԍSO6nJBbAAH䛑ado v U|mp)) XqؘkO1ZI~A|[|pZ (ܩ* *^f c%8,ras$b y?$s%`UׂjYouzmkJ>vɱ(Q8.}$5/bճ 4?#/v##d2,-ite|h:!"ړ˗ٛ2}aX+2,{Zn@"[5+kҝPEɁ6yV,չb'ó6Og(ΎX7n$[3U> o~92{4aC*;E<>BxIZQ0znai%QZI ;Nw+7 uh@J? Ai^eK*7/ RScm$-BC0rX*&bTtAi\)F$Ws}dTDlwCȾ|#g3a9s+&BvO‚ڐ*!,}ՐѐF wLZ1n;yoZf`?un^"xbj[R\z#v Rއ) 2A;Txȼ%2`+ )]0W-yN wČլ1CSO*1sL2hoxǂJ? !#B8OCWV? G,VA250;zt"=E[gsO8~3?swgTwh5Yk*CCg蔞}5z=JbwSFSuZ은oIИ l<q^&GMZsXy:_O#e%j%(c,`)` a\ՒSk2';\D9 J _(높B>&R wmY,/cRK5s4/c&CO"Y, ^̍~=LUȊۤPf  ,"<ɉ@7,ӱ1MɐqtUǶF`2 N`nQh&HrE'hfi LYܴ0IH)kU)VUq\W%#l25seXwexgV 38jhPAjLZ*{*TF/csߟM8>m5HlM!Tj1YL+?Orhg:M28@-SwTeif֔-fg+A8lQ_̈Gqބz(1uYsBj lҳhMobߊ+WӆvXAם ="O6ثċww ^i)N3EB3Иa^)pF*J1%wi~#'8* O9fE}^kMfEamVyQD`9 N[Ar@}hGcK_%iKI<@O^GAgԾՖTq=nn]\WWu[ti'2?PRk$wr]!E]D[.d"] =)n,擉Ek0Cul M_rsW-<ϳ~ d-i#I;VoYrZ:okGa-Qqu_@y}yNPc;ɷ0뗜~Aa˔t&ZjI K4t`8/~^/=@Jd4~J,U|^בߌVg+3֪8]+/XJS@˷{fXnWۮorA{X|Es1{< ?/zBMgHb"EU)À9AkWTxC"m]i.*U߷zk; nC8]܊Ez/+ *#}0F>^*Mf^G  ?WcXRzrDahőPTm_ZѷCk4YSYO1*`Tw{!t,8ЭjUT3o#04"|byJQīo%#_"!" -?hd&M㔍=yLFBkg^Vg4B[֯-H@bC/PaNglVU-f +Uɒ s"/d4n7&@~ȃzZ9U @[~9owAC0s$iSMdД\iYf~׎6vwNr}0Zk-)N(AYr)MP'PCH_Cũ~-s_|~isQxHENpJcJ@+嚆axmO2wd~]D~目F'# .GbSH5sX͐Nm!Tٹڞy-mH$m1N$ uq; ʫ"D.l54Nɯo;LWp.p!'m̒.4?3k6~jqI4r]I8|{baS #LWU+S j ׌TA_Ēo`ё*UYE mUZPoל*<(<(0(oWvi Q\'on`pxXqel~zoK%A  ۀd*6Bѯe& LESҘ)mΞ`cEzk#k?q^K,ᱟ*<ɍo3-l<*N;1nM$vwpIԹAlnV:e!Ē֎ސ`Tl+TOz_۶Od +OJ1bDIHGA1CYs*#\a|Tym6s@id"_gMZhIE~4A|oܩ/i y8caRTwd5+"vQwŬlS VqnII;i3[YLlh©z7[\&`+S[QrT¾8a]cW ~bc/:f @ո:Ua  ǥQj `p+n@ T }U}tDvW797]L-~kv8Rdq˞vUM!kw V,xOI'SiX4=$MQ>"]5NANu6.2G1@LG_rd3h+\h cڍO7-#v y B(d ˕{jca Fҫg2_nSa2Q w߇C׍ Xj$[# Nf5(\YA3( хFq N#Ϟi \ey:?,SHVo҈S4gKHj}Eq6́ݑ?7-tg릒=iajHdzl/xL79 4(pu)_)wO_R)qGV?O#e_<>Ղ>PasVk6+ݸD yjI31jwb8/8Ģt+&ޒ43px]Z*ʯd8KC7K`˱t~룐Rq`1Njd'e+&[ 1(dLl_l܂@ZpO!Oܮ±u%.fS)c ޭ` 񩂧j嘖Tܿ-"^H*R! "d}?kIvprN{?:' RA!k+.l밡1~J%ݷYRm莳Qyb^ޛRGC.T˩T>yP%gz_("kboFMQFun9|[:,Ԍy6dO21:R(d28+&qFi-w[o6w9eRa$M|"mrZ+G+R 抩3ѾJr,ڸ ޓ_ni1TB79 [.Qx@jGUUK)D# ݍn!3%̬P9`p nWsjt@zz}b~ (?.5p@JX2 e퟿E bKEjo$~Txac%U3DF' [N9w ]{mcm̔xS 0@Gvg?T>JWY $x/d%6$lhM$eh]?<OUV|),dZ {N&/Ҳ> LIv[`=閚EjNW|&bvW^! #iw_H~)lc?š4_b[2VX D߱.q;rw}%aTC ԑѬa.lvCu4u $U`eu!LWЮ/ť|$h*?N쟢 ]{O[mPbsU1^\!}"{"rzQ ʌYs.yYߝqHr,z()RTֆ#_Du̬ŷ5 7i]Gұ<.?f.Ik.gv?;d+//j0̴4N9B 7xbɶ-$L!{U;%=Wo2#[!qԗQ۴09qtR ьn,H%Ê0K1#wgY5;릤$?%Q 9b,bBɪz8=)!lz@@4&ߨNxp*X"ml4H5d$PTO@"׊Y1z[fxvA/d 3(#fMu} +ٌx+9|!XzS &[<#n,N_`ix=mIYD98Lw/yB>K.&[~j'Dmw ACJ} $&bfH] Y3ȁ(O|ĻT1 :l5{dM s~,5~=B!gAwOC711Z_l,|-?4g=3 tȢ:A}Z9;W9!q r'/2L+ Tb4SqO}=-.XbOSB):1e Tc'.{>j%汃IDnಖ<KBnHGjom\x}?w R%Yc{nS?ĂU)o,n~b.z fSa'B[f 骜_(a#MpՉ(If*?nd!_L"hK}Wh*ԭjU>{U疙mȟ`vb~v=8$Mb"(m5/ P"l`9$9V8#iKc'm*_Z:Ai_HA7u[blGc4c'}9uHƥ~ͪCJ± н5eP!jإwց%s{34BtbˑRPЪ M;FkUl'>R_j]x|Bo.Ds4Gz.=>]SW_^t!Q oIKf>rpn;z%%m5vngX@ mvr_ vi ? 
f|V4ܦyY[p ,C$b9MOӏX +/Q +p̿?yav?U#8 |\"kz-KK ,zG9eORo0Dz%_H3eV5`y*bRAu$)N΢`hiwWU"G6{T/ВJ~Y4uщ37FO|]-1RP5>hu-PfBx-Y$Wݢ[6Dl$d0 *eFiP`88 jt̋~ϱSf5˝+϶jQB8B.*lUys O)5 `;WS=jJ?TtM2TšIs?%apײĐ |R v6J/'G Ceu!Q(1GBg`jtYs`N/;!nzz$uNZ!yǮ*WC!4Хx~+M<6O!91ݞ1$CƐ)C'ʸBSuQ#YeN ]g۰CC+Fm ' T-r^06cCӥ9?aJI3xN% Y=YY'B}#C׈6\D]܋|}\2(2 xmʕEf4SlL"߬Ηg'"i(Ϥ69?K0)p-='!5v 86:Kh[YQ |`F 1q_,[Xjܒ7NIh"kGIe/{ :2xs 0֚1~eyprwanJj@qde_bC<_t+ 1/0lj@=6-h6,s1gjW.<9d :c P ** }- | g<_B[k)"T=ĐGz9QHն-}(f&#їn4Fv{ؽl͞τ1 s 5i>Bز60Q7C0\Bؕyzgn}r~sJJmU}=[U1r83FB Ձnn>U1U];̈ 8Na7%iq56gLp ubA؃"A\w m2D޸2UvJKm#okDO9}|'x*kucG΍J[—ܥ^Gt(ۆp^`^}+!p&ݿDTpٲ;@.M{Os|,V]}t,α/XKv(%\(A!x+0LL+S=zzlVl(iFQ9-)q~P?vBiEJMci&3  l$ۜBpyJ7O**:A8 R<| >HOQ>_]S^a! leas\؇8YV`i5BGThFǻ:Ԥ\0?+8å /酜2, ôDBUn!6/]BZP8 6~!?F箧mH,ݚa3Vƭpq9wV'hJfH\6D+4`<ފ E]R3jG͕M̠ު dZW{,NFJ@u^goSD$΁ӓn7}S C"ǥ YTq̤ɵ,mnN8&^w,gorbzsYu+Gѩ@{QT}~aF0jsMsxhg=X4+1q-Is }]$\/ Az>% fNnfEg8]G <;Y%z%\u:/n]@{z,?B6®/Җ4U _g[Oly&:LxpD1bdU/\[|\B]djQdldl`"w̅ӆSzHHy! 1MU{7i%^ PJ*F}6S02(4G:J9Ԍ4-mf~<}wmV6fۀlV'|~5n)9FCCNCI ?H!37``Y>mvf<8:Wb2Jxa, Kp6#DQUaM3#! j:,w) c c^U5﹭|}ாdmtNNtHqbD(-K?7^$tYǘ_GS%DӔvb];V]X@"+JȒ0tvq^h5cs[xE@'o ;nbfPQAȐF؍UEOګsΥE7z ;Ǿi"!"im],V!.Hn D=` 8Rl)xx8uÖ%e!_EĔ&2\MD鐰ZgmlN7 _`Dcɇ!:޷ԼS^ܰٓ;!A./3%pB^M|sẶ~ohfWݨ@d,^@QRp#Иy*Ϟ=*- a);X\/lrɿk5 џ 0 \@ TKS^0?.j1\j k&EA\&&;KO8R+=qfPGhݍڃ$$iDhW׀Aպg  1qr-,Wh$0W0f<}.MNI%Z#%N7q:3!6<(r"BLQdfc}ͽw%3i N^,,AH~44g5/ؾU?1aZp@^- kU1m/p0AZiZN1w%?~,5Lqu\>-Ig0iPa)2_ӈ"@d{3+L@XIXnKQY.yB&{QPMM ƈ vesx Y Y?'be[6\-Nu3?I) Qُl O}(҃QƳ* k: i ([s@=.2O(y F -ayLDKŠ -Th/]}&LBH|w5<&33}t&4'͒-/`  O'{$S%g5 "zaOTl6eOF =Hp!j>gTmќfo9`ީNO;~_zlQmG߃\<9emGbsH[ZmH.) X=t+ФIU^gG0B`CQl\GaP{lDPRKSF?AH" Bq|.^Sf\&bij TLylsμLxкc` ЀkݸL\. {ӼbDޟhNSZ^S851W4PsӛIi-ΙyX׀˲'S5׳YX߼lvB|i0)Q.7w)+rJTbiΟ  ]?X_`},BTNk |}TEh % GonÓv \#F3gh{^-.#0)X` d4e0,dݰ"PƹZJ/>?m1+F x _R7fpb&300\54'ANoG[d،-8oCF!F(b:'|rI)?ˈPg[DaX(RtN_AzARAV3*Be鬩!l xXYO&ɪZ{L6;iT1sK\MC/5'r>\4221!)K7J \B1$wRw.DBhA vwm3׉s6M;>z w=u&>@L GJ/Uq[Gp| P_m]y;- x?byzs jwҡX<]ˍJ W?Dj09S `QsTuGJ QWNGv&@N^*XSNc>AuO5Z-#{fa6fz%^Q|(dUpjW30OױbXQc`,>FaqUJ߷FbxzCc 2Иwb.`$VJ *šv3(SȽG-[tATb{='XzTlxÓCL7-HShp7rSb,0}B%({y5Dg\z+׽ -~hw/g`Y$954eRu`t]}c3dC ^Svڣazv* hajCUrS4u;ϵZ-@: 70x;L V?-[i+@Vp;-[4"Wey$mnY碷p,3(j&73n /ŗ$JByyY`apfK+=`ܥ8GPd5`kߜRQ@Al26;+]{09a^ wCCIb\p)|~Cw;iX ,nHqldi_iҖSwY_2Ok 3RW{Ӣܚ 5U ·?96&0%zqbzIWl@s&}0LF{jgaCd \q|3VYw`c:/ֈVWT53~ʩ%<; 6ejYtF+2I\ ].8kQ>C{e[czqh;W-(r иa qzd(B9!EOÚ ]48sF w1/Vˍ@ʭ.:xhs23q" Z>łKե{O$SՅE{5buKzSWiR*VJ̣AtOZx6$&[CH.QX;o74 T zn l R{؉~).WZi^픝ŸyJSU~"4n5_>j.Q"J)Ճly#o<%  s,Joz*!͖]S3K<]B*KZ=+}DkqEhkh[@.!vq`mp.D;3YDspy۞h@vBԑP6498b=vɛ§ge;Fp&> wٱT!#B B$)M=- mZ9Qr&9L \'FkWY,5r[VY:Eƶ/3q{s&zkkN| Xp>rlVkc˾fҟ*5g40EH)yu#TOG%- xG.l)%XK]sY7k3ꪱpoL厶x|\2Ԣl_x UXҷrCuH x|'<.jK&:ׁG[3> fg1`~61aoDpV#&(W7o3a0$t|;כ~D $2"clBPY)f'~MN+%%Sݹ5d^QVj=4r-g̡|;<}!gc? 4:BSL]Ѱکf܁=0G7릴ik?ب|Rm ,˒(8WHgwDm-{I"Qeh3r/)oHWFRZ{o^e:ʝu6wˬNN 1:Q{uq0c Y 0ꗈEEFz7Ds(+Ruэ̹CC*ЙuXd2yZ7HG,Ltg4)n.:~v/q5ߺD)>{a8Ke?sp#@i?qRC#EэDV=`{ QB{J)| .'I)UNEF6A p>fG m&@$&Dh.]݋zt5KXbs 6R6aDXF[يH._,.{K#BfWeƸN`50JN`0s>sQUф*a Z} hׁX-/8 tîB50NQM: :|?}m)u±0$M]*4/C ]!VrGs.f *|.4 'c֩-ꪮ(3^kcHz!4§xH+|'Tx"Tf~bIk7uM~eLezbc .-dOGdEHh'#V\?d+(@|!mw0Jh_3hwUW/PʿUy}!f-a7MQ]T5 lM-=$$59{ac`?#Q]4M%DžԍBHZwxʆۄVw5 z"xY'U^#*U:WJ8x?ps;MB\ U'hU |*Akq wzP&{KjO܁t! zFgDA'?2o:{VBq.? 
M=O'IǁlNGPxnh,D OpY]f$XIWZjV4W'Ec]كO͑ C YM"~σejb/u%0VJf  Yh6/|_/|v$Z7@Ml 8 o.ڬ'RCW7$#.+ yG_}@ Rl;bVaFUـDן>L __~W.{u_>s8mh-dQNS)L;X3f|l|O9:٦wp<^閮,BeS]рO$#NUjX3tp_Klv=s5f3$ %gƜ>Ռrѓbgyѽ tiERՃii`?Ћ2C"i:?zR`5$lNx5'znil{t-*;{EW1_xqxԅGhąO[4 P~u grI Bu412&Ś92EG`%NQC Mk| yTZIDǴ]Fn"[3m<.(ͨ*NvX2uϰ!XFZt86_Lᬈb0~,kd>'tC_~_Ftp-vhh Ӷ2 C1GgʆIЁg]Yϋnw`v{){Mtur\a8ƏDVǿ޵m8h Vkb?Iӿwv57x{Se4qb- >2\jQ6kZEȄlΔ*GYiV7(+RdLmȟڬD 0+g$Z Xh5ޔ/p&9֯t$=L"]=m:*|͈}#.56glϐWbR]j2~ΣqO.lH4Yvh/\92N5N3Fݥ\/nqx<,'FtY` l3 u+(U²ϛDv%`=umv"nQ?ϔ>DaK;Z+!# _dj[-ͲEwy+88HE{(V?DqNxZT=٨1Δdzq!(&R|ٴl~Vdn)j Q d!+R*6-vFjZFF6^9m&$e`L=i\fBgz',wb{޼#nͶE!WDd"~M/nhr#Si؎-ԚIay#҄lk!C#}~3fӮ[nzOld/ܰ+}R"xZ'F8 KE]"64\i>q'iWMX]?oSu|?l.*'8E|!mVamOHPȻ腅Y$n5j-mk7fE|Hݶ+V[tuIʐrvSE֥%3s?~ 3K25IZi5kYU)>3~Fx\=+I o?ҟ5y7gq K&&h2@?]M ȨD$+}˿WA"\pZVm i㵔 D; y+0B?,`,0  k#s}<{DeUF?! \5{taKHbz[p*U ͼݗ8 B` Th}_'teQ Vtj6>\۔cG^HYVo,,D,`Ma'IN#iƦcP.ZOF#VMrUN)tPx8e:(qxRMT}Fl]3Dż#Gg *tb5%].'3W ^8Y|BDbP"q8|3Tfx~)tȅ9ϓGjh(uMGiD=&=bKgnE\R LYum#!zOm1] BkGณO|gA?57 "Z|+C 0ޅ,ґd#,eU:Dx (JKJV*ʒ'ok]St 9% $HҲM^i~ ArfTH!Tci. 앍ͼ*Y|NgKVm 7]WQ1V ff1w;PHM`15V v7VɪhwdV׏tQnG⥽G#-PO%7%t,A(x !uLxlf e kܰj4da"\MRyڭ@m ! k\a>GI<(<8{L"w/4iVuɠ|YBӦdaOBA;vdQʑڎ)fehhS%XGQZV=wN ۷*(R/se-+WDV@D䎞6T :S1jJ.P۳bRx"3l QQ<9t7=thd=i_d^4eMn 9KEwB8'N0C 毽*gOWK ` =W9&뺒EVn ŘɢS9s/9[c$uyqmZ*w0} &p ])Ӿx~JEw!(A-lpH"ӽblN֏ma$EP,yZP:qe@<ϟ X|<*tjv_^LyD6sfJ˩'8:3nA:c3]{߬gƫd@>'4#{{wyjOCJH.:z,XHq7p=DfȢ(`=,<[_^ID163^u0 xҟ2K|UegVm&"7I4_y}~.`\ 0+: Dp]bv+տt/7/T,֘`(oN!.q&6!˗95{̒R v~q=}*3On®!U R?Z6FNNbߩa,zk>Xo,hrFo5VH煮;\"Tuj3tD9 fG\7DkHzv>/B]^]('ިSA#=*3`߿3/}o(q cyBֆ8s)~8oA'1|lU(Nk-B/NBkddRc@&μ z(H2֚*}Q[e0*򐒲Xn}qCwG7ԅ5ڡ)=Z; b{GtIs -dg^n FL0!b!9 *YOP{Me [ +Ӳb٦Kt}9*I1~i/ؓ#j t|` gx6lLJ7U|w)FOr9AO.8 ،E sU54%[QFEm߶7Wlzec0e6SH=OdŖXtzDk <2 Mpr% pDZbeSA"QA˨{^AV$>3 mm>4ﳊ _OFñeUZ_,6T2[Y%)h$Q_rs[T}+SM#8# w9+ 3OrH?#]UuUho/?Mwb&=SPW$; -o.H cڍa;<xLGiLcYyt/$Mô(+ ǟ  Hq?Ǐ@SD{|_"H 1!moφ#n|fJJp8Vkz>Six$ń^OGWYi sQaTEvLP:{^\HSKY/,] pmO7g>& @ *L(+LJsȪEMLM4P0ih'ŭG/@H8sKӱT ]JKZ2OFT\y;HnY-59tw, ZbD1B`|$GS{}dQ721BEQԢSAؕAl BdE"BS^J6 $SӾ,.Eҙ~hJI>e)TqʼFfu,u jb8h8/tӉW^PEs7n48 #.P!tyAWK/?}iE^5f4P,#AM(hm ETPP,oGU@tD (gXx2.Z1WuDPsOgm,>>~V7.tucOaͭ9Χ?Kx|@M8ȻQ9|g3h 0VʙHRk:wxr*!#t^ʒyU~-BHD$R=Ɂ2ʇ@פo;VD'l4N"oS@;^*UP?v"U4Gf؈ mR V@P|d,'q< =_O#XI w=JUSl?~- 3ouK0NRp'F$߆'vyK1p! [TZWŢ[JJG@qm5|3'Fӑtes Fד d*KAܺwʩ<+GyPD1@CTƌвW 2Z`ekޙpЏʘ)G3b^0/Bs|J 9EH."͝zE&$L@uE;w4dH,Rn8~dt)lS;h=2\ gi:t qR<,:m˪=P B{q&oϧ_€,-ٜxԪvBuV Q%W$Ev%'M~h\쌥*édKM7bJCʙ\JU+!j<_r%2<ظq @8B&p]-J^Yp0&-`fЙL  DPKdw\ec]ĭ=,2Vi]" ;p&cM9^e ^&e6mճ cZyX_a\i6jyB[D m䳵ժ%ш7ƨq_΄8|JOk'qMya{ۉ|Jd:G"r$NgƢ0ʣ V7ATgq^)H>yz]"| VeȠ(4y-oc-B3UD4[CȦ~t*jLyrN&xWx+ֿ;&o{-ae&ޡ{>ܦ $oruz3`5F.7X r]:X>=aL ڪ՟s  ;6$`v|y 2Λμy.UzgTՂ<F~Xl1ܙu΄X:Oozؿ!ѴGhp7EҎYT VcM}%Y'UR(j~ `H@z1P#C@I9|xԵW4s̄9BI8m%׺pE"KνI2TEq O`t5ak @su"u]wJ1I%;gYUhΡ,V zҫa05e1.%Ԏǎ@h9Ȣ8[m \amU[^YH;Sj@j{Z9'dzH̘y#Th"y'2z){P |{]hX'x}BG-bLg;ys,'$s4["F#+e`+M^=ꘂ.8jx7kE`ՈPc)K:TT ׅݼ{[}';bw67/d&i=ջ@)߃oytr6ɮFG >ӛˈm(J |' {"+cqQٞ?&erB-db4%XS['aNA(m>fj)I+9&{_8U&!Bvc!^ U1_JkAާɮz; kj$OrD1.ii--*qmH[yifP)(ζ=tdZi\r/B +loXFTK,ܰSIw ֩>*@&V6 )hBΖ0`<$͘4 /ɋ6LQpYSYT 2b=r伿Z,ZF*vȿ1<,3q_;QudpM{Mo@} 62/?Fp11.? :>RKŠۼ :f,5}*L)Ĺ>W5J^[S-y&-@zٝ8R]9#)rD C*P尰3Dj*y&Z=N75yc2[O P5gjCuӰ7  &wO|i,s Gj*pwDj00?)šEnVuc綽PkZv#P]0SU/zے\ =P;Ɛb,i$-+ş׸f^:Up♫|[6!AGsIm#'(b\8H.jI\Nvo簰x$z 1>.`ɕ]#%I.@`*}d4w ']ATd_uw`5w q(F3؀-?fYbzKZ^A% ^oT+0yoP @͊19HS( [htQ+_:aޏ/05Dls#qլLX9 ht\}[t 9ǭ$#k 1K%)Zp6:Ln5лpS="V<[}Bt35!pğL g2^aHL  |Ѵ)r:@7,ËsIϞ<$šCyN\pD] E! 
83yѠ_fܢ4%9Ga ݖ [>I/x 63m() xZLjř;Rct{ AMl^bj&x @EIJ˽~'-͆di}/  :EF@IY 0M_(-}D@J22c zzIx連VQ$n~\mҝk(#ZF7@P*(PwM1_`8LXFa|ȾbGK / 1Ylj) Z)E]Dp_^RRr(޴ƂtOqe]&mv #dm,|kODtj,5*%ONt\DqtoUDF׆ѽL6O^x<ϭ3eB]p8&^!@(U&6O6Hl& T@HeLWS8-̢#t%pHUXX be֎o'd4!\iR5jbܓ f%?d|Zes]iЦȾԦ!0-2&< :Tlu:'Kr[zEэ"(g00 Lԇ*|r|''qX'iO!E'/OۅR8>3e)&>RK^pMB&ç܈WQ„Ml8\[D!=MoU"B7/5?0u;D˲r1B6MQɩ%!!2<4&%{_YR$ӱt ᅖ6M^De%"eevlI &_LJ&ܴu]q4h)VY65UX-PS,=Դj_zoNe OK&hE AI L z:N4 ߮?1bv%R9vbID+QOsFpH%x=Bo#:Sqzg<47̺FEZCZm$2v23u)\08[Ҧ.uNTPqfftchv#8 UqQQ9 ^`I|bb T 5+%Fc~#}' K}Cʞ?!DxW<]ʜ2k4Zo㻴)tgc6%l5qMRy^= {nu.WُJ>|3@f"~So=RTΎM=? ܟiQjj$ O[xv3/^#Dx_q N/`"Hژ"{4^SIP 鲱Sk~GdsADD'i"31V<1M]PG"\-XnӺkE>mʒIKINܲӬ8r&K=[wZ .b445+43db)Cz-LKFj9^ǚ?h06"` umnüV1?Nj? dY_A8ռ5"Nmɫg1M"OЫx<:76e#}<]-V Cߙ^|f8ܧTU:'14PQ  aկj xeΤuˉLRNj2GZ"â~`Q(oU<3n3"뱡·r[FVaq_cl",M< j:Kd1x~FͭD5qiQP޵dX34I_iŚR: i|/jiSm sI'vxD0B@LfܿpF._*C[*UK|r2=/k,A*,*KӺP~=p$.03i;0ʫ/?u3VS n|@5ꜽ`}V3V#U`ߎhyLp(Wk{d4:V?#l4K_ GZǶNw?E!%h ŎX6]Uq2 /9t-X lf)n|ޭ ̐B0N dΊ9Cs iSAEy_:پ+QH5'yQ1{Xi8J;t]<+ޠfsUr`c+~9d:/gN U웾y:o,5vKO/aba uHjʊ8.Q QK|gK/z`G@0ȝCbK\xX[0p4 h:o;l#UqJѝ??8K!DSkFur!g[Yy4hv_㺼0 l)1nH;BUIdcgJ蒹:inm~JJe\CjYAGxvSG: {MkO;x&~Lp 3^&D΋`񃪫z7QWY%N榰lob2ѵ񸣔P qp\\*4 _8D_.}W#[eX&y%_bߦ]ZBfLBQ5xi$)Lh9.4I?B}l/+ A*maxⲛafB5b 3kDIJ6U1ADu\N~mcr!=)܇ IP+O\2]3ǨeҪh*)+%c;H>κa6CnN$SAP8?!:U{86qY`8B mFAIx9jѾ@3mBV!}fה'c|=YӜSiq/ZcbBѕFm&5adTsk$8$(?VZNlfbdlLuF Jm4> tI}¼ 3h 95AcqKeߨCμa(Pa8:m<瑪h@n,*}n|la$1'u|8;?刉}x\H9LǑc B@_Ǔc1 S!XlRԸux=1M]ْe8rQ< bFRr n]OT!< "nDQ̠n'b \*Vz=TX9Ye?"*5-vI@pJ=~;ثC8"](?>}(diov7Գ2W՝5fA|l;0Kf.~k䑏N~$XBeNbX>Ɗ3m>TMXh*ۥo[J"dq`b4$Y<߯mZ]E[ն}FU㼁~!Y4{1W(@7$ٸ0=_L_&PR4@َ km+0 R/Ya-ՄMCwmNG<6G%Fb"#pg !=5s"6)R}Zq߼4VLS1e"O+_&5[٠@wdžg.d 'G7t: !cs[ 6o sM6 _0ңM.ԅÁW~7{:>4D]Ph #21Su7g9bwnSOVB<&x5ф5<8dx5o)Tik~S"f31WO殕-^m|&6s? ~S1_kU&ڔ`6c71FH(stSSY~=mxAFhjE*Wh߉mX̛y!T<9-]A̗x Z8cZ9Lz-wǣ,L(%}UiOU{e? KkwڝݰZ-$è4~DL*DCWSm5 v(Ch n`oxJm4 1#pAO:FDtp%Xl$-BU 8N"]`,!-;+ඥb0fHta+'(aY򶵝V jSdOkXAJ7| Me{FU{_sT.W)0'4K- &M~w1-'b{yik`PpA+˺-sU{+g sRK$d6wiڸ˕w+CT#i4p4 C0J8vMA\~rmSw"ʡ ;u_6Y Pjrbc`:ҵ'QuOjcjñ? !4!gieWn|r`od:'jgLP0QhqP^ڧ VtIʩ/G*e>dO"?֯32O却 h)lB@EWrƺV(.fe£Ñdoh,\E̢,%C"xKpMȷ@θ"&g`-Q;&2?|#l D#eyؖoG9T2Y )6}ts:/](8Æ_׹vx<}Vr'8Y! Рh_ 4Cc :TW&ϮS.qE0Al˙T~0Cսy +$eeAe\y79|@'N7c@ی}9MB{œdeycr*^Tk.͊XSy!@N'ސKl[ !"nD$~ vO6qe=1X\#WڦxlhAFCgd-q+ 򺇐8C3c=U. wSBq=tE=B,?tGg8#%5QΊ.RZnaS5N_pϣB,n|%{ԁf@~Ojdl~Ryz[ eYwuPsS,5CcH\G K9QgloV'VAKSAihe,/-VwH_\s)<"4T Qy5kjT6cr.vpkPcOu)lg ,F\zJHi E%!]vF^U0}?ҶkoRm(+*0yYOAJk('}ŃZ5ͷ(z2ƣ+5Σ<]J{9\9UA-z`]}nGDd2Ghpo3js;P- z|s/'/~WSvxux|&p1Gٱޠ8D sG%k6'TjX]?a)e(.܃VǎY'%wK//7Ŕ_䔭FM:n5w3!ͤk|g#\V(?$G ć΍`FFfa ~ :c~ `<)`e|T}zjK 1 >ü~yQQ?CO(&dI˖Y".WS-.i'T\e 8!_^_뢓Elq |e ^URw8e$cՍp*ؐ/!quBh#pĚ&S'#]06{%f䜻y4cV}NST<4Y̥%+ !-Mn@TS1rtr ~ff'%S/7ԑ@Ͷ*JsZ Al748w%}7u5߭_Kx5Xgy8X׎H,.5$ "q tfXb4Loi_cM$sq_0P}n/ jG8(IaF(| Q~-nG:vݳ|9QNȶ aj 4O5 f$pKu {:j)%d)˓*R`#_{d j$$ ">1-Y )64F~knExj~l%u3Yؿ/57;H >y,A,^VE?у̷.ׇBJb _"xܮ-7nا$|8S͊?2L*F,V?;',qWQXRǿ+/vChQ`~(`cY84y9_ ,Ȕ, p; G&ձڮWl^((bP/^G* 1í}HqoGwu?L׮=.A+Q.Ȧg2 "C@š4Gäf _MPسV UTP"O# qCOM̻%;@A6z$CT2oZ17DP;Io=NB.릅oA{`,oCojD60dh0!1zK @z?{s!zL8O@WX>,w‡}m)## K D'g#ZuNLײFSCvQxq,.{{0` nΝqݘL3bh.?UwWsKK<:@.(fܙh>/t@z%h̿4gisfSi6seph')nNs͡!ɵ,-K6:x|R{ Yq>勀y2O܄PUtuK#=S a2BY9D}1NhTZфݕ ?@*ktK֚(:GTW縐&f 5_Iɽ'|r*4tPx%vHH8lޱQ*~6v{G'$) @"݇)G9. 
c njP%9f9i~UYjWAxF{W{1Ik>C[0b곥f`)ncv.ϧuҩ˘=b|PO&hhs݆iae:WpozFbB8_jX'nXL?m)0@G_O%=2KGݺxjȗ傷s!3$jM* vD܋{%N%*Yd25nV6G'(cm&E:ށǦ#vH|"n1Q!0OލPhEk0 W1<ǁ{n@dbjƍIv3m "dRMĤ4C|R 92zd96%hߑ;>p\` 8;eĢz"Zh$h-8ΨRl]<Q5dCZmcObj{A#RiZh#gwe~&;.u"5bC,LP&(Ǫlpוrm]%GjzLNi3iۨ;̨ 1Tjw C^uoAد,{fp,l^{a^iF440 D+ u/wc1,;3.=C5Jd8:1pC>`SxpZ4 u8]pp/fVqxp(83`k&LY⌛oKhT"<>ٔ{F[@R 肿̡.Y@wіƁ\rP;;SJ؍7G>ͫw$qЫ`mŚnOgUV ˿^?Woa`i\L6@$p}r:$Tr.(tT[FL0@[z1NqM0ա" 賤FШ 47*D奚LjR 1֞;|;4o8hvTg)DzlPp. }QH;sv&dT4Y剪.s]go.=- JQETǕ$FH Pb=ĸni K2c_1]7)x[.!}~D%9[r(& *~9˱uME4.Ć肵Befsb0 ̃nfȆ)s?n!+bA5[GMf@Цƹm.Nϣ]hA"|gqa4.8I^yOs qomG8@>IkG!˞DZwS bOU 64/k7ϭ܏u=p?~ V{ '9<K>7*QsJ5Y~ p!%-itAӓg9dϒYSt^@wRj։T9bx <ΠpOîҢm{zbf@zGcq!iWk~jg!dٯAԾ˧r9L_4Kѧ|>k z=:rDB|Xf%`8]X7++2*LuV66=k$.A\xRO_ӴKYYDrKX\&~Y~u)^+ 6 l 7a2-.2B.lƐ?s+85 c{Q [Q+7spvX4cC;B$T}O5=_lil9x* |y`HM tdVZKUW;_.jG^!v]SA7*|5&NB!Es`JL}3aOf>`rN[c~whUoas?|(C#+Ԅ<yg]HG: qMAy "JMLHv<C[4Edb-21`L]XhS<`?[OQȵ\H%DyGZ(}P ̺'d%8\ߍ̼4m6%[mHY~ܑmcC>JL|p^3):O$48d,e9]Ml-$J1nrd{%N[DVaePM>P?LFn|)`;Bƶ,l62-ʽp˥nح*]^M%.%CmGAj^(&(3Wtʏ 630EG2 ;;8+~ t\$4 7xs%~}2V| 5WNb=_U=ɲDcdNq$.)+~oJqhه/L8m9m_7*-=CoӧKBUh hWk5EkMK}`\k Wom݌`4-qǎ/ֺ?8f FCl1fH]#j kO.䄈'SWW!s!f*[YFPHyyeZ&|uRWzY=O7n +\Qz8^ΩkFWFq`t_WWw)yg=$6n)#]T:S&dY^ 1@V$3D/[*Zi8P'yRC2Bև#j̋k[r~4-D̙x5XSBkB,PYMv[A06%6 Lk!c 1 ᨸ/HCfnȃ|=6gJFLqL+"E'kҘW 6/h pqٴI;N-rfw n"qzl q݃]$22۠փ|K)V&~:u1N.lZM,O%ӋvfK6# Փzx'D@t:WBFG,™^ LvCh+z!n D;{6-1#9fW Q)`l>VWokbj+ȏ9R7W|ݮuvT ǀ c鶍t <p5͒N}O(JVvŐ*w,6C&9 Z[c2auz_i`wJb}} TkgZ ~ ; G.[\a"b*U #Pvp?iNd br2<-]"r*tE9NdKrzxHk1p]^X<|5xQln]_KoOۇߟXE X V9jHJ4vƫx( *gW3>cd>SQ@{cy8ǔ*>gғ^ݢ]y!o+)οjD)PzS:c8+75UNT4B@),YM@qtJ1sz v"51ZP  Pv Ͱ|2<ڌix54$xtxb;3(Mb:ad)] v3ߥ 2B@dlu邭,=ou}ZV9^/Vή FXOlVHs/{ )S $V]!%p^t SD}j)\u*b|/$CJezŽf'/L IѤUٗSa{+fsd'>bXR^.W鲥ޅj;pOPW  tob5+QAurTF Iw?8IĒ7#uHwR'YK{rA9Cw7c[b8@[SQ*So(fN;<ރCd2 i <9 6'fVB}AP9)[=3~X7ܕth"n,SV%5s{znRt E-uExQ^df\jg$_)>spr啣A\@g7# C ,A$b8 #`kUp/{bZd,"a2ڶ 2W?8U'8BH-Q,Q"#XQNSKoH~ʾI.h+UmSP_2I<04#$Bz~ j(;VHx)Cf[0D<30W@E!)tAlC!I˙)dX?&1NN Ry,g.쯨$W4=e}pGҵ3E/hHV Ӏ!IzpG ;E" ~`{b^cs`cf_ {ds1!hllQaLWN.#4ZbuӬ.^; x-)uZh%eHڣhB_Ǩ]bvGjh(읧5N1 mvw;puk_WYy^rW{|ϼD.EwD(60iaDPʥ`_XʌLRD2-|&?PD4x+n8l4C,AAPh.0Jj& }߯vE:@V$ۛAιPpGw<Yi.2ݚiNsh@E bcٶv&t& OAqP-N$qAR\n(KCE*zvDZ_Ev3m(jM,W`s =d݉W-{la:^BJ?-8XRݝl}s_XʟB3٠#F?PA( )IɛYydJ ]og~A`A(V՘@o/^sNV:ưP{n]J-jR?,K-=I3u=6P! 'Ӣ!syɡc~+4m};31W LD4Sz@h5| я:NjJ`62(kvFP>OR$OmUy-زs)ICJ™f%MAA_j CQ-%VgIkj dEP (DR`0hK 1ix qW2xcY#򥫨 qɕd>w-q+.D[Pba.2=\(?ilOC,+c'5g&8,MIV2;\D3SgyRG'}4 <W@gHu U P\H10tC(DwiҝE f́5 TyZD"8u+#4H貈As2MP$Vr6:ll3B%Arwݫʜ:"C0ջOx<Q1+ғ=5[ч1u;teS=~) <xf >guꞽYTTJH_!}\'K˗e=wZڨm۶m۶m۶m۶mm{tz0LV ҕ+6Y`7(O_" ?,CǢo=Z,C?{"S]JɹW)zm$ .zkًc1,0MtN\<%葀)&qTSӏV$p}(~:Vȹ*滨^cwc g+9[dwLL$jծa^ObT=;8KjxNܙU kC(h)Z.5TOi.KܚFPD'0?RT -^ < t3~ 39DQCG<Q#>?oX.DMb$jKV>A\g?N(!>&c01w.k^nsp$\ݳdqn.3 4Ib~{8 9\ _ѻg1$/2 ǽ=3gF:JwKȫ3;Hc1ѡh= sda!:-3v!y9HgX ݻf?:"/&1Ɍ"@W/x`ftIYqgx).틐˰1\SrPIs@_eIUNfPKyNܮtXմm'pȋO?L^N)"juu POr pGd6kcӕaW?D;UZ^kҢó}W=9B*mN؉jӥN>خH}sG؎݁/90q""3e /} ܃2r5t-~==O7("QYylxR ,o%'ܒA *O5Oh=nn[:e&'"Iy,t {и7&Jf70^λ2ރyc$ϵB)H;2kR(D8\|؎MP{ځ_G_ cRle A}7ȫ9k W<= NG~Płw٧Tc,ۙذʷ<[֧OEaP 0xSRDD[yjKu[=jUURkAo@P1YSUy0 [?%0k0qI|!Ke* .0I? ohg &@̾l; 8 )*; nUP;5 o{bhZA= #{[e|^o1wAkNF?PH6!'\ Ro U7OK)}!Y^9[GH1t3l6ZPcӐgѪa0z1e,@Q޹i1V+-Y{{DEL3д'׎>;TH3F4#\=Xw[Ϫ60."%@+1t? Bt[:ČқBzi a@2[,*=d/Jՠ\DGЪrjLL]hh yP3PŗOo40VH1Et45/Cn*-j:WUMNlևN_ZndNC;yp>wg%Գ/}H0quu.?AED1hj_>I%Ƞ3גj׽ٍ`Iثd-K FZt{c8KU3OA<G!W3 K+s2juxc? 
~R [Qw4~/PET/k0o[̏ 8(siJFL< U|5\Iac]8e݅& jpTsR` vYrg~Z8)nX$?al+18A}9J|mҵA !J4|d`,N8|`[3\'x`v]'0LMP^M],]Q:EBP*$lkz1/޶ #{O?BOWG`XZRS x eІ{ܴHB0Tֱb1TeGW<@t&GNu4C\]Pn >I\Kn@aJGf7Wt)C4N50 A!5V~}4!װ U2)+fPnCyU-ij'ޓo5d#ZDhmߐ 8R)˼ q1Y{ԨK┸9b<S^ɛ/[]u2 )lִЋR0@]ԸPrGN3C~G#vV6% ^ PskRq% }s+vNGUuO?b˸ȒBZ|[Y0Tp^=菿>dnxJ}נW#a'cC??dMP:f}^,l)}`8d!*`,O:RtTMN5WFslK|6aSAP!3zlWQo'a ?x@`E@Who, ՆЍn8lX%m{5%B: ˵(҈^=ҘRuX7* ɕkZ0kjOħym{/O;m!_V &U ROJ7ǧD "Sb;H DShU,Ӵs{arHnם!2+Y*1$AL\,zvvAJ<L|SSGJX ^<*z1˯ ]* Q #Z0yOLuE{A5B0P9PfQEW =R^`tT|[xÆǿ_%b-qVX xۗҲȨC!~ǿQPH٠MVf\Qom4[*7M+m"h+`#p ʑ"8Py,9٢.vb[~cWwkv(a|eS+g1N?!;3u@hK>Qko[{I ,j@So:*ZE=\خ؎F4d 3J@@ɕHD/1vÍq+^NFÀ EE£eSCl[S!'U4gniduZ?cb=DK;M]4]OTS9Q# S,DgC:z E=IPB dZ '@(?Y.ErDN7U|o&W߅K߂[jrt\;UDž?h>jldrl:>Yj + '?%3"`.$@Dŵ e'$q7.1-Fu:Ac[}_ ʜ|U똩[F\* Nt}sWߐ&qYL SqgkA?A?V\<}4̄0Q΅ͦS^qˁnB^vA+ٞ{MO|OpqaRW\t%^#K$t`)F@z1 CMM!Kyz%7pg _yTt" vԒH' n4m>v|\um7;qRI׾ @rA*0-egOx.Ì'MDbx&́~1fU_[RnR>m'f:4?J&`r֢\cćRGB [ Ky j9Vk9GtCZB2;Gs.RZX]>ó*Y-ɠ)JytuݘtUxSjNG=BT_2#Kl I+ށ-FM9&RSP"˕/o"$$恱%mW@?BpBR\q Kg[K+Ը̈˾1f)g(dʌc}iqh;,pZs]:/&vtƒK\(&DyXXjeK1bW`C @,@SrN(-òW:,,ŊKSZxl:°gn#~=|E O48}Ty K8Z͖OjEc@s.UЭ:# \-LStB`+[_) w><1)zbpy8; O\#tr(Ȣf!|ސp`~f }ok}Y뻾3#a3f8HDQ~'֮D5fnp0\W&xKBKP7uXbQV7#`?U%{yPCL2T ( _hI>^%)XzP"Pâ?Vp7tqFBpZhU5e##v/MR\L#e@XBܧrr U,ek)]5KٌSvDV$7V_ e/ڪ] Hl-vLd6 x"aN0p8o+:ڦW_4wveE9ˣ)^3Z`{{C{>ʁ RdJ_Dr5Gs.Jox|R cگb 6Pa8\/buH|b"1z_O71*E(̀dq+" "lgu)XZ985РZI"1/#m!djne&Aka?` a?+{|Zk 8!yxSf Xy3y=Яa9#~2#ʍ=c2qbv#8}}K&8Ue[ɋb_38HN`dm ILdۍW|僬^wrzп4g<'i[2Ti>ƝN_L=7}$N=ފٜOMC_>6S'Q17⼣-+|T5Znlݪ ϗc< Ys3=6.m*Aޡ5TV9W5<˲Ѻ~WSɭ[SZ W jti򾻰v5r 膠2^2N~&T]8/XΈύj0a pl h6Q)0ha$ pmi:c>Ha@DҖF DQi8 pQmw$q~o4"~Wg GWL?ZHěn|QEM+G*c h`c 4o&"CSj !8 f$0}OWɌ޹MS>'Ն{`{Yǁ# KcqDX(01?I"#ƚn'DK{)_x,i1{aT&g|@rov!9rC5@A)wx E4Eí9ϒ8 &4@nlqbvif3,+d]y(lWa_g `pNyB_G\>5u#VF1>H(]-v&?[/]R"$/I(2ObRǨK0\ލ #A^zWjx>r%\f_ +ɮ_H`+6͖ȓKخ3r{ ~Ayf*zT׺i>Fm ^[;c_<{`]Cquju}"hFMzj^>͑cC "*ؓV0KWCkpF-I;6{o/!٬ʘ!R(/٘r y@0jukC7Fy8sƽ*9vUi { xWADn)h:H_cnV\M_}Na﷦ޔ٪5P,5 jvmt5bZ. =P@eDqSU4> #cQƸ6\ќ38s-]Y2Dn#hL1<]lFm`pɯUMQ=b{0/5A];&%nP|7aGSLqvOn;{mض^)sQiRp㟵L^҃b5X)9m&r{KIXk ArZԞWʤ ߻pм~l-p #x,Ĥg}_DrcvAYn0H_ Arz4elgYؗe, 7H)45L'M?$]q U/tdIh?d?RJU|͐{i, 0)s#zˋdHH.[Ga i? ubu]{֧V\0ٻDÛn^W{QxmAQ.19AKd%p dotQ3۔8M4̺ V$#\=&O.{`R=sdbFFL#L87KR<⣧~'s t&"vGi|VVnՒ3/5|M 0Da1#움Sv|D*noT_TUFqdtLu *$<ĿbOr#sMDyӯ\!d>S=]e_M{MuI IzQ½ˇ#1~@=`ͤiV- anS. sNnx|OJȾ)8X˛+Ef2O,\l*k~[V9 -"yG<##^~E$n E]W=u/<،q,K8_Țn+(m~ v-,!5KL9Kj֟(ℋ?tW,N" ?0r]E=T^GcYel _簧1ui_kx;L@`/<,l ǴԉDlmO M6pp qRak=ؿUy\/CΝR.bhQV(O.Y&_9Ci?bn8Jev#ُ;}<_ ޥë%w(hj P d;ZsN(A=4lPB|Yȷ`19 ]0@.j1,%g7z5_RhM7n2u=*s/q)jdgش'@pɀfm51*qBk,e8hY[!Hܿ.0 +r:jKwNC7v6PyqHuƩb>0ީ Lڰlx(Fec6}(r)^#l Bsxy&ҮA[de B6vNuڹ׳J7 j\'r)(ʟ;)K~f藿WeMVt;o>GqI pC=ƟJhd? Vlڗ#>Ggs7"-aӞA2)ruu9ʒnĢY䣳dl^.\.b8&Yߊ}zTZfc\ KƚxԒ: u4}(lbmENRW_'d!.Ϲ$ $u卜܄w' p .kʊ11є:Wc$-cIL\kLqIJQ-Q QGלdX2դ̺25B[kR))ConXwL0{aF0W > h뤫s.8tk=!BEyjR vJ䧻gM~SORf*fLczU~QL#8f_HuQK=C$^0V()TDc {1d'$%չ3cxAf|컪j6$)3P/T!IJ;NS'g )er4K|"V @#Mܞ[` GOoD9%]'bM۷ccپm;k5?7͒RѠ9@P]2x$DꉒhO&n}2>7dCkjfARlA#h@DUhj>@oDZuç"wBRs3tH5|uɘaDC9w K0(n~ n? 
׃P:YgUM +A|k3HPkX* cuIW+-,C[sBS֭o\Y( K9·)K}\nUT)u\ ɲ|AnAǩY.Drr^+sPy@GS|XԆ9("uk1é9U"{5| l?G87vͶs<Z( ?(Bͪƹ %3c.& $mX ኇ<+(wQd.$Ǽ@y|_Qianfн`1ԻUQHԅ:WxJuq+ؠ$.Q`Qݤ``o粦T=nV8v[]'\+aVd"۷;ӳ;Efz=SzT>z0 U!t< 3و ȴa0Z?w1HѝƦ/:}<GZ}F@ۆbτk,!7 ;Z#6 7j8yk]Yl>p3K^ٻϤ"( loqpSf!/QB;G,"jR9RTCt7KvA΄zr XMB9ӇamRRP춫,ZI~MQ!_UkU7!fQzٱƩ=xRպΑ@UM& 2ba|Z 077aK}sQvW]c%}/j&N;mv^#cX8E06?[Yϕ^Vp<)6?ؐiu~WÐ%^\ꊡzO +3rjv@A i`ԔQ ,K!A렭.gw=WzKT$49 t=`.e3%TiʘdQ NKenG̼Pe0%_^\ fls~$egwa1쵣-&+N[9|b: QU*l[#>o7mR`#6a) |V*]Ӿ(t^s7b1F6&.4X& ek%.ѡCk|9թ$@KK% ~N#vјR!vJL24x%!Me*K _ ܮY]w5a'IxyqWîR~<=-q-_5uxm"4pit0kٖ^.#iv$_51о9[OœDomuoR-q.YְX$PNFbgnb3EMQD !DZ9&f!ב9ѹ&D8QLsX^ /'!Zt'bV_#0Mi#.<ܖt&KjBZ\X#w F˙Z,كz\1d`wv5Vb22S0_?+IRMJzm Xfa@2mҶ3"*(6X32P'(2};C_V=H'7Xxf} Pm( e4 yD3L@&:`s[u[Ct#gX)gDOɜ.`!c g3=>_ik !w'cKj^K)\b (,~膊.bSDL]V|.=T\Ux.w%2RY"`VPw'^e61*PΗ?O7IMkX]kiAtVXW2#{[# f@uݙmKىG1C"X3Ey@XU׳7J!ѸAvRKu(*MHt;\%8;ɗL4QgH"pzQ)@e -A#3}j8Ԇˠ"ChP#nBN@6!DTHk)tXNCFpڋnV (Y ^idR$icRum”1 ZV a Ǹ):17ZPpElA4+5e~~Ih%1Zz}4ɔU6z XidFkg۔MJWΈjwʔ@uMcRXI{~N lZ4sN* 餧S'&JtovUFjFT(E ҄>rL(n ;=Ukc$|VވJxXL)l$sdJV2b׻2 0[k]O;A03j CL^-!o:mc'.oQ4A.^;:L hAP-))Ӗt +Ut*6>d{XPWC$VA.M^5-OFkIXEݾ]K9^eZA#~t w=g:o)4c[΍&'t7tɴ);ޭWb H;JxNe|Kg6^t[ݭ=]ynXؽ<ϒ7*>f{8vId-H྇(]-8 JD_{f钿S9yF.5jqb/,L}6'%M<`YY3)+R Dڵ.d<O_5cXGq?>zXi!cw*HM⍮U 8%li'P`:1}fĐU8_hwo7oO?x@g{d9.tлN 7gnc.:HqC.A$a1YB0v)Dz'@swPrS6H[2o1$1'.󥔤"^<" qw1"FĒiOˀKd*ҵLI[qi* ,gX234Uåfh\dI3CsSm?9︕@:C2=V7:VuH8|61u@#^![\@SrtCWM}Ѩ?SţXuBX`>=GlirФ!EJ]Pd!ҷ7 pGZR\?#6F~FYxdB%TMR#- !*._qaI.AkYxHCnVnz n VD,/n%" '(?` [?_ žkdDJ\w_:ub IVw>KJ0Iˠd&2m"X% d:h#`>9!Yc}6o::SF-TGRߡM ̱&KWmp9^dIL"S5$#9e iXzѶzJLִ8t4K#&??^ߞ8a gp1TBgyonXOl2\T']g]`?Rg_2E?=0v#L]'SvjqQ"Au)u,DS YSAbH`%.IIlC~/,g1d#喡 gzftIգ%L׀_tw5~ v䐖5OXE?juSH `/ij$f]^D|VK<} /j^zP6:\X C=)7Р"ܼg3 z_sЖGkO6?~G >lyIW|OsTP[?Вk4cŧA᫉XR *jiIل/5ox Eq"P`rͷ c־/p) 翪2uX=5u0w?aS(CO+nH-hoD rWُ6Zұ&gj#@$K*_eS {EoF:۱)GG 0(X+cڿ^n!pP|/vB`b9KY[瞃Mūg@S >۝b0N:##OAQG">w4Q1qJrISKÄ}ݩ⎲*p=@}h\w9 %1`91) ̫xR{ Yyh/i>gN}&fXY4G3KR!1т=IJvm=oL<R1!Z Hb"R|PTYg&B24=Pj+IXGzVKKSK"a"ۍi=g JE^X'KH@xքmZ|ϗ.PD ;&*ÓӶ^1XnkkуǶWYΚ4nTI=;)$R =CBZԂr}eշz :c67y !OON " ḇef;K>H Ky<\1vn\;2Ʊ¬CW]L)eR&_L%f̎$\ڭ. miX2>xMI>(c:[ !f pR Kfy[M/UJMqwcf=9mgB6y7VA({՗Ѯ.9 O Q\=g<&ciFpF[9=˪iyRHvc*(gI\zzff )|A ׼*A8Fύނ;6շ։ 24jR".C4 ]xѕ l f$#`h1FJտZt9`(vъJ^bX (O˭8}H; `h ﵏$奟Ԕc"!v G`zqf޶H[l~ H*N.ϵh:6;WNPb)^ I۠'ƺ_8K>̳ׯ0;`UPt(M ঐzCct8T|nAV@9gph:}ZŪVYx4ynΧ;xMQZ}ɚ(x1d7Ջ+!U8D #F)᳟6EUl~:Ijzw F_Wl'"`a )gIJ4Y]V˂FH+.#;ho#፾;fGйp'3KrZtgP@{UM,kI< }BT8Q>8L h.ю\<76>_aua>b2g&͏q!IjGiA<:w]Iʵq+pkY- F\dpRoXv!<!?y# 2G Luح7͇*S>vKebqLқHZn ~ b#TċEsk k{xx ~dTGs#f6.nJC9.[; |l#@ %甔x 0ю+R>ϧe-B2jN-Yǧ?ZL5~Q=~Qɘ}ʼnx;O L%uݺn6'sғx]4HE< zkMŋi|%T]`]^١%$ε =Y4/}2bgw#{Dg^ms9߶ !/+DxIݲd4Qidl5UMw@TRkN^`rZlӵ3[`ʣ|ՋT^`$^4hser_ʇiIRR38]U8MU0[630%ZG7ߧ+BERS+<,^tvԥxZa1NC M⥙uxزjq0^)_8v1%c"hHWP1^tmDjV~_v sKo _VP,&[ 7a:GD/#%;7iEd/}|ؗU~ p oya}ӵÆE(Wosx"naJz9L5@4G0:zgdc ?\Cl[S# eНP ~.6[Ht`.SA;فIC2=3<\чUFЕ&vgT ] {ۯy3ؿN*|olmTr/e,N;(a\Ff m/S lv͇[C3s؀B~&fyO JU26mjP u&jS>OK6&Œ#[R?Y`]U][Rkc<-hLH1*oآ.&΋wYQ( uZ,-k(&/r:w_ Mq'm֪D_Ϻi핥8eY8i5Wr͵&9\h޽CvA*j~Q^N_Z^8I6LHi|Y.c Ss@i@tgb\W}'[*SwKVFennjryଢ଼e5)c׭R<:>'14c<*w!\vȏlefRtDbP@+kq3T㫁yl O [KA4ĸP~Մs%9/YKWˌ[{m>xT\p kpfҌZʤ@bC8BD13j~oK1jhҾ,})Pgda)LRƴcK*:+`V[ VKM8]“]YRzA)Shրn,QuLRoc[noxF$HGU \fuyȒ 3p: @pqolS}Nl!q sҴX2B"{ ,4;sM& CQ删_DPX/"~*|kC$´ms1RtKp6/zs9 n O1)k6=pCpPIirZzl<4Ҿc4NaJ,(rD"#"IBʳT1RX)Y G7O1BlwcIN:V+p'Rׁy1cw1ϡc(>qXBRhԺlO!^E7 }3N"ް l>:fYbsF/Ȫ=9:gys 3El!ZL&sPڐ-\{D:OAZ93}nSZv$1Az^mG7{1j/$>Q50ӕu~̖ Ddx0{5礒@z{/ NT?! 
ƆE Z:u{[ž1v 'wZҋ<W;n74QR;S>|FңYB(i)V>W՟ jn,B+mr8| r#1d]269 nJuNI`LoxS d Tj%%*E98RUZE6Cco<;5_Bs[0C~e^I zUkr(_P`òu9\-jztUP;p6WE$z.nO妱,Mj)D" }z$ǂrYCxt6ORE &9i y_^?jO ^Pڍ:.ޓf(z%r ?Zq4)hNTu= CG4rr9}ӾfA>tiT.Us[@bǥ~(dk#'*/$W>>2bwMCA~? 4EdVKukYC"qJhÅZGO`r` ȡ!e"LV!Ôr|m0z\9&?0vҗ.Ъ ogrrlէ՛\9_gi8=*%;!]2nSO3o⭽I4xuAmF3i%dJ#o,@d7*pl+CT&Yuw|襗_ttZyywPqA|ye/jf H.-PwbL$Gkٶ@Ry&հL(7 D'pS}Aـwl7 ۈżc&p<. 3x;;۪2Ne"č3p)g\7aTU+YC4XX_򸭎 `ÚTXM (`(ph+Q* O&L-Pmvݾk=Djx{n(զ= -٤= $#Q tPhk$7#uEvMVKuzA|kfՕaV ݪ1k~1ejg|M{O5u{+[p&PKzۓ%(Com7x9.lDL[t3xi{b[,Y`I~w%b7]u;Fd،EΧv}9FduGܚ"F6]Rp\1 }."sԐ<S? ]X ІtR׾lS2?GZfs~vMn/:9XV0=WzWE“g r0 KZ'vR txڑOIdZ:l#Gܬ iUO@sC[:H4&\G]\+RŮJ! sƨѕ]kx肝QAAt6:"22gH!6zPK,Ca߷W@-,-A?|N# Rd,bF^Lkʔ5M G'0&*p&!%e?8 *+3{_dwrƓ7:? O+DɻcfCj#LƉVqc< _q.ĎdwEE o+zi ,iI*at[g䬘_ޤޙ'w0&?!E},- .cbfL!ɲ)~Hv!'zG6i§߇v2of y F6(X; AP]Yo1l'BDGzoK`mUPMEKj6Ltæ%jTm#;0D2Ԣpi0zz9#N֟ +W(%(Q=XCD/ ?I/Tr(UXg{]8hS=,5SL7.F\:O%=ٕ!0U*A +dHk*zj^zMf_l;kڒFYcPL0M'+3 34X_0%R 2?@Of#n|9; oĥeޡn u%;oxgbh\$?h&d*IY0^&((H `Lr~ Hs uTko&[}cŒrBv'@Mق&(#U8;?LB*ܴ_)jO[G>}Aw" 4! B8?2R`]PHi<0|Jl{̅v6Nk8*r*vC ,XR@S#VYXco{`%]R/.5Iu4nD_?"&a%֖ohdz_ϩԴlW?/R?43+A~CfI9r 61dWd` Y8 # CQ%P?zrKV3[ OJpR-lNX\Of.ҏуrX^goWb![vdCdX_ʇ@  rY'-URl4#ZZ1t=Mݫ؅$)1H)zBAy!ٔ 4D}6H5SۛB^(Seֲ+)Uz8t}\ȑ'kc#9Y'.R24q5y?5̮{ M|]hӤ=D롹)t4uNL=YR(tfyr `.&—YRQo;rVc1D&dYWХ?sWˆ%(+f&Th(qzxO#ϔDmp}֓12x)bvʠtݯ`_met:Զ${'SNG+a3(^$=W4P.=( 70i!a2m".밽 92JE()0iC*""cb1kA"`Kd΋R6 x pՙp٭r:Oaǣ3wH'erKy 3ȊkNgD`ge<.4o>j<Xo9!k f/6MCxY)co;ZcQ];R _y7rvuh+Op .ѱk9 t7!&"/jfpJFt%P[LDGO&~c/xo%eݙ^tѪUv#7L`MLsaDYca O#ia;uSO9)"k Ȁ* B01n;R؍vqk6f|5z_K~ba@>U7Cͣ  m?fߥ[]hKΖ,0+ ʮIM:[+|bh5@SU7 ڥp =>'2r/uƶ28HN$z&Zh#T,1#G>@"ErBcWafkbsT>#j۹ZSŔ{h 6̻tYC"^d>Jk e{;~<@O_Y/3~TВ k4 .bܮ%Rtcg= ƻPh( O㴷L F*+n4@ A9U02ǮX#7f_i~ɩ5=3=,(b=sF8 D^ WaGH. q6'h>ӿkrMYM>nt^6$O*{?f; -d0^S<2K ΰQ9 z$QIUwR搪dYܦn-2V-i,o@v9[զ}pU_6:y`9[hP ۪l;SYLTN=BX>"L4wL9%x3drO޺^;*赖vWk@VAO%T/eLe:r 0gSN 3(Q>O!T6@*cox+)XB]~VAI9q~͝ &F+_`cډ+_<okM DJɰ`ZKU[XBN%#[xφSR4^(a3WA9nA؀cWI@p!Q^'rg>wd% `?4Q] ݫeauӁvIw BH9>)Q^'X6,L)v <5 sQuG7e{崮ms Y .& nZp8d,Gym$ic֚kG"XĚ3/݉K*w} c]11{ili 4$rJHK`X{*[圴AHgER=$Erv#-`T1ؽK8G/VʵCJcP?xŭj*L{j"9<6?@1vZ@18]mL&!cDC{ h?/H69}kJQV-NZ< N n1OBC}Wf&r:e6zll#R>@axU 13-󷄓8bMA΅btu;9Jq F?ص+2I1ڿjqjvfT"%1qYЌ }Kx.jq(CFwJWKKc7V'}Eob1XP]V2 \c[*V&HL1w79kT'QjJS]k՛oQZ :3WnZu$W$n?T֌ErFh Y:0ıFt\{)f kFL5AoQknuնqo>XbVqGCY~$67N`,?6_fLh<pw3Б![>X C. ./ogS ?}!^ƽ=mƃx'5w6.zf(9l8vXkh%cn۶m۶m۶w۶m۶m:߼GI?P"3vBL=&t')v:ӭeXܐ1X8hoH=u֬_ʰT>Ħvwm6FMD:Jw~%g#ɿy9DvO;Q@;yX(E EU OZ1rѼH .Q9N>/ۛ/̕XYb᎑({eUTU ogAc@jςC(;jliPGK&\p/b `aMxyB"HzAbG21#U5jԿ&&%B6\;-Y[a3w=brl{IyWH 1{OBD}s' dL׬Ԉ2BlK񊧭M}?ֶ)R$[[I]JH]˄jhY.z8Hy`~囵h,5QMsPg.ho)L.]I7*t|B -rBH4~5K1Xt&XɠՖa&E-ڼbN?@ Ӓ~"Nn?#8([R@[GWG^7pUA腳d"+%!/Ol;!wwk6a]8},Qq Gb!>80Q#V[p),^T 14~=[#lwo83̛:vfM$ʝ 1w1Q'skC@MƙLKҜU*4S8;'4,`/"Y&䆂|gAYsLT@+L\wI5=W$GjYz4lF3av8u >O&5tb+]W36͎dgHJ4-* SOQ&#iiźcoV[8Bd8 KMr Ikk*PHdO\ em%gHt~y3~ ®( 0;I=aWRѕ.˸5,MA! P~YeD2Z{A`tǍ/RsѴAw.b ]rvJP_Pٜ2C +ܱRR:LHDn@ ~y=[ǝ:OeVl > ]; H$9Xh\g2늨)kM(ȉ7Jxю[B@<7(4T wI֍<\n+p/& \H" UO^H6]= >FY~zxϼoBFBejoViy9j-=Y|+v Af ^&7DD @r;!Sx(1ܔ͒ef.0xf 4J:EF$?(8 *CV5Sȧ(;SO_ qWK& Y{+wVc M[? |Y#ٺuYc1P9f8yMP9uGbO>ASK(5G7lq? 
[گbArfeG/uY Ʃ,K7umg% 6z*W@_tBN8h0>#\5h%x.?[ܤJ]Y5U/1#ŏb=z.՘.JHUD=X779$ml\0!TvVzUqpk_dVvP ~vo;5O ]BrNv9q14&"볻 ,@ëDeX!{E=WvFC;Rk."3:vKo;*Qv=r6i]-MB\EHq;ّQ XF]R`sᕛNV9?֑u<[I*N7OͪXņTN$ k ,4+зtD++V1VJjw kMc n.]o(|D"zP>nK{~VbgMH̭5fإo7>;qa?B}jIrjIخQγ-sSLB)\fB!k8M:>Rά;;DplZZU$e1FeDb'0\Cr{Y#)5媴Q%N_4e }@; q*ϟRƀiW7ٝaE6c }9+R~@G&y~(Qɞ K-{hwvps\2D4(˧Iu0/Mu?`2zj;&.5~$dYxusKף[3dB _hb#m9Zs7hy˕4WȈDk Fe4tMjDFGY I>j%Lk|N`eP2c@[ȅ5]\p8ACeqU{9cg[eVc_+9VcC`IC#ڛ4# 3Π)HhPbgn2M$q?չ+&Y(IDb)^yCMRنUffdPZՋ,(.U5gΌq1fr/ Zz+ʃ')>pSZo9^΁{j|f]cF[’̟Pśge>{p.@ 'yIK,u4e8axj`i&pZgMl0cze0+T<1ͫ~~l|&)¬5*y9oV`Y4 r/NѳڟJ z^1n1'r%T%'WB R7PDҏ͜1?07!z`oPG=БNQzTCN9_mhJMi³>(B: hpEn*]i7Ua=7%[9%A/h e=|.* 舖ݽ]ʀUr_FzM:xϸSޢ?VijOXF&*-/] IV} wZzm{꤫=(D!q` F6s. \Q(Xx$q"m`n&I"GNxז{zlQ&kgAu j3RbJNHpP)7"l]րM@t#RCS.}z:/_*^T-8y`MKy u8GI'/}͸4xh% +!]&-j?eAR.w@)n^J{BU^5M V[y "̮j!T1:.WXKt 2?EXYh]d>@*KFXpA#-?;XsW((X&(rD@ f߭؛TaXˇo9;Ilv0|zooR7,4!&{ՌX~/ CtDgv(өcE?&ѺyC@7',=5icܠJ@I/G ڻu«5,5].bB8*]ȌhtJ'wAW/-~WoLDSQ3PU0iYcņͳcN ^p BMخ6J%+?Sn79_VXK-SvgL[[k~؜@[{=1g0 @y$û ZyZ7F7o[ѧf^GI"G;# f6%!;!v2A6fg@d݃ÍkMQ'/MT/OS% ymӢ;["@ڴj:b}`ťK{mtxљj*"M\l1KյVT'НЌ7PH9}-٩3NT_R: [Mꞕd}b V]yp7Z7 ݿAѾwm v\S`lӮ |q V2 !uz(RFioxQcuݣCcÞ"DEd>!=iMmSRNۨfC3߲Jg¥=mX$2%r;'r}g%ՇʣG 8q?cv6 j5^ܹmݑTuj0=V@;Ѡ -rN +, :X Ca=}kC}^[$My4=#jK`B,y,-8׏?UYĜl%[+jABRo(=漹Jg1& 0AwUc1hPz#Lq:ΆVy~$:f#.? ɗ+m k{z=v0+dŘXrz ~zh \m#.3a7V ҷUb,H24Q=9I#h8y5+eJu \yb@j璔 8:ؠ tX_!T_Ag<^HBþJt1`dgePzڪb[;O+%-a\$h7 T]-P>uU\9z|OF> 7}@I>MEleD}.'%2kFRC (ؾGrv"104]vfx߆%vȶH;wGOݍ%CjD544G *Bΐxz ᜍ`t;oK E*>tC]`)8r}aj.3X~ lBTyT ; d !k,=9~KK~il!t@%7(~Bު#Ȃ\6>5V@JWt i`kz-aUmɏVxRēTݕb~#'Py~{ b@[S ȘmSPH #3$ka<qƑTX?(; wl{>&a`[7-nf1 \_X Nx|gܬ/m:*q&k\$LGm6 KQeV6J4:EXuB[}$]n)xr]ud)Eq3)mcIy]\I.J><WU()2".198Z$S_߫>}N{^D9n;9|:4;aF˴wRH5Mox:DV?DDOpNg Lspɦ*)p$lI ab_$'F1ݝ" 䲘' >nN\Qш·˝߆N-iJ㽼Q>b vpgK >~m%${$GMtBg᫤pgWQA^C#Oаˆޯ*&352Hр+@~߮:^3uPa:1TFɀ*Pm޾oa1E?MBfƚrj|uJctEs<٦5w.q`D,kqq{,USĆy l>ƹMl#,k+ AKF2%64B5$Ώ!L.eIB)C޴ln5̡#zm(Z7AևJZά;+hꋥEߝ7s|`ppH nڪmgtH˫=vvWaAG˭ne?ڜڰQ5zfU|ݿ Td֙Hc[zw[݋EiUe-6?6LR&^ֲNY7kv*] 'hlx*^vWP'Oߙy'eBu+Z)Ao>ѤJGx7 hdsU_ lkU2[tQWRcffg(\ڳԄAy ^EKDx75_3E/w3ܠ~|lzPwr*%my(rUOGbC~1 H73*SP7:}-z5X;Ts* .ϑ$gB*^X\e[~(|fY9mb 4JcoJ7 ֽQV yM敖-U hOC1z\U x4$$Y>|f"8Ӎ&[)M"6߯^[ٴziʂ"gZgNeGvt4˅jX~5Saܓ;FƵxoX洉y"Ͽj"nyW9eq\:67%0ɸH-|[!+(yN2K"\AOn,l8=8oܤE(">F;l㠵b񈠽 UqiP-E &\.e%jR&\]b;:f*| +T=?JNd.\M+gF+!%luI~ѭoHcXI ,Y NB8h'83ƕdоQ%!E(K?ŭuSx)@5,Hhrxs:I3~QA㯫 Fn Я) D"~mm!%GWc: 4#dB0t(Qri^nmq,)1;\= ^[5v_1h&[ \Tfrm_1ΓO $rЫ `t&nVA=_Y}VVoQճ1Gذ[931#ltW]HK\kT=.&.Sg.ᢚI&vzi_霖py3AktoսτJiw+nߑ>X+'i(gŸ9IjYLpX:^08H^7Ez]) Y)<%%M =G})Zd:C6Qmo|3c:( 'o>ڬq.9?z9ݞ yPɺ;"h0/r(@̅V]s0+KO"PڸDfB;TrǴCӔRda5g@Y?od uYJ.[;(^YʀH%$kf=6}K=-걹',٩P&ivW|&R-"z< -a yoJG wV\y&pAʋʂN'C\z&OJYkHeD}rr}Ma%XN oMovwhdX3Nx4иh`=9M'>&gC*5iuk"/֥ EP ]2⸂I>?? 
VYmbrA`NUgv(ٮأ>e-rq$<Qk.J\\v+:bN TMJ5g}tinѱm "|I#ǒ$'Ij*Z ֧ V߶HY *U4P~BS`BVmQ4"݋>*={{&h$NJAK6#^l_i3uj)IL;Lc^' W,њS+,<iG|1/6ؾU\Ҳ΋!r 㞫k1(8;G=+hQ8nQO`!}㌀Y*;SI,oyAR#I 4L W%zJ&u>Nj.E&3^wx2PS e`s"_@T=TK8S 3ᨩ,n*feda0 aee'ZolдZHŰ-TrDLaV8(Fyv-v^QfLoDŽG%9iCr b2EPP#hnj0unD] w*L=CRC| 'ӆ{6[%M$I 4;k4ba{[A*O} DC~솠e xs}$(X=lw)QMfH2薖5kǞQ2w \*qϨ +\]ºAت Cv@l'{b^(ZPGۧfs `/ebʈRn=^x(QfChb 8ɘ{^B]+]"K}4fmh%ra*i s#.^+{loJh5N/}ѕЪ g>j ˆT>d1ɱ%K"|PDG̝LL+;4ZϤual1aZr,#1gA )nJN,+rۍг\mߣLBdʷmi;[,M^e0KP/dl^(M^L.R?,KY D*2GS#UحoVw>4DH:IM *01@9a867ܫLޫ[hDC'Uڔ՘&%{V@ҽf M"LN'NDWxaLx11q'A. SC(g@,+َCVXnI'zXgm8Dp8v]re('l5__u,#<(ae5RO,z1sgQf83:vzq pQ ++Ǹey9^yg# C1ET3_2~qÅ՘1Y< 6peONi7AmS3 U2RO7I?5ٵB C7Ifq4tڳlMQ7)6OщHj}lIjɹ1sj$P\N^RiD_?$0(͍}oA`GjKaolKMTV| .yEX2&wceE՚馧nEvI͊SA#x&vK 6B6&ǪK/qaѝrsMs ZiKjZnDYBjϲ|y>] ϜLq6Ēz 7(rz&7痧o4HʼR ɐr<ڥvk&  E_MMM |89oOEtm.]\6]>)_+nrM <&gs&蹗/ 'ϐAp9} Yك/)%gt?{ N ܫY ,a HjuF^um]#أhjq6 pZr!e) Z'[W G;$_ .~׮]Cɇ#$xS_!ck2V"TڙLR)\i8%l3gq֧̗ErX<jX0B\qNp,Cgd8Z(8>J+,Sgs1 rEEO9pc0;8enZ8 R+. %IND\ALʿlҌn)s_==E j˛׵.XmVmNV`M\E^=(NΥEI!k7ؑCxK:G2wj\Cx?xɘT %QtdIUviu~.WEG*Jmֻhßl@_V=g'HyF7>ɗ) LVsDC\&N ,), 'w5I(ct蚺STvC6QVXe⢦Ó9g}ߋkͧ^kx?$'ݰ! O`S[Yi+fꏻIWy%pC޺.="p&gR;HuQ䣰 )w~~&A*^-V,z\@ǰ1ɮpψ&OK" w bx4Xf(':7Z^x^rAFj5Pjro\9|k7/b& -Am$hէ^awb ` o.'hZ퇗vi nw =^"Pл쇣E %c>l 4ZHhВGY@Aevﭛ YwPE|`]al'HEݗNt#R!A%L 4w P-Y[pls|r\=Vێ#) ZIVDnL,QC f 1e|j紤'c-"|[U=jpT~AZo-/4\N"XŋȻ`6!,>4. x`lp@++W!e"B dF1ywZ0-n&qd KĽc4`! ҎH VP-rOx}L/&(2h@ Ҿov(ϫ n 8oW~Z 8-km.H%4p,".N >^R壣fU2ͮ|D^N*'H`,ad.T٪hpRNPF Cag2I - b)DPWY~q ĵ@? & ]4LHP{ &ӊr0ɛ;O J2ւ}k_P ȥum@A:-|CY%lkK[O`lL.d^Pw0drĬ .^jG0/q<*P۽:p}Tiא{4+26훌s|bvW âgk@R|.aVe!2KWue;<,Ln5Z,-}st9#*w.\ ^Abdz췵sh.؎pbm͢ L{cr "!]<*Ӕdh%r5(x?BfR6NN"LGBe\Vr*)Tot7Ur3}1nM߁]CJE/UE9=zn#đ)9yoV}Lϣ- ЪsENa&"t%$/p= lռwc`t}Rw|x $3.n 1h4*ZH?C}MI]RrNIO zD\9&g^0~0:螘B3StCEcԉMZ4Z !IB1.Pҝ^y"oGH&I28Ss;}*'yݖkRHKm|*52'Wy `G)˻٭L{Ҍތe7~7,Rii-\]+1/ `)ie sJꆉϖ*_X:6nQ@aeNaty2I.\Umϩv8q!\K*YnvJNgW.W`yD2ӬMl6ฌpYlʾhK^[IS> cd+s]kr9|(oV_O$ G%5ںSm٪D ȴ~KZ(du݌9IĚ gw];9yo+[Ƌ3mwXMwi2!Q\O3^Jmf5"BQ$TljިYN*sޘsjHNG6 Bls|yn9 H t¾(dBLG5aǝQ5Vݡ Gel9ajVCy@;]KejV}$mra$AXNݢ8FJ^LiiRr>Ԣ7_2i汏x0{l"쭨^-^n^DTH% .ZGh1d5oIEM N3xΆۯ~=u_m :;x0w}-4\ܠ.MjRPEc^R) b'>nNKQ] "F{z;EK [% zt BмJ5_)TX_Bh0!٤!ePZ0cpPx_cB%IIPu%ꯜk9D |"*QMg*8cJ+ݨN98o=i͍ ~'mA_^@w`U&stoSc۫9rQڬ5G%p[cKd&L0S4" ȀK !ˑVz\0"t u̸aD &2(wAoVԉdLa },ܾ:*CaxxuXZEҶl2- >!+]okoغ€w󢯀G5WZ8CMQF+Q]U3A8ؿ -̊k$zooB刕@ ds03g3?s͡N_GV*D!JdJv႘ w!:璔5yi5P!\Cƫfg}9(ԎvBDk>,9v-^=^ؿ͵Hј0^uR"!H.KպykF#8wZ %>J-BTW= SpK}4HWJ ` K)ƜKaz 1`|%xiWT )T12Ň,R0ZEKAa r %b>( G,OYOQLG='VL6U\Ri9'޿:6I_xĝ |F,Ґ#MgfMn͝uxq5SA6ƬN*{6mKhvy)/0w#S|i2zmxSӸE}VH\z<*OXP_ơ~!s j6RM~2mzѦKIdnդ?w<\KY ;](3*)MblT28. #x|zXگxۜ $Nk$sZ#M4~}1}mEZ $jԢچ@Q53AvcA/׾ke ̆FfHRN%7(UT~d$ԓRClbg'!8r-es *mk<̥ZzPc%XŇE@U2e7Fh59уcloH/8:挏ʽCpQ3ŔNXT81^_7Ke׃J~ U,+ I&)QDu7P~OЗ!XQXmlXs2:Z5͊Ov-(2 `T# 5cJj0AoL٣B)dZ@ W:5*=%p~р[489>7g&wmKϪk ieL\F梉Ni?]. n/6=Gͨdit'z;:݀A \k\oMwvW)yTGybN5 C}?V.? a -UM.%e:(a("ޫij /|aau胰>ƕDZ9+2  U፰8x'ġBNBZGP݇q~]`~H*ndׯRw(;}#c1h/gݞ>LrKMA\ $ /1[CIt!e :|B=JtZ `#h8._aGB Sċ4iN,~tȽw):[WItIE&\t n8$ўZSbHzx2sEn`H>κn^fԦ-I):*D.NHx+P>JѸ8G:R@y/\l9,&MXD9x>QnT!X8wc-3{kxfG`O|{ڶg㰘tˬM'R c=١ OpHi"(?$IWai~٢Rws|OUMX]A t? ̢=ꍿN] ʽ5<ϰAotS*cԏ*BЋ .|L~DSŰ4K%[M%-4=+dP# 'rqݭhR?t9jrog3 :aCtjBEa큮XfauvͥVͫ[2(䮋L젫jK.x8-]BL]crnʓ:M}Qv)7GKu0~0x .kA8A}*RI*W(BHl- `jG' Ku_X962;*yo7@[{SQ'@tAK?wlQKm6)an?9Cl+ez:j'\EE{c_D_ֹ=#z;C{4,^uMQLCJTk.H1ۂ,G2;iNf'bGFu?ɿ"pj7͝9g.Ph͓Mdq7!/zZ[ L-XǶΝh!}b,r=l[E1ӭ\'A'I!bCjP{z] jā@i6%:ẒG:nw}" -M"4@\Rwg_VNF˘IIq"Xs"~t~ U &r(1)DdkA&˱qډ喤ݴ_ +_rh{Q ]6 8HQ5;AC#2kFVd"ztPK$[&P%F@JuP&}ٷY, ȶ[$*WE;o BR{:kb*3vms0zKpRBdāڙAoF69y5𬴐)Ps:8Ft+7 k"w𗍽#.>VOM]G@>~=TV;jؾ`r +>'Z.8j{! 
۪񜟤ǔd-vTg*UeArY|@;Hd{mP7y(`qń>><İ@ }V}enECj+\| ^$}nyZ/o-x*q P 4dnSKt#<;.%@jIv@ gَlИź5QN9]ﻷ\gS.'YOxhAsPD=ZN#03ՏBRD0=Ph79Nnu*#RF5J ApQ6y<5+=0z2 "[R`" *V[!*\ ΐ8۸u!l^UOOJ~—ǚa "`?'4O6B^DFr_k:|z*eFBk! l [n(X.\,^lJkZO;;vr7H]An],>Si`n73>R$Xin1tSW'Oqw-Oz 3 /[#Dc t|WqmsƜUX@{u]0T8/ 5z&\ÚnKw1u[$җ[  ߕnZje]?Z1Z2 qpkX?hd[)K^'"ԞM7sg!Əuܳ.J֜c1FҬU):ymџx޻n.wI#g903ʹF]=${#:])<~3U S*zԙ>GuJBX%oklw,ii&۽Dz~B&~G6uҬT%gIBxAyˬrIcž&7j"w^ʏ0@vf"﮹VmT?D欋@-7گK-]߀ y;,<C++'{J%DO$Int$: JtVezA}ԈKe5 YYr,m^.v M~˄(DpÂ' 5ٻ >xt;䮷ZiM囃(1]:1\rdPva,u)ell`d{q.G7_S -JG&ypOg!d$C׶2ԡ=JqeJ#]=yאi7@ٷ,*d֕l 7; 0Kb\t8qsy|k;3?<-!+ k_WK'#dDMUZ> <@ꉣ0 ]!.:ǹQB9yELg--q{^ 8-T)y-vk=WLΛE0(w'}Ollkz'o =3|[^5"Opdl.9oq1gT"p@= 01-CnIERWG|Xm݂"D*(3MbM] ㇓ϧegzybSv1^ cVu?!a3"y*c 4*aK9&6z22Q'Ɇj σr90PFhGGzdO5r.ᇆ9ղ@Ex!+oOl s-GVq+}[o*R4SaWW>̨(sQռJFr>6ŞF]ҜQf@6(:y%Ϳz.DVb{ yݨܥuSpy=<3CAҝp{Ї^5ƉIÕZ ,?Wqbke>([ H^?ޮ~~"'Om{j/4t݅}khZD{( #>\-n}O6<B#$"5;)*߱C6ފTy#rf U< Y53ŊH5롚1&ζ\5TB0`̌~3ҏk3:[rL"ҊUMCX~ (t"j~n1"gܩcP#SvHh%vkRIδhԋUAğ> ^31)bi-C2Lw0aNm mhb?gS̹lnЯ]' o@gtm5Pp$X`oo gqh@Ëбh@~k@PM# i_!?7{h034 X W~W킩)-\P7HVoRf5aW1Ӂmh/[+gIޛAD6BC"S wZ6!P\;[)q|F"5JW%0q}+2J_cH4[h%0@ aʼ2;<8@x"#zgcG?6N>m"8G°Qy}0gnEOOe\#Y yEe%f}JcWU&mɧ_kv%Т`3!Njlr/fm#W ;YŠ.Wu eH|kaD|\0P f.KO􆑹ia>dD;̚K!4(]|I٢&jſX@`s3ditL(B98<໡uP!.soX0 ^k ܧT1̩A)D쩀Њ閌5.@>OU>m@6u{J)Y/dClLc &q-yHR-F+-CtQJqX?FkȌv帶L=z)F˄ٟ,PnYEq?CI)fsyȵ&>IraCMfT &y ^joJP޸rGjBa?#ͣaA~w+' O6) Y 8mj 0 5 x7%cDZR3 ق>y":qJSL5F8dhN oԦб\2TeG'Wj췷kR̽o|_$wTQ;Dg";KQv5F;ҁ!bj+6u'ʰs*܃p:T,;WT>v>ʦ56)O~E?Yk+T/q1 v=!ZLOInJr)'t"k?5ZݎU/IikAHKlSq^i'6̰M&ja)x<9 /g z󢷟G5W:Id'nOUͺ՟14E(]^KR $#KLT_Esa岞ē1iWxѪw-G׸ Za ]@_%#ϯ?D6N051H`?,eUV#ل3Ùu@xjv>*!=cߐGCD[M/$l-fNFn%C1t!lr@-燘}Q-exh(!nzԺ&fz'~{]aژ:g ]7 h#t=NaᯛzcL4sl WedP1[&GuOD1 .FHVZ6z}=i;Va U\'X 5gN4b"Uӕf g(ht+r_]MpEbXt.؛Mhc4Â4>QuN$7Nq HkڣA>AwuVJ^D\ e7I ];ps9A_%_U;O7Zun{9m.+K1"8vd.0PFH9gc PEQyi(,p60 Μ+iɽMK'|\|;HP" QWJ/HnHV[V9ze\=wg$ Na*]Bic ś=/b #O V.UL7ӯt V*߇pW?SrM߲&C.|V.8(pzj8Kؙ?CCG=$3)3i 8suxb H3W84v#cvV Du+9Q{铉7s&W F$V3'#K!"PȔ-ց tʞ$s 8>pF'B;sC= hsEG[a L&A_j3ew?9}T?2GAZ>֡Ppkvr$БɗӔQk|]ī8F+rf| uTjj@uY1ZN/72bt}Axc'&2AW Z3wlwe0yd_.Zvfi5w_ Kv\]?iOW+sP%sֿw P'DQm<agѵLPMH}8q`-~j%r _ugm;3Ɇ!D 7 `F~lϟJկn}XE؞`5^A_-~aygkѼ^r<>|X+擲fL` ^Chp~bp;ۗw}-~FqӤ,s= 9%Ҙ+`;3RSϺϜgb?]~J +Z8)d}&sJYb2DX_A;R-"dJ /T,6,A@ _" ŵŠĎOL<[պLO2v< i^b0wJOLϭpN+t'xF"_(8m"%-3Yn:s?ގ(淈["&QCuG6}k\FarE:D\mB_鍈H %;_۳hZ&,52C;'򜵺k.υ& Ec?s]"*OK"r=fʈ@(F`A Jn2ZI?-2:@'E)D18b Ƿ`)?3(@ҽDaIbZ؆7a씮>KK3$} ViRKdzD銛h+|TdN7ܟ9_<<`M֝i@:EN[>U3 H^;"&9clY=,n3W!t ,3@.w5~فm$]>Tc:*Y{\S&qp{ؽwx8 il5an't@W,ђY+#IpnOe%hbsUE|jVW6&u>TzWC-.'kѻPFzMxTdUj:N96]IY|5wG.njD7JH Bn&UOcL )+l}T`! *<۪:l5d''YޗtKR)p}^ a'SU0DF)#t~ܽ2g+hk[3 3v}ɷBm `}Ͱ*ZP-gj9mx>Cv׉ J;4UkTQ! 
I,,}BT`gG[M*E\Özlł$: яrot_JEbј3c[z6/H "^3X9r`~ʅuu$\oK#cqx-MihƋ} >ml7|=XcSuj{Y1|\J K}o~ u+MNnT j ߉DX(*ҒStF^TZذ"r}ys 0.3n-pXwx:$5*֢*yCZ/SqG'f>T0DžH>.T #r!dznżWs3Uh*cIٍ։ ^6 3i73|k/{r+1g3?% J% s$NZLJ|kT5j`X䡵O^٧u ٕ IU,Dq,|\CHͰ\tnm_[z1!Rt X_ 5{§YCq,OC5~J.< {s1(\Sh|Aq?}zOT KZ!}n.܉&n{:oP(*"&:MЮS+ ɹL(wuɜIPv2Pل=&3&$ J|'3)žѿT܎i Z+Y@# ^'1 y9<ߦq\,qPZ`04TӗSRRP?0T(ÅUr*Y8:Pާ/^E %A>ʄwiµ_Y--﷟ `Kɱ s;aDnV=!*DLև[c0}]]-K-[}5r)W>'koo#a+:F7X V M+X">Kt3'.m8M>r pUЫRhe=P )7wc 0mhg;;w5,_WMGcQLȊ<X80+w+DT: Ƙ~Jgk DjZFNUӛ>kTi=Ec!)~-730k^d}dg,t^h ܌dًMƫyTʣ7W JRޫ(B9+C lw4tg.~ٙgES@e6iRߎ;w973\X[8DY#ؿcz+4|4c _&Fbɏ\Sk9Ux;+4S1Ⱥ"ȵTѓACyOT\%K;\YZRa3R^縙Iؐfk`͚<G*E@-~',m!xEĐ[E hڢBNW~uAߐnTCA \ ,B@q嗡DmV5dDE0Oz+7*"THugD50 AgxAC)pV`"~bOAhs#uV=c$iAPCl"pxN} kFe~MۄmZs@y<6 |j6j'3m0K~l*Q^g;q`~jԚH )'0 -̄ų߁8/8hi4}f JOR'N ~.da/r⣉Tkr!cU jܧ2-#7wSV_oaK)hzPNc6cWb贷iVSԞy[R9/ ʡοAl QUF`ax3#NA5=KW]v :FN{{ hmn S\܊(aQp_O`}sJùsLoWpg_"|p^CP)T ]28eprDa$+@[ Kf͢՘m ̅^"4?\H*W30 CRC&耫NMfH$pv̮| כˢiV,gRc,=ҧINjh`q_"nETͻ`1~VBf'jh.4O9hK+zBpYL5W iOqidPK+%~K.G[n %M 4kW zbw>+DQd׈3}"x"%k| i᭗!0冹. cGXIX‰r *:5W bQv B)#2 b={bufIfxb!, 9:(BSx+Nn8Ȯ\g`̈́Py/j %n? [Ƌ pFB-`X3kN8/BHG=?)iV#-I.Qv\.)`W+8:LZ߼iV Y{@qC߭:9ԉAKwie!?ehU; f +4,[*R\9Ex0\D7$aZn;_ܤ˼̊^̠Vô_SC4)ͱ\Լp俷{ O9*Yk0%v"-Mehb-G2{=;iI!VU;]PK_HRK4K'ijF khHUHU J׬zbavL,񐦪dGY FPhn>mYI T0 W@=lΕn_L!Hwn'_EEHl۲,!xQz5͇{~ZIj$.8yI>5w4LS{XNx"_ tkENoBƍGd? ۫fDFAUWnٱ앏|[\Lڋ~B&AӊCu{B?~.͞t & h;;<1 z/rnᑩ#B c?dCmH.?TZT|lpQ Y;׾h! v/#m/dOi7LcfG P-;gin{̀x5ih_W1 #;m](rP̷{`C@2gDi֜sera&hb iۀ˽40Ui=q xzUGM]zv$?~N6}2جľ{u֨ؤb5/227Ro^c EMyg!O?,&Ei$]_8; }7M>k[DЏ 'j= 1*E#llCeE4G$dzW{z: }zqEBۧyF T֎ҍQ_1ty>RlN.t2Vρ7;gF]/G vd:Xj@QL/ag>hG^?{RjY,t(6ȗ*^Paf dl3l1`z[Zg,qv9wiP9yXҩ9&.T-@TW9?bّ#v.o3z9Rm~tnF_oݚrΌ}7ar-Fh/nE|ŅySBu [U ΉiCn蕰RcG蕐p|?~ޟel1vYXپa=L5h/<ǗjyFe0T3Idw=v-JL9dR!>%ӜC7)zzLw"XB3 6s.S /<"׻A}[[z| R$9+TigqE)%}ČD&׿RKx.ӂT-#:MkL%x:+V*ċ|m:u0T(Z5?[>ZH.#-ZPx0R{:tST<.2Ah%q|OsGQ@j;$ߺL>TM/d&Hle$n'`^Yl醇O2dc̾(!h=)l1I<5A̦ {[y/߆#|?X[<,y6mc\5(Ī1m K ",iyE؜.3 &sbi\Bbe0VwbW2Ox[]OJk=Rv5i6L{e`ʎ>= U(LAb /C_u`׿E<PubE)G,66LVOQJfB$:[ {{4^m!D:(wʒ|snͽPXLfR_fP$S4vWS >pZM4yB 34stAj>0X'9՗Vp-ޏ8`  hay.zS1ѡ4X2([}$wnѰ{9"XŞ{dT3n-0^ßX}˲@Pݒg1u+[6ש{_jOm>$#;>sL=t \R],Ex`_7̭tt3HvRf"_A!nN+Dh,s ͻs̷ꅌ"sj ɁJˡC=:(5Ыn4~ܕ0Eo!#:D}`]*ueUEtfm/$Fp\fgxXAcwHZs*T|bfBށhp*% >Sl{tB1C vrn+xجfpVMC|}s^r@}`lX N{ADpg ۞H|0/](ϥ7Q@ϯ0_>ZI#)wygPGvpl{++^[@m&B>VݖJOMJ׶c >AATlW;ȭE7ĢV9b%/Czlߩ)w;Unn Tϗ}Z҆)B3R(!v\ @fj3`?'MVltAZzΠA ʀ9ATHytm*'zBO*ٱas/#_\et|pI׻=EN;< NGTT zJC]0?(^wŸUK5u&i7=1`kOEM}`Gv)x(L8sٕ%]W[$ᮚe7 Ϫ=n iTC R$ִ)I֊?@WqY :|}Ƴ7G]?!9KiB\A\I Z9D@(2I\)iā=k/lFt}GD6Aa^{ySLFԜpZ×1u5!;6 ÷wNx1JzrfկFm4Ce>L6-H+Lwv(us8E)]$"7$fWOL\i/$[w76"@1V;f̡(e>-kШ=;P+C _\]sDNRǩ \ӮvFM CG9JW=/d屫-xO WY"NفB;45l#ydE>@I7^F1o󟨉SDsBXFu}o.*Τ,Óh)/'x. 9umĔ9.\Mh&xBg{썓d؄-G~nX]UDA>hCWl1#q {dšo$F4ږnEKqٻ&I90AOƢ\kX8挍>JL-&3 cg?Aw&%/9* 5'u6wq{ƽ%"0B p`Ze@3W@ ҡGLlE%rZuOR =ֺюsAsX1$b ˿w E} 8Z<\\6Cڕ@e`x(gGm2uj3[/@Gb?8GZNz $_ clh%WBP,_ ct  T",E F.qޡH<:b)")Yhg":}cM1"<7u=\=Tw^A0<cUDFRtDD#ՙUOJH-U6W1ջVwo™?VdQr{?hlIbb'*u(2@0\+ `&VdrVkfPF}n$FU;ZK$ q¯a-k1/05Z5Z e <.ZYW(M@+ j0{OҒB{vnGZXkߍ[Ǥ:yqJ`y?ˡ_B&M8ik^A 4plUGԚ6q~wP3&p w/s`evT::5e*}ar{-J)ԫ^<{t_56M0eWBG=Gd<^BEa?uFx̡q ! fr\ cz1VJa߄&v !RoI ٧ WK Y{5z.uqGL1Bl}s x\8aE"?d͋< AOUd}U0WIZ -t칪'.[܎{ V$:f73e$,C}Dm%z~gc?]_YБAGg|ϝwnY0=)J$a!F DA5 QlPlZh97KT>#7ARCkXV+{ԑq߃s1clf,+ngӃ03(U㋹~>c޷Mx/WC][&xLŚoiY(d"~XSĂgwмPw\1ŶWbc @eWhrfJe(gO<\ǧPA},s€Y,(-rIzxOZq~9v>;]9HrhuxhֲJiI[KS嫨J6(^N./zEXE{@rIeY_~n Y L(0 ^i~lG7utm? 
4(i|V>^$Jxe/h@{53o1e߼R=ždR!'uq/*NNgsh:`ޤn{Uk4~d:9AAqmg 8D/ Qd{UPb:T L}}Ȳr!&j7=T.hX͘ 8\YewA6 }% Tog\BYr*Pa/#..ȶ8++ȧN'2$*wlZ#AM\=8焍 \u߀ -79nN-|:pE4FQOzx }gnJoֱ pa~n׮P֗X;M2h}8@dNB+>*/`bYkxȺW nEAA AʼxVCn;!)^HB[5`|~{a֎ܰhL' p9Rҫf%Njeٗ:t$~y:wx}Ǐi[ (G"߅ǡ5x`ߓc z.P,*3;'7 5߉v\5UfDңyc`qy`Pw[ p>6鐷 H/;cDm9jjG>RlTe=-U@]x.CWg>1Z䋠y))_1Q>&97sdc[ͅ|iFv"مoLs&w|8/AUJ6(@ ߘ;btfM_ %.Y&dR\pXݩREcKGҔ̚0oqzW󂾂CϽ:Qq!P@][փIǬJUّY: B::v3Ԑ_kmj:Tӝma p fcwHtڝ_Iv0.{@ٚpGx%nTqϼy-takH'UKQb0t*ݣXJK+" ;5λ^ݠGc/qI/pDydZ&k6-tV## -q+c[;6L:yO5gm~#e]=ִӝ,WlL͊Z/E:2 OnٺEmYOFo!"+P7B\=jKz3}gS mh{KEr#TrS6.u(yɪDK!_o| T~;ij PԮpFA'}Z4 f"{Ђ.M|d@%3Đu<gbwn):R:$rJ*!M&+7PN'8aaR&5]M9k Eb4\rzBR)ˁ)`)D yy(2Q?Jwi)fXnLA'߲jE'%o(N V^* *7*Bc >DJ\0/YHL{ Gu`*`>sJRtp쁻pv {}(2s2'[F|2Z?F8JC eگ- rʫaIF[8 Q *q4|ē@Swڶf=P)!(v!k[?Jĵ.oOrNGy\l+ cPHu.g7Ȩ:p_J7Tk8{C!*(ưN0 źz[3Ɠ9^3'==]2F'#6\}$ RBP򥵆,c6pf P`H*rn)ͫ"]*I}Fw(9jQ٠B84Mzi*dM  ,釺7k \#:dV]ILڭnP H)tJ5*N$TŃ̟^`bRQ8X z>z|>Pf餐#mlJR 4p5vm>[`t ->i -F(ẍ&> Tν}piwHY#`.Vjb𡤱A ]sNwD3 u<RʿPt ԢHG0;iT|lhU`3CK(f4/NXԄWT/zndF |bs$_#DSuϨn'I.M&͚;O0e>64\uGj0H@xPt]iȦZ\{4]aD"ZdKėK|L{RgZ9mL].I,rYUKT9x8lkZ-_bG@24$@DrޤzRf]n7-b7R5pL,(L#YBDj9qfr8zΡ*V}*fَݠ xFi'-9P5lԀbK|^ ̇, |%A4ZbO1k=k91JGC(@25R.Y6c:>p.R1db@=0~="? ^RQmZxkmt&RB*}!qzk/~rřU5iEgt^GHK1%; >f8Տ;Þ ,+y \w:׾/ 9j?L fvݓ< czqR ǂo]tU<=a 2PUe(px2l@k5y wl5 Ɣ\ԿKn̮MH6Sq!J:3VpY_2VAQ&߹žrݸwYkZYK˙cTkXL&G4*0qpwx_+,C6"#?+2%5_Q6ۤϤdby~yͿg˗cv1<\$&O jIR,FB7_LrMWNMӪ;A;VUAO &% c`:oReD>{lue(;uk(Kp-ގl/. }3HjQ:Q]:tUdj Kk)oUʲZtfuukrɅA% ry5lySYwֶ֬M-3b)Y*6֟viQ/_hB>%} e ' ۱XőWg` Ϗ͎-lK{ [0ZC֭{^rVi F=6bSȂ+ZY26UPDj*5n ʀxK{)20nrA&}v'S11oy  V*ioq'j|}=^ Iם"ӛ_8=i!2PMWBޛeq fnDT?(` B$ z,nb~y~{1@JbV;Q@~h":0iH20B0b(g W%tr,9{i (R3 zB$H Ht1Ͻ:gcf7rqUQVKŵ!1D%zSQ]ӛ2D&2pA. 5LjWYi74 Omgg3Y&QQeg>Y"&Z=lb`(m9Aa MI*4 jK TGFHA\0uUy<`TU ra.ݚEI wδh(_䃠4>+o6«t/f ]]b1`;:@ %CRq+tV0eH;^drUAg^Ĭ=CeB:! G _ ʠ?SQ   DGERqz{ɇXO܂8pVt[=a~  @6dɫhU"ɅgCdH(ȟE {WZϊ1hzrI/@#cw٭@u:դC[9CCI >QX z\!7cD@ZD&-4EٿH'QKữ<EMpq%{/qW}@NHN ݤq@ ÓV;gc,Qm%^!4Oh嘟Q=B|uyO֕rP "ܫ$l<}}w*KLJA$,a%} DyumpO$l=|,NjWvS<X+6nLj!62w*E_.2M eRȱRD,9Bhu\q7m[SGAV[3*/92ܖemҌ4!27n#_53**c#]Aˡ 'G_JLze'sw" YN@<N"`7b]x\LgJ7.S}| 'Pp L JDP[+rijAџ:Ӿ2jBШǤ]'V %J8v3 mA H_GRKTnzҫ*~mQ 3MeB:5 [y݁6 iߴ-p}`\ FK_>~DnKNY_ׅ\'320qy&3t=3sujfVx̂ -IuIeEeO3򴄉FZ} JnD2K:-<2K힭'ޙWr%iUݷ%-СQۙq]}gE۬sq BhyMWsW V6 kw3cO2dxMZβz B(B5zJnfd~{ ]/ACa_4_fsǪaD*Sdd͡ӥ*}D$N!Wb~2x`n*4.=}!V-,g@qHp;#r ]TefD3GI R67|X?T~oOinwHTνv1,+[ǚ` Yu@J 톛IwWuIu:~.wuɭ|x*j% -3tǧ-l2NM}NЦ9Bg zDn?@}F8b-j F@cYJ3U;S|pL64C~>-|4QF؉KBn4\ѻ/MlkQ={+} q|{gt+-3+nS36AyѸDMq?cD.’L6-΢}%->8ln(,v> R?mjԘ>ђDŒ#0JHemQC.\]'_rfS);}`VZĺ(f%X,ȓmȻNChQ (bbJ*mto3cjӽu|O M!S%κL#ΖC bW>LxLM;uXH$ y4mWN0V́ ,\N6&[[+p(yY R("kGE-n^δp>T Fx>fCDQ>v&hBaJt%.: J @%\>>s@*@pj=7жE< ɺz?$w=pQ,O[_ sA:Q 0|u/Ly@{Ot12_W51Uȷӊ!! 
/ sc[l{*^&2S4eOѹBh7Ұh˜$Jw7ټ  [rDHޒI~שG$־OѤhRvXogf^Laf*E!8woX9_ jdԠ)IMx3DN, #Y(删g#OV\y0H>#d7vfLhbTL.f  s5.թo2^?6Bx%ΙJU,HV/z>iWQI"JNǿ@ԃwxCt.H_ (|Qq@: vL9[3r\kܱvBvIP䧃`y{u_g < jē+2 [qw/'˜^Gr%^;h:b+8BY55#br4 Q6-|Ro߃-,(iS$|jzsBU=aBCYW>+9mdH6dL"Xj5Z3UOvT<u {TCt%[ϐq!o׋D>̨RaY<wH횊vݑU+ϪSEI^t(~rXy;@* ^ٍ)B 0,%& h9B)sNDBZ_[~ߋ]31 oh kX#tsqe֪ReNn5w7ֹj& ԩ)n;rU&;(7°@=!ŌwQڥx; x 09Y!7ܢ?f(ACSvg{O+K>#It$?Aok5XHDLlBVYk=B2~̆L|.96SK,r@N* *],?xqXY$#$EKp$T#SlT`;?7m)DY'bBNGi)7 !爦N:]tOm|(Zw?NE ԖDM )&'5~)"e>,3rCrNS<{Jtީo?Eq1^@pVsdPk@svZ^ R˙Hԅ Uֆ1OէeaF]4ĚuPUÜ2:+ f6(υA5LOǷ#4aXDZ//S9?Ʈ?D|g?J nfWn&LjC[ cgz촕Gs4aŝTeQz*,Ԕ7T$PT,=`KXْ2JzfOG2#*7hF-a Mb'`V@blDkPQ/_bm "c5(Ǟ,e| RЪxV2mTە^3q!Õpd6e Lʡ`V4"H *$3@iCqZ q:=E^sFX_ 'RY_ dt_V\{*-3SBY'Q BaZG DvOy vkcWL=8|D8]ۜOz\1Xn6M)B!v!L*]l1T;bN4-y]2?d^K4K%3fvΤCH.ӞaՂnfnF|Bc:%ru xp>NJ׽oAe0T_BXlxfS#,}HLwlAu ZGLd9 cNP-jVV?:ӽPG-v6xl!T~6'iݶ/zU5  7mT/5\3ٵr-Gz`8]^yMt#Ka-<sQ[j PD1xˏd4)J]VS2AIڝqV\D9:b\L֦ 1/9})QcyfiAvc d|[>*&;:bDkz z)?h KA3]"4 VƟfp<+Æ4v/Ki"l|f0n{6.+BG'hݿLps.WoǯB5'REPS LlVڞ5x~H3o\sVo |RzB]O|ŕwoUeOL6on xؤ:Civیoi[rY4'2'}a Pv⭫ G(PHQ k9wYdvG(<ԩj_($TThG;YtD`FBc- =,JKoRg~"XupQz.~34L?uiD Hmhxűy*LmT"0Rn7Fp5^{W( pI\[tj¶޷@Zv G^e(o>|ol:;M=1ie;rʘcNNҋpB,c*Mֱx?c4:7S=y@zʯW^ߘMI]b 3ۻӻO*!T `*" b"ڑd\#Yu@'E4v~aa)@gdmw{zbq 8gRTL0.N(  y]@_x3 ``:wǻ1+K%IɦįUrL3_ˏhYzOy6_^A쌻A]Q5_f&[clp{o 㬻نPqwe,^"lZE~Af7VAhHo(lP)#?)H5slEݰk Y9VfU2A؜eJO6WPտȽ g:x`}:lN)DpdW0v`M/ud Asp)}ux^lIֈBj2$&k$׾ogҨH&y\H+-H'||v 7~j~sh2|P1Q'Bo;aY)6 3WϾ"8C { ϧCJ{llY{nɑ!"GLÓR~3+i V嗹I,A :Gxo n?IJDgI됣QBɳ2ue2C/S}]ø9X(Ѻr y0bo|h%k[ݺZ\?wF…ųea/Fk!!cqBNXω7(E S_%Ko88+~!#2 |(\M'I`=Džo)tO9kGӁ Δm廷BD? m ׅD?. I=0IAijMmR LhӇ EwC$1Q}} 06y~쀚+ GbBy} . `Q sH?LjN:p3KyVP1>_"x K+4˦WL4 ?!R5Ș]nz[l qGUTd@qq-οOcJvqbuYB|9)w67mTe.I9E{ʁۃkv"[1kD5,U4AP.s #V-Qˤ:o):^,8S w#k›ui*Xȡ |O$ľMզ^@ sY/h{max̱QT:;i{;eE=341M \YVȕ2HG4>k%X˅rO~P:Aƪ|Wx ^ PtCˡ'VݑϮѮvz8&໅-csɐkYa;E mѽo$cR83-Hbu}|RiF¢ ` {y9vO=d5N<'"-..uԧod[>P\650 7zkjv"k$a@.!E~v 4~*cj&;O־fŲ| I8A?aw`m9 #aAr73WpTi.ɵA4V^HnPtM88S菍Է7M)3;H|LHuLY(m˛& ԗ/ȈqR|lk3^raRi$fstUe-yr[{2>|aj қ`e:k21zv JðrgLi8.D JhʪdIo+q3o4FKge/Z JZ1:S@m(wƿὰDThbeK]h_ 0vٿFKx6̀6ANlG$g6Pa5tEw._3c]2^i-5V]`oXbmiF\AL ]q =CH@\dNܣۿD]!E^()[d YRPURl,ve'D]A/DO|δ,l=z2VЄ97]헴T^ua@FeM1̶_M/#A ЙGIoqN8S$4G 䵏-qQ JPKS6&B~ˆom9MDCܫ|lq"1|8*$ a(n*e ` af!T\u\H|r2 <@S:c-8ȴǣLQ%(&|ԣA| !-[ R^Yc_ `[QQϺ\k?^ ués03:( w:I)A˪_|6f&e*{!AV@[k}1e5D^2me:jm\qfӷJa]6=ܩFgO 2Y)+ wWJ"@^# T0)DIz j$1}*TI6'@p:CĩQ"E.%pY-2뗨͟HԠ 2`F-c6#eRN쉿\1+P+onE&beYZ)j0eID˿ .*eoy:م~o164NsEtR.#B:g86Mlp=YlD̜(:6Mk?"C$*h!c<6Q%BAO/NçIˮ*S@# ҞX՜PA$"1_7Ot{Qn+?h@O?D1]!J3^~$p8B^w­ȓ;E٢J _⯐8, GX bɧ޼V+dʦa ;g^®jUX% fCGcٿ/9g-b= oNrkޞؾۙ=Ru#ElpҨV |5֥(c"/]7H{Q3FW/Sn݋twт 9[Is!bd]]z]?0-l)jqBo@s>/W$к}>(sjeRǫp)XTTXmc*L+Ca_0sPSwN;'9Z~wwA?;vD,_˻7lg:UpA10m++ӟp =(WWtxTL BFAE P؆U$0FDJS0y ؕ)N]u^}ܢ~ض0QSacL=gm1σ"d}j \iVp\n <^UJ\餬9 myms>H j^:r}g=Ԍ F0o.Y[4IJ+QseS^¡,*m?40~bD@K粡P:*;j'*+~p|K"x/W& !SJx![vt2Ns ?S'w ]^B@ 7k5eCU%cLVDE_`% HV^a!WRUp;oӃ~&[QѸmgka[qk;&xvbuI紬CP5mL#6LՊ6 J }_W{mKaʠiXn[1Ε쒢VwB2!^t3taP*"=ွ/t2ߠ<9E@)1P>:^<ͺzT?0.ԫ?RR9%-\]EG?c5nd^j|ꪮE񹍙ZհqX](Hf:'ueJopƅH36٢KV.A@~&hFOܙvK:uc܏%iᅩFЩ%U'v,嶁_'y_ q91H7qRT'PJfWd|Z{m8rh,{# E@\Wkُ隷pSѢr}Tp;dcgU}ͬ`*coS!x\77#=A"d뉴] q5%'h1\kk0Bb`}}IZig|')cXmX\_A8>Q`|"2iCօI!T3M%v[?y*05:F+j|%ݿp!%ࠏPc`cl]>pTR@3,bξ.͔R^))g~YƼ ~s!tqV+Oo0v)3~ 1wTx`iB8t8Q +rB-/ /m@_! 
W'mKfE~ht^{L]FÑ3W3 ˨*>9g҅˰Np%Ng|P uȈR.H˄HVLhJ" N 6u'aO(>qXK4'z)mu f/d$PUJ+&ӵL;F\̲݃L ;Y165)u<6 G~ndߨ8?JD{{(DF TPTeh 4v_d7+֧_EC0m Cx+9'l7{G+Lm@hB@'Xy=/҃7-Hy^ED7Vz4E0Oz8% {i#fiea&n ` gFH{tMkk%67|9}yzbJ T1LDNmN*ILKV^G Yɝc+7q + #n:7$}Eǽ)Vqw2&R%zڐ]AП N}&qeDYd }ۋ5nt:տyx#*ڗgyA5Yĕ#|n%zxwכ]7E ,?aM$ 0; uZT3;M)z!/i-@= J,_\M: |gv`iUj>M&4̠/E :L]&^27.(|0Q馾UaX$<  0o"GX=n[2BviȐZmxm<9gm2É@^E  ?<ݾs`Ϛ[lZ|)6MV*i [)/<$or2(L71Dyp8(++$o\XwO =rչq$F:սMeT:"Cd7m$Kh%@Qp 28/F9T>MsŬ9 h4MHin8@#h7`aNDnBjWH&D^ Nh# djx,_&e#M}zz}ܘDu7t% /4|[H6 TB<D @rĤm2|hWW;-9:*./"^{t7s2,AG=<׿={W r;HBcI"&ztF1KI KSɠ-2[Y`o*:lPǂ,g"rXL!I5uqhRVm@QYwl]>KNGeAn/?N˳@18`-йLmB"aM<05UF쬣j(+Q=;Gߎ:T^&"FwDd7CJxg!Z+ AczC hL9Diׯ0ߟ"|z!w0 TTf[|NHxYYsx0ئhSg:=BlYGJ{хw7bKUelWW4OgG;ƌ9dݸe GY>rh0/ yyJײRθƭ?*ΰTTY #rbey|Ν ]Z6@Ƽ&&~n;=*„;u?h\i4|77rSRr{ LKbÁKn`O1DpyX`4hA4yH7VHzE_rM,w '+$ZpWr o*9Jr!v0],@ /g<(( ",4K= 0i {vXG ,>*K JHk!.x~B+bؒҏo@"ȅ^$e hTҷG8XCDl)sߓ3g{rLn/,Cg3Ѣn2Kǻxh0 w]\bLM0a8j=-F%ԣ1d?Jg/F_JL dfu-^ zXw˴whz1_آ4Rn +pPAi5)bn8lM75;jyL%TJ:B/ /g2:RS="+?bY$#O$0zkJ8 &`j&61 I$bP& I-E3iR,7$o|Lq, ݾJʦ׽qble7t _5h8UXz:- Q$<ѐa<3=ied?L'b/63p?M&e l[pH=>:îl\3GDvz+5zRF2!@V|:_M")=91 lkN@Gh1Ec?Lq7'R!=nrsP/qaY9bLđID0<^|ZL-M:1LF\.K3–Dl]bfn&>NF4ADZyo K o@ jl@G-HH3[A|3TSbܾ e \ ȹtcKGB y9pU~9A8=Z~ ЅRM017 FT=!=kS)h? e*_zheV^8lJz'8mC6.m?31|#j8{ZM߹ejٲwԀPQ7sk2Ax+JbΗH [@&id5cLJd#T0jWIU? ow)QAsu!(9}(Hp1qM TM]C2 5XSS[ !DRy_WwdɡWDUē'N5ffĖu堚Rԣ j4;;E@@,"ǻۈeEj1•cz!~!D#>m|q2p0F!oml1c-]bM/VGxj:1gxq?MTMSuؤXJ_QC`[!nVmi=ٙ>c EWe#iVC*Pwإk$S NYf/E G`~|AiOV[j2с 7z){ QjX~-L(%`~.G4>Sw͓9 ve ,)u%ܰpbdmXoF.4s[2N1'=5w9kx߉I$]v#~{$uduE!FчRQve+tN*,8Atەw"O+*v5deW gX,jI=%ar(_Ij1. Q&x/mk0n Y+;⬲rִR0F TWN5FLGnDm>FݙjkY *7+uL LQG}ͼ@{Z/S-x g܁An nA"yjK.4<Ϸ˻]zVǀۀm.PgU7- %) m4/ڜ{Y17݂KLd`<'Gm ZͿχ flC9}̧NJHSޛGgR7)wu6/MHAr :yO}R'՞+ȘGGq0El$} ,eI7Iޭ7*ZB=Z}Sv~gԌغ[:iqĚA BK"$]_TPldn+[ v2Aθ>ud*, 91|"{#J`qU/] fC 3:nRݟDp6xԣbɌd^S^0Vv (0,EJʗ"}%lQ4b؎.FŃ<]aG.7g\okR/z N^{1]+GSؘ\&z40Dkj[oRYgSn?cؼ /aW zYܦh62Af\$u]S6*8EkHSa*~L_1Y'Bi+ڇt&ÆXu>C#O 'p[O.SD@4U҇KD{<<>ԚgW*sgGC{4N)6A|ط{n2nO y|uظ0Fz=wKat:{p}^侼M3Ba;AWs76n_D2OZ DAsb,URkz}җC>Fa! }gx׌rr3 )FPk25y"h[7"f]mTgfz&ϼa3}ZI/FH) `T+j9>Â&"PR&&7Dg bR.NF(iDc>),(#=5z?!-~Z%!`,Tx9PzwiDvT>zL۬!htGkDE_LA,|c2 v1leãgt\=/j x5u@SgĞ;ۗOsDQoOoq'; \*x0`*ـ̴P:nLNf?Ǫv"!T s(/|2p@TśKK&m9ەޣBݭ42hf!cj0weU7 [xgcwK Ցa<(z`a TiWV*:џmfNh1FB<]ŔVvPA;Հ+[:1YCldD'U5L>thS;+JiZ>l,S{8.Ξ$DMb9>7w4Cs*|JE+*F!_3'i08a|CatrS1n p܌kT3$? Y̒/G%*gk$o(L~P8lK]ـ7^nsbⳳBذ4S>+|*^ar|Bn Jin1h`Zځr.5${*(̣=0+*8?&7Ϳ=[=%0_u7?v>+v:IvPhq^KtRW1VNh^uځ1jGrhF|⏀Ds{ETC i4WJv<0?@su*_wN\r"Tݝ-B!"h4 ß|ڼԷߠEw-eӟ6ʥ=wop95|ȐcÏȣ bo:Ԩ2VS̤l˸@_*06%)a?d/1 u+S9]I?9aуCN >,jI/iH{dNƞX~(D6.18PXS28` NI]w.=F 3@" }$V h{R1X@[]8==sF*!;=&M ^upzM*w 8VnӵBo@,Σ(HW'2alau.)rxaΝCl1 t7dW6Kjm" :?釅^JV>W67V=v`U `ÖޕAePչDO,qqV`|h-)DĬERr}R(ηdSvGl wvgiɳHaŤ WDu3޼tssXg:ņ^/X2cvYx JB]g=";tN?,.@L7^zn‹*4@/4PnTȗW[}m 8wP 2-apI,_-7l!Kĸ.m&c"L}wDcߛҪGΣh֠^]R%*+<3d~99ZZ?_}6PL}XaHd2 DKz.t,'1HbBgDشk{oekӎ˃6b|DY032g,ڈƔD>YTh5Td\t8o$nNہ/8&m2-+`ã }k 5j/X'fq?! rGR LhYމP󷙖T GVJi1-l_d8/8#_u* =8E;? 뵙1w5 Sm}yAEFobwJ>eSxu_PҢQpzj[9**oy"(Ru;`fv J~U$d1J:& &0wHv]NcOI 1;m1={vS@k1Rp;8сy?̠謿an.fgđ'{-Pl`3?/~Fo-iY@tjL`ѼqJ.m,{_K =4 <ތfX&G. 
-c]^7+|f8Hލ)Ap2/I= k7`̬in jQ-/^m/kؿ{k֞:z툱?RϞ 1m?lV83}p ^(΢V|,(Ć򧍥|(Kl1b=SesHBM~ؘn`tw] HY~^pŷ[0tC{Sf`"{21yI{&̜°n6+ &^!c0CE1%##QڹtPEO3\ۊ}b3L8eEFzQ#Mݰ@q1ʂQ{4ro&+y>p/\ [i z$I^F2>co=B:l)܅t׾20ј,~Khմe cuM::nzfG0rf3Kj*f"wF]w[05m<4o ukK_C0.m۶m۶m۶m۶m۶;7TTetWzh57.ֽu_o {CEVQ;[-<7_9KqxuXC4|অl| ٟ c َ~\Ct+^ oGBD($ԥqW[˥~w{a ] wc%{* ")yӤKzNlZ ѻf\ȎL2nu#GI'P]-(H3Y 1mm]_e]Z7;2?0Wز}ŜYs/Uq7*!ǢJ:o&S$$} 8dUMݼZ8oa!V]{"C .W)=GY -77&GIHwE&1JC s%3~*=# )<Œ( e#ޗ;CL TcP q+Ne2^"bH묾O:_tDڥyk^B!ll (?,eFz܄Z3:Z1jxznl#~c`ԋ=ϋc*6&MwJw* n_].`E0?RA4tbHLC%A<&bpB[g"i?:8浊 ׀y8lǛ }P4sءZ1gA|Ul5߬x聥l<<8(tոbf2i= ?ټӦXp] $ɤm<оk|g3ATԅINV5`F0\SD,O5֓8KcJYB-fCX漱٥Ml9]]>#r\zN7VtRϓ rc?ffJlNZ󿒃쟈'3d bdQ ae"}*53dS> 0+#OK;+@;mz/&fvt l};n%paص3->%O*`%ؐ(֨eט=9r͍I%Z8_Y?`:t ,7q :ŇϻTqdN$,^`􄼈l`2E 7̻)ѱٜ_pr , a*a :1['E %\1A`x:toAD ,a@ s{Z ?fU 8X5 8t[%٢_λm - u~8 Nw>a/wӂr?yT~-2ޫf? ~i7Ie3SqPb'^颜_]xhLFG|IY.W[`@ɞ*eU:ibiK6K5 /s-+ڴW)a L[(4{؛li9ӉS" +!h'dH!5 q krq"3`\l?֢lȿ\DM&MDb[Q0{m4C à$U۫K:Z8BW0ῖ"'Q":5`RG_"4 |Kr6CzgH)bvB9Ɋ‚ NH[!JYA*LWʹ&W~?ROH 7m@ӉZ3KI lo%[m&m6.D}﹒&4Cbqt8MuSr1Wc4ɪA"Ic~WޗiBn[\?26)PI)5mQhȢՏ 5\1jO)z ε/4~DNd^kx- Uo-B ے6gBȥuW!ʂؠrt9?h RVmIqN6N~Ӏn5{29I5<4@As%ӽIDPLefX \֜Ʀ05"0++^xJQc}S]OTh:=_AB*4 /Q*Ŗ7TC eƨyGX zl\ 0԰b[5tjD@tffFL9"f͆5=*1?\#hTioQTwgPz׬!\.8ڪFei쟩aLz3v[+蛌c\lgP MCJ鳙LEמlĒǮ]Y\I5E Գ<& f3,QHhw/^T(zae/0nA9|%JBեOAP Jfӻ48 ~0.?aZ_@TnY Cg& Ĕ&snBv.;G뻇 ~rNA[~b]_L9ЀRIa;6G?lSWJ >.9+^IaP)Y{MD!>I$D|Di!uܢS}`t0łfD:#W.|mÔ&IυN=v`qj lk) [SnAX`_:~lCGpQxx|H{=ŲKd0VXU]lUXU_ K(v*-=J=_).X ۩'޻C~P CT^ qو&B''R;tZY0Z.$P==GbV=r+t 3%#CYWHSjw %cƴc>q(H|;nIkmƾJ.{Mm>PZ]e%}><26kh%Lѓţq%nC5aPDY{sxx,?.sK@|44jIzuJ pN n<q ڔ:duCvLɣ*sw Gi ,2GFZfxFCOe #[H4!L @ I/sTO@&)l>Qµ6NT*5Z@~p*hy`{?:AU‹[tӂ ̨? sZyQC2V9Hm8Jق;GS,{76yUn!7a#}M)ߤe12X}FEU~d˽Ak&dc:׻ST;⦸7|(2D~|.M-1d iwKϟz@Z( )80P}(K>A2ի[$zcrlWRaa\fBC: e|,~7k*3ۃ[:V35f 9!q0Z}ioN=Pc 2MX Q'9r5$Rʈ",D܂vB/\cU v ˫AS Pxj7? >笀a_% u޿lPi^ o $mt`G YtnK2fB~VWӅ2u,rF(zu40sе)47C{+H|%ôx%ձ IoΡKOVW^*(6-\q\7_U?(S'HD; p .Q@r܉k&;f,r9+QNF:3ֺ6)k`\;JKFYB\B$?o絙oyVGrM <^~byi¹bIgtќ2_?N,~Z"ڋho`/V-b8C2peܧZUKUT[z x Gbi6Z9uXK'W">%@r -Ê' mf$tS[gGB0T0qWN?dlPL P3eGw0b +ИQs=b&'?̈pT!я]TbΑlQ`omp3; qӞt]*q9uq6wv93 ,T2}vf$=Yv(DSvy.uF 0(= [>8;ԥ*ΟudNb; !f###>O 2١%Y'G#<)l^6|8jݵ96DJds:*-vPfi$s*$y$$j0b ǽ\sev]n5/e\@=>v13D9Iaok&GrȔVVvǒ6 Ċ;jTZumw`\6_wс!ܐ=G*;dƮ#!$q-;npbb1-\Ł>k\k8+цago$g\Pɦ{3_BfgYFoT ڴG X|:m^څ/5d*)So|y~(yR$hkfB?'1dQcT4rL s?1|CL̗kw*+&pu]7sJy/nh/.poJ 3@޵B6]|z՘.%z ?6$e3cwnSeț1LːSBRK!ۭ5VbznXN ;fVflRСHQ0׳DJPj ~d~;G- |n^Ws-(q,gE 'V[T 7;n؂0e6BA=Xe\CSGvU39BYBlRn'P#CuUrK#?ܱGꎜ@U*.ip] SZuV)EZ:{A>l2)pmyQ"ug!҈$kfޥgl6_G{-mLl^)E ⑅ ԙcmE(9 xxH[qˑ*RyތH}Ò++V~Uj+y)%L:"emsp.Uԃ  [Ns^=[x1B4ٜ`2 aSBhBq'!dּiVV9Y#rO@ǤgXe tf\ٟљ&"Q õ@Pۜ">Ql/@\uo]EIڭVL FLVLl빉ʶh-Uf) 77~b" lD,_~"A+>Ȟ\%_}܏Ji4F,AmWu`Ω[-]ٮѸa:x |'>ԉ([ɯ4}Ā@Y% KJ 9PAfZ6rߟȒ5r5b96Q,/Z+^|6ؿݪ\d~Y€ ̚]tnhRL}B,4zozI$r}z<4E>KZ ߽Y޾g|5ot:`dhoQTwvͿdq.x$u!)i z.?)!6A)Sҟ5 ?ԎL-kWB:E83"8ARMtR>0  ʏxJ?SQ\N{"aA6n^ PP= 6(*8oqfT=|ʹ]]F58%?\Xq"q3Og nty Sg2(v /e ~XMh3`S(ϸRI.?y^`5qV9@,CUO2 'ٞȴtPE)$ 6aC}~^3Uj꾉WW᥈ kϞ=V>|,48s ؓK_hEnZCRRݢ NXYXjEx4< b3)sy M-C& @Wy:ne Y=I/D?.(@o"k-!>Qg ARxxxۦ2DR&[!RS{RqZ9LJP_8|3!Oos,Wގv MF?ecY)Su=8\Ȱ8'E.ya4FGMROnӣ$-(PcWF(*^dSWHe) pKݞ o~fEH餹]m= !z!,=9LC3 _R Bfb:_OZBAw5xO8U;ql/Mu< a7ɂ}tLhh2\ `#_tQH঍G9; TdIe K|JUobYl| >:fũ(ꀳj`$shJVLdU*=]G)u+2̘,h1Mgm#  B e-qF ;P@W?+'e 1Cg<3!Lt,[Y 9NeK^gdj p@^ntq-l`bҟ"ei@G2WR_Ĝx 0KjAmp[3{ʈ2nJ u|0 pW%gr4KDw TƐ2.M ·3oV(yAbw%iנ2`x03&P9B+i'|24ȍ~x /uGF*ҭ u :(q1)SaP;S[%M-`9A/1xF%uH xT:–-Oq!d]Z q[%_+EXrw(EڌLQkt>9v8cZKDXQj?2Z?B| 7*aM`L>."%5wh&hJ6=hUB[]m>@k%f)HPƹ8s ffG)=rwXH4V~߾ER{Lm׎rvJ(Ee- Ϸc(n+6f rj9\p[5 t7ҕrJXE/ZX{D&`mć=C +iG&<˪3\JBt-NΆ3h FKQ簦>*^z|xs]lΖgqn3\]Mq^$9΋LٰJ gxӋ (2#/}qV߆+tauadp$Zvakߘ>K;o$(#^WC! 
#P2 mBaδD PRC h/5+vYl)4;Cv \ ޑF&Q~fOyRT_,S>^܌:RUl8|D"#k@p"|{{qksz¿IԔ?`7`e\(8CKGN}ΩM TRT?EtzSocURb2wg?7Tkg !6Uag4fVRR0? ?:oQGGR bv5~1N+9bޤr&d/=5Dq9lhVtg_n&"+QgżOb΁n)wʟL, ;52eHˤYhȆxӍ'ޝ-hsklT)#UR3vΗ΄cH(+RpFwyN('N*% S@Ź^ j" |kh!R4IU\_/dq)^e3 =G5lo(@i͛ O!d6ԋEhq̯}OR )Gou=fX.JCC>:OħvwbzL$#:\; Zg p}̨zXغusVa -xwT= 񅌟LI- X#~z2n D.#Tf@RG0RʀF]wpĚD[X[{fS30.g#|Ntm$z5vP-'١~粑i0X/':MT+ⳁ%"_+Qu|X8 D\6cB .86$3pEw64_5&?.>+uZh)Gtmx#%).,O K$N4<űtץok=+a:Ōbx1_{kؾ$4 z /@>;%HT v.F/DoŠ;tF6QϩƓic;EOXM)kź{1ɋY~ᨉo:F@j XC Qm t-YWq3 A $iM$뿸9ə`q'oUMNzm AP2\fMQG/sPչ419vR:C%CISa6$-/ZM|BhI̦¿Vl 4wIMaiEkloXw:II[+QF\^l(8-bCU;'?bjш,画;2K@ju줚~zO%G<3Zc5A`"J ܉{лnꂃ6cB@1hɉCejߢN P5QO7EA~6&#l$**5DjGynU͉P[#\G̜Pb#LP#`bmqb> ^c,\ 2m#,Ӭ1(wB[")c4ZVRq(ړ[1ZlO_ 57ʍBd d*4OiE|= a ΍q$?WpLv3Leu7S!Pֽ9ObgGxPۣrBnIwS ק6yu}_3ww/YNrUj1a[/W !r?LLhP0KSc#3*ع 6r׫xvGtBpޕxMAZ ±[=oiCmXc:?!FT(bް11Q,_v%W:3#"bb/mfP3mOB,w&Zz-S^GQЭnQ ܙM,5hDWb|F=G Lfg˿xZIRg_zPC\~ WZWNcbC+eĤ FI!fvsT.X ìMlȗAүhkU'hh> px<K˂(S.0>4&nS] tnQ0ʾ=n_;7{ivґb(-v+/tzQ<$CuCK2Y/:ʛEi (s6pPO)ġS_UXّDHODf5- 2 u` j kR$`' bΩyT p a:iǷF\ea0ec -d%lLetC+.шjx߆U@<}WmC YQ5Eb3& Ӵ(vakKn<؆GE̵%h1@|LK 'rFצ[d*j5]S,&$bqKq~){P׫ |yc=7 ;8IA:*aE906O8ݎ*ULHHl8"n5ԡrw`Y밺$(0"?hJ+,P\߱ Z ^`ѵ*FFtrA@'qZ M[bt,dӻ ͞Lg4gWi=*|tPCLddŢmϕR=PWOP<ZA*h=5glBj#1`l,:;t;'8Y_RWR 0CL*I`i̓+b*db~(I<}R %؇yK6ԥ\өm^?ukGs R/ uF?Lq14Υ4TeE_>/xY<u6^fD /kن]]6j,CM6<R4ZPkPqAwnB@j|F)X1aVx;tC&Pv2cXLs{Wc40y(/@ ιz+pm{xAM;oޒa wQK/YE$@;O><iYZZ87Sx.S/KgcFkjU,^oUl[rhf Q~! MR,oH +/[oadnѻ@C{EnQbӜB-*{ëJ ﱣo Y`n?y"5rl rSiuGK)0\l7FZ p<Q$~+<.uԛC c=/vvGoH3vp&0uy5 UC1yFDG }-?Jx8qL*oɒ&RTLlEY]8L<_+}:80NY 0mqtཔ.D쫟ޏ-#kH"x#Ceۣ|Տ0RlZ_9/! RYSc?}50|8Mh"꿀R_{D. -[E_,(]J!Z岭x "F3 6.S$8)xmw6J1w|TK Șo>ځLF,1ayVWBI>lwplWvыXh9Sܓ~6m?T ,mg4?*_>] 0 d=nBljj06tl>ْpr[inХl>u4xD4\`ق|wE{1+oކ=d"H[IܷE?h=p7c#vc3~N">Ma|*7Ot<d3{bڨScHS-W()^sT9F-썡ϿbIdicA^f3Et1>m.OlnPWELm'4hv_NmGHrc7Yab5RrW vr*F0]jA|kp9^60La7?^SqYWu_*—ky!iGH_xGykp#x;']~il7*[Rm_jL`tޖ^OE^}trhkP'|pKӖ:h@^#L(AЊ6w'd7no>l_Cd$oV>$$T#?j =%'5B߂\)!7r>dhnBdE5mJ[o`C|9_xXv@t`:hP.,ÁH :_(r/oQ:Pu$L K6CT!hvV"{aQ,ڏswF@QW:78؂a|~Z4Sхyns᳿b~o ~IzM񱕳c``h:ʙ[[ZBb#;M Zszv 󫒿sq䈄l"b=MRQdMQ8r8ss2:xn:uDj= >Wٓȭ%cVyIvٕ֛aq^fSU6 X,+eDѩ6a+VOkg^w;z-|OFp^b^\^"- >&txfI2 xc,0A%Sޜ"7ך&Î>;;ɜ9i=~}=_es&ٹXϲT1}ɥ5;B$:B\8zWxE3QgðZa)} '[p}-c+_[d{]lHQ 3nVɻPXjNĶLLaKٙuJ7KeCTF~k=>#JS`[Ynѵ$nU}+li[1þ޺s=v_ưR L) gmLܸ@v?RF|ϠSWk`,mG˛kM9Z\ FD7}SNU @Tr׆׭I.90* X`ss6~$x5Y/;%s_Oә?PK%TaT2 `NYQ)i/'xag:>qJ]R8ڊ.Y|!M,IG۲{:#eu8Ff]+}Y>U8hۖڂQqȼU 0ChMBWaZ咛> ǟߘoEB\qxsUЪUw=xTA4@F^8DҎ? 
e,5E$#\Dd9r`'"}Iܟ/㌰6b:\X=2?6):T̍z",5wqׁ2>u&65cIdBԾ" UOnl |p]gyЀąZ#zଞ(y.Pi3h-|'PpG0PTH߷>h.(&)$Jkm)\^x*ͪ@by Vnvʦҗ-V&R0Rt'S<64Hb!cw_!v#fv,־"t˛4&BZw_rSEדlKG;A@>t3<-`8馄%q!!q<"0,9:3@K6}Ꝑde-[<K7\9ҜHF-G PX/ߓLZRa2=w8o/-/B6Q98scq bx حOqV:@qYT?L.ئs=͎Ċ4Of`ܘ]2U Tа4Z4Q:;aZ>.JVߔoh:VT!C ~It5BAN3=]PIѵ = 5)4BTYYN%rM})۬+bl+ؿf5v`q`Ca[V*r NDZI]?t?pC4tS4w ^W$9 ,!.O ~Jx]h^!;\B6m%Yd=hds!UKMVE/U/8 )dpEa[v@.Q B}l I<&#B3 E 39PkRE2 QsgPNq$rSP/BI*0Zx:R,s?T#֬c&3VzCAƩTtNjV6FUCKZkEK̛J$ri+UZ)Wn)zŴ/_XL]LTnV/9wysK a < |GSk(LSKZWm1a#`.(UI4)Ar6זsGZ^{jbGn) VY{"ۃA1KOO(r=HAwB2N>VӍc3%%=bT@,Q^o[zs۲MA-5LWgc5$pR][4=˨adK\ 3u9ljr?^J^n{N]0,IS cd= 5'zwl06xO18ȥvb:V@E,WYO/d?VPɁCߋ]\SF4YOg4{7=Bu{=iO_qqV%5%qX ^aֻZa7a_u͡ȃY<!I9' /aRIYF6UOЕ&UQ4#{4:{tPO~u~N2*G򃿣߆WG&M{A*d˜ɑ z*i/dܣ5 =OqÙSHl¦ճ.:nyHG3{mJ:r%{&w]x6нg@"Q{$9JުXF;|׳B}o{QTC}xO7(&e1 R!~;tpyj ܔGd(BX Q JՃ)9Y3OZ8OSF?}wSڔ*7|huoXVPn~nl6t ~;Y$bKnR!`mW5dďT=$bEjP0SMAj[uP3񮽑mI\ 0,bRPy)C!QVsNN#=JrNFftȸO"%¹]5wuaʻMzwash͛?/,?n@DDgߐ.H@(L]18]lv`V RevԱtSbyf'{ ׭ E uSN-K3F䇈- Ԉxg3lL47PMUvmf/x L{&\x~́5kL_+.cM MHWtu] xn 1%0F+#et[ ^NЁ(cNawPT `VoM?:G:'Sv# %7(pُ=$D#t]K&;~I1|8xD8v.08_(>hcp?K[`R{(3w8E.R<s4}E#K ĝڷXeS 6'Vu*LL"1U'o^p6%AnEb{el[Y%2GӼ}4&"Hm3ɹG; J#Գ P=4W{3Ř*M}@ 9_D5Zh_Q"!!?>:o#sz֦שPNt{b#6F|ПREfЫ͇n9ܶU0{biSvM!y{L4OГ4:)^S^xxb]e>ͣdw(?e!513ݝ=<:ح /x$U"M%N0 HGO{1λ$\~#h}G";D51^*kTVXc5Q܅t4v9u7b 'RUya&%(^}Jp5ըa.$E˒Rv>Dc,5\y )uj(P/9) <6)˜щ16j3<e{R8eTopnLFt?VP8fL)N`,,9ǀ.g?OP5b0fQDW0TNAe؄r杅E6FG S4-"¢tҡ9-99B2{k}otw|kQY , @LzjBtւga|H \J/ c7;"ʢ捓'_`sBUh0l@r5H:=k:ɯ!֍ "l3ۤ2l+{+G  PYN ʺ+ƘIXR\Ak=vcs)} 2Y fsy.:;g yKsK@dLF|![X0>r1 >WNIR>Y!ڿ2^Fσ1Wﻣ?*D~s$ ɀXˆXt5Cq`K,o"U?|-&b1Xؠb|p6~n t L[7Af q7>if (M@J;C8F >j[hqЩg^+5\x;;BE :B)nab@U[tUtuC4Mvon0"nE1fsi~4$NJK 6BmwiS{[U-`SFOZH PiszQhS*dWmiwss@?5^©jw\,'97<66< #[[22MS"+E(l(HaHTmNi|I6VyzN1K )rkIzsPq,ȇ)-FF-/5.Jk +&\%x;ODa8/5tQxV$CG Jvmpv]W àVV`L,Ql vn~)R+(/وᐑ2qQ24w270N3Ft06X ϊaax\xGQT5Q&դs&d}}6߻-+@No1w HR,鈨Nj a\O:h DV.:Y\G"1%-+ 2]A$U=[OmyDDH(ɽ@biH:'X+cZt)Օ(Q?P糓K;Oa =yu;%A=uRnE).[GMAm]?u\Aћy#ښ׻M}OsyeLj,xM8 6M06HL _9Th*#T^|OqK30#ˤg0iqgX{ &^5K2؏L6D2@IAA0m3*36DHRzVTF%Ah=LK\r$뫊b>p`DB>"уJŽ9ˊSiGd& ԵiΫYz8\WJ/m p2Jg ݃ &@t]`($ ۅ~6ٟ֏C޲P'b䂈ͣxir̶Hi}BbL*w(y9 %Y+Cu+= fϫ=qJh"cT cckQb@{( lC|!~hLoa%+^ךyCz[ O+:#(LE= LV:H?C ä4ݠ\],))ÛNeo 0TXc:NMr!*v^ڔ7 X v֑s6 Ciaf 57uۖʷ"}i]v&0n0W{>KeB̫d.X@${4v Z GNư)6CHQ Ћ #!/`7bZ0xZ@# d>eiǁ+(DoVk!b@'s T۱d"@3hCzGq=iiI_V dL*Զ{ܿ|t1WJUq!"Z.^ntQSd-2ZǨXkRa"k?*Rr}b }K@l)l 7 ac|?Y$Yw*u##9F0eH6M#Vgܒe#'\PXȘ6}kۥ1P S sōuEJf K3VT؟!K+l6`*!IsrkLLK i̺Z;Xy~YC3mt5%BhY\76|S/=xd)B9R?$Csgz7hDi/X__ꅫ#֯W?1fbGk-7x/D2یDciUں* %nRxlhOG Y\In5ߌs\ Uz7/fLlj!Cju%=d]x|l͘{KRz;%G$PokJ1dgd#{a[ ms:?GC bɲ:`cu.ISޓ{E7Tlj$qD0!RsأXzyz%ɊOL64#xٰL:{\B7v|rChdd bDLg'޿XMnn-%əع {*` ̾w/ݣg?3_P(-2w$ tksrɖ/sC:a"_ qy;̺wpG{Y:JI~`&xI}n1ُ'&h۫"MqE, s{TuC9h|.DY"?U mZT<^_H;}+ Nz@ 6]Ȳ7J x-#2ފר)ۡ>l.zP4q1y8ZD +#ybm[:'3{ X)D촏c/e{w \-3x1jSm$Vz5hw90X21>!Nd;1!]~Sx¨bͲ'a/ufw*qb+-.8MSMv_d}Ӝۏ t/Wif_h@gIP0?ٟs64$[r=맸r>Y~HO9U"NJa5vK]YRCqMw Łj!;L.eP"3(G8<Ɨp3Ib@#??ZdO%ӟތyQh"@*.aPB=47[Is#PZTnb\u;Vwp&i)*|iɠK^.Lb@˜rҗ~ʫ{jXMޑ2"*ܬ-crG <) JxhP"wAkG}s4os nAŭ=m@b1Bk}6tF+;ȏ()$»E,ꡖ,棝}F5zTP|}QHVQ ܃J?$>'uڨAuF,qЅì$Ûu|9LR'PJubh'N8 0WC-{iL݊N+nM'ܯl n-3qeC`oT섄8hM=| $fonJ *I!$Rdgmy-КMo \=L A,oM y)෨Zzҩ}; 'e#$ nabm+ecΙ;xa{Ǵ<_1 caM1eo wgcGhS-S;Iv,,؁dy?sG:+( Tb ƿTߦZăr'X&)W@nͩCb?Hr]4q,.Z0bL\jD4|i(k17kR> `j,4/M8avHWt=,"ݬkKs厓#i)#J}ccœ¸luЊҹpX.zzKÇLŋ"T#bH' T_vĀ)t Q#}XJ7Qg]=ߌ `8@(OwpϿ4` c9ܰLTUۏzhR{\>_R%ZGQV/IShuWٞ!FoEܣ_ms$dA 7bU g(Uߘr)|$sͯ4\58''*22y3Z GMOa1[ &ʞxW8V/)r7]6\#N "4`J{nu)e c輢A>-{v_ 7OBulh@m%SP^]5C&JvLW&MJkiR9VILNԳܼ83«4+$50$2>` -Mj 8܅lJ?Cɰ~E"D !Q!(@a& 3biJxF3-kRTVҡ2IiÝ ~r1$p Ǟwy$ `> Sc=ja7Jג]|x)Y>#X="o&Rrf.dC*W/ ﳙ$e_X˔>>FycqB#oGjRU thKr(g?ȅ 
E{˳|KK0SFg;={1I>$h^ipc?%UoɶY 9̐X2rwC$)%2JN?Xx[5EhY4tG=L톪^Og꾋*j>QPϑj\5YM.1{Ժ7IR:`ĸ  -:.57k8x*q3T b]!a,,fip/qk ? Nʙd 躟%'ڥu1[( HS)ܔv*;±ۊ?Sb0+";~X+}\f/ۇghow>gZ5l Zg+x84% MR… yQ8g~ZZ0=e{BecQ:uZd\s#(o [$pXT|ыu,0 A}zVatXkZS6ؘ6 OS[Xۘ -Yl˜2IU_= S Ҋnm+l)e&/~֣pP V~`7z$-)PXbqiw /.+;BZkun;7 Ӂz%|{r݇B,C {D m_=l,895ByHb'>u9$7N/LO~xfdo|Ŭj *vzcwfĊL &]IJ`Mf؃3$ī݇$̅IR;?ah+KTY߁ܿE{!";!ǃ{?Y0Khox*v!ʕ˽/[/-϶Itͤ2ݤn';DkuH=]pRr)+ 36Hc$@Z>DC."fC4v(6 ltB,{Gz<E90q1j(qnFm.#W=;4}j;Bڏ)"Z@+S qU_@#Icai] @cf]8C å"I ո[ XlXdϤ˟R>(]3/wq.Ӏ6x8gp?-L Ce+}ZэdgȖ`cAqGmk\ptV\z]=mv(RgH8=f&6ɴt5 q' 81)/3ۯv?JNX7T0d^gdY(I-# _’ ykI@ky>AP 2#iC2+ TyQae*9*GlL3ׯLv7VKNUC *|75]Ex<㈪y8:> 1m ՝,ir.'.VwKljDd(j)%ׁDLB=iWiXQ~ݵTn# /nm#s̞$ܑޥz Ύӫ$P ܟJjHhTH޿*"MA!/3!f7Xԇ r. ~Y_CꖊPnGt8zG+)!4Q}X Q+'feO6yd" '*dT5X[iY[)sȰZ9 *z1XY7bOqV{ ︾dn{CMv<(}l̆k.u`7u\1Q}dSLj *neȀ_~ia腼LSQR5(7Ml{!,IL>U!4S|BsX`OogϠDr-?'Q?a8ƭdGO"4?ř<o1ɷ :I_w ?~t쎹/cPk wJ&[%XX2(Fp5ʯUڊcuh;֑'ssg^ڢ 1k-8B&c-CؗL/B`ٺh,+{5W T<D>g \خ-^]¬R򽀼2ϚNE /tbU}]AFh$Qq86ofyAT5:+c]Ć"X ]&q#yrh7o/5$nN@_BD*W}*1M6m]`ze ~e`R˞:} \-i;q T0[ǵ_fTܼ"vOga_TE?,O_ᖘ쟲%4BؚS[C;.i![DTQ3+\*Vr%gQ;E{V>fy=]'!ɕ/c9 1Lq '= V]IZmN"Kv1Wr)vRmglWE*/0ID9(+*f]1ڞG+@*x8Zn0qLvAXkKU uw+ vgʍw<S )>'LbgMm5e+gU@{'I0J\Q)D0 SŊ^|O8b3g y `Er|sܙi%Bj1:tRAJ)Y7V .6 a¤ēk=nIMM]xRڒj-G2DN%uO2 ݂c:5-ʋƏ0aumctx*#fG *<_,iQV?ւ@P"T6lOgjelrI C␘4tfIZF>w-Wdw§UЉT"B{iejvo$Z+vщfMeJoXʀ熣5 5dh [v!qc~SoA:h$f7zB2PjOw0IQI)W:ϊmR~7KTiM㳴UJ)/$le_B, RuQ)lndeү$5ZaFYR4R:g#?A eoGC?L-z_ԗ-Uĺ'y1azZte Lu֯ʒbP}.Ǵmz|(؝PR#2DX&@&_ '07?K\%ae4s Ze:Pw)ma{UL6P4X-sOsf()`^W.%&~ക'2gYo]:ݛUbm SX XH ec ڬQm۶m۶m)۶m۶mvwt::LzW^8ȎԞ~;uO6  {Y^? rvBJ}(<ł&e b2\mMO13u7{\4e3dFP<՜$YZ11Su#|'Å4%?VE9+^|Zk<mhxyO}nőE=^H]-}rҴ?-wbG;`b)&_춞<6{;3Q0Aily#x!X;1nRnMi %$P\$6cƏpp4/;)L7xD|y'4/u0ĂjV 6El~RA.Z=s;\̿cj i>ydk!G 5x(NgT/t aή%/QI|wL7K87e}LͰv -?IMbDjJG&e7⸶(Db:j Zw]#TqnD$KWǜЖ%^j}*DNJÜ$YQ3̨+D b&0i/L33Q׿? w(4G:z@n'W5-1yWqrRټGCܮ|]}M+x^`JAuPp1#A밧 r@iN:sz 4mltƦ_Y;| @ r31/0$`%(=7,.OG&&cė@7kЙv*QXa">'pa`z}F Oזrq)N,ٰq2ac𖜡uEj8|JQ`r lrw j Pk-UBU 77V}lO}<헯|@VDPC`LͅuXco}J[ ˏ\}Q7Xo?>tL)"vs.ֻ̈,IUͤuH#e6> `#-M "QvZSäe\/agfHAE֣o{sJ5v#ݵbޥjuLErS,PWLRzŵi xؚjbG3"/T@]M_zL80N>6Av^!SnNľ#:X\ M~" WBki.HE5ϖag fx =&{Jk2GB-"3G$ n񗬴qrdy05GcAS j%^K?9rz8PYŠ/}xrAG:ڰ@zOz0?h"~0;K#r UZ\Ζu%s̬`Aq~W: ㆥX . ?@QvTBֹzuDxK>cLzhwAO&?|b #nspg^NQ~㞔uQ#A"MFTΰ}[xa0 _e6άt$*lsR9 t{_ Sn:q5S lk+n҄"G91j |ZH#Z0/KQJ¾w/ci1 %p#WeU\pLwoQ)juƻXT8ڶP`%c[']!; :&n1 cX*Aftn`*c|BM `s<rxjv_xeN&td~n nZ4QLH;g9uZBb俣ݕQG? M<%cjsW VxTOP[UוK;2x Tz+@&u 7NAϮ^;&aݰNJK&'UDžM@3{mr1;U`4JCY\QӇXHSXp.XiA ..}gqtU *WŰ!?! چ6FTy;Q ުL(f岃,`x^dޒ z΁AΛ[Ng  /XȨkj8{l/T" 8a[(#"ڛ\@-~Lu:W6xC\,To6s(fG%O8C:NU,MP+S%t S}m$ԍnl$1Y8?hF + Y!ۙ][^i,7)+oO덫ؙbۓ.j qRDP^FSѬwQԮ̦Ȅ9vk)JnisQG1_^Lql.9! ܯ.(7 Ε :I&\SN(uKp jg"k9D_X,9x·vTS1=6l 549c6R+\D}mz<3U*j)kd#6G'&%xɺXyUM;_u+/(p $r'*PQF /Tw>i6]\ke5h-d^p)#FXnSc/8:Ydo"DiRю,uDTxyJ3)>3Ưț]I4v`zǸ] JJezs kШĞŶo+ԥ⌬OnâуSAiz^G9m4cVITDqDɣ*pD>(Ij/2r,Z/S)Xn%{M/2GA`FY[L Ȯ`T7' Y7hKƿ_ՈDs , ,jDSݏxV@V"H\ּڀnu[AÆ\sUnvu{Y¾U0_4NM紎>G"~&pmS >H^ΰ!%C @TI%%k !4TF9OE7 ]oV-Wxu)`a:*fB"4ϵ,o@t" ?5fc%|5g3oį,Կu\紐M߿:CjBNm*B?NCQ\AD^V {5D[b+Nz ! 
ץĿaC%.4AL UYyo1`$כq R($Wn(Lǚ' ֚>$g-mWF9 ƽjhjƛC`"w#3qiDI8:P\QXP%fB z7ad NupF(U@ۛxpc^:L/܍nznTnG3og#DVO TzU>_rF:궅z\DM`^(;zߓH~E2}2 _^oZ"WVܘNkԥ$%J[a: {аowRib &l䕜sU׻wH: ꄱ}M 뗬کLL!c tRe 9/JpXpbI żԲP0c5@xD>W#GڰG!몦MhnXV~/6@ТBy.2~̆ǟcPN7=44.zXYaJQ[n'JfR2]_ *1I+uU OC_ݍ 5/0vd⫪TuPk}@ _GIEB}cVHuh\0J?I{NޖXw*- u _ fA1 O 1+c[Uc0rö*7 9d]KxvFwS\}vdac-fY~ g D7%& ;fƦAuY65k%mND'NGD0t,{N:|h)&mG`MʲItTadhɓ@ͻ&t:RS'-Dh I4r{- n pC=kk6׈.a?lzCdX,*іwø}dá賗o³Rs/PSkThӊ>[rJOCH{ͱ}/qE^R=8i@fʕ|q4T6Ҙ.fɭ]gKJiS;\uv7-/cP '7zWэ[[h5HqHDnf<8gBϿgͭtBfxm6D)Zh5&=\ߌd!*魞 ǣ6/IVC[h̐qGkJFF{8_]&[O4}?-_8w$JTgY^C qik*W#: "qfY?o .]DBgV<Psa' 3ytCqT^݁h),n:=;p@IMBEӿzO|R#0n eØ:\(c$[=f.6+r:]tcc8._'ˢ6r}j_D 煊 Q1eF8 dd'_R+XbR%1+wiyQ)]M;=Zo}Eu|ad* /Fʫr$1WO 6q%=.(6$#\hs[A遟 o݂ZʡFؤCr=iΊ؊ʹzEBI*lc[悦7Q%k}4Ȳ౛@%9(̱vф[^eC_8?${#<76t((8g37gxsVBeijNb)`W E[T]d!g￶*@(F%K(Q'raqBrZq Qkio:j?+Z? B$ C}!#Ԓ4W` AT׏S!8WCߵJa<^QDH^FY R{ugf?Gk9 8pbRijcz7h&k1'jeY=G%TG -I"|rrX+ٙE{4Jet(/e<Hoa8it۲_(l'@בO> *BE7 cZˑzcWSL D YA\Ƣ gEX DYU#ڢ$}*1Qzkd*lc7uHSG0Va{ K_dJnqb]Pma{0:ݐ5 e<̠|1CрD"Ks".'݈)&.e3Yoۚqk{jA2d H.'g/Eær0 -)RR4(͜+7U zp T8g7oa ٱNڡٍi>H|?Kzh:aY!3SnD/uBۈk)6"\8T̨hgI%2rR^CB<[{H_kgF;4(} ^=0tn;ç2{qQַt}UUw_gO6`X]BBs*⛤F"N(_(z oďZTtg KڻKU D\LH&Tw4/D|s)Lzhnlݓ 8D@7M;c"TR_B<&urѺj((R)RrJrGCTgP(nOkQL3q0J$xa=%̈s 1 3-}X+ମʆ W#+EȚСd6'bR. ɊS2@8S$.*sdc8;(|hƹWo%do0o(r |.T\׍&F&k 0mGÑMNB/52f?pm<+F:؟} eRYCMMvI,#/G$h-9R0*V׳Ƞ})&UӽqnCpù5fY I%w9}l-al u^i~2ox]_KO2/vX_q 3)4Q<9H<*-b~DoDpSJ&YLL"l?PjZ : aQV5,DnL)1vyݠ}ՔAP7k:XNHm9IBϝ7 FլQ3FO JuNew(\$}JG͘)JMm3sF$0F^x }2K~<>Nl=HXwz3I6\` x7{*~&P_9NNR ;1_Jtb~{/cF88qYqTBϿAx+alLGy1uOuHTsWF{u3Uо2v4֧!s{-)fu&(4엥ng#+lZ%{ ]z@`4 k45L7QxV5V@l)` XQq׃#vb.Y9%{H5 ׫kd3.U;eS47uXM޹}X?CPiu.r3 c.HW2m `()  [ 4rk^N9Cvk;q '|@1EJ4W%hO4 P')K΅$_!^8j6#ImGX:'`ހ|Ԅ/£QqKw--BIƍӄb$=f4̑lJ>']No>_: sgR4G=AQ% UyO[5P]_YnY^~+!49 sm%{C 0 _7Dx1kM0N.'^{=3ݭ7AӿdM= (;:v}4a2Oys"ϋQ,-pm-WR8OCXFI;V)h@G:u6)* J }.R` #v~2@6+ fҚ-oDc;KI&ߌ!# k.hȠβSxۊN a4> 4wv!U+TNiޏ"'El^7K6B{8ɩo*[c_C?EgP+C;P7_4Etͫo@έRXqN$%$ɣϼ8xz-ϡko$wVqݜbpJK BIQտ :+ 頢'=C5pZM&~0RY%M)GUW a)tVtⲰĵuUb1e`:k6CW/:oQ^XPG&t U0,=mZar"˱|+6֞VP0w3͵[IN!xXiՒ) C±)k7(\P, %r ! 4hsϪ='տ3zO:Yr:R Μf@q߇◗n MXsp춫Wj OaKXj 2K 4˽ j@wuDq b$2׈=VTF(!R!Ko 8,`ÎTߒVo>6y~ sU7tӷVfi((`%'єUg0߃RT!zosr<|r pX"wjK~JRux~ke 1Ed5^õEs}ܮV)[D4"F%{:rr'[/񁨈bzpG[Ѫcxݡ yOpX>x PVk}oCuFP<)0ZT.-gN>ն[c@%@ jm 's1,JỴR xv6?@ /%JFb"71H S MG.D^)jf+N+"p$ފ/.Up7HWpL<,]*۸Sq*[,.:ݫ:@#X~hf Z^< c MZߑcm21O Kϸc~;:1WNk`x/BY?yE,3q?=䷫Q 9uuR-:=D>/'겣!dLޤp@lz5 Rwy$ΐ˩de!nu?}:qU*-ڻOm~iCV-k'~Gms}N5N"!cPsWtʨÂ%\ĩ2ΌAکr/j"Dle;;XDA)} V y?"7%0Ϻv!Խ9 RњzK!}beGl Z~ 2hxoUlWšÉmP aHND:}/gFĆ_<;YsMaHGd{e4rψr֕O ]b5^ ,\EZ,ԈkJ$ՀK^BC6N|}]t+*ʵ9AcUkK v47of5,6F.OUM&X k-N+f8{RBVd׮ZGd`h¯X*XlDXcqa\+Zs 4rӑ }PdѢμ_Ƭ9% ΫqXc˴{ NO6՛eCYAfnWui#o;2L,,# aSMf"$ pZsaӞ :)}k=P̿~Yol)T|I Uٺ%Ա*eםҴ\%nBl ebEe\3L)ą/ wbcޟV!H!Տ{]@XsE%8?²RVa_+,.j)H<\% U!O,=g'U!%P(G.(s%iDo8|~S_ЫH cWAlyݴ U!qT fmz?h1f[LpHٛ3@4xo ZM]R& gIڳض >B4|pRhdo_8\ ٔ^HI -$E`Y5>yL SYd%\`gn щQ}MN» S*t֙6F &3uG2q07-ݜW+ٔ:a/׍k~wIÉ;=cΘMo1jx\)RF)ZO'Tdc$nSN _i˷_`iͬNff 0vF J!Œi(+:tls"\2TyK}N_)K7՟ĭҲUIkQ^ꔢΈ^Cjh8=4eBi*6WVM VQИr]CpM(Jmp>jp,zl0j`1{EVEW~74dI,*PyO;z(?rwLUuoM)#,OReҊzd|CKU7s. `sBήP[Ŭ4 b~P֧ʉrI9TRuZ$fo4hyASz#2|3Pc"yٍ!ē{j}0BNLl?̗C,IFT"#.@Ɵy-vof5H n}69~W{t#uh699_7Fty`6(Ǵh2-?:E1"6+YG&Il7?ُ{H%z;#hR9΁pB:J=y\qWP ^"09|k',&఑jbYש[< U7n{;]qԁw@`٠9*f馞T8;ރyXbWHTΝ{Z]tT2lwtBw7Tr`45}>jIfC%W[)vXXi隔c{E,j90ۉ%[UMP!6ӅyEEҽSǜ m} R@#Y4 0 _#[$?^}~GHKEAUMm xqeP(ƾKqǶ}ÔT bLs˽W*J6`1pR"YnΡ6 Q^9YB<;7M0_M|;Ά>S,YAģ5pkw~5J|6ⱨhNகmDpRlWv۶mA}{Wd)=y鹇":6#_/Oi›RT. 
K/Es nvDUgez5^Fl 7s'M;J4)4p:usn;yo.?IةD8Ã0dlix{2BG7CX:ɭA7zKӱR >!bq+y .gPf|Q!}EbVZxӋW} j;5 6@=%J9y4MzhOuNl^,*t(g%mv45d PHexs{i?V:b9*e-L)3}HV'J@!HTxZZcA'e x/:ZBNc]>z0OI[~Na,3z OZF@+ Đg_+PG_rJ{ƝeGd?3wh#GЕ٣-&])s;q+A B Aڨ++a 抔>Z=Du CEMwmmqzSk8^y2(9cmu~K3{ Cp\]&aŖ8L6" z^D_㽃i_"Ѐ\nd8T 1߾ #jﶴq]H kPGIZ= =eI^IՏpr)ט9ѐ ME+D!jhؠD2pX q"pjetuoㆀa_NK(-~TZ//̬5yet߉#T豯r臒[xMx&_B'Srhp`k,)P7`r-;+859uǞV`//HLP%\)ppU*ԴX-[ɟ.'7#33@إr%UA & ֫N(HNk ^ D]ip  ɕVZTT68WEC$f+|C4>F]6[s^=Fu}+_ZAG42< :,,Wq$c| ݧp>[. Fn^cSH{3F넂$:Y.Hre!k_@ ;!TBF[;`)y 4pE܈FUZؑ}:$!B՝y&x 㺦b[ږ#Q0佼n/h0Cx'M4菇(/ yWҩR!{*r1bə}r sQJZ (7,5([+A* Z?C0kɫZ`X$=d7U¾`>; ǻ'σ da>GQ?Ȅ7"1f #z׏vBUz&h,{suܯ4zkN9(>Z"8Fa:tF+9F02ZAi?d\,ŃN.%oo0VF1U1uYgkۈ͵'$l}E*v.nN8@$2!\=HcMa8)]ӪZ]~ P>e=됥Dx`va^5#&s^_}~xYG7L!Q\^Rbk;ob>4gCˠ4Y@4£ #Gl'xF怃G @H9%nSe Q3%NVV)~B_(\VH{ޙovt75۝,MeE\,? Um,Bbh::*Oѥ瑴xhk5?Ud4u!; g^Re5 .uj#EUǏpMuWw1l:Vz*jE]N~(Br7"]M5u;ZyȉzbEhzZvCᢌ˧`N"HsT!!J^żT/l دM58KԾ=#Ead3:wL9O2oU'9GJ*mRI<ޤ!9p쫍:%H-dx\>Ui-7 N݌ݖq`IsiWܼ7ZxV)MOqTЮXQdg ) sj"Dx>dcu R:_n9vmtȼ]Y:Nzx!|%-f78@$#xuL ~]Af"|H-Tr%&vZй K9$~?8jT󫖀3,5j^oDԃZ?~ژa>ACYQG!Rѥms"I/Ml.`xw>ш{Ft*, ow Aw Rp #^YZCXTC$D~:ΒCxUpT_TsB4ZP mtN޸N6Cɥ*evٝFj XBt*w$ +[DEN'l䪺ۖo ͛z zGYn-ڸO:)#EVѿ ETr`2Hϐ 4EW0Clۗ"V[Z4꣡s%gNB;k:<׍m77 ۾WRۖv^IPhwYejF:rKH\_Ń"`nhpIB;eG vX%&Ȯס{bODT:zlq"C* 9^7p0խ6粓И?X 'r[J<If:h8,pɱJz&/A(R޺=aʰ Y*9kT?YkYBŨSmvta=m jԎf>~nx*_!&yCa,HH|K ];*R㲾yйfK,L͵lJ]"f;6f ^FL4M*p'y'! 1J;R+/J( Э3hm2gd*oK z_DL߈УƔrxcuoƨ0 x=K8d@ 7E"Z7hVYyº\azD~=uc:ȐI 1Vkd4E{Z#>[/Q^S\ƉWuM,SͨU@hZ)鎑o>!,5y"y?cɸ,jȚp'&[CiwHJ,s9L/Ci٫~]22vF 1SXA޳>f"qoYb#Y0RJ)>cӽ & -k"Нz a.sZA+e'$IZM)lCf)9Z%ߐ%c|;4 ;3;orw=a+ .[fTh"uo0>UcHc1GKtȟ]cXDa~*PSi|F(I- AǷ:G*▃&B {ڛK=CJM6|Kc-Owcz4i wM lcD@Kogp9DU0'b+wc}RtaI;_\I< t#MWȎ6f1%3 e·svi TJElrek* zh7 }dIOvR:dW8Ű^,p@IUɖE0co|q7.~nR цD-9:7N!5meM.䐲nfTK!1$JqŌlP?<"Sb(2Gܔjk-E7Q$d'_E h#r5vgBunx n̝|aҾ՝RymkAaa 'gk H'&˙`]2%iҮCNma.pA 9GŕA+{FT3JX#9Qq%FM Ge^ j!(ray}l#D}-~4:L d-~XjN&c^ z>i9"}7'NR=) :Kq:O݃|3u% .Yɍ~t*<7^'~޿QdS -뺍Uӡ'PmݧObx%dGƷ &m*[gpdd<cm;L7tr('~H4ΒZmj|JMYaARITFa eiz0]֟ X+t{fs"1kiiJnQV(yۼ4Yxr@ZfjY3{ ܱW 9}^!y|i|bxXvRuia&~{شlo!=DҚR)&hy=%oo楈zt|4gh)So`_Fh5/0" ێXh\3sCK)RK$k`u :̋kH%yΚNF2qȮ\|g-%|Bn;d4z}Q!]xKmQ^Q|y%~`#fs9-_W{ToGI/M'02*J'?c2rE;`Q'ݷe[ sؒ!6O3u<2B_q AdKBocϻ\x8SgZa:\;2bq=-`xcS9jG֏G|yA&@|b`b2,!zE`IThd×5 uXGzkw+6FINvw9VcWr9g8'/ՋْٛzSX,wCJw&' dT3Dk?Ͷ!ܰ1xZ-C%7 Q,@/OM'𺨍 `Kb>_MԳ˜Pi۟*w7C n\KZGys뼓[TRĐ Ӽ3J`CHؗdi ;0EYg2vSv8l50f%cj& - Yo=(H*{0JK.t[ZtgIy+] H/Wsz~3=o(9+{ˤJ%_ g{|vUcos1 4kx!uyböYT]d!!4T!rwO F0ɊAxUZSa8+7%#kE{ұaO|6xP \ ~!0pEKd[ T9ygY}:A7| {%J GԬSN.j`(6lt~ _y"2G7[_@T(TW9wOe#mISt8Lz3g핚#~gG_SL4F*ߋh [EylaRWh+#>R'}o%h?EO;Bn. o}3M.*Q+ya,d_{`w$o4›29F1 ~lie_tSIW@#X#FaU+'q<"Sw^Vt'hYCxa/>P0BvEb"RǁvrSXuDXd5h%jdK UVʌeh9Wȃ]%u*I|@5 4Pkh^rXD7 g-o1#eo?PWȿSh? <kdH!ɲ0'"$njc_V9G.|Ղg~yL2 d^e<&N=݉ݺϷK'[3>C:H>6 Jx~h۹rșC8j$p-(i6Ս<kϓ?ШL@f+ehTE9%9>Rs6 Ƌ:j,Y? eP9X]p'n3.mU:Bּiv:ىJXXK.񑯝7#2[8oa*+b9* m`]$ {[ŕMo9ö 78vF: Q4q6kBu|Q_oIWk;_auv\OML޶x%I<ٓbo0@" !1nrty4KiLCTY{w+D iK_*L@iڷ~ 1~Y̧9̭+p *IZxUD S]eq;TRq]8++3)3#қ/ [TK]`]%ZalG`.Dv^>M d2T:^̤{bYS/rEq{˯ttf7wȝ66"uE0@VȘ̥jcNP/ 5) k~Xޝ$N*g<Z g@MIBC* E#x˝ [VP6Gvز&A/p̓i l/Esze7A˜?US{uJ͝Znqαs 3CP/"_FJ[5iORv(%Ưz4&{lh3c:Pae 9(B]] s o0vuIkiNd*/dba>q <@Lb;HHȮv ,mwl52$. B zn2K?;vbA`= j&Xh"'02wnӽ*U{oO. jnc@ 0\P8Y~0^U`j~ wg90;Ӫr" G-ćVSv d!XSa2i.01<"|c#OkJW+-ď-) quѮm(1%2:#6yij9}̰cދUb3z<Wx~GDG}a?@45_ȴ!N%M}8ꁎ C.8ܳ7x$UU)<̰+\s 3z1G߼q]+qMV^Ipy4\鋀!yl$/ggp!R|޶Ͷ Qwlyĵsvxl'O]]=v*ۨ0Os! 
tS 5浭5aP-p)A <U^2շmC+4iPcRY?,"T8Kg l0gdNCYOWϗN9uI1 "-x.:owK$c3^՜`܄ qD+j,t8{}dՁ|^`?2X RBB˦ {_"i 2!hqgے1Զ;?OcUäQ}Ŵ7``!^Lq$s _WUa|D69^CqHΪ␿o9nyi7T~?[IaHIkR?t9[ lQ\~9h\zӓf;󥟎(Sk O!D]1EDTcz |mhx>;9K\mVRg7ގZ !i2yl’^D)h '5<3 k~6oN o>yL4{0IIdK &,\9УȯstfmH/V-0 M2_癭.nQSrF&ɲxjR@+i-~$ן5綕r[k!l''gy Kd*@MG.bE +uhe8%(.*t0t*1ē hQ⋠ z8CNk%HZ~m.1, tEܖ ߫{Qx̖9Msc}ԇ>ܻ+Q4Xqڕkkeb~n\[ SY4lãt88ٌ7ْ,5GUC195֎Wx3!o*l ];-A&kZ\a/ip݇B q1b0{_̋b^n6>wo -Q0'D_uygzkhD՟/Xwg#;"en[hCnu|X t^2( s#jlOh 6e]F䰃kOFG|jIZOHE3vF6%JK1[tF(41r_w5p9*fǔi y&+=[{73Ǯ΅ l"ȱ9g0x>/` !iokik璓x͐B'WcXD&wI`%v{*[khlFv\S[g\ ˔-eg]$7|$yK-.Nʴ]z;Ta4xCQA *J8=5L-*qF*f4=jD!K1 ٠GJ>)Sˏ` c|/tX !r?(l©o])H;0 f@|=-9X̒N56Ѽt[`0n;䴬 }J1iR#(fosY:lYp4_'᭾ hLū0uu3- 4`M Fpܚӝ9~}6ne&\k{|BYP4G>pɖrA2`=/0X7h7W oLӯ ^E2!RmY݋:Q&$G3bOF]5Fҗ*0okG95i")3#䳼[ )֙p QkYڜM+q~!$۶lK53x[Юd!7vV(3JR!θ01oK|ji|uN[ֳ7ᬼpFXHFӳ,bxzK|Q"+HE-=mo}lӤߤ݀ szA8"[ PYr1\ ZA/L x '%|= 2NJB̅`jk@ qx|2M?Gڍ:PƍWl#x&(G) 3ۡY0P;.wzHժޒ!uU,ȃ`Pgս: G.4%jPfpR2R5zK&J?2<$ƶ^$be'\b"DR0|C3'8nrD"QTvH!p NJWSrzͱ"qӕrzo4C"͇\5c'=Z0N\Xi:쇫e$]L"?$Ԓjx JAA«r*_7_LFD !akڠ&kh}z%.!c;?qRHζvaV_vSLVJfe:34}vU $Bk=-ނ4ߔt[}l؍1&|4R?V1Vy~gѠI|:nѻ&=:k-cŷ~Jqr:wxi> sҲܖ? VXbտڐύF L`%uvqCY8U9IEXzޑ T4EU 2w-Y${muݯF1Mw{F]n K47y$)qpJ@*`wu~K;%qLHDZ‚OG[54^sjnl>k܉8`KYB2C:jDzFaJ,xӯii,RZ2VQ]jk[S>ug'9$f'Vñ"bf?AJ.V*YK_ ]a/U1:,;EjM||g@8o{uTp4>^aB?DHFMJ>lKt >mX ԥZ xu4wkviU$.{Cmѐpdi"&(qLfUIdi!xm)^K(s%$q}ڧr}uQ#MK7d3lK;{T|OM|8.RKR4*Cfe+HwUdc+.O_-z>k׮%w/صc [p[X? rSAⓜЭ ίw<@$Ss2=Tlظd[?Y\?Ԗ"itŬC‚z# v  K tdJ2ֱSqF)%c~&|JM2KctŢC;Ӹl`J/kWMT :!J$b/(P~?bCr6$KAԑyF P1;.\+Q\yy/ԴԷ\笘LȊɞHፔ*(zhŦT`_i8֐PbԌQ@#>+]擀n t({Eyn ' N~@_wB[u/#ctGxdz(CY#J:XL$JW*KImk*&RDFOze5m_r/X̦/Rd89azNJ&AdO ˤJRPf0T[Y{d~J"h XxӋi4'dBM5 H"+{՞| ]픷=P6?$jtbq- ۅ+X E||;LpoqT2>hG}|V{ݭk^3sE>m}/:ƢE4;̬/z{8݇"Qi9L+ H6{"{ .VMs3ԓZ3uUcOx=V "Y ><ᱪOĮoN(8RKh>WR3^F-Z8t;xt7as`[7@JH-ѺAKt|MȮ{P>0Cl:(-m` 'Ⳗ-gp3"d!=Y l&~4'm7pWp1b6] b#J~2z^CSD]Cldr,([˖ڳ诮 R(0o1gs{0L.v{'R[e'APi!teoo$u9`[8M1aHqv{}y).m-Gɘ< CL1 ;1^ cq8Z'*nun|iRU\f8rN& zbF"ڎ8?=N cgDXP`oow4\P&u:6M!5Qy1#SЫshɺ:2R1/JqʭaU/9As3$s?@7d&) l鶲en&Mc2m y~=1H bF5q^cpcj.ݕDDb ˃ P іHDx{Rs]Y={4pYN0ʹGˊL; ]y݅414iÝmZk1kKBv?C6olr3`ȫK'N9j'F}"yڹ(DiT wާ8/gc{] sǧ' %@W:&ߐӪQou %u\r#'ߊjOv6ITh M[{ %ԧlD^ .44Pi=t+ԤWTʐb;<1Iޞ#.N|9~%^d|)<ιHTIA݋}@j=}՛gʲMö1%wYVsx#|-Mm}ƾjc .eܦ]),$0qPI䂎#)ruim+T-0S8W%Ca˽ߥmю#I M7Jz-ަ$[g\2E#l31LG5O"< čZP|C%PPEd]du OQ_7#ly#ךC{Lg$G=s{w|#j'q񐱾9 \k -Thh2̻ɨ[ujYC yD'3 ,%bWкwԃh4 HT&Wc(5|0n3*Ig?2&ԻhS L"m jlf2,Jv`=ҥ U"f7Рſ[ '~W7tEJRZDl|HA}"Lڰe-۳rR`ϩB%tw+Uﱯ6.ZGhoOzFtUFCHl 糐*֏m,ºy݁@[Xi6.rG?[0ygX I\NL =QG q,Gh(C-i<Φ5$-Xtv~HI"h#t=^ٰAwUg8 )H*:{x _I%9j`në ; X,?u6}L:;:]8B'uԤrLy8DŽ[x-Q kބPS<È9PpaI ]EQ M5A%> ɾ/.advIl])zxT%zՀay ޾pwzVP?!Rc #[:hfw<77cgz( Z\H@FwɆ |0`RG΍$hH>n@r F`< !}DWKe/*N)Ո1Pw=8+r>ӕXDj9^^ ͱh.d&"~)MD껗H] *ʗ+Vד) .2i&ݳ+'D70 D]!q!=f)L.ivyU˝)W z&m2u|)@Ƀ ăNEF~ljR}5">wmKO-q(,iZtBE X?ߊ#ٮKPب)seX~5F@Uˉ0[L9XVk?rO"#].c9ANwPFq8h$- :cg-^H8HQƈ8kI$mde0n [*58*Spo=y3ދ"XPM*5m(T.v_:lg@\l X)GT~O 7XCېiA G;*?ul'Do;M3BXd'[jCх۩ U"G">:^z</zz%缜>J^z|x_*5x)w(ת֣&@j>UFV_ pcKؚ0yUBN쳨#Fgk 0x3qvH*k6 ̨Tf)jV\Fx%Zİ;˼:{f.5ZFi(s~'t#Uؽu1HiB6X]H1гy8л3y0Q/vpFWZ 4z3dB)#{>& O}.#~iVُl.UMi[VLC6&Q`t)` ]8eм.SwDοj1VKc11Zj{Gl" G@G(vA9hs=ˡCfwGQgb_2 H8uH+$BдB0g L d>tLpwNkr3o2v9jp;Xt{u9Y$44ĚCW"VN[|{$H|?/B3 $;J 7`2W~JuC4 Xk&cvAR#F2b_O%* ? X ͦ' + |э{'_yE9ڐACJ8^l IAy$>bHY/!`ujxtG.-i0)!%rTbkP"xÍXzjoƜ8qPUbkG8k&C9s+a=$f9(5w ܉v\3-z㤪U|QX>ib>o*\RYJXTos!8{Mh楚lT"*/!\;?2*%M+[5ߢTߗq8*s4fCwʓX {7':E1P:]@07L۴ &ܻEb6ɾ 8X׿pMu1Օ$GO٨HHsǴG_;-!}qEG {@iBY|gl ų<.)6V.ƃϧN[ j!GM%D(I% ՐÛ<釛f=QWu0&Eo@7I˱EFa"b~ :]?h@ߏ0YeEmuwIh-ݤ=8NRhn"9? *P>cImpg0u) I7rd1v? 
!q+gRN-̗ZZNq|鯿Р^`jUN~s]tl,i=$* KLM:i x&{kxZ2nc?oHDyeAd*W /, Ą2@+u*o4Wσcn~HG,kvdH +k >3CnYQp !˔9%݇(j(;V+ K p6J!;ƙG rZR7ɯ/K8A>L5U~Vx⽖}|U(Bv JoZ ^#[3@B|a| ]VLʂzӜ2ȟI`OژWȴ<\\iPt@QnJk8|#!L$5NO9M=Q") apeaϖ%X vϭ٢Y Ff˥gP~=pYօ_Yn. =y' mՙSl`|[7! ' 5M|*bq XKdl;y ά俓OJ.!#؍Wq?ѷF7N3#,fB w4_4?|mYI,*z.Q-ftսwJS! V '䄼åFR:anћՊCm2óG3j [1w7fH{9֕VR/w / ޯtS2*SIEIp:"jԢ}.bZT<4R4q(.I-rvVZ%=RwT\~-U>tY d")< (=TGëQaEј*c THRh 1ȍ !H9j51 n+6o]rqX\A;Li&YAwM8(Q84Btg#i7bۦ&(3းsb˼aW0?>*AUT[ (%]iojUpbo {KRj ~,[*E!a LR͜~22)-ßLel-AQlV(I\uU~N[5+ַsVNama(%V܎ lyɭXb+oy: i xzWWŔW!:e 8ĎR?=Vx`鿜!(Ld#<7Z/.I2򂯁xfE +.lfQaFm:/Qa.P`q>\1q1iUKȠaܬ91:c'wA3pscPRX{(_ȿYlAp x焾)uli& s"W{UTܸZMv8a>zƲ1 Z8j8eSwԡi&giԘ1r h7⥭1 v8^e۝ގa,<]PWAZ7q<J6 bsW&aꃥBoNlb N((a#W_S9bhi>[+G_iF )#8x/︻fn2z!p~3Hߥ_$[tlP'!JѧS5K54v[IP eg"c=,Jn,!ٸ GSB?uu]]SLrĥga0?P a!0ʑ՜NzgW xJBtdf0} .xjB# WdҐU,jU2+3SEC"AYݧ_Fl֝]ŀMG 6U͋EIj؁3z{a& 7x[H3s .-ELOxQ@ aۆ]k-Z~Y{:in%tI(L1/>A{(TIiu+3tcT|B6ҕ~4&.SXOZӾ>KNYAv>'Ϝ{5s):E0c\Ru{drk5A|Z0dީ˭QC 8ǏLQ,-߽^mn q]ED aکQ!]\ۜ)%LG1U}u>}N<%OR".,w?lمbwJV ;=H h뭦3Ȋ`duf W@ wE ×C-Bﷳ[nqgѱY,YZz [q'N.RwGsˋ:pꉳ("4B*0S 49u,,uRR?Բ @o<@>3<"0o=\^HVDyٕ?e>&kv~ KgϲY^kzW8v(!^6d<@%!!ڲ\'ϡ؟kI-x1YN,#jXOṖ]V*#2GI2MXɷAHWPƂ@bD%}V}v{O8?46w=*#SDٰ3W3|!h5 Bsc4PZ7BdP"8UO,=_[={T̥*RB]m4P%ݐC=ī(1Oir-p[a.SuvI-VD㼜 qx̩&qke2cG6iyd}Wch* 2~FQ»UM10+? eNwhnZQL 1wW"kGbhǴJ\k#[jլu ̈́$T H@bAH5z/fDelmN we =의n!Sg݄ *'jTȊT_-"jܖ;5KXWwyMJwsϹʂ 4-r~} [&6"#b6 _T?PQK[ &j ˓O)Z mh~ MLNBMܸ 4p*i 6Psy_N~c5NhŘ׋&[( Latg1kg[t%gq`ky , VR tvEj٩7I%_ғA t..bS i*)&)>\Fl;>Q)ɟ1ո'kJ {&HԲE,۱w7ˬD,ܰsG q"{~ \-%M ~9l˨J_^z;pر7$02}RA `$b룓 "ݹYz8@YdUSh^1ZyxƗoOo@ kmlʩ_3?m+6RmqӥuÒ4-p r~yoP? >C7{$xR3 2C06hdVd1^'+P<3yl}֍bnaSpTn}$}twF@A Qam tK|k\nLF1 =gRnH ]-dIp݃ҦKNƓ@w t4b|n 'oUK9HJpe+W4RyФJ㽱ve8^ҜshZX]j)28aaE4oC x:k!~d ea+l{$pnF 5 ­xR&zD~=LP !#S#-QvQN7T5[ї:XM5[z (j8eUx 2l6 Z` 5k`OաѿLtT  ֠GUC|bmj*s|eQdggݪAV`KT5f|Y>XADllR _*d MdEb~*"EÚ3!o:5&MS/QUd 9_2no-W~ 2 0l~cgێ+ HvqX.e&;%Q-ݥӋ ֡dksoIY0ᒦRٺ v)Duĭ:@ay $*8ʶCRP{MaS3 1f }Ց]0|O{r,^wt}̀Lzs, j$DQx'N{qgsXرLxWzmxD&YX&w ֔)oDА7]."t+MjԜ\v[Wex(Wi3@q<ƙWvU,)rh+/J+ҥ-EӏNQj8ku)<Uj K%raSV$r{h *(u{^$zE.7 62Ýkf`Yn b_lEeWZ(2ts« ǨDFp\R`^X' Xx??Ln ڶAk Є`tVWvΠk{H^Ŗ,Uoe;,sjvשBԕyL#l2  '@$fm2( &埱γ#c)Hku6[n}u|r|H¬2mq6Sӗ:5iz gj?}d`j1FL0EHCUtMF T`8%}DȰj_AXze kdI~CI68<su%*9NJw@"BqβQ=)V ѫ4tߊJ~TlH-GDz*XO 7s/ӭu!%mʗb%{Lf̑UԩW5o 橧kH% o26ى e~cX4p?A:d["Kx3O*]"v+EluZ6uH/E5nudaImR)|IV(r GHcy503?s7A[L).(7%?K``$ |ҀN{ߥ|ǝrs尢9bE-zL`#!8űf2=uQSP) Cwe'(8C[guȯm< A=k8]EoH e쐬H/@|TeGX! ]ӈa.w)/{@ ב@y,wbKW_9QJ {#.:6Yŷ0?%ql~Gܸ5eJy-KkCA}autIr5'keς(BS,E(6lBsizbM{Ǭ%Ũ)B<ZfX'*#%6>[_UĪ9w'Y0ָ?u Fddmr{0;ғ#R'ϔXdvt^x$ ܒXA' p>-ju|1ݨ53{ su 3,J:jA^4[iNy*NR+ȪؾJ!dbPky%Ia|snA54Fg79 7b((r 2s7g`iٍz$p7ϝCxӄMG#>>6EE)hO#p\a^Y"gCi! 
oWୌ=ҹS4*Z@OЇ y*A\)}CmJ*޾3ErfYI x w @R:ȟǺ,#avw)Tv}FR^00d=k d=X!E4qrP#.@$q?JlnNJ^b?_ Dld6Å.}2x)c _墊]m >x[c?xgA% `[Gv"ú2*R>afXՓ͆*sϩWk2BghUdK=8F}"Udco'I,lfn~9A( %?=$>S8!zj[@lYAz5}Q¹#=`b#}G:uHJsL{lRQRFKkbnC!X9'g8=#3:|\5aҳ/1KφDI{5: aG2<hdh)}x6Tz0Tn!C՜1I%Dz*P.,I+˃ע3m:39V"q~tAdiyR(ʷL3Wm!}@T[y"bŁp~5(֘tG%gLOh[k֔pqNfq6a#M?^e2UUSƥz/wT\BVrE!v3]a&P( ֓!x 6阠cA`$ 3O߬J M[(`ܷ؁z"00k݉=qt0CIobޏ!A2TpGk\s:=tY wJi[g/"Զ1'npixWj(.4=D YҺȴqNI(\ﻏYhx+&9"|O-i( 8/|^)\U_aiWYv%(11O}n +nRDeQ*uEoѯE;o:ì& H:+J([]?_(O rBuWIǂn^n/$]LLLjjm )yKtid80ɷߺ `GcQjV=uM|3bhjf.eN` ^.#yJ㔛faE"DiV!-U 8+.rV~<@BbOgc uᴘHyпϞ8)R7+L鉳"\QG[>5oziJ_rmL }>[|mt2 >K?qCYƮDfIeATߎ[.ѽ8۟Onpy7ݘrWֳ Ӷ, hT0>$|#GsxQi=/3SS/ '(*j~Y Uȕ2;둩<&&*Zd I{[6 B߁lӮi/1E`06I`F:_6uHh$f0AXr3cY?UjL{u qo?’@Jk K@Xu,2T^zFW;&098ΠSNu߂.B6>1MZj?ыPxfTKs" l(q> dcGU9IMR%CfϤ+AY 7\, \9VM$B _}/D:xBBn9gR$y^ۣ򘡝>ec7,.ͩ/0~ )#ױ}'X&qWqJpm |eJ64BExQ,n4N{řoDv$ޔ?KM0d-3]ѳ%aC`τIxƯqOZ/8Cۄ7"_"Ly$O&c1wPP' _' M<ы: d0'7¿PV&|SycPɭ  ĨON,4nRm\KWC8Hgyv4F2KП̆_ނ`K$Aj/ŮwAfk ٴS791: I\amhdf"Z$JtX'u("@e{!uaEѲy'2߆n _ys-22N|)Ϩ+=^& $Q]WSlrZSx-WLǐ`qbV qӂ Lz` {^CHNftrOfLjhIr\HdV}ԡ6YTv|9[ƨ& j7+ix8sʵ*AwfJҖᵨt:OuvxdW_qa'`1 XqW]=k Y2l1##QYyUۈ009(2"ބ j(]g X{n%y;"L1nifT\#S`R*kN;HJ(`9wvҶ]f8]Wjr*&dXdƷ啃8W{[Yڌz'y(fAܱ6t2ņnY,%s@h"y[d|1xȡy.)˓wpe/b),49cu9J}"ܗ.rƋ>'C IQފܯg` F5K,?koOA;B(dƂM"OIfWP9EIyn{ pZ=m4pI6xo4UUs ^jhʼoL~};xryU;U9Oz5[s"-[!R)_V#8[g|Ṱ*Jr =Z((gٴpuȿ9$;ĎvWҽuVQ @<ō^"~_:qM@/Ϝa![1čV8; C*4vCIqQB5X1MA A?diiYN-.p~CgVV:o s5;PxXԩ^9gҕdmyʪ`-Kg)?RwތS}bx@hr=qvCa}p ϒ2Zw&ql tJ@26CuGU:a.0&Lު>: xe9!dp?׆ r1L]y`A|KQbX߽RMLrXVBbz f^|ke`%4zQO2~Wi['%b"ySS3AkWWþ c>–7_uܛgd '\C, P (84Ј}0T8}!e%\-Ϻ§`ӓгfV,2' 4=̪_ Tth_R*e1vW7aeEGb!3F#22;Zߑϴ+{ ξ&mn,Xmggg\#>=B4 :mh>&O"pU+%L]y_j $wS4O1G_5OoL!-lJl lLK#1tc =|=a?jj#氇ꃉP3cԩ3t;.thO2 O*HE06MeSqgJV:DKov^dp;m?z1G^E"}\0n]ek]PJ=XmEq~~G+"Ḧ́:^՗!,A!'%S4EFďÄM$j/˗xdVq4"J=#}qmiN"L>KՈt@h܇8 ؇D>hdu EWd4*\q%NQuՏ>W25 e)KS,[UoZJ!aiiđ4˭qЈR~d0%ͻ]Lzi-cAbĤOBRUiQ_WzF3E]:ߜxճ \尷 H~bSq-Yt |$X;:![R/ kZ^h.d+yV`K" -;q~~9Ӹ%OWx+uAQ D#OBĒ<kL$__8Mp1kn3ZP,COX,a*A/>)) +dd޶DCu>`S8/5@;jj2JTT7ԦcB-[{0EAy8.aOzM0(`c3MxƯ>V`8産$0 \ꓽr%(,{)`F&8 c\5*- I@SI.g'Qn*<ô\XZJC@e#(xž%ܛ̆A•p@㭦k.嵍{PSY0IW|:h]k_5ޖK,& R#50 #!Suݍ7+=[ k&S8J4suٟ6*XJ)=ٓ{!z%@X`+}=R,@ s3W|ͫɯs؀esyBlmr:٨Fm h֜oK{G9L_x- >0GXA%W?v.,@` 163l7Qd0X r OUyi}6?t$c9 ce` ~BcvOQtѷRX̚";u!ٝos{֑yp(׭A .Ofc^x`.'\oιQHlY)ˠk;K XvQOE_*XcP;<٣k657[$Q0RIei 0@"fk4cH~QV,El3-õn͙_*h7ij?GM1MTY~4D^sVLH1=}B@-N גwwZWTIu(ȱw!E s:v}#`Yxr6nBBSk=mA'.J!zhbB7O1t125pWg#򈉙 xh ȄPD{D\L6^S)*uor'D"x.,U濩WqJq1][T BY =yUU}$I8}rm4TY۽Ȯ!u2<˦(BQ67Ɛ:9Yd!C  Ra@5KM 9h8{5ĚޯYEݭSMt˨k,,7Q%&Hc09\D ?" ɥ)G9^qbaV e3BI/V>cHtm$}Ge4:} :M|KRg->t[ A.ez#a&T*8JDx;BLϦyt絑!!!2Tg[aBA9krtzSe ] "Q *DW<FL=+AUyiZc:橥U{^*T)1\ݹzoC.u];c2S|dweٔ30N\yT2<;ݍPN l.S()gy(XH 6o<4VRUR({ZMٛg;s2`d]%eD)R"dtШR4*V%恶i%ocɛa *gWN v!YsLB9#1-L&؋5H 휴ۖds*散RN #m%_ӼװQqJ<dDlNjڸ9oVz&7Zi׾Or2>)UA 2m6japݕq5߅Ęz[k@)ޭ]1;h ⦭f|vM"тJ\vrOOh{ n=3p7Zkc=VS ?Y]nG˷DMُqÅ,Xb m.hWfSJǤ~gI D&d= f6?*\_ajfJ8*S&5$rJD)l(%{ؘBAxk . 
~M(&yVUawi^l PO"6Pf*F"Zx 5qJŦ^.VV<]s4&uBn sѕL5$n?EcXE=*xq"R ~cc(rfӞ951H_ Ex#)--VFٍ<&bY_a)dlz5q-k+} hi +I;f3 Ta8]/즨AO0|6e7pLnY|TZ5M:2) AG`4۽tЕϤIwxy~vL6X OA`RvDxnV)*V='x2ܓ'g'1"2f ?SRRF0& oY7tIX5pS"LJ{/ůn`O޴LgpB{hބӡwҹ3{ƓJMf":tO̻jĩ#\%9π̣-ͷGwo }5rce*] 4Ԣ/gZ0kwA i% Xv7ߏm9Y-*hf93Y~mMjhzcW\H*7x4)3S: q?>,gY*0+5~\l~W >0?@[Xo| Sսs )Y@q)}XW54S)j&/_7hf@dzRj6XO޹ˏ{&m[Î, |um =[C{B$ WNh'e^ܧ=c- nU@_;Očrq<7wAG]i{/wGkT[ɑťkL@`^󑥮\CF2)ׇ XfKTٚB"֛HGUdV66j!IsZvUe乡!sgMwFc{ Κ|cv$X |/IUâEzஜ 67ŽSCTh_9dQ+k3N}}81m`ւsz)Xwpg n8\/( {$,0 FyOz6?vMŧhGNY/Ffq 54ܘ2I*YPgzE&~OGv[t"qd02P?k<҅q"bj7aMߘ|J0ZTo:c/{+?JWGڐ}uwڦr?y {FR` {n@ 2;FF% ?T.*@'IvEꯢ^%4ʫfBw8"^##݄i'9s ϧP]չfR-UOJq m[[+tsG+TLG+y1/5+֌#f(t 1ٹ{pѥ#=Ě N`RWtR 78Sp&3GY&[y{RQ1@Аבm#P|aj:dt/VPA%"ϣ !vl^6VE_ޝEX^YPR_,l?m=l'7M}%%o;uઇaI8ȧj|.4n%*0H>V -Of*hWh[o(r ^oKzoyTH,ߪ4vZ1hƨ hAӴN1}\!i؎'%¹K96&jd߅9nko3Mk%aǭ%w[)G "i[Q=grjme~;q M;+8`;,#q0Y52=7fCLh4K? z&Ari 96UHFNૺMkM,Cx1Jze g[r9 혪K;'jU w}uJ@- B [#S[E!.J^Cn@<`A׷[zƸYz~Soj/itUq{2uјMEb@Cà~YGdSm03;m)Ua˅eijacftaS6 ONO}*I!Vl!܏_L4̱@W"oC-D'VNgΩj1ԑ2˘ 1>(TGnˮXKG"mTȡ? bPK0MH,}bzK@w*]s:Q&,A?*|SJAgnv+$O9YYWXcj@)} A7' TlJM<))~g G9PP/uBF]lV'U A"觶NѴ!\:G$h ?-gtYOύ~R`PZN rQ^Oer",GJvv>2m|ͬgDcw=q}ZxLiP))VjBዪ fQ~(ٽ@Q@<-]0{3 #hl dg&n+`H[QeF)' =zŤJέ!`Jt>,.֪ps:6-sN*x6#:;fn/D`vcU80ԩNDS#IC̿W%7=j˅xU[+̶Q\; 4^QzE}@XPl Tp+%&аhƃ!13*; MS(O;m9^fj(ۜY YWlƃ؍ߩYU| ?l ڰ̬..?9JO,Yӊ)|5vƙrO,l`PtzG׬FFIrƞ$e)烤- ufLؠAIj!j]|bfzT"5ǍVi[JKoj1?d*,(ҖZEj ;-Q0ne-T7^]ƒ.`\g%V٪vrB2\a> QEYyI3eTձIE!zAXV0sq)]$ԌuNbDL1rMP.}R:Ȣq.H-8^!ĢAIԪA5i_;֣FDYVK6Rk_~k,XnK!th)eߓ(}~ /;kz mc8& :У;ٓhv-SiH#l2 y3W \T 8v|op@lvf\lCl@ٺ&e'54lG.]p3}3׻]r%3>.™%kx]M3y4d.^"]V_;XNlwkqx_P;D~Ü(:P@~۬_ћH58h?>{y8jݳ\6 X ULMy1 GM S;.OY 3YO\8'jDuSPdtIUBiAfT-4`N,EMвz$[0YK5p8CUnrHZjhH#;JdtCS[CAJdyeB~KХؽ,C3=zMB#.c?oq[t\|K6kfKR؛H.vKZZ1(Rn?T^GB铆i6Ê㸀X= W֘ܲ$B^oo?pلX > q \v:r ҽ5,.1htL7wXS؅O/y`Uˀx|Xʽ} [)00E v JuufEGq#& % -co3FDKSNewrܭ+گ#wz~21GX0J$H*5!1T;bSb8=T@>᷷Dئ츊y IN h-5vF%4Qc챿O5~}LҿH4&A{ߌapM?wnl}-Y_Y]5Ŭʷ&`3Tjh|[ 34]SO`O}`zh+%rW8Pt+wg)=H!5Ý$O;4=XL8OWZ0E8SK BRđ7Tjd|?6#+ԾN1w}j08DpYPVDLimћ~2(CB 8DdqРсP'ݩDVLe{_k䞳OgG]J#O<<t) F*оS#YE).dP*X|-44ꬺ!_d>VA[hJ-(-ծ/U53Nl ] j4 knnol)"apA[*BiP )uD@*oQgcI֬ \!'v[Su@(]N}voL ՚M<%]20gf8.\+y1En K>4P}Mџ')vV~K +C⻉"*ˁ<] O䉣`)9u|.L9I(Fy;x;ہM{,-=2-p#ϐlS+>˯J߂vMg#|ݻIZ=#q@{^bH/aDnH[E,o߯:i]N L%{!zW3%Ê KIWo7L~^kڬ/W(u\>_x ]EWĀvb5}vHu{ ޿d-0CGNuP72۵baZհ<2KyWR:7x#&a~Ta뺐Ÿ/HYv@0;ZS4rÛ/ 0~ZA=֩f~Ӥg:3 )jr" GH 쁯r3qksAtsH=(ə 8ڎ$f˞> mYM,^L@/=dԪ}Ep!*%ϊ,Fv2HtB:Ә<͎lHhd56M5%C WɤC8[tm$䭊cЯdyV1FA6GG £Pi0w^QվCd*},&Yn&/:5e7Pv i7|>FͰTAVl1ϻ{ewL<ʪ[: B}ԮeX6I W@;PB]R/ pz]orO LƟBʎ\3:.ޅcɓ4HI'{j\d[xUO,0f~?p~]f;]+\;_JtP},9Teэ#j;3!KV4A˱ ?bncOӻP#44lӯ2O&Bu-QzQ9{1orS2AO,Qq 4Df5C8bYT&|ɛ6,LIӭYU:H͏Oܚh< \򣂵dϬF#v7-!Uzz zL$"/Tu%:˅*,X/]dv`]X(d$д]yl]}0߹ÞΦyR!;JX2cYb+f?O@Z#Ff^&nG4Yd~ӘY} Y^O,6y ֯J9zv}ok,aKpWs-:zpU&Ӕܷպ;<$b:wVDKv:5g36W.u K:N5c  ''.M6IfFs/)lKg`1'ĽP8Ft$ò0.۶m6e۶m۶m۶mn}:D13s ðuP'J֏\GI? U%آq3>OeP: vن,!C|E;Ut<ܚsȣ:w:q v_acM_jO i/_6|ynK&d/($O=[GxT5PA'^=3bh/{K; LhIצa۝ `$ipYɮXWrwh0 y/0^R9륦 eR;CN33Au3l@F´jyh.t*Pc,WȯB(&PbX\ 5FPAL>8)-\WEBS} q nWiqTƋҼ ғ<,|A@Z1:av-v9*&tG{@ PylATS˴Ti=4ӧZ"y%-mN}Ņaۡs, ʁk)$ʂdmiv׻tgJ{;x-g]!z-aJUyL=bx;mݑ ֥.d"ԇHr<$=̊kD .8y~\嫅C(MF䰻y{vRtJlF=4L{!Օt. ]Kf~,#f97o(3TӔ!)LƟKWؕb$"n.Q0#Ux$R̊c|6% VV2Pf^݌)wjSŐlo?e#w;X)pk=we{aWTf-/Mx3Kوp3/Xo4"E<ڸjgz@o,SS{&ǂK_ CWoVkؼrEm\OeycIrjSG51FWe8o 26"N{ p#Qt/| lh !`w=7<16V]F0XL#huM悡3b=HvO K}JV:e2?pRg LF J1!? >v͉J^QmKѯdw@ ?) nip9ᦆQ2[t,h7࿦6yըf?BGξ!a?Xr UQi`+]cBːEnl1F/7WA [j%V{9.q*iP.1Sɵ%PSv͙~g%`m h(oz4H bh ~ucz|yElE]Ugv|<( }ŝf8mV$3-ء-K Jt{Q; `V|!F585A## kDž*/a-PKILg| 88x6@3E'AeRWlҬhCwU6O{9 O|enOcTLI@*NiLRE_fKDz2$*r@ldR/DG[vCKFMۯXN|^sQ<%1-Z]L&>|A4@ jԇ;Z"=ǝL|-vnNΊ'&N%){^.7NHv9B^3q=ɠkCc3;I0jI N3s'QLRc&tN! 
		for _, block := range cc.RevertedBlocks {
			// Look for transactions relevant to open storage obligations.
			for _, txn := range block.Transactions {
				// Check for file contracts.
				if len(txn.FileContracts) > 0 {
					for j := range txn.FileContracts {
						fcid := txn.FileContractID(uint64(j))
						so, err := getStorageObligation(tx, fcid)
						if err != nil {
							// The storage folder may not exist, or the disk
							// may be having trouble. Either way, we ignore the
							// problem. If the disk is having trouble, the user
							// will have to perform a rescan.
							continue
						}
						so.OriginConfirmed = false
						err = putStorageObligation(tx, so)
						if err != nil {
							continue
						}
					}
				}

				// Check for file contract revisions.
				if len(txn.FileContractRevisions) > 0 {
					for _, fcr := range txn.FileContractRevisions {
						so, err := getStorageObligation(tx, fcr.ParentID)
						if err != nil {
							// The storage folder may not exist, or the disk
							// may be having trouble. Either way, we ignore the
							// problem. If the disk is having trouble, the user
							// will have to perform a rescan.
							continue
						}
						so.RevisionConfirmed = false
						err = putStorageObligation(tx, so)
						if err != nil {
							continue
						}
					}
				}

				// Check for storage proofs.
				if len(txn.StorageProofs) > 0 {
					for _, sp := range txn.StorageProofs {
						// Check database for relevant storage proofs.
						so, err := getStorageObligation(tx, sp.ParentID)
						if err != nil {
							// The storage folder may not exist, or the disk
							// may be having trouble. Either way, we ignore the
							// problem. If the disk is having trouble, the user
							// will have to perform a rescan.
							continue
						}
						so.ProofConfirmed = false
						err = putStorageObligation(tx, so)
						if err != nil {
							continue
						}
					}
				}
			}

			// Height is not adjusted when dealing with the genesis block because
			// the default height is 0 and the genesis block height is 0. If
			// removing the genesis block, height will already be at height 0 and
			// should not update, lest an underflow occur.
			if block.ID() != types.GenesisID {
				h.blockHeight--
			}
		}
		for _, block := range cc.AppliedBlocks {
			// Look for transactions relevant to open storage obligations.
			for _, txn := range block.Transactions {
				// Check for file contracts.
				if len(txn.FileContracts) > 0 {
					for i := range txn.FileContracts {
						fcid := txn.FileContractID(uint64(i))
						so, err := getStorageObligation(tx, fcid)
						if err != nil {
							// The storage folder may not exist, or the disk
							// may be having trouble. Either way, we ignore the
							// problem. If the disk is having trouble, the user
							// will have to perform a rescan.
							continue
						}
						so.OriginConfirmed = true
						err = putStorageObligation(tx, so)
						if err != nil {
							continue
						}
					}
				}

				// Check for file contract revisions.
				if len(txn.FileContractRevisions) > 0 {
					for _, fcr := range txn.FileContractRevisions {
						so, err := getStorageObligation(tx, fcr.ParentID)
						if err != nil {
							// The storage folder may not exist, or the disk
							// may be having trouble. Either way, we ignore the
							// problem. If the disk is having trouble, the user
							// will have to perform a rescan.
							continue
						}
						so.RevisionConfirmed = true
						err = putStorageObligation(tx, so)
						if err != nil {
							continue
						}
					}
				}

				// Check for storage proofs.
				if len(txn.StorageProofs) > 0 {
					for _, sp := range txn.StorageProofs {
						so, err := getStorageObligation(tx, sp.ParentID)
						if err != nil {
							// The storage folder may not exist, or the disk
							// may be having trouble. Either way, we ignore the
							// problem. If the disk is having trouble, the user
							// will have to perform a rescan.
							continue
						}
						so.ProofConfirmed = true
						err = putStorageObligation(tx, so)
						if err != nil {
							continue
						}
					}
				}
			}

			// Height is not adjusted when dealing with the genesis block because
			// the default height is 0 and the genesis block height is 0. If adding
			// the genesis block, height will already be at height 0 and should not
			// update.
			if block.ID() != types.GenesisID {
				h.blockHeight++
			}

			// Handle any action items relevant to the current height.
			bai := tx.Bucket(bucketActionItems)
			heightBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(heightBytes, uint64(h.blockHeight)) // BigEndian used so bolt will keep things sorted automatically.
			existingItems := bai.Get(heightBytes)

			// From the existing items, pull out a storage obligation.
			knownActionItems := make(map[types.FileContractID]struct{})
			obligationIDs := make([]types.FileContractID, len(existingItems)/crypto.HashSize)
			for i := 0; i < len(existingItems); i += crypto.HashSize {
				copy(obligationIDs[i/crypto.HashSize][:], existingItems[i:i+crypto.HashSize])
			}
			for _, soid := range obligationIDs {
				_, exists := knownActionItems[soid]
				if !exists {
					actionItems = append(actionItems, soid)
					knownActionItems[soid] = struct{}{}
				}
			}
		}
		return nil
	})
	if err != nil {
		h.log.Println(err)
	}
	for i := range actionItems {
		go h.threadedHandleActionItem(actionItems[i])
	}

	// Update the host's recent change pointer to point to the most recent
	// change.
	h.recentChange = cc.ID

	// Save the host.
	err = h.saveSync()
	if err != nil {
		h.log.Println("ERROR: could not save during ProcessConsensusChange:", err)
	}
}
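// Editorial sketch (not part of the Sia source): the action-item handling
// above keys a bolt bucket by block height, encoded as an 8-byte big-endian
// integer so that bolt's lexicographic key order matches numeric height
// order, and stores the value as a flat concatenation of 32-byte contract
// IDs that is deduplicated on read. Below is a stand-alone round trip of
// that encoding; the contractID type and the sample height are assumptions
// made for illustration only.
package main

import (
	"encoding/binary"
	"fmt"
)

const idSize = 32 // mirrors crypto.HashSize in the code above

type contractID [idSize]byte

func main() {
	// Encode: height -> 8-byte big-endian key, IDs -> flat byte value.
	key := make([]byte, 8)
	binary.BigEndian.PutUint64(key, 1337)
	ids := []contractID{{1}, {2}, {1}} // note the duplicate entry
	value := make([]byte, 0, len(ids)*idSize)
	for _, id := range ids {
		value = append(value, id[:]...)
	}

	// Decode and deduplicate, mirroring the knownActionItems map above.
	seen := make(map[contractID]struct{})
	var unique []contractID
	for i := 0; i < len(value); i += idSize {
		var id contractID
		copy(id[:], value[i:i+idSize])
		if _, ok := seen[id]; !ok {
			seen[id] = struct{}{}
			unique = append(unique, id)
		}
	}
	fmt.Printf("height key %x -> %d unique action items\n", key, len(unique))
}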
Sia-1.3.0/modules/host/update_test.go000066400000000000000000000116601313565667000175440ustar00rootroot00000000000000
package host

import (
	"io/ioutil"
	"path/filepath"
	"testing"

	"github.com/NebulousLabs/Sia/crypto"
	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/types"
	"github.com/NebulousLabs/fastrand"
)

// TestStorageProof checks that the host can create and submit a storage proof.
func TestStorageProof(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	ht, err := newHostTester("TestStorageProof")
	if err != nil {
		t.Fatal(err)
	}
	defer ht.Close()

	// create a file contract
	fc := types.FileContract{
		WindowStart:        types.MaturityDelay + 3,
		WindowEnd:          1000,
		Payout:             types.NewCurrency64(1),
		UnlockHash:         types.UnlockConditions{}.UnlockHash(),
		ValidProofOutputs:  []types.SiacoinOutput{{Value: types.NewCurrency64(1)}, {Value: types.NewCurrency64(0)}},
		MissedProofOutputs: []types.SiacoinOutput{{Value: types.NewCurrency64(1)}, {Value: types.NewCurrency64(0)}},
	}
	txnBuilder := ht.wallet.StartTransaction()
	err = txnBuilder.FundSiacoins(fc.Payout)
	if err != nil {
		t.Fatal(err)
	}
	txnBuilder.AddFileContract(fc)
	signedTxnSet, err := txnBuilder.Sign(true)
	if err != nil {
		t.Fatal(err)
	}
	fcid := signedTxnSet[len(signedTxnSet)-1].FileContractID(0)

	// generate data
	const dataSize = 777
	data := fastrand.Bytes(dataSize)
	root := crypto.MerkleRoot(data)
	err = ioutil.WriteFile(filepath.Join(ht.host.persistDir, "foo"), data, 0777)
	if err != nil {
		t.Fatal(err)
	}

	// create revision
	rev := types.FileContractRevision{
		ParentID:              fcid,
		UnlockConditions:      types.UnlockConditions{},
		NewFileSize:           dataSize,
		NewWindowStart:        fc.WindowStart,
		NewFileMerkleRoot:     root,
		NewWindowEnd:          fc.WindowEnd,
		NewValidProofOutputs:  fc.ValidProofOutputs,
		NewMissedProofOutputs: fc.MissedProofOutputs,
		NewRevisionNumber:     1,
	}
	_ = types.Transaction{
		FileContractRevisions: []types.FileContractRevision{rev},
	}

	/*
		// create obligation
		obligation := &contractObligation{
			ID: fcid,
			OriginTransaction: types.Transaction{
				FileContracts: []types.FileContract{fc},
			},
			Path: filepath.Join(ht.host.persistDir, "foo"),
		}
		ht.host.obligationsByID[fcid] = obligation
		ht.host.addActionItem(fc.WindowStart+1, obligation)

		// submit both to tpool
		err = ht.tpool.AcceptTransactionSet(append(signedTxnSet, revTxn))
		if err != nil {
			t.Fatal(err)
		}
		_, err = ht.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}

		// storage proof will be submitted after mining one more block
		_, err = ht.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
	*/
}

// TestInitRescan probes the initRescan function, verifying that it works in
// the naive case. The rescan is triggered manually.
func TestInitRescan(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	ht, err := newHostTester("TestInitRescan")
	if err != nil {
		t.Fatal(err)
	}
	defer ht.Close()

	// Check that the host's persistent variables have incorporated the first
	// few blocks.
	if ht.host.recentChange == (modules.ConsensusChangeID{}) || ht.host.blockHeight == 0 {
		t.Fatal("host variables do not indicate that the host is tracking the consensus set correctly")
	}
	oldChange := ht.host.recentChange
	oldHeight := ht.host.blockHeight

	// Corrupt the variables and perform a rescan to see if they reset
	// correctly.
	ht.host.recentChange[0]++
	ht.host.blockHeight += 100e3
	ht.cs.Unsubscribe(ht.host)
	err = ht.host.initRescan()
	if err != nil {
		t.Fatal(err)
	}
	if oldChange != ht.host.recentChange || oldHeight != ht.host.blockHeight {
		t.Error("consensus tracking variables were not reset correctly after rescan")
	}
}

// TestIntegrationAutoRescan checks that a rescan is triggered during New if
// the consensus set becomes desynchronized.
func TestIntegrationAutoRescan(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	ht, err := newHostTester("TestIntegrationAutoRescan")
	if err != nil {
		t.Fatal(err)
	}
	defer ht.Close()

	// Check that the host's persistent variables have incorporated the first
	// few blocks.
	if ht.host.recentChange == (modules.ConsensusChangeID{}) || ht.host.blockHeight == 0 {
		t.Fatal("host variables do not indicate that the host is tracking the consensus set correctly")
	}
	oldChange := ht.host.recentChange
	oldHeight := ht.host.blockHeight

	// Corrupt the variables, then close the host.
	ht.host.recentChange[0]++
	ht.host.blockHeight += 100e3
	err = ht.host.Close() // host saves upon closing
	if err != nil {
		t.Fatal(err)
	}

	// Create a new host and check that the persist variables have correctly
	// reset.
	h, err := New(ht.cs, ht.tpool, ht.wallet, "localhost:0", filepath.Join(ht.persistDir, modules.HostDir))
	if err != nil {
		t.Fatal(err)
	}
	if oldChange != h.recentChange || oldHeight != h.blockHeight {
		t.Error("consensus tracking variables were not reset correctly after rescan")
	}

	// Set ht.host to 'h' so that the 'ht.Close()' method will close everything
	// cleanly.
	ht.host = h
}
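// Editorial sketch (not part of the Sia source): the test above commits to
// file data by placing crypto.MerkleRoot(data) into a revision's
// NewFileMerkleRoot. A minimal stand-alone illustration of that commitment
// check follows; the variable names and the 777-byte size are assumptions
// for illustration only.
package main

import (
	"fmt"

	"github.com/NebulousLabs/Sia/crypto"
	"github.com/NebulousLabs/Sia/types"
	"github.com/NebulousLabs/fastrand"
)

func main() {
	data := fastrand.Bytes(777)
	rev := types.FileContractRevision{
		NewFileSize:       777,
		NewFileMerkleRoot: crypto.MerkleRoot(data),
		NewRevisionNumber: 1,
	}
	// A verifier that holds the data can recompute the root and compare it
	// against the root the revision committed to.
	if rev.NewFileMerkleRoot == crypto.MerkleRoot(data) {
		fmt.Println("revision commits to this data")
	}
}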
Sia-1.3.0/modules/host/upnp.go000066400000000000000000000122301313565667000161770ustar00rootroot00000000000000
package host

import (
	"errors"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/modules"

	"github.com/NebulousLabs/go-upnp"
)

// managedLearnHostname discovers the external IP of the Host. If the host's
// net address is blank and the host's auto address appears to have changed,
// the host will make an announcement on the blockchain.
func (h *Host) managedLearnHostname() {
	if build.Release == "testing" {
		return
	}

	// Fetch a group of host vars that will be used to dictate the logic of the
	// function.
	h.mu.RLock()
	netAddr := h.settings.NetAddress
	hostPort := h.port
	hostAutoAddress := h.autoAddress
	hostAnnounced := h.announced
	hostAcceptingContracts := h.settings.AcceptingContracts
	hostContractCount := h.financialMetrics.ContractCount
	h.mu.RUnlock()

	// If the settings indicate that an address has been manually set, there is
	// no reason to learn the hostname.
	if netAddr != "" {
		return
	}
	h.log.Println("No manually set net address. Scanning to automatically determine address.")

	// try UPnP first, then fallback to myexternalip.com
	var hostname string
	d, err := upnp.Discover()
	if err == nil {
		hostname, err = d.ExternalIP()
	}
	if err != nil {
		hostname, err = myExternalIP()
	}
	if err != nil {
		h.log.Println("WARN: failed to discover external IP")
		return
	}

	autoAddress := modules.NetAddress(net.JoinHostPort(hostname, hostPort))
	if err := autoAddress.IsValid(); err != nil {
		h.log.Printf("WARN: discovered hostname %q is invalid: %v", autoAddress, err)
		return
	}
	if autoAddress == hostAutoAddress && hostAnnounced {
		// Nothing to do - the auto address has not changed and the previous
		// announcement was successful.
		return
	}

	h.mu.Lock()
	h.autoAddress = autoAddress
	err = h.saveSync()
	h.mu.Unlock()
	if err != nil {
		h.log.Println(err)
	}

	// Announce the host, but only if the host is either accepting contracts or
	// has a storage obligation. If the host is not accepting contracts and has
	// no open contracts, there is no reason to notify anyone that the host's
	// address has changed.
	if hostAcceptingContracts || hostContractCount > 0 {
		h.log.Println("Host external IP address changed from", hostAutoAddress, "to", autoAddress, "- performing host announcement.")
		err = h.managedAnnounce(autoAddress)
		if err != nil {
			// Set h.announced to false, as the address has changed yet the
			// renewed announcement has failed.
			h.mu.Lock()
			h.announced = false
			h.mu.Unlock()
			h.log.Println("unable to announce address after upnp-detected address change:", err)
		}
	}
}

// managedForwardPort adds a port mapping to the router.
func (h *Host) managedForwardPort(port string) error {
	if build.Release == "testing" {
		// Add a blocking placeholder where testing is able to mock behaviors
		// such as a port forward action that blocks for 10 seconds before
		// completing.
		if h.dependencies.disrupt("managedForwardPort") {
			return nil
		}

		// Port forwarding functions are frequently unavailable during testing,
		// and the long blocking can be highly disruptive. Under normal
		// scenarios, return without complaint, and without running the
		// port-forward logic.
		return nil
	}

	// If the port is invalid, there is no need to perform any of the other
	// tasks.
	portInt, err := strconv.Atoi(port)
	if err != nil {
		return err
	}

	d, err := upnp.Discover()
	if err != nil {
		h.log.Printf("WARN: could not automatically forward port %s: %v", port, err)
		return err
	}
	err = d.Forward(uint16(portInt), "Sia Host")
	if err != nil {
		h.log.Printf("WARN: could not automatically forward port %s: %v", port, err)
		return err
	}
	h.log.Println("INFO: successfully forwarded port", port)
	return nil
}

// managedClearPort removes a port mapping from the router.
func (h *Host) managedClearPort() error {
	if build.Release == "testing" {
		// Allow testing to force an error to be returned here.
		if h.dependencies.disrupt("managedClearPort return error") {
			return errors.New("Mocked managedClearPortErr")
		}
		return nil
	}

	// If the port is invalid, there is no need to perform any of the other
	// tasks.
	h.mu.RLock()
	port := h.port
	h.mu.RUnlock()
	portInt, err := strconv.Atoi(port)
	if err != nil {
		return err
	}

	d, err := upnp.Discover()
	if err != nil {
		return err
	}
	err = d.Clear(uint16(portInt))
	if err != nil {
		return err
	}
	h.log.Println("INFO: successfully unforwarded port", port)
	return nil
}
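// Editorial note (not in the original source): managedForwardPort and
// managedClearPort are symmetric wrappers around the go-upnp library -
// d.Forward(port, name) maps a port on the gateway and d.Clear(port)
// removes the mapping. A hypothetical caller pairing them for the host's
// own port might look like the following (assuming h.port holds the
// listening port):
//
//	if err := h.managedForwardPort(h.port); err == nil {
//		defer h.managedClearPort()
//	}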
// myExternalIP discovers the host's external IP by querying a centralized
// service, http://myexternalip.com.
func myExternalIP() (string, error) {
	// timeout after 10 seconds
	client := http.Client{Timeout: time.Duration(10 * time.Second)}
	resp, err := client.Get("http://myexternalip.com/raw")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		errResp, _ := ioutil.ReadAll(resp.Body)
		return "", errors.New(string(errResp))
	}
	buf, err := ioutil.ReadAll(io.LimitReader(resp.Body, 64))
	if err != nil {
		return "", err
	}
	if len(buf) == 0 {
		return "", errors.New("myexternalip.com returned a 0 length IP address")
	}
	// trim newline
	return strings.TrimSpace(string(buf)), nil
}
Sia-1.3.0/modules/host_test.go000066400000000000000000000015011313565667000162530ustar00rootroot00000000000000
package modules

import (
	"testing"
)

// TestUnitMaxFileContractSetLenSanity checks that a sensible value for
// MaxFileContractSetLen has been chosen.
func TestUnitMaxFileContractSetLenSanity(t *testing.T) {
	t.Parallel()

	// It does not make sense for the contract set limit to be higher than the
	// IsStandard limit in the transaction pool. Such a transaction set would
	// never be accepted by the transaction pool, and therefore is going to
	// cause a failure later on in the host process. An extra 1kb is left
	// because the file contract transaction is going to grow as the terms are
	// negotiated and as signatures are added.
	if NegotiateMaxFileContractSetLen > TransactionSetSizeLimit-1e3 {
		t.Fatal("MaxfileContractSetLen does not have a sensible value - should be smaller than the TransactionSetSizeLimit")
	}
}
Sia-1.3.0/modules/miner.go000066400000000000000000000051011313565667000153510ustar00rootroot00000000000000
package modules

import (
	"io"

	"github.com/NebulousLabs/Sia/types"
)

const (
	// MinerDir is the name of the directory that is used to store the miner's
	// persistent data.
	MinerDir = "miner"
)

// BlockManager contains functions that can interface with external miners,
// providing and receiving blocks that have experienced nonce grinding.
type BlockManager interface {
	// HeaderForWork returns a block header that can be grinded on and
	// resubmitted to the miner. HeaderForWork() will remember the block that
	// corresponds to the header for 50 calls.
	HeaderForWork() (types.BlockHeader, types.Target, error)

	// SubmitHeader takes a block header that has been worked on and has a
	// valid target.
	SubmitHeader(types.BlockHeader) error

	// BlocksMined returns the number of blocks and stale blocks that have been
	// mined using this miner.
	BlocksMined() (goodBlocks, staleBlocks int)
}

// CPUMiner provides access to a single-threaded cpu miner.
type CPUMiner interface {
	// CPUHashrate returns the hashrate of the cpu miner in hashes per second.
	CPUHashrate() int

	// Mining returns true if the cpu miner is enabled, and false otherwise.
	CPUMining() bool

	// StartMining turns on the miner, which will endlessly work for new
	// blocks.
	StartCPUMining()

	// StopMining turns off the miner, but keeps the same number of threads.
	StopCPUMining()
}

// TestMiner provides direct access to block fetching, solving, and
// manipulation. The primary use of this interface is integration testing.
type TestMiner interface {
	// AddBlock is an extension of FindBlock - AddBlock will submit the block
	// after finding it.
	AddBlock() (types.Block, error)

	// BlockForWork returns a block that is ready for nonce grinding. All
	// blocks returned by BlockForWork have a unique Merkle root, meaning that
	// each can safely start from nonce 0.
	BlockForWork() (types.Block, types.Target, error)

	// Close is necessary for clean shutdown during testing.
	Close() error

	// FindBlock will have the miner make 1 attempt to find a solved block that
	// builds on the current consensus set. It will give up after a few
	// seconds, returning the block and an error indicating whether a solved
	// block was found.
	FindBlock() (types.Block, error)

	// SolveBlock will have the miner make 1 attempt to solve the input block,
	// which amounts to trying a few thousand different nonces. SolveBlock is
	// primarily used for testing.
	SolveBlock(types.Block, types.Target) (types.Block, bool)
}

// The Miner interface provides access to mining features.
type Miner interface {
	BlockManager
	CPUMiner
	io.Closer
}
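// Editorial sketch (not part of the Sia source): how an external miner
// might drive the BlockManager interface defined above - request a header,
// grind the nonce until the header hash is at or below the target, then
// resubmit. The grinding loop mirrors the solveHeader helper in
// modules/miner/blockmanager_test.go; the package name and the mineOnce
// wrapper are assumptions for illustration only.
package minersketch

import (
	"bytes"
	"errors"

	"github.com/NebulousLabs/Sia/crypto"
	"github.com/NebulousLabs/Sia/modules"
)

// mineOnce solves and submits a single header. Only the first nonce byte is
// iterated, so it gives up after 256 attempts in the worst case - enough for
// easy testing targets, not for mainnet difficulty.
func mineOnce(bm modules.BlockManager) error {
	header, target, err := bm.HeaderForWork()
	if err != nil {
		return err
	}
	for i := 0; i < 256; i++ {
		header.Nonce[0]++
		id := crypto.HashObject(header)
		if bytes.Compare(target[:], id[:]) >= 0 {
			// The header hash is at or below the target; submit it.
			return bm.SubmitHeader(header)
		}
	}
	return errors.New("no solution in the sampled nonce space")
}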
Sia-1.3.0/modules/miner/000077500000000000000000000000001313565667000150255ustar00rootroot00000000000000
Sia-1.3.0/modules/miner/blockmanager.go000066400000000000000000000152261313565667000200070ustar00rootroot00000000000000
package miner

import (
	"errors"
	"time"

	"github.com/NebulousLabs/Sia/crypto"
	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/types"
	"github.com/NebulousLabs/fastrand"
)

var (
	errLateHeader = errors.New("header is old, block could not be recovered")
)

// blockForWork returns a block that is ready for nonce grinding, including
// correct miner payouts and a random transaction to prevent collisions and
// overlapping work with other blocks being mined in parallel or for different
// forks (during testing).
func (m *Miner) blockForWork() types.Block {
	b := m.persist.UnsolvedBlock

	// Update the timestamp.
	if b.Timestamp < types.CurrentTimestamp() {
		b.Timestamp = types.CurrentTimestamp()
	}

	// Update the address + payouts.
	err := m.checkAddress()
	if err != nil {
		m.log.Println(err)
	}
	b.MinerPayouts = []types.SiacoinOutput{{
		Value:      b.CalculateSubsidy(m.persist.Height + 1),
		UnlockHash: m.persist.Address,
	}}

	// Add an arb-data txn to the block to create a unique merkle root.
	randBytes := fastrand.Bytes(types.SpecifierLen)
	randTxn := types.Transaction{
		ArbitraryData: [][]byte{append(modules.PrefixNonSia[:], randBytes...)},
	}
	b.Transactions = append([]types.Transaction{randTxn}, b.Transactions...)

	return b
}

// newSourceBlock creates a new source block for the block manager so that new
// headers will use the updated source block.
func (m *Miner) newSourceBlock() {
	// To guarantee garbage collection of old blocks, delete all header entries
	// that have not been reached for the current block.
	for m.memProgress%(HeaderMemory/BlockMemory) != 0 {
		delete(m.blockMem, m.headerMem[m.memProgress])
		delete(m.arbDataMem, m.headerMem[m.memProgress])
		m.memProgress++
		if m.memProgress == HeaderMemory {
			m.memProgress = 0
		}
	}

	// Update the source block.
	block := m.blockForWork()
	m.sourceBlock = &block
	m.sourceBlockTime = time.Now()
}

// HeaderForWork returns a header that is ready for nonce grinding. The miner
// will store the header in memory for a while, depending on the constants
// 'HeaderMemory', 'BlockMemory', and 'MaxSourceBlockAge'. On the full network,
// it is typically safe to assume that headers will be remembered for
// min(10 minutes, 10e3 requests).
func (m *Miner) HeaderForWork() (types.BlockHeader, types.Target, error) {
	if err := m.tg.Add(); err != nil {
		return types.BlockHeader{}, types.Target{}, err
	}
	defer m.tg.Done()
	m.mu.Lock()
	defer m.mu.Unlock()

	// Return a blank header with an error if the wallet is locked.
	if !m.wallet.Unlocked() {
		return types.BlockHeader{}, types.Target{}, modules.ErrLockedWallet
	}

	// Check that the wallet has been initialized, and that the miner has
	// successfully fetched an address.
	err := m.checkAddress()
	if err != nil {
		return types.BlockHeader{}, types.Target{}, err
	}

	// If too much time has elapsed since the last source block, get a new one.
	// This typically only happens if the miner has just turned on after being
	// off for a while. If the current block has been used for too many
	// requests, fetch a new source block.
	if time.Since(m.sourceBlockTime) > MaxSourceBlockAge || m.memProgress%(HeaderMemory/BlockMemory) == 0 {
		m.newSourceBlock()
	}

	// Create a header from the source block - this may be a race condition,
	// but I don't think so (underlying slice may be shared with other blocks
	// accessible outside the miner).
	var arbData [crypto.EntropySize]byte
	fastrand.Read(arbData[:])
	copy(m.sourceBlock.Transactions[0].ArbitraryData[0], arbData[:])
	header := m.sourceBlock.Header()

	// Save the mapping from the header to its block and from the header to its
	// arbitrary data, replacing whatever header already exists.
	delete(m.blockMem, m.headerMem[m.memProgress])
	delete(m.arbDataMem, m.headerMem[m.memProgress])
	m.blockMem[header] = m.sourceBlock
	m.arbDataMem[header] = arbData
	m.headerMem[m.memProgress] = header
	m.memProgress++
	if m.memProgress == HeaderMemory {
		m.memProgress = 0
	}

	// Return the header and target.
	return header, m.persist.Target, nil
}

// managedSubmitBlock takes a solved block and submits it to the blockchain.
func (m *Miner) managedSubmitBlock(b types.Block) error {
	// Give the block to the consensus set.
	err := m.cs.AcceptBlock(b)

	// Add the block to the miner's list of found blocks if the only problem is
	// that it's stale.
	if err == modules.ErrNonExtendingBlock {
		m.mu.Lock()
		m.persist.BlocksFound = append(m.persist.BlocksFound, b.ID())
		m.mu.Unlock()
		m.log.Println("Mined a stale block - block appears valid but does not extend the blockchain")
		return err
	}
	if err == modules.ErrBlockUnsolved {
		m.log.Println("Mined an unsolved block - header submission appears to be incorrect")
		return err
	}
	if err != nil {
		m.tpool.PurgeTransactionPool()
		m.log.Critical("ERROR: an invalid block was submitted:", err)
		return err
	}
	m.mu.Lock()
	defer m.mu.Unlock()

	// Grab a new address for the miner. Call may fail if the wallet is locked
	// or if the wallet addresses have been exhausted.
	m.persist.BlocksFound = append(m.persist.BlocksFound, b.ID())
	var uc types.UnlockConditions
	uc, err = m.wallet.NextAddress()
	if err != nil {
		return err
	}
	m.persist.Address = uc.UnlockHash()
	return m.saveSync()
}

// SubmitHeader accepts a block header.
func (m *Miner) SubmitHeader(bh types.BlockHeader) error {
	if err := m.tg.Add(); err != nil {
		return err
	}
	defer m.tg.Done()

	// Because a call to managedSubmitBlock is required at the end of this
	// function, the first part needs to be wrapped in an anonymous function
	// for lock safety.
	var b types.Block
	err := func() error {
		m.mu.Lock()
		defer m.mu.Unlock()

		// Lookup the block that corresponds to the provided header.
		nonce := bh.Nonce
		bh.Nonce = [8]byte{}
		bPointer, bExists := m.blockMem[bh]
		arbData, arbExists := m.arbDataMem[bh]
		if !bExists || !arbExists {
			return errLateHeader
		}

		// Block is going to be passed to external memory, but the memory pointed
		// to by the transactions slice is still being modified - needs to be
		// copied. Same with the memory being pointed to by the arb data slice.
		b = *bPointer
		txns := make([]types.Transaction, len(b.Transactions))
		copy(txns, b.Transactions)
		b.Transactions = txns
		b.Transactions[0].ArbitraryData = [][]byte{arbData[:]}
		b.Nonce = nonce

		// Sanity check - block should have same id as header.
		bh.Nonce = nonce
		if types.BlockID(crypto.HashObject(bh)) != b.ID() {
			m.log.Critical("block reconstruction failed")
		}
		return nil
	}()
	if err != nil {
		m.log.Println("ERROR during call to SubmitHeader, pre SubmitBlock:", err)
		return err
	}
	err = m.managedSubmitBlock(b)
	if err != nil {
		m.log.Println("ERROR returned by managedSubmitBlock:", err)
		return err
	}
	return nil
}
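// Editorial sketch (not part of the Sia source): the header bookkeeping in
// blockmanager.go above is a fixed-size ring buffer - memProgress walks from
// 0 to HeaderMemory-1 and wraps, and a fresh source block is cut every
// HeaderMemory/BlockMemory headers, so at most BlockMemory source blocks are
// ever live and the oldest stored headers are silently forgotten. A
// stripped-down model of that arithmetic with small assumed constants:
package main

import "fmt"

const (
	headerMemory = 8 // assumed stand-in for HeaderMemory
	blockMemory  = 2 // assumed stand-in for BlockMemory
)

func main() {
	memProgress := 0
	for request := 0; request < 20; request++ {
		if memProgress%(headerMemory/blockMemory) == 0 {
			fmt.Println("new source block before request", request)
		}
		memProgress++
		if memProgress == headerMemory {
			memProgress = 0 // wrap: the oldest stored headers get overwritten
		}
	}
}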
solvedHeaders := make([]types.BlockHeader, HeaderMemory/BlockMemory*2) for i := range solvedHeaders { header, target, err := mt.miner.HeaderForWork() if err != nil { t.Fatal(err) } solvedHeaders[i] = solveHeader(header, target) } // Submit the headers randomly and make sure they are all considered valid. for _, selection := range fastrand.Perm(len(solvedHeaders)) { err = mt.miner.SubmitHeader(solvedHeaders[selection]) if err != nil && err != modules.ErrNonExtendingBlock { t.Error(err) } } } // TestIntegrationHeaderBlockOverflow triggers a header overflow by requesting // a block that triggers the overflow. func TestIntegrationHeaderBlockOverflow(t *testing.T) { if testing.Short() { t.SkipNow() } mt, err := createMinerTester(t.Name()) if err != nil { t.Fatal(err) } // Grab a header that will be overwritten. header, target, err := mt.miner.HeaderForWork() if err != nil { t.Fatal(err) } header = solveHeader(header, target) // Mine blocks to wrap the memProgress around and wipe the old header. for i := 0; i < BlockMemory; i++ { _, err = mt.miner.AddBlock() if err != nil { t.Fatal(err) } // Grab a header to advance the mempool progress. _, _, err = mt.miner.HeaderForWork() if err != nil { t.Fatal(err) } } // Previous header should no longer be in memory. err = mt.miner.SubmitHeader(header) if err != errLateHeader { t.Error(err) } } // TestIntegrationHeaderRequestOverflow triggers a header overflow by // requesting a header that triggers overflow. func TestIntegrationHeaderRequestOverflow(t *testing.T) { if testing.Short() { t.SkipNow() } mt, err := createMinerTester(t.Name()) if err != nil { t.Fatal(err) } // Grab a header that will be overwritten. header, target, err := mt.miner.HeaderForWork() if err != nil { t.Fatal(err) } header = solveHeader(header, target) // Mine blocks to bring memProgress up to the edge. The number is chosen // specifically so that the overflow happens during the requesting of 200 // headers. for i := 0; i < BlockMemory-1; i++ { _, err = mt.miner.AddBlock() if err != nil { t.Fatal(err) } // Grab a header to advance the mempool progress. _, _, err = mt.miner.HeaderForWork() if err != nil { t.Fatal(err) } } // Header should still be in memory. err = mt.miner.SubmitHeader(header) if err != modules.ErrNonExtendingBlock { t.Error(err) } // Request headers until the overflow is achieved. for i := 0; i < HeaderMemory/BlockMemory; i++ { _, _, err = mt.miner.HeaderForWork() if err != nil { t.Fatal(err) } } err = mt.miner.SubmitHeader(header) if err != errLateHeader { t.Error(err) } } Sia-1.3.0/modules/miner/cpuminer.go000066400000000000000000000053601313565667000172020ustar00rootroot00000000000000package miner import ( "time" "github.com/NebulousLabs/Sia/build" ) // threadedMine starts a gothread that does CPU mining. threadedMine is the // only function that should be setting the mining flag to true. func (m *Miner) threadedMine() { if err := m.tg.Add(); err != nil { return } defer m.tg.Done() // There should not be another thread mining, and mining should be enabled. m.mu.Lock() if m.mining || !m.miningOn { m.mu.Unlock() return } m.mining = true m.mu.Unlock() // Solve blocks repeatedly, keeping track of how fast hashing is // occurring. cycleStart := time.Now() for { m.mu.Lock() // Kill the thread if 'Stop' has been called. select { case <-m.tg.StopChan(): m.miningOn = false m.mining = false m.mu.Unlock() return default: } // Kill the thread if mining has been turned off. if !m.miningOn { m.mining = false m.mu.Unlock() return } // Prepare the work and release the miner lock. 
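// The snapshot below is a copy-under-lock sketch: the candidate block and
// target are captured while m.mu is held, and the expensive solveBlock call
// runs with the lock released, so that calls like HeaderForWork and
// SubmitHeader are not starved while the CPU miner grinds nonces.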
bfw := m.blockForWork() target := m.persist.Target m.mu.Unlock() // Solve the block. b, solved := solveBlock(bfw, target) if solved { err := m.managedSubmitBlock(b) if err != nil { m.log.Println("ERROR: An error occurred while cpu mining:", err) } } // Update the hashrate. If the block was solved, the full set of // iterations was not completed, so the hashrate should not be updated. m.mu.Lock() if !solved { nanosecondsElapsed := 1 + time.Since(cycleStart).Nanoseconds() // Add 1 to prevent divide by zero errors. cycleStart = time.Now() // Reset the cycle counter as soon as the previous value is measured. m.hashRate = 1e9 * solveAttempts / nanosecondsElapsed } m.mu.Unlock() } } // CPUHashrate returns an estimated cpu hashrate. func (m *Miner) CPUHashrate() int { if err := m.tg.Add(); err != nil { build.Critical(err) } defer m.tg.Done() m.mu.Lock() defer m.mu.Unlock() return int(m.hashRate) } // CPUMining indicates whether the cpu miner is running. func (m *Miner) CPUMining() bool { if err := m.tg.Add(); err != nil { build.Critical(err) } defer m.tg.Done() m.mu.Lock() defer m.mu.Unlock() return m.miningOn } // StartCPUMining will start a single threaded cpu miner. If the miner is // already running, nothing will happen. func (m *Miner) StartCPUMining() { if err := m.tg.Add(); err != nil { build.Critical(err) } defer m.tg.Done() m.mu.Lock() defer m.mu.Unlock() m.miningOn = true go m.threadedMine() } // StopCPUMining will stop the cpu miner. If the cpu miner is already stopped, // nothing will happen. func (m *Miner) StopCPUMining() { if err := m.tg.Add(); err != nil { build.Critical(err) } defer m.tg.Done() m.mu.Lock() defer m.mu.Unlock() m.hashRate = 0 m.miningOn = false } Sia-1.3.0/modules/miner/miner.go000066400000000000000000000211251313565667000164670ustar00rootroot00000000000000package miner import ( "errors" "fmt" "sync" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" siasync "github.com/NebulousLabs/Sia/sync" "github.com/NebulousLabs/Sia/types" ) var ( errNilCS = errors.New("miner cannot use a nil consensus set") errNilTpool = errors.New("miner cannot use a nil transaction pool") errNilWallet = errors.New("miner cannot use a nil wallet") // HeaderMemory is the number of previous calls to 'header' // that are remembered. Additionally, 'header' will only poll for a // new block every 'HeaderMemory / BlockMemory' times it is // called. This reduces the amount of memory used, but comes at the cost of // not always having the most recent transactions. HeaderMemory = build.Select(build.Var{ Standard: 10000, Dev: 500, Testing: 50, }).(int) // BlockMemory is the maximum number of blocks the miner will store. // Blocks take up to 2 megabytes of memory, which is why this number is // limited. BlockMemory = build.Select(build.Var{ Standard: 50, Dev: 10, Testing: 5, }).(int) // MaxSourceBlockAge is the maximum amount of time that is allowed to // elapse between generating source blocks. MaxSourceBlockAge = build.Select(build.Var{ Standard: 30 * time.Second, Dev: 5 * time.Second, Testing: 1 * time.Second, }).(time.Duration) ) // splitSet defines a transaction set that can be added component-wise to a // block. It's split because it doesn't necessarily represent the full set // provided by the transaction pool. Splits can be sorted so that the largest // and most valuable sets can be selected when picking transactions.
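// A minimal sketch of how a split set's weight is derived; the helper name
// makeSplitSet is hypothetical, but the arithmetic mirrors getNewSplitSets
// in update.go (total fees divided by encoded size, i.e. fee per byte):
//
//	func makeSplitSet(txns []types.Transaction, size uint64) *splitSet {
//		var totalFees types.Currency
//		for _, txn := range txns {
//			for _, fee := range txn.MinerFees {
//				totalFees = totalFees.Add(fee)
//			}
//		}
//		return &splitSet{
//			averageFee:   totalFees.Div64(size), // fee per byte
//			size:         size,
//			transactions: txns,
//		}
//	}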
type splitSet struct { averageFee types.Currency size uint64 transactions []types.Transaction } type splitSetID int // Miner struct contains all variables the miner needs // in order to create and submit blocks. type Miner struct { // Module dependencies. cs modules.ConsensusSet tpool modules.TransactionPool wallet modules.Wallet // BlockManager variables. Because blocks are large, one block is used to // make many headers which can be used by miners. Headers include an // arbitrary data transaction (appended to the block) to make the merkle // roots unique (preventing miners from doing redundant work). Every N // requests or M seconds, a new block is used to create headers. // // Only 'BlockMemory' blocks are kept in memory at a time, which // keeps RAM usage reasonable. Miners may request many headers in parallel, // and thus may be working on different blocks. When they submit the solved // header to the block manager, the rest of the block needs to be found in // a lookup. blockMem map[types.BlockHeader]*types.Block // Mappings from headers to the blocks they are derived from. arbDataMem map[types.BlockHeader][crypto.EntropySize]byte // Mappings from the headers to their unique arb data. headerMem []types.BlockHeader // A circular list of headers that have been given out from the API recently. sourceBlock *types.Block // The block from which new headers for mining are created. sourceBlockTime time.Time // The time at which the current source block was created. memProgress int // The index of the most recent header used in headerMem. // Transaction pool variables. fullSets map[modules.TransactionSetID][]int blockMapHeap *mapHeap overflowMapHeap *mapHeap setCounter int splitSets map[splitSetID]*splitSet // CPUMiner variables. miningOn bool // indicates if the miner is supposed to be running mining bool // indicates if the miner is actually running hashRate int64 // indicates hashes per second // Utils log *persist.Logger mu sync.RWMutex persist persistence persistDir string // tg signals the Miner's goroutines to shut down and blocks until all // goroutines have exited before returning from Close(). tg siasync.ThreadGroup } // startupRescan will rescan the blockchain in the event that the miner // persistence layer has become desynchronized from the consensus persistence // layer. This might happen if a user replaces any of the folders with backups // or deletes any of the folders. func (m *Miner) startupRescan() error { // Reset all of the variables that have relevance to the consensus set. The // operations are wrapped by an anonymous function so that the locking can // be handled using a defer statement. err := func() error { m.mu.Lock() defer m.mu.Unlock() m.log.Println("Performing a miner rescan.") m.persist.RecentChange = modules.ConsensusChangeBeginning m.persist.Height = 0 m.persist.Target = types.Target{} return m.saveSync() }() if err != nil { return err } // Subscribe to the consensus set. This is a blocking call that will not // return until the miner has fully caught up to the current block. err = m.cs.ConsensusSetSubscribe(m, modules.ConsensusChangeBeginning) if err != nil { return err } m.tg.OnStop(func() { m.cs.Unsubscribe(m) }) return nil } // New returns a ready-to-go miner that is not mining. func New(cs modules.ConsensusSet, tpool modules.TransactionPool, w modules.Wallet, persistDir string) (*Miner, error) { // Create the miner and its dependencies.
if cs == nil { return nil, errNilCS } if tpool == nil { return nil, errNilTpool } if w == nil { return nil, errNilWallet } // Assemble the miner. The miner is assembled without an address because // the wallet is likely not unlocked yet. The miner will grab an address // after the miner is unlocked (this must be coded manually for each // function that potentially requires the miner to have an address). m := &Miner{ cs: cs, tpool: tpool, wallet: w, blockMem: make(map[types.BlockHeader]*types.Block), arbDataMem: make(map[types.BlockHeader][crypto.EntropySize]byte), headerMem: make([]types.BlockHeader, HeaderMemory), fullSets: make(map[modules.TransactionSetID][]int), splitSets: make(map[splitSetID]*splitSet), blockMapHeap: &mapHeap{ selectID: make(map[splitSetID]*mapElement), data: nil, minHeap: true, }, overflowMapHeap: &mapHeap{ selectID: make(map[splitSetID]*mapElement), data: nil, minHeap: false, }, persistDir: persistDir, } err := m.initPersist() if err != nil { return nil, errors.New("miner persistence startup failed: " + err.Error()) } err = m.cs.ConsensusSetSubscribe(m, m.persist.RecentChange) if err == modules.ErrInvalidConsensusChangeID { // Perform a rescan of the consensus set if the change id is not found. // The id will only be not found if there has been desynchronization // between the miner and the consensus package. err = m.startupRescan() if err != nil { return nil, errors.New("miner startup failed - rescanning failed: " + err.Error()) } } else if err != nil { return nil, errors.New("miner subscription failed: " + err.Error()) } m.tg.OnStop(func() { m.cs.Unsubscribe(m) }) m.tpool.TransactionPoolSubscribe(m) m.tg.OnStop(func() { m.tpool.Unsubscribe(m) }) // Save after synchronizing with consensus. err = m.saveSync() if err != nil { return nil, errors.New("miner could not save during startup: " + err.Error()) } return m, nil } // Close terminates all ongoing processes involving the miner, enabling garbage // collection. func (m *Miner) Close() error { if err := m.tg.Stop(); err != nil { return err } m.mu.Lock() defer m.mu.Unlock() m.cs.Unsubscribe(m) var errs []error if err := m.saveSync(); err != nil { errs = append(errs, fmt.Errorf("save failed: %v", err)) } if err := m.log.Close(); err != nil { errs = append(errs, fmt.Errorf("log.Close failed: %v", err)) } return build.JoinErrors(errs, "; ") } // checkAddress checks that the miner has an address, fetching an address from // the wallet if not. func (m *Miner) checkAddress() error { if m.persist.Address != (types.UnlockHash{}) { return nil } uc, err := m.wallet.NextAddress() if err != nil { return err } m.persist.Address = uc.UnlockHash() return nil } // BlocksMined returns the number of good blocks and stale blocks that have // been mined by the miner.
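// "Good" means the block is still in the heaviest known chain (checked via
// cs.InCurrentPath); a block that was valid when mined but has since been
// reorged away counts as stale. A usage sketch:
//
//	good, stale := m.BlocksMined()
//	fmt.Printf("%d blocks in the current path, %d stale\n", good, stale)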
func (m *Miner) BlocksMined() (goodBlocks, staleBlocks int) { if err := m.tg.Add(); err != nil { build.Critical(err) } defer m.tg.Done() m.mu.Lock() defer m.mu.Unlock() for _, blockID := range m.persist.BlocksFound { if m.cs.InCurrentPath(blockID) { goodBlocks++ } else { staleBlocks++ } } return } Sia-1.3.0/modules/miner/miner_test.go000066400000000000000000000216561313565667000175370ustar00rootroot00000000000000package miner import ( "bytes" "path/filepath" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/consensus" "github.com/NebulousLabs/Sia/modules/gateway" "github.com/NebulousLabs/Sia/modules/transactionpool" "github.com/NebulousLabs/Sia/modules/wallet" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) // A minerTester is the helper object for miner testing. type minerTester struct { gateway modules.Gateway cs modules.ConsensusSet tpool modules.TransactionPool wallet modules.Wallet walletKey crypto.TwofishKey miner *Miner minedBlocks []types.Block persistDir string } // createMinerTester creates a minerTester that's ready for use. func createMinerTester(name string) (*minerTester, error) { testdir := build.TempDir(modules.MinerDir, name) // Create the modules. g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { return nil, err } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { return nil, err } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { return nil, err } w, err := wallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { return nil, err } var key crypto.TwofishKey fastrand.Read(key[:]) _, err = w.Encrypt(key) if err != nil { return nil, err } err = w.Unlock(key) if err != nil { return nil, err } m, err := New(cs, tp, w, filepath.Join(testdir, modules.MinerDir)) if err != nil { return nil, err } // Assemble the minerTester. mt := &minerTester{ gateway: g, cs: cs, tpool: tp, wallet: w, walletKey: key, miner: m, persistDir: testdir, } // Mine until the wallet has money. for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ { b, err := m.AddBlock() if err != nil { return nil, err } mt.minedBlocks = append(mt.minedBlocks, b) } return mt, nil } // TestIntegrationMiner creates a miner, mines a few blocks, and checks that // the wallet balance is updating as the blocks get mined. func TestIntegrationMiner(t *testing.T) { if testing.Short() { t.SkipNow() } mt, err := createMinerTester(t.Name()) if err != nil { t.Fatal(err) } // Check that the wallet has money. siacoins, _, _ := mt.wallet.ConfirmedBalance() if siacoins.IsZero() { t.Error("expecting mining full balance to not be zero") } // Mine a bunch of blocks. for i := 0; i < 50; i++ { b, _ := mt.miner.FindBlock() err = mt.cs.AcceptBlock(b) if err != nil { t.Fatal(err) } } morecoins, _, _ := mt.wallet.ConfirmedBalance() if siacoins.Cmp(morecoins) >= 0 { t.Error("wallet is not gaining balance while mining") } } // TestIntegrationNilMinerDependencies tests that the miner properly handles // nil inputs for its dependencies. 
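// The expectation is one sentinel error per nil dependency: New checks cs,
// then tpool, then wallet before touching any persistence, so e.g.
// New(mt.cs, mt.tpool, nil, "") must fail fast with errNilWallet and leave
// nothing on disk.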
func TestIntegrationNilMinerDependencies(t *testing.T) { if testing.Short() { t.SkipNow() } mt, err := createMinerTester(t.Name()) if err != nil { t.Fatal(err) } _, err = New(mt.cs, mt.tpool, nil, "") if err != errNilWallet { t.Fatal(err) } _, err = New(mt.cs, nil, mt.wallet, "") if err != errNilTpool { t.Fatal(err) } _, err = New(nil, mt.tpool, mt.wallet, "") if err != errNilCS { t.Fatal(err) } _, err = New(nil, nil, nil, "") if err == nil { t.Fatal(err) } } // TestIntegrationBlocksMined checks that the BlocksMined function correctly // indicates the number of real blocks and stale blocks that have been mined. func TestIntegrationBlocksMined(t *testing.T) { if testing.Short() { t.SkipNow() } mt, err := createMinerTester(t.Name()) if err != nil { t.Fatal(err) } // Get an unsolved header. unsolvedHeader, target, err := mt.miner.HeaderForWork() if err != nil { t.Fatal(err) } // Unsolve the header - necessary because the target is very low when // mining. for { unsolvedHeader.Nonce[0]++ id := crypto.HashObject(unsolvedHeader) if bytes.Compare(target[:], id[:]) < 0 { break } } // Get two solved headers. header1, target, err := mt.miner.HeaderForWork() if err != nil { t.Fatal(err) } header1 = solveHeader(header1, target) header2, target, err := mt.miner.HeaderForWork() if err != nil { t.Fatal(err) } header2 = solveHeader(header2, target) // Submit the unsolved header followed by the two solved headers, this // should result in 1 real block mined and 1 stale block mined. err = mt.miner.SubmitHeader(unsolvedHeader) if err != modules.ErrBlockUnsolved { t.Fatal(err) } err = mt.miner.SubmitHeader(header1) if err != nil { t.Fatal(err) } err = mt.miner.SubmitHeader(header2) if err != modules.ErrNonExtendingBlock { t.Fatal(err) } goodBlocks, staleBlocks := mt.miner.BlocksMined() if goodBlocks != 1 { t.Error("expecting 1 good block") } if staleBlocks != 1 { t.Error("expecting 1 stale block, got", staleBlocks) } // Reboot the miner and verify that the block record has persisted. err = mt.miner.Close() if err != nil { t.Fatal(err) } rebootMiner, err := New(mt.cs, mt.tpool, mt.wallet, filepath.Join(mt.persistDir, modules.MinerDir)) if err != nil { t.Fatal(err) } goodBlocks, staleBlocks = rebootMiner.BlocksMined() if goodBlocks != 1 { t.Error("expecting 1 good block") } if staleBlocks != 1 { t.Error("expecting 1 stale block, got", staleBlocks) } } // TestIntegrationAutoRescan triggers a rescan during a call to New and // verifies that the rescanning happens correctly. The rescan is triggered by // a call to New, instead of getting called directly. func TestIntegrationAutoRescan(t *testing.T) { if testing.Short() { t.SkipNow() } mt, err := createMinerTester(t.Name()) if err != nil { t.Fatal(err) } _, err = mt.miner.AddBlock() if err != nil { t.Fatal(err) } // Get the persist data of the current miner. oldChange := mt.miner.persist.RecentChange oldHeight := mt.miner.persist.Height oldTarget := mt.miner.persist.Target // Corrupt the miner, close the miner, and make a new one from the same // directory. mt.miner.persist.RecentChange[0]++ mt.miner.persist.Height += 1e5 mt.miner.persist.Target[0]++ err = mt.miner.Close() // miner saves when it closes. if err != nil { t.Fatal(err) } // Verify that rescanning resolved the corruption in the miner. m, err := New(mt.cs, mt.tpool, mt.wallet, filepath.Join(mt.persistDir, modules.MinerDir)) if err != nil { t.Fatal(err) } // Check that after rescanning, the values have returned to the usual values. 
if m.persist.RecentChange != oldChange { t.Error("rescan failed, ended up on the wrong change") } if m.persist.Height != oldHeight { t.Error("rescan failed, ended up at the wrong height") } if m.persist.Target != oldTarget { t.Error("rescan failed, ended up at the wrong target") } } // TestIntegrationStartupRescan probes the startupRescan function, checking // that it works in the naive case. Rescan is called directly. func TestIntegrationStartupRescan(t *testing.T) { if testing.Short() { t.SkipNow() } mt, err := createMinerTester(t.Name()) if err != nil { t.Fatal(err) } // Check that the miner's persist variables have been initialized to the // first few blocks. if mt.miner.persist.RecentChange == (modules.ConsensusChangeID{}) || mt.miner.persist.Height == 0 || mt.miner.persist.Target == (types.Target{}) { t.Fatal("miner persist variables not initialized") } oldChange := mt.miner.persist.RecentChange oldHeight := mt.miner.persist.Height oldTarget := mt.miner.persist.Target // Corrupt the miner and verify that a rescan repairs the corruption. mt.miner.persist.RecentChange[0]++ mt.miner.persist.Height += 500 mt.miner.persist.Target[0]++ mt.cs.Unsubscribe(mt.miner) err = mt.miner.startupRescan() if err != nil { t.Fatal(err) } if mt.miner.persist.RecentChange != oldChange { t.Error("rescan failed, ended up on the wrong change") } if mt.miner.persist.Height != oldHeight { t.Error("rescan failed, ended up at the wrong height") } if mt.miner.persist.Target != oldTarget { t.Error("rescan failed, ended up at the wrong target") } } // TestMinerCloseDeadlock checks that the miner can cleanly close even if the // CPU miner is running. func TestMinerCloseDeadlock(t *testing.T) { mt, err := createMinerTester(t.Name()) if err != nil { t.Fatal(err) } // StartCPUMining calls `go threadedMine()`, which needs to access the miner // before Close() does in the next goroutine, otherwise m.tg.Add() fails // at the top of threadedMine() and threadedMine() exits (silently!). // I haven't seen this behavior since sticking Close() inside a goroutine, // but I'm not sure that's comfort enough. mt.miner.StartCPUMining() time.Sleep(time.Millisecond * 250) closed := make(chan struct{}) go func() { if err := mt.miner.Close(); err != nil { t.Fatal(err) } closed <- struct{}{} }() select { case <-closed: case <-time.After(5 * time.Second): t.Fatal("mt.miner.Close never completed") } } Sia-1.3.0/modules/miner/persist.go000066400000000000000000000042061313565667000170470ustar00rootroot00000000000000package miner import ( "os" "path/filepath" "time" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" ) const ( logFile = modules.MinerDir + ".log" settingsFile = modules.MinerDir + ".json" saveLoopPeriod = time.Minute * 2 ) var ( settingsMetadata = persist.Metadata{ Header: "Miner Settings", Version: "0.5.0", } ) type ( // persist contains all of the persistent miner data. persistence struct { RecentChange modules.ConsensusChangeID Height types.BlockHeight Target types.Target Address types.UnlockHash BlocksFound []types.BlockID UnsolvedBlock types.Block } ) // initSettings loads the settings file if it exists and creates it if it // doesn't. func (m *Miner) initSettings() error { filename := filepath.Join(m.persistDir, settingsFile) _, err := os.Stat(filename) if os.IsNotExist(err) { return m.saveSync() } else if err != nil { return err } return m.load() } // initPersist initializes the persistence of the miner. 
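// The initialization order below is deliberate: the persist directory must
// exist before the logger can create its file inside it, and only then can
// initSettings load (or create) the settings file in the same directory.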
func (m *Miner) initPersist() error { // Create the miner directory. err := os.MkdirAll(m.persistDir, 0700) if err != nil { return err } // Add a logger. m.log, err = persist.NewFileLogger(filepath.Join(m.persistDir, logFile)) if err != nil { return err } return m.initSettings() } // load loads the miner persistence from disk. func (m *Miner) load() error { return persist.LoadJSON(settingsMetadata, &m.persist, filepath.Join(m.persistDir, settingsFile)) } // saveSync saves the miner persistence to disk, and then syncs to disk. func (m *Miner) saveSync() error { return persist.SaveJSON(settingsMetadata, m.persist, filepath.Join(m.persistDir, settingsFile)) } // threadedSaveLoop periodically saves the miner persist. func (m *Miner) threadedSaveLoop() { for { select { case <-m.tg.StopChan(): return case <-time.After(saveLoopPeriod): } func() { err := m.tg.Add() if err != nil { return } defer m.tg.Done() m.mu.Lock() err = m.saveSync() m.mu.Unlock() if err != nil { m.log.Println("ERROR: Unable to save miner persist:", err) } }() } } Sia-1.3.0/modules/miner/splitsetheap.go000066400000000000000000000135431313565667000200670ustar00rootroot00000000000000package miner // mapElements are stored in a mapHeap. The index refers to the location of the // splitSet in the underlying slice used to represent the heap. type mapElement struct { set *splitSet id splitSetID index int } // mapHeap is a heap of splitSets (compared by averageFee). The minHeap bool // specifies whether it is a min-heap or max-heap. type mapHeap struct { selectID map[splitSetID]*mapElement data []*mapElement size uint64 minHeap bool } // up maintains the heap condition by checking if the element at index j is less // than its parent (as defined by less()). If so it swaps them, so that the // element at index j goes 'up' the heap. It continues until the heap condition // is satisfied again. func (mh *mapHeap) up(j int) { for { // i is the parent of element at index j. i := (j - 1) / 2 if i == j || !mh.less(j, i) { // Heap condition maintained. break } // Swap i and j, then continue. mh.swap(i, j) j = i } } // down maintains the heap condition by checking that the children of the // element at index i are less than the element at i (as defined by less()). If // so, it swaps them, and continues down the heap until the heap condition is // satisfied. func (mh *mapHeap) down(i0, n int) bool { i := i0 for { // j1 is the left child of the element at index i j1 := 2*i + 1 // Check that j1 is in the bounds of the heap (j1 < 0 after int overflow). if j1 >= n || j1 < 0 { break } //j is the left child of i. j := j1 // If the right child (j2) of the element at index i (the sibling of j), // is within the bounds of the heap and satisfies if j2 := j1 + 1; j2 < n && !mh.less(j1, j2) { j = j2 // = 2*i + 2 // right child } // If the heap condition is true here, the method can exit. if !mh.less(j, i) { break } // Swap with the child and continue down the heap. mh.swap(i, j) i = j } return i > i0 } // Len returns the number of items stored in the heap. func (mh mapHeap) len() int { return len(mh.data) } // less returns true if the mapElement at index i is less than the element at // index j if the mapHeap is a min-heap. If the mapHeap is a max-heap, it // returns true if the element at index i is greater. func (mh mapHeap) less(i, j int) bool { if mh.minHeap { return mh.data[i].set.averageFee.Cmp(mh.data[j].set.averageFee) == -1 } return mh.data[i].set.averageFee.Cmp(mh.data[j].set.averageFee) == 1 } // swap swaps the elements at indices i and j. 
It also mutates the mapElements // in the map of a mapHeap to reflect the change of indices. func (mh mapHeap) swap(i, j int) { // Swap in slice. mh.data[i], mh.data[j] = mh.data[j], mh.data[i] // Change values in slice to correct indices. Note that the same mapElement // pointer is in the map also, so we only have to mutate it in one place. mh.data[i].index = i mh.data[j].index = j } // push puts an element onto the heap and maintains the heap condition. func (mh *mapHeap) push(elem *mapElement) { // Get the number of items stored in the heap. n := len(mh.data) // Add elem to the bottom of the heap, and set the index to reflect that. elem.index = n mh.data = append(mh.data, elem) // Place the mapElement into the map with the correct splitSetID. mh.selectID[elem.id] = elem // Increment the mapHeap size by the size of the mapElement. mh.size += elem.set.size // Fix the heap condition by sifting up. mh.up(n) } // pop removes the top element from the heap (as defined by less()). pop will // panic if called on an empty heap. Use peek before pop to be safe. func (mh *mapHeap) pop() *mapElement { n := mh.len() - 1 // Move the element to be popped to the end, then fix the heap condition. mh.swap(0, n) mh.down(0, n) // Get the last element. elem := mh.data[n] // Shrink the data slice, and delete the mapElement from the map. mh.data = mh.data[0:n] delete(mh.selectID, elem.id) // Decrement the size of the mapHeap. mh.size -= elem.set.size return elem } // removeSetByID removes an element from the mapHeap using only the splitSetID. func (mh *mapHeap) removeSetByID(s splitSetID) *mapElement { // Get index into data at which the element is stored. i := mh.selectID[s].index // Remove it from the heap by index (remove mirrors the standard library's // container/heap implementation). return mh.remove(i) } // peek returns the element at the top of the heap without removing it. func (mh *mapHeap) peek() (*mapElement, bool) { if len(mh.data) == 0 { return nil, false } return mh.data[0], true } // A heap must be initialized before any of the heap operations can be used. // init is idempotent with respect to the heap conditions and may be called // whenever the heap conditions may have been invalidated. Its complexity is // O(n) where n = mh.len(). func (mh *mapHeap) init() { // Sifts down through the heap to achieve the heap condition. n := mh.len() for i := n/2 - 1; i >= 0; i-- { mh.down(i, n) } } // remove removes the element at index i from the heap. The complexity is // O(log(n)) where n = mh.len(). func (mh *mapHeap) remove(i int) *mapElement { n := mh.len() - 1 // If the element to be removed is not at the top of the heap, move it. Then // fix the heap condition. if n != i { mh.swap(i, n) mh.down(i, n) mh.up(i) } // Get the last element. elem := mh.data[n] // Shrink the data slice, and delete the mapElement from the map. mh.data = mh.data[0:n] delete(mh.selectID, elem.id) // Decrement the size of the mapHeap. mh.size -= elem.set.size return elem } // fix re-establishes the heap ordering after the element at index i has changed // its value. Changing the value of the element at index i and then calling fix // is equivalent to, but less expensive than, calling remove(i) followed by a // push of the new value. The complexity is O(log(n)) where n = mh.len(). func (mh *mapHeap) fix(i int) { // Check if the heap condition can be satisfied by sifting down. // If not, sift up too.
if !mh.down(i, mh.len()) { mh.up(i) } } Sia-1.3.0/modules/miner/splitsetheap_test.go000066400000000000000000000214611313565667000211240ustar00rootroot00000000000000package miner import ( "math/rand" "testing" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) // TestMapHeapSimple test max-heap and min-heap versions of the MapHeap on the // same sequence of pushes and pops. The pushes are done in increasing value of // averageFee (the value by which elements are compared). func TestMapHeapSimple(t *testing.T) { max := &mapHeap{ selectID: make(map[splitSetID]*mapElement), data: nil, size: 0, minHeap: false, } min := &mapHeap{ selectID: make(map[splitSetID]*mapElement), data: nil, size: 0, minHeap: true, } max.init() min.init() randSlice := fastrand.Perm(1000) for _, i := range randSlice { e1 := &mapElement{ set: &splitSet{ averageFee: types.SiacoinPrecision.Mul64(uint64(i)), size: uint64(10 * i), transactions: make([]types.Transaction, 0), }, id: splitSetID(i), index: 0, } e2 := &mapElement{ set: &splitSet{ averageFee: types.SiacoinPrecision.Mul64(uint64(i)), size: uint64(10 * i), transactions: make([]types.Transaction, 0), }, id: splitSetID(i), index: 0, } max.push(e1) min.push(e2) } for i := 0; i < 1000; i++ { maxPop := max.pop() minPop := min.pop() if int(maxPop.id) != 999-i { t.Error("Unexpected splitSetID in result from max-heap pop.") } if int(minPop.id) != i { t.Error("Unexpected splitSetID in result from min-heap pop.") } if maxPop.set.averageFee.Cmp(types.SiacoinPrecision.Mul64(uint64(999-i))) != 0 { t.Error("Unexpected currency value in result from max-heap pop.") } if minPop.set.averageFee.Cmp(types.SiacoinPrecision.Mul64(uint64(i))) != 0 { t.Error("Unexpected currency value in result from min-heap pop.") } } } // TestMapHeapSize tests that the size of MapHeaps changes accordingly with the // sizes of elements added to it, and with those elements removed from it. Tests // a max-heap and min-heap on the same sequence of pushes and pops. func TestMapHeapSize(t *testing.T) { max := &mapHeap{ selectID: make(map[splitSetID]*mapElement), data: nil, size: 0, minHeap: false, } min := &mapHeap{ selectID: make(map[splitSetID]*mapElement), data: nil, size: 0, minHeap: true, } max.init() min.init() var expectedSize uint64 randSlice := fastrand.Perm(1000) for _, i := range randSlice { e1 := &mapElement{ set: &splitSet{ averageFee: types.SiacoinPrecision.Mul64(uint64(i)), size: uint64(100 * i), transactions: make([]types.Transaction, 0), }, id: splitSetID(i), index: 0, } e2 := &mapElement{ set: &splitSet{ averageFee: types.SiacoinPrecision.Mul64(uint64(i)), size: uint64(100 * i), transactions: make([]types.Transaction, 0), }, id: splitSetID(i), index: 0, } max.push(e1) min.push(e2) expectedSize += e1.set.size } if max.size != expectedSize { t.Error("Max-heap size different than expected size.") } if min.size != expectedSize { t.Error("Min-heap size different than expected size.") } for i := 0; i < 1000; i++ { maxPop := max.pop() minPop := min.pop() if maxPop.set.size != uint64(100*(999-i)) { t.Error("Unexpected set size in result from max-heap pop.") } if minPop.set.size != uint64(100*i) { t.Error("Unexpected set size in result from min-heap pop.") } } } // TestMapHeapRemoveBySetID pushes a sequence of elements onto a max-heap and // min-heap. Then it removes a random element using its splitSetID, and checks // that it has been removed. 
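// The invariant exercised here: selectID and data always describe the same
// elements, so deleting by ID is an O(1) map lookup followed by an
// O(log n) heap repair, as in removeSetByID:
//
//	elem := mh.selectID[s] // O(1) locate by splitSetID
//	mh.remove(elem.index)  // O(log n) re-heapify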
func TestMapHeapRemoveBySetID(t *testing.T) { max := &mapHeap{ selectID: make(map[splitSetID]*mapElement), data: nil, size: 0, minHeap: false, } min := &mapHeap{ selectID: make(map[splitSetID]*mapElement), data: nil, size: 0, minHeap: true, } max.init() min.init() for i := 0; i < 5000; i++ { e1 := &mapElement{ set: &splitSet{ averageFee: types.SiacoinPrecision.Mul64(uint64(i)), size: uint64(10 * i), transactions: make([]types.Transaction, 0), }, id: splitSetID(i), index: 0, } e2 := &mapElement{ set: &splitSet{ averageFee: types.SiacoinPrecision.Mul64(uint64(i)), size: uint64(10 * i), transactions: make([]types.Transaction, 0), }, id: splitSetID(i), index: 0, } max.push(e1) min.push(e2) } randID := splitSetID(rand.Intn(5000)) firstToBeRemoved := max.selectID[randID] // Iterate over data in min heap and max heap to confirm the element to be // removed is actually there. inMaxHeap := false inMinHeap := false for _, v := range max.data { if v.id == firstToBeRemoved.id { inMaxHeap = true break } } for _, v := range min.data { if v.id == firstToBeRemoved.id { inMinHeap = true break } } if !inMinHeap || !inMaxHeap { t.Error("Element not found in heap(s) before being removed by splitSetID.") } if max.selectID[randID] == nil || min.selectID[randID] == nil { t.Error("Element not found in map(s) before being removed by splitSetID") } minSizeBefore := min.size maxSizeBefore := max.size minRemovedSetSize := min.selectID[randID].set.size maxRemovedSetSize := max.selectID[randID].set.size max.removeSetByID(randID) min.removeSetByID(randID) minSizeAfter := min.size maxSizeAfter := max.size if minSizeBefore-minRemovedSetSize != minSizeAfter { t.Error("unexpected difference in size after removing from min heap.") } if maxSizeBefore-maxRemovedSetSize != maxSizeAfter { t.Error("unexpected difference in size after removing from max heap.") } // Iterate over data in min heap and max heap to confirm the element to be // removed was actually removed removedFromMax := true removedFromMin := true for _, v := range max.data { if v.id == firstToBeRemoved.id { removedFromMax = false break } } for _, v := range min.data { if v.id == firstToBeRemoved.id { removedFromMin = false break } } if !removedFromMin { t.Error("Element found in min heap(s) after being removed by splitSetID.") } if !removedFromMax { t.Error("Element found in max heap(s) after being removed by splitSetID.") } _, inMinMap := min.selectID[randID] _, inMaxMap := max.selectID[randID] if inMinMap { t.Error("Element found in min map(s) after being removed by splitSetID") } if inMaxMap { t.Error("Element found in max map(s) after being removed by splitSetID") } } // TestMapHeapPeek test the Peek method. First, on an empty heap Peek should // return false. Then it checks that Peek returns the same result as the next // Pop. 
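// The safe-pop idiom under test, since pop panics on an empty heap:
//
//	if elem, ok := mh.peek(); ok {
//		_ = elem        // inspect the top without mutating the heap
//		elem = mh.pop() // safe: the heap is known to be non-empty
//	}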
func TestMapHeapPeek(t *testing.T) { max := &mapHeap{ selectID: make(map[splitSetID]*mapElement), data: nil, size: 0, minHeap: false, } min := &mapHeap{ selectID: make(map[splitSetID]*mapElement), data: nil, size: 0, minHeap: true, } max.init() min.init() minSizeBefore := min.size maxSizeBefore := max.size _, maxNotEmpty := max.peek() _, minNotEmpty := min.peek() minSizeAfter := min.size maxSizeAfter := max.size if maxNotEmpty { t.Error("Unexpected result from max.Peek(), heap not empty") } if minNotEmpty { t.Error("Unexpected result from min.Peek(), heap not empty") } if minSizeBefore != minSizeAfter || maxSizeBefore != maxSizeAfter { t.Error("expected heap size not to change from peek.") } for i := 0; i < 10; i++ { e1 := &mapElement{ set: &splitSet{ averageFee: types.SiacoinPrecision.Mul64(uint64(i)), size: uint64(10 * i), transactions: make([]types.Transaction, 0), }, id: splitSetID(i), index: 0, } e2 := &mapElement{ set: &splitSet{ averageFee: types.SiacoinPrecision.Mul64(uint64(i)), size: uint64(10 * i), transactions: make([]types.Transaction, 0), }, id: splitSetID(i), index: 0, } max.push(e1) min.push(e2) } for i := 0; i < 10; i++ { minSizeBefore := min.size maxSizeBefore := max.size maxPeek, maxNotEmpty := max.peek() minPeek, minNotEmpty := min.peek() minSizeAfter := min.size maxSizeAfter := max.size if minSizeBefore != minSizeAfter || maxSizeBefore != maxSizeAfter { t.Error("expected heap size not to change from peek.") } if !maxNotEmpty { t.Error("Unexpected result from max.Peek(), heap empty after pushes") } if !minNotEmpty { t.Error("Unexpected result from min.Peek(), heap empty after pushes") } maxPop := max.pop() minPop := min.pop() if int(maxPop.id) != int(maxPeek.id) { t.Error("Unexpected splitSetID in result from max-heap Peek.") } if int(minPop.id) != int(minPeek.id) { t.Error("Unexpected splitSetID in result from min-heap Peek.") } if maxPop.set.averageFee.Cmp(maxPeek.set.averageFee) != 0 { t.Error("Unexpected currency value in result from max-heap Peek.") } if minPop.set.averageFee.Cmp(minPeek.set.averageFee) != 0 { t.Error("Unexpected currency value in result from min-heap Peek.") } } } Sia-1.3.0/modules/miner/testminer.go000066400000000000000000000055231313565667000173730ustar00rootroot00000000000000package miner // testminer.go implements the TestMiner interface, whose primary purpose is // integration testing. import ( "bytes" "encoding/binary" "errors" "unsafe" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) const ( // solveAttempts is the number of times that SolveBlock will try to solve a // block before giving up. solveAttempts = 16e3 ) // solveBlock takes a block and a target and tries to solve the block for the // target. A bool is returned indicating whether the block was successfully // solved. func solveBlock(b types.Block, target types.Target) (types.Block, bool) { // Assemble the header. merkleRoot := b.MerkleRoot() header := make([]byte, 80) copy(header, b.ParentID[:]) binary.LittleEndian.PutUint64(header[40:48], uint64(b.Timestamp)) copy(header[48:], merkleRoot[:]) var nonce uint64 for i := 0; i < solveAttempts; i++ { id := crypto.HashBytes(header) if bytes.Compare(target[:], id[:]) >= 0 { copy(b.Nonce[:], header[32:40]) return b, true } *(*uint64)(unsafe.Pointer(&header[32])) = nonce nonce++ } return b, false } // BlockForWork returns a block that is ready for nonce grinding, along with // the target that the block must be solved against.
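// A sketch of one full solo-mining round built from the pieces in this
// file; FindBlock and AddBlock below wrap essentially the same sequence:
//
//	b, target, err := m.BlockForWork()
//	if err == nil {
//		if solved, ok := m.SolveBlock(b, target); ok {
//			err = m.managedSubmitBlock(solved)
//		}
//	}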
func (m *Miner) BlockForWork() (b types.Block, t types.Target, err error) { // Check if the wallet is unlocked. If the wallet is unlocked, make sure // that the miner has a recent address. if !m.wallet.Unlocked() { err = modules.ErrLockedWallet return } m.mu.Lock() defer m.mu.Unlock() err = m.checkAddress() if err != nil { return } b = m.blockForWork() return b, m.persist.Target, nil } // AddBlock adds a block to the consensus set. func (m *Miner) AddBlock() (types.Block, error) { block, err := m.FindBlock() if err != nil { return types.Block{}, err } err = m.cs.AcceptBlock(block) if err != nil { return types.Block{}, err } return block, nil } // FindBlock finds at most one block that extends the current blockchain. func (m *Miner) FindBlock() (types.Block, error) { var bfw types.Block var target types.Target err := func() error { m.mu.Lock() defer m.mu.Unlock() if !m.wallet.Unlocked() { return modules.ErrLockedWallet } err := m.checkAddress() if err != nil { return err } // Get a block for work. bfw = m.blockForWork() target = m.persist.Target return nil }() if err != nil { return types.Block{}, err } block, ok := m.SolveBlock(bfw, target) if !ok { return types.Block{}, errors.New("could not solve block using limited hashing power") } return block, nil } // SolveBlock takes a block and a target and tries to solve the block for the // target. A bool is returned indicating whether the block was successfully // solved. func (m *Miner) SolveBlock(b types.Block, target types.Target) (types.Block, bool) { return solveBlock(b, target) } Sia-1.3.0/modules/miner/update.go000066400000000000000000000222331313565667000166400ustar00rootroot00000000000000package miner import ( "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // getNewSplitSets creates split sets from a transaction pool diff, returns them // in a slice of map elements. Does not update the miner's global state. func (m *Miner) getNewSplitSets(diff *modules.TransactionPoolDiff) []*mapElement { // Split the new sets and add the splits to the list of transactions we pull // form. newElements := make([]*mapElement, 0) for _, newSet := range diff.AppliedTransactions { // Split the sets into smaller sets, and add them to the list of // transactions the miner can draw from. // TODO: Split the one set into a bunch of smaller sets using the cp4p // splitter. m.setCounter++ m.fullSets[newSet.ID] = []int{m.setCounter} var size uint64 var totalFees types.Currency for i := range newSet.IDs { size += newSet.Sizes[i] for _, fee := range newSet.Transactions[i].MinerFees { totalFees = totalFees.Add(fee) } } // We will check to see if this splitSet belongs in the block. s := &splitSet{ size: size, averageFee: totalFees.Div64(size), transactions: newSet.Transactions, } elem := &mapElement{ set: s, id: splitSetID(m.setCounter), index: 0, } newElements = append(newElements, elem) } return newElements } // addMapElementTxns places the splitSet from a mapElement into the correct // mapHeap. func (m *Miner) addMapElementTxns(elem *mapElement) { candidateSet := elem.set // Check if heap for highest fee transactions has space. if m.blockMapHeap.size+candidateSet.size < types.BlockSizeLimit-5e3 { m.blockMapHeap.push(elem) return } // While the heap cannot fit this set s, and while the (weighted) average // fee for the lowest sets from the block is less than the fee for the set // s, continue removing from the heap. The block heap doesn't have enough // space for this transaction. 
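// A worked example of the rule implemented below, with illustrative
// numbers: if the candidate set pays 8 hastings/byte while the displaced
// bottom sets average 5 hastings/byte, the bottom sets move to the
// overflow heap and the candidate enters the block; if the candidate paid
// only 3 hastings/byte, the bottom sets would be pushed back and the
// candidate itself would go to the overflow.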
Check if removing sets from the blockMapHeap // will be worth it. bottomSets will hold the lowest fee sets from the // blockMapHeap. bottomSets := make([]*mapElement, 0) var sizeOfBottomSets uint64 var averageFeeOfBottomSets types.Currency for { // Check if the candidateSet can fit in the block. if m.blockMapHeap.size-sizeOfBottomSets+candidateSet.size < types.BlockSizeLimit-5e3 { // Place candidate into block. m.blockMapHeap.push(elem) // Place transactions removed from block heap into // the overflow heap. for _, v := range bottomSets { m.overflowMapHeap.push(v) } break } // If the blockMapHeap is empty, push all elements removed from it back // in, and place the candidate set into the overflow. This should never // happen since transaction sets are much smaller than the max block // size. _, exists := m.blockMapHeap.peek() if !exists { m.overflowMapHeap.push(elem) // Put back in transactions removed. for _, v := range bottomSets { m.blockMapHeap.push(v) } // Finished with this candidate set. break } // Add the set to the bottomSets slice. Note that sizeOfBottomSets is // only increased after the running total fee has been rebuilt from the // previous average. nextSet := m.blockMapHeap.pop() bottomSets = append(bottomSets, nextSet) // Calculating fees to compare total fee from those sets removed and the current set s. totalFeeFromNextSet := nextSet.set.averageFee.Mul64(nextSet.set.size) totalBottomFees := averageFeeOfBottomSets.Mul64(sizeOfBottomSets).Add(totalFeeFromNextSet) sizeOfBottomSets += nextSet.set.size // Assign (rather than redeclare) so the running average accumulates // across loop iterations. averageFeeOfBottomSets = totalBottomFees.Div64(sizeOfBottomSets) // If the average fee of the bottom sets from the block is higher than // the fee from this candidate set, put the candidate into the overflow // MapHeap. if averageFeeOfBottomSets.Cmp(candidateSet.averageFee) == 1 { // CandidateSet goes into the overflow. m.overflowMapHeap.push(elem) // Put transaction sets from bottom back into the blockMapHeap. for _, v := range bottomSets { m.blockMapHeap.push(v) } // Finished with this candidate set. break } } } // addNewTxns adds new unconfirmed transactions to the miner's transaction // selection and updates the splitSet and mapElement state of the miner. func (m *Miner) addNewTxns(diff *modules.TransactionPoolDiff) { // Get new splitSets (in form of mapElement). newElements := m.getNewSplitSets(diff) // Place each elem in one of the MapHeaps. for i := 0; i < len(newElements); i++ { // Add splitSet to miner's global state using pointer and ID stored in // the mapElement and then add the mapElement to the miner's global // state. m.splitSets[newElements[i].id] = newElements[i].set m.addMapElementTxns(newElements[i]) } } // adjustUnsolvedBlock changes the UnsolvedBlock so that it has exactly those // transactions in the blockMapHeap. func (m *Miner) adjustUnsolvedBlock() { numTxns := 0 for _, elem := range m.blockMapHeap.selectID { numTxns += len(elem.set.transactions) } // If the transactions that need to be added don't fit in the block, // increase the size of the block by a constant factor to be more efficient. if numTxns > cap(m.persist.UnsolvedBlock.Transactions) { newCap := cap(m.persist.UnsolvedBlock.Transactions) * 6 / 5 if numTxns > newCap { newCap = numTxns } m.persist.UnsolvedBlock.Transactions = make([]types.Transaction, 0, newCap) } else { m.persist.UnsolvedBlock.Transactions = m.persist.UnsolvedBlock.Transactions[:0] } // The current design removes all transactions from the block itself, so we // have to take everything in the blockMapHeap and put it into the unsolved // block slice.
for _, elem := range m.blockMapHeap.selectID { set := elem.set m.persist.UnsolvedBlock.Transactions = append(m.persist.UnsolvedBlock.Transactions, set.transactions...) } } // deleteReverts deletes transactions from the miner's transaction selection // which are no longer in the transaction pool. func (m *Miner) deleteReverts(diff *modules.TransactionPoolDiff) { // Delete the sets that are no longer useful. That means recognizing which // of your splits belong to the missing sets. for _, id := range diff.RevertedTransactions { // Look up all of the split sets associated with the set being reverted, // and delete them. Then delete the lookups from the list of full sets // as well. splitSetIndexes := m.fullSets[id] for _, ss := range splitSetIndexes { m.deleteMapElementTxns(splitSetID(ss)) } delete(m.fullSets, id) } } // deleteMapElementTxns removes a splitSet (by id) from the miner's mapheaps and // readjusts the mapheap for the block if needed. func (m *Miner) deleteMapElementTxns(id splitSetID) { _, inBlockMapHeap := m.blockMapHeap.selectID[id] _, inOverflowMapHeap := m.overflowMapHeap.selectID[id] // If the transaction set is in the overflow, we can just delete it. if inOverflowMapHeap { m.overflowMapHeap.removeSetByID(id) } else if inBlockMapHeap { // Remove from blockMapHeap. m.blockMapHeap.removeSetByID(id) // Promote sets from overflow heap to block if possible. Re-peek after // every promotion so the loop condition sees the new top of the // overflow heap. for overflowElem, canPromote := m.overflowMapHeap.peek(); canPromote && m.blockMapHeap.size+overflowElem.set.size < types.BlockSizeLimit-5e3; overflowElem, canPromote = m.overflowMapHeap.peek() { promotedElem := m.overflowMapHeap.pop() m.blockMapHeap.push(promotedElem) } } delete(m.splitSets, id) } // ProcessConsensusChange will update the miner's most recent block. func (m *Miner) ProcessConsensusChange(cc modules.ConsensusChange) { m.mu.Lock() defer m.mu.Unlock() // Update the miner's understanding of the block height. for _, block := range cc.RevertedBlocks { // Only doing the block check if the height is above zero saves hashing // and saves a nontrivial amount of time during IBD. if m.persist.Height > 0 || block.ID() != types.GenesisID { m.persist.Height-- } else if m.persist.Height != 0 { // Sanity check - if the current block is the genesis block, the // miner height should be set to zero. m.log.Critical("Miner has detected a genesis block, but the height of the miner is set to ", m.persist.Height) m.persist.Height = 0 } } for _, block := range cc.AppliedBlocks { // Only doing the block check if the height is above zero saves hashing // and saves a nontrivial amount of time during IBD. if m.persist.Height > 0 || block.ID() != types.GenesisID { m.persist.Height++ } else if m.persist.Height != 0 { // Sanity check - if the current block is the genesis block, the // miner height should be set to zero. m.log.Critical("Miner has detected a genesis block, but the height of the miner is set to ", m.persist.Height) m.persist.Height = 0 } } // Update the unsolved block. m.persist.UnsolvedBlock.ParentID = cc.AppliedBlocks[len(cc.AppliedBlocks)-1].ID() m.persist.Target = cc.ChildTarget m.persist.UnsolvedBlock.Timestamp = cc.MinimumValidChildTimestamp // There is a new parent block, the source block should be updated to keep // the stale rate as low as possible. if cc.Synced { m.newSourceBlock() } m.persist.RecentChange = cc.ID } // ReceiveUpdatedUnconfirmedTransactions will replace the current unconfirmed // set of transactions with the input transactions.
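// Ordering in the method below matters: reverted sets are deleted before
// new ones are added, because a transaction set that merely changed shape
// appears in the diff as a revert plus a re-apply under a new set ID, and
// only after both passes is the unsolved block rebuilt to mirror
// blockMapHeap.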
func (m *Miner) ReceiveUpdatedUnconfirmedTransactions(diff *modules.TransactionPoolDiff) { m.mu.Lock() defer m.mu.Unlock() m.deleteReverts(diff) m.addNewTxns(diff) m.adjustUnsolvedBlock() } Sia-1.3.0/modules/miner/update_test.go000066400000000000000000000047761313565667000177130ustar00rootroot00000000000000package miner import ( "testing" "github.com/NebulousLabs/Sia/modules" ) // TestIntegrationBlockHeightReorg checks that the miner has the correct block // height after a series of reorgs that go as far as the genesis block. func TestIntegrationBlockHeightReorg(t *testing.T) { if testing.Short() { t.SkipNow() } // Create 3 miner testers that will be used to cause each other to reorg. mt1, err := createMinerTester(t.Name() + "1") if err != nil { t.Fatal(err) } mt2, err := createMinerTester(t.Name() + "2") if err != nil { t.Fatal(err) } mt3, err := createMinerTester(t.Name() + "3") if err != nil { t.Fatal(err) } // Put one ahead of the other multiple times, which should thrash around // the height calculation and cause problems by dipping down to the genesis // block repeatedly. for i := 0; i < 2; i++ { b, err := mt1.miner.AddBlock() if err != nil { t.Fatal(err) } mt1.minedBlocks = append(mt1.minedBlocks, b) } for i := 0; i < 3; i++ { b, err := mt2.miner.AddBlock() if err != nil { t.Fatal(err) } mt2.minedBlocks = append(mt2.minedBlocks, b) } for _, block := range mt2.minedBlocks { err = mt1.cs.AcceptBlock(block) if err != nil && err != modules.ErrNonExtendingBlock { t.Fatal(err) } } if mt1.cs.CurrentBlock().ID() != mt2.cs.CurrentBlock().ID() { t.Fatal("mt1 and mt2 should have the same current block") } for i := 0; i < 2; i++ { b, err := mt1.miner.AddBlock() if err != nil { t.Fatal(err) } mt1.minedBlocks = append(mt1.minedBlocks, b) } for i := 0; i < 3; i++ { b, err := mt2.miner.AddBlock() if err != nil { t.Fatal(err) } mt2.minedBlocks = append(mt2.minedBlocks, b) } for _, block := range mt2.minedBlocks { err = mt1.cs.AcceptBlock(block) if err != nil && err != modules.ErrNonExtendingBlock && err != modules.ErrBlockKnown { t.Fatal(err) } } if mt1.cs.CurrentBlock().ID() != mt2.cs.CurrentBlock().ID() { t.Fatal("mt1 and mt2 should have the same current block") } for i := 0; i < 7; i++ { b, err := mt3.miner.AddBlock() if err != nil { t.Fatal(err) } mt3.minedBlocks = append(mt3.minedBlocks, b) } for _, block := range mt3.minedBlocks { err = mt1.cs.AcceptBlock(block) if err != nil && err != modules.ErrNonExtendingBlock { t.Fatal(err) } } if mt1.cs.CurrentBlock().ID() == mt2.cs.CurrentBlock().ID() { t.Fatal("mt1 and mt2 should not have the same block height") } if mt1.cs.CurrentBlock().ID() != mt3.cs.CurrentBlock().ID() { t.Fatal("mt1 and mt3 should have the same current block") } } Sia-1.3.0/modules/modules.go000066400000000000000000000014361313565667000157160ustar00rootroot00000000000000// Package modules contains definitions for all of the major modules of Sia, as // well as some helper functions for performing actions that are common to // multiple modules. package modules import ( "time" "github.com/NebulousLabs/Sia/build" ) var ( // SafeMutexDelay is the recommended timeout for the deadlock detecting // mutex. This value is DEPRECATED, as safe mutexes are no longer // recommended. Instead, the locking conventions should be followed and a // traditional mutex or a demote mutex should be used. 
SafeMutexDelay time.Duration ) func init() { if build.Release == "dev" { SafeMutexDelay = 60 * time.Second } else if build.Release == "standard" { SafeMutexDelay = 90 * time.Second } else if build.Release == "testing" { SafeMutexDelay = 30 * time.Second } } Sia-1.3.0/modules/negotiate.go000066400000000000000000000421561313565667000162310ustar00rootroot00000000000000package modules import ( "bytes" "errors" "io" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/types" ) const ( // AcceptResponse is the response given to an RPC call to indicate // acceptance, i.e. that the sender wishes to continue communication. AcceptResponse = "accept" // StopResponse is the response given to an RPC call to indicate graceful // termination, i.e. that the sender wishes to cease communication, but // not due to an error. StopResponse = "stop" // NegotiateDownloadTime defines the amount of time that the renter and // host have to negotiate a download request batch. The time is set high // enough that two nodes behind Tor have a reasonable chance of completing // the negotiation. NegotiateDownloadTime = 600 * time.Second // NegotiateFileContractTime defines the amount of time that the renter and // host have to negotiate a file contract. The time is set high enough that // a node behind Tor has a reasonable chance at making the multiple // required round trips to complete the negotiation. NegotiateFileContractTime = 360 * time.Second // NegotiateFileContractRevisionTime defines the minimum amount of time // that the renter and host have to negotiate a file contract revision. The // time is set high enough that a full 4MB can be piped through a // connection that is running over Tor. NegotiateFileContractRevisionTime = 600 * time.Second // NegotiateRecentRevisionTime establishes the minimum amount of time that // the connection deadline is expected to be set to when a recent file // contract revision is being requested from the host. The deadline is long // enough that the connection should be successful even if both parties are // running Tor. NegotiateRecentRevisionTime = 120 * time.Second // NegotiateRenewContractTime defines the minimum amount of time that the // renter and host have to negotiate a final contract renewal. The time is // high enough that the negotiation can occur over a Tor connection, and // that both the host and the renter can have time to process large Merkle // tree calculations that may be involved with renewing a file contract. NegotiateRenewContractTime = 600 * time.Second // NegotiateSettingsTime establishes the minimum amount of time that the // connection deadline is expected to be set to when settings are being // requested from the host. The deadline is long enough that the connection // should be successful even if both parties are on Tor. NegotiateSettingsTime = 120 * time.Second // NegotiateMaxDownloadActionRequestSize defines the maximum size that a // download request can be. Note, this is not a max size for the data that // can be requested, but instead is a max size for the definition of the // data being requested. NegotiateMaxDownloadActionRequestSize = 50e3 // NegotiateMaxErrorSize indicates the maximum number of bytes that can be // used to encode an error being sent during negotiation. 
NegotiateMaxErrorSize = 256 // NegotiateMaxFileContractRevisionSize specifies the maximum size that a // file contract revision is allowed to have when being sent over the wire // during negotiation. NegotiateMaxFileContractRevisionSize = 3e3 // NegotiateMaxFileContractSetLen determines the maximum allowed size of a // transaction set that can be sent when trying to negotiate a file // contract. The transaction set will contain all of the unconfirmed // dependencies of the file contract, meaning that it can be quite large. // The transaction pool's size limit for transaction sets has been chosen // as a reasonable guideline for determining what is too large. NegotiateMaxFileContractSetLen = TransactionSetSizeLimit - 1e3 // NegotiateMaxHostExternalSettingsLen is the maximum allowed size of an // encoded HostExternalSettings. NegotiateMaxHostExternalSettingsLen = 16000 // NegotiateMaxSiaPubkeySize defines the maximum size that a SiaPubkey is // allowed to be when being sent over the wire during negotiation. NegotiateMaxSiaPubkeySize = 1e3 // NegotiateMaxTransactionSignatureSize defines the maximum size that a // transaction signature is allowed to be when being sent over the wire // during negotiation. NegotiateMaxTransactionSignatureSize = 2e3 // NegotiateMaxTransactionSignaturesSize defines the maximum size that a // transaction signature slice is allowed to be when being sent over the // wire during negotiation. NegotiateMaxTransactionSignaturesSize = 5e3 ) var ( // ActionDelete is the specifier for a RevisionAction that deletes a // sector. ActionDelete = types.Specifier{'D', 'e', 'l', 'e', 't', 'e'} // ActionInsert is the specifier for a RevisionAction that inserts a // sector. ActionInsert = types.Specifier{'I', 'n', 's', 'e', 'r', 't'} // ActionModify is the specifier for a RevisionAction that modifies sector // data. ActionModify = types.Specifier{'M', 'o', 'd', 'i', 'f', 'y'} // ErrAnnNotAnnouncement indicates that the provided host announcement does // not use a recognized specifier, indicating that it's either not a host // announcement or it's not a recognized version of a host announcement. ErrAnnNotAnnouncement = errors.New("provided data does not form a recognized host announcement") // ErrAnnUnrecognizedSignature is returned when the signature in a host // announcement is not a type of signature that is recognized. ErrAnnUnrecognizedSignature = errors.New("the signature provided in the host announcement is not recognized") // ErrRevisionCoveredFields is returned if there is a covered fields object // in a transaction signature which has the 'WholeTransaction' field set to // true, meaning that miner fees cannot be added to the transaction without // invalidating the signature. ErrRevisionCoveredFields = errors.New("file contract revision transaction signature does not allow miner fees to be added") // ErrRevisionSigCount is returned when a file contract revision has the // wrong number of transaction signatures. ErrRevisionSigCount = errors.New("file contract revision has the wrong number of transaction signatures") // ErrStopResponse is the error returned by ReadNegotiationAcceptance when // it reads the StopResponse string. ErrStopResponse = errors.New("sender wishes to stop communicating") // PrefixHostAnnouncement is used to indicate that a transaction's // Arbitrary Data field contains a host announcement. The encoded // announcement will follow this prefix. 
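// (Editor's note, not part of the original source: per the byte offsets
// exercised by TestAnnouncementHandling in negotiate_test.go, the signed
// announcement produced by CreateAnnouncement appears to be laid out as:
//
//	[16 bytes] PrefixHostAnnouncement specifier
//	[ 8 bytes] length prefix of the net address
//	[ n bytes] net address
//	[16 bytes] signature algorithm specifier, then the length-prefixed key
//	[64 bytes] signature over the hash of everything above
//
// which is why that test corrupts bytes 25 and 33 to hit the net address
// and the algorithm specifier respectively.)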
PrefixHostAnnouncement = types.Specifier{'H', 'o', 's', 't', 'A', 'n', 'n', 'o', 'u', 'n', 'c', 'e', 'm', 'e', 'n', 't'} // RPCDownload is the specifier for downloading a file from a host. RPCDownload = types.Specifier{'D', 'o', 'w', 'n', 'l', 'o', 'a', 'd', 2} // RPCFormContract is the specifier for forming a contract with a host. RPCFormContract = types.Specifier{'F', 'o', 'r', 'm', 'C', 'o', 'n', 't', 'r', 'a', 'c', 't', 2} // RPCRenewContract is the specifier for renewing an existing contract. RPCRenewContract = types.Specifier{'R', 'e', 'n', 'e', 'w', 'C', 'o', 'n', 't', 'r', 'a', 'c', 't', 2} // RPCReviseContract is the specifier for revising an existing file // contract. RPCReviseContract = types.Specifier{'R', 'e', 'v', 'i', 's', 'e', 'C', 'o', 'n', 't', 'r', 'a', 'c', 't', 2} // RPCRecentRevision is the specifier for getting the most recent file // contract revision for a given file contract. RPCRecentRevision = types.Specifier{'R', 'e', 'c', 'e', 'n', 't', 'R', 'e', 'v', 'i', 's', 'i', 'o', 'n', 2} // RPCSettings is the specifier for requesting settings from the host. RPCSettings = types.Specifier{'S', 'e', 't', 't', 'i', 'n', 'g', 's', 2} // SectorSize defines how large a sector should be in bytes. The sector // size needs to be a power of two to be compatible with package // merkletree. 4MB has been chosen for the live network because large // sectors significantly reduce the tracking overhead experienced by the // renter and the host. SectorSize = build.Select(build.Var{ Dev: uint64(1 << 18), // 256 KiB Standard: uint64(1 << 22), // 4 MiB Testing: uint64(1 << 12), // 4 KiB }).(uint64) ) type ( // A DownloadAction is a description of a download that the renter would // like to make. The MerkleRoot indicates the root of the sector, the // offset indicates what portion of the sector is being downloaded, and the // length indicates how many bytes should be grabbed starting from the // offset. DownloadAction struct { MerkleRoot crypto.Hash Offset uint64 Length uint64 } // HostAnnouncement is an announcement by the host that appears in the // blockchain. 'Specifier' is always 'PrefixHostAnnouncement'. The // announcement is always followed by a signature, made with the announced // public key, over the whole announcement. HostAnnouncement struct { Specifier types.Specifier NetAddress NetAddress PublicKey types.SiaPublicKey } // HostExternalSettings are the parameters advertised by the host. These // are the values that the renter will request from the host in order to // build its database. HostExternalSettings struct { // MaxBatchSize indicates the maximum size in bytes that a batch is // allowed to be. A batch is an array of revision actions; each // revision action can have a different number of bytes, depending on // the action, so the number of revision actions allowed depends on the // sizes of each. AcceptingContracts bool `json:"acceptingcontracts"` MaxDownloadBatchSize uint64 `json:"maxdownloadbatchsize"` MaxDuration types.BlockHeight `json:"maxduration"` MaxReviseBatchSize uint64 `json:"maxrevisebatchsize"` NetAddress NetAddress `json:"netaddress"` RemainingStorage uint64 `json:"remainingstorage"` SectorSize uint64 `json:"sectorsize"` TotalStorage uint64 `json:"totalstorage"` UnlockHash types.UnlockHash `json:"unlockhash"` WindowSize types.BlockHeight `json:"windowsize"` // Collateral is the amount of collateral that the host will put up for // storage, in hastings per byte per block, as an assurance to the renter that the // host really is committed to keeping the file.
But, because the file // contract is created with no data available, this does leave the host // exposed to an attack by a wealthy renter whereby the renter causes // the host to lockup in-advance a bunch of funds that the renter then // never uses, meaning the host will not have collateral for other // clients. // // MaxCollateral indicates the maximum number of coins that a host is // willing to put into a file contract. Collateral types.Currency `json:"collateral"` MaxCollateral types.Currency `json:"maxcollateral"` // ContractPrice is the number of coins that the renter needs to pay to // the host just to open a file contract with them. Generally, the // price is only to cover the siacoin fees that the host will suffer // when submitting the file contract revision and storage proof to the // blockchain. // // The storage price is the cost per-byte-per-block in hastings of // storing data on the host. // // 'Download' bandwidth price is the cost per byte of downloading data // from the host. // // 'Upload' bandwidth price is the cost per byte of uploading data to // the host. ContractPrice types.Currency `json:"contractprice"` DownloadBandwidthPrice types.Currency `json:"downloadbandwidthprice"` StoragePrice types.Currency `json:"storageprice"` UploadBandwidthPrice types.Currency `json:"uploadbandwidthprice"` // Because the host has a public key, and settings are signed, and // because settings may be MITM'd, settings need a revision number so // that a renter can compare multiple sets of settings and determine // which is the most recent. RevisionNumber uint64 `json:"revisionnumber"` Version string `json:"version"` } // A RevisionAction is a description of an edit to be performed on a file // contract. Three types are allowed, 'ActionDelete', 'ActionInsert', and // 'ActionModify'. ActionDelete just takes a sector index, indicating which // sector is going to be deleted. ActionInsert takes a sector index, and a // full sector of data, indicating that a sector at the index should be // inserted with the provided data. 'Modify' revises the sector at the // given index, rewriting it with the provided data starting from the // 'offset' within the sector. // // Modify could be simulated with an insert and a delete, however an insert // requires a full sector to be uploaded, and a modify can be just a few // kb, which can be significantly faster. RevisionAction struct { Type types.Specifier SectorIndex uint64 Offset uint64 Data []byte } ) // ReadNegotiationAcceptance reads an accept/reject response from r (usually a // net.Conn). If the response is not AcceptResponse, ReadNegotiationAcceptance // returns the response as an error. If the response is StopResponse, // ErrStopResponse is returned, allowing for direct error comparison. // // Note that since errors returned by ReadNegotiationAcceptance are newly // allocated, they cannot be compared to other errors in the traditional // fashion. func ReadNegotiationAcceptance(r io.Reader) error { var resp string err := encoding.ReadObject(r, &resp, NegotiateMaxErrorSize) if err != nil { return err } switch resp { case AcceptResponse: return nil case StopResponse: return ErrStopResponse default: return errors.New(resp) } } // WriteNegotiationAcceptance writes the 'accept' response to w (usually a // net.Conn). func WriteNegotiationAcceptance(w io.Writer) error { return encoding.WriteObject(w, AcceptResponse) } // WriteNegotiationRejection will write a rejection response to w (usually a // net.Conn) and return the input error. 
If the write fails, the write error // is joined with the input error. func WriteNegotiationRejection(w io.Writer, err error) error { writeErr := encoding.WriteObject(w, err.Error()) if writeErr != nil { return build.JoinErrors([]error{err, writeErr}, "; ") } return err } // WriteNegotiationStop writes the 'stop' response to w (usually a // net.Conn). func WriteNegotiationStop(w io.Writer) error { return encoding.WriteObject(w, StopResponse) } // CreateAnnouncement will take a host announcement and encode it, returning // the exact []byte that should be added to the arbitrary data of a // transaction. func CreateAnnouncement(addr NetAddress, pk types.SiaPublicKey, sk crypto.SecretKey) (signedAnnouncement []byte, err error) { if err := addr.IsValid(); err != nil { return nil, err } // Create the HostAnnouncement and marshal it. annBytes := encoding.Marshal(HostAnnouncement{ Specifier: PrefixHostAnnouncement, NetAddress: addr, PublicKey: pk, }) // Create a signature for the announcement. annHash := crypto.HashBytes(annBytes) sig := crypto.SignHash(annHash, sk) // Return the signed announcement. return append(annBytes, sig[:]...), nil } // DecodeAnnouncement decodes announcement bytes into a host announcement, // verifying the prefix and the signature. func DecodeAnnouncement(fullAnnouncement []byte) (na NetAddress, spk types.SiaPublicKey, err error) { // Read the first part of the announcement to get the intended host // announcement. var ha HostAnnouncement dec := encoding.NewDecoder(bytes.NewReader(fullAnnouncement)) err = dec.Decode(&ha) if err != nil { return "", types.SiaPublicKey{}, err } // Check that the announcement was registered as a host announcement. if ha.Specifier != PrefixHostAnnouncement { return "", types.SiaPublicKey{}, ErrAnnNotAnnouncement } // Check that the public key is a recognized type of public key. if ha.PublicKey.Algorithm != types.SignatureEd25519 { return "", types.SiaPublicKey{}, ErrAnnUnrecognizedSignature } // Read the signature out of the reader. var sig crypto.Signature err = dec.Decode(&sig) if err != nil { return "", types.SiaPublicKey{}, err } // Verify the signature. var pk crypto.PublicKey copy(pk[:], ha.PublicKey.Key) annHash := crypto.HashObject(ha) err = crypto.VerifyHash(annHash, pk, sig) if err != nil { return "", types.SiaPublicKey{}, err } return ha.NetAddress, ha.PublicKey, nil } // VerifyFileContractRevisionTransactionSignatures checks that the signatures // on a file contract revision are valid and cover the right fields. func VerifyFileContractRevisionTransactionSignatures(fcr types.FileContractRevision, tsigs []types.TransactionSignature, height types.BlockHeight) error { if len(tsigs) != 2 { return ErrRevisionSigCount } for _, tsig := range tsigs { // The transaction needs to be malleable so that miner fees can be // added. If the whole transaction is covered, it is doomed to have no // fees. if tsig.CoveredFields.WholeTransaction { return ErrRevisionCoveredFields } } txn := types.Transaction{ FileContractRevisions: []types.FileContractRevision{fcr}, TransactionSignatures: tsigs, } // Check that the signatures verify. This will also check that the covered // fields object is not over-aggressive, because if the object is pointing // to elements that haven't been added to the transaction, verification // will fail. 
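// (Editor's note, not part of the original source: StandaloneValid runs
// the validity checks that need no consensus set -- including verifying
// every transaction signature -- so a nil error here means the revision
// transaction is internally consistent at the given height.)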
return txn.StandaloneValid(height) } Sia-1.3.0/modules/negotiate_test.go000066400000000000000000000063371313565667000172710ustar00rootroot00000000000000package modules import ( "bytes" "testing" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/types" ) // TestAnnouncementHandling checks that CreateAnnouncement and // DecodeAnnouncement work together correctly. func TestAnnouncementHandling(t *testing.T) { t.Parallel() // Create the keys that will be used to generate the announcement. sk, pk := crypto.GenerateKeyPair() spk := types.SiaPublicKey{ Algorithm: types.SignatureEd25519, Key: pk[:], } addr := NetAddress("f.o:1234") // Generate the announcement. annBytes, err := CreateAnnouncement(addr, spk, sk) if err != nil { t.Fatal(err) } // Decode the announcement decAddr, decPubKey, err := DecodeAnnouncement(annBytes) if err != nil { t.Fatal(err) } if decPubKey.Algorithm != spk.Algorithm { t.Error("decoded announcement has the wrong algorithm on the public key") } if decAddr != addr { t.Error("decoded announcement has the wrong net address") } if !bytes.Equal(decPubKey.Key, spk.Key) { t.Error("decoded announcement has the wrong public key") } // Corrupt the data, and see that decoding fails. Decoding should fail // because the signature should not be valid anymore. // // First 16 bytes are the host announcement prefix, followed by 8 bytes // describing the length of the net address, followed by the net address. // Corrupt the net address. annBytes[25]++ _, _, err = DecodeAnnouncement(annBytes) if err != crypto.ErrInvalidSignature { t.Error(err) } annBytes[25]-- // The final byte is going to be a part of the signature. Corrupt the final // byte and verify that there's an error. lastIndex := len(annBytes) - 1 annBytes[lastIndex]++ _, _, err = DecodeAnnouncement(annBytes) if err != crypto.ErrInvalidSignature { t.Error(err) } annBytes[lastIndex]-- // Pass in a bad specifier - change the host announcement type. annBytes[0]++ _, _, err = DecodeAnnouncement(annBytes) if err != ErrAnnNotAnnouncement { t.Error(err) } annBytes[0]-- // Pass in a bad signature algorithm. 16 bytes to pass the specifier, 8+8 bytes to pass the net address. annBytes[33]++ _, _, err = DecodeAnnouncement(annBytes) if err != ErrAnnUnrecognizedSignature { t.Error(err) } annBytes[33]-- // Cause the decoding to fail altogether. _, _, err = DecodeAnnouncement(annBytes[:12]) if err == nil { t.Error(err) } } // TestNegotiationResponses tests the WriteNegotiationAcceptance, // WriteNegotiationRejection, and ReadNegotiationAcceptance functions. 
func TestNegotiationResponses(t *testing.T) { // Write/Read acceptance buf := new(bytes.Buffer) err := WriteNegotiationAcceptance(buf) if err != nil { t.Fatal(err) } err = ReadNegotiationAcceptance(buf) if err != nil { t.Fatal(err) } // Write/Read rejection buf.Reset() err = WriteNegotiationRejection(buf, ErrLowBalance) if err != ErrLowBalance { t.Fatal(err) } err = ReadNegotiationAcceptance(buf) // can't compare to ErrLowBalance directly; contents are the same, but pointer is different if err == nil || err.Error() != ErrLowBalance.Error() { t.Fatal(err) } // Write/Read StopResponse buf.Reset() err = WriteNegotiationStop(buf) if err != nil { t.Fatal(err) } err = ReadNegotiationAcceptance(buf) if err != ErrStopResponse { t.Fatal(err) } } Sia-1.3.0/modules/netaddress.go000066400000000000000000000120561313565667000164020ustar00rootroot00000000000000package modules import ( "errors" "net" "strconv" "strings" "github.com/NebulousLabs/Sia/build" ) // MaxEncodedNetAddressLength is the maximum length of a NetAddress encoded // with the encode package. 266 was chosen because the maximum length for the // hostname is 254 + 1 for the separating colon + 5 for the port + 8 byte // string length prefix. const MaxEncodedNetAddressLength = 266 // A NetAddress contains the information needed to contact a peer. type NetAddress string // Host removes the port from a NetAddress, returning just the host. If the // address is not of the form "host:port" the empty string is returned. The // port will still be returned for invalid NetAddresses (e.g. "unqualified:0" // will return "unqualified"), but in general you should only call Host on // valid addresses. func (na NetAddress) Host() string { host, _, err := net.SplitHostPort(string(na)) // 'host' is not always the empty string if an error is returned. if err != nil { return "" } return host } // Port returns the NetAddress object's port number. If the address is not of // the form "host:port" the empty string is returned. The port will still be // returned for invalid NetAddresses (e.g. "localhost:0" will return "0"), but // in general you should only call Port on valid addresses. func (na NetAddress) Port() string { _, port, err := net.SplitHostPort(string(na)) // 'port' will not always be the empty string if an error is returned. if err != nil { return "" } return port } // IsLoopback returns true for IP addresses that are on the same machine. func (na NetAddress) IsLoopback() bool { host, _, err := net.SplitHostPort(string(na)) if err != nil { return false } if host == "localhost" { return true } if ip := net.ParseIP(host); ip != nil && ip.IsLoopback() { return true } return false } // IsLocal returns true if the input IP address belongs to a local address // range such as 192.168.x.x or 127.x.x.x func (na NetAddress) IsLocal() bool { // Loopback counts as private. if na.IsLoopback() { return true } // Grab the IP address of the net address. If there is an error parsing, // return false, as it's not a private ip address range. ip := net.ParseIP(na.Host()) if ip == nil { return false } // Determine whether or not the ip is in a CIDR that is considered to be // local. localCIDRs := []string{ "10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "fd00::/8", } for _, cidr := range localCIDRs { _, ipnet, _ := net.ParseCIDR(cidr) if ipnet.Contains(ip) { return true } } return false } // IsValid is an extension to IsStdValid that also forbids the loopback // address. 
IsValid is being phased out in favor of allowing the loopback // address but verifying through other means that the connection is not to // yourself (which is the original reason that the loopback address was // banned). func (na NetAddress) IsValid() error { // Check the loopback address. if na.IsLoopback() && build.Release != "testing" { return errors.New("host is a loopback address") } return na.IsStdValid() } // IsStdValid returns an error if the NetAddress is invalid. A valid NetAddress // is of the form "host:port", such that "host" is either a valid IPv4/IPv6 // address or a valid hostname, and "port" is an integer in the range // [1,65535]. Valid IPv4 addresses, IPv6 addresses, and hostnames are detailed // in RFCs 791, 2460, and 952, respectively. func (na NetAddress) IsStdValid() error { // Verify the port number. host, port, err := net.SplitHostPort(string(na)) if err != nil { return err } portInt, err := strconv.Atoi(port) if err != nil { return errors.New("port is not an integer") } else if portInt < 1 || portInt > 65535 { return errors.New("port is invalid") } // Loopback addresses don't always pass the requirements below, and // therefore must be checked separately. if na.IsLoopback() { return nil } // First try to parse host as an IP address; if that fails, assume it is a // hostname. if ip := net.ParseIP(host); ip != nil { if ip.IsUnspecified() { return errors.New("host is the unspecified address") } } else { // Hostnames can have a trailing dot (which indicates that the hostname is // fully qualified), but we ignore it for validation purposes. if strings.HasSuffix(host, ".") { host = host[:len(host)-1] } if len(host) < 1 || len(host) > 253 { return errors.New("invalid hostname length") } labels := strings.Split(host, ".") if len(labels) == 1 { return errors.New("unqualified hostname") } for _, label := range labels { if len(label) < 1 || len(label) > 63 { return errors.New("hostname contains label with invalid length") } if strings.HasPrefix(label, "-") || strings.HasSuffix(label, "-") { return errors.New("hostname contains label that starts or ends with a hyphen") } for _, r := range strings.ToLower(label) { isLetter := 'a' <= r && r <= 'z' isNumber := '0' <= r && r <= '9' isHyphen := r == '-' if !(isLetter || isNumber || isHyphen) { return errors.New("host contains invalid characters") } } } } return nil } Sia-1.3.0/modules/netaddress_test.go000066400000000000000000000215121313565667000174360ustar00rootroot00000000000000package modules import ( "net" "strings" "testing" ) var ( // Networks such as 10.0.0.x have been omitted from testing - behavior // for these networks is currently undefined. invalidAddrs = []string{ // Garbage addresses "", "foo:bar:baz", "garbage:6146:616", // Missing host / port ":", "111.111.111.111", "12.34.45.64", "[::2]", "::2", "foo", "hn.com", "世界", "foo:", "世界:", ":foo", ":世界", "localhost:", "[::1]:", // Invalid host / port chars "localhost:-", "[::1]:-", "foo:{}", "{}:123", " foo:123", "foo :123", "f oo:123", "foo: 123", "foo:123 ", "foo:1 23", "\x00:123", "foo:\x00", "世界:123", "bar:世界", "世:界", `":"`, // Unspecified address "[::]:bar", "0.0.0.0:bar", // Invalid hostnames "unqualifiedhost:123", "Yo-Amazon.we-are-really-happy-for-you.and-we-will-let-you-finish.but-Sia-is-the-best-cloud-storage-of-all-time.of-all-time-of-all-time-of-all-time-of-all-time-of-all-time.of-all-time-of-all-time-of-all-time-of-all-time-of-all-time.of-all-time-of-all-time:123", strings.Repeat("a", 64) + ".com:123", // 64 char long label too long. 
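// (Editor's note, not part of the original source: the next two entries
// are 254 characters once any trailing dot is stripped, one over the
// 253-character hostname ceiling that IsStdValid enforces alongside the
// 1-63 character bound on each label.)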
strings.Repeat(strings.Repeat("a", 62)+".", 4) + "co:123", // 254 char long hostname too long. strings.Repeat(strings.Repeat("a", 62)+".", 4) + "co.:123", // 254 char long hostname with trailing dot too long. "-foo.bar:123", "foo-.bar:123", "foo.-bar:123", "foo.bar-:123", "foo-bar.-baz:123", "foo-bar.baz-:123", "foo.-bar.baz:123", "foo.bar-.baz:123", ".:123", ".foo.com:123", "foo.com..:123", // invalid port numbers "foo:0", "foo:65536", "foo:-100", "foo:1000000", "localhost:0", "[::1]:0", } validAddrs = []string{ // Loopback address (valid in testing only, can't really test this well) "localhost:123", "127.0.0.1:123", "[::1]:123", // Valid addresses. "foo.com:1", "foo.com.:1", "a.b.c:1", "a.b.c.:1", "foo-bar.com:123", "FOO.com:1", "1foo.com:1", "tld.foo.com:1", "hn.com:8811", strings.Repeat("foo.", 63) + "f:123", // 253 chars long strings.Repeat("foo.", 63) + "f.:123", // 254 chars long, 253 chars long without trailing dot strings.Repeat(strings.Repeat("a", 63)+".", 3) + "a:123", // 3x63 char length labels + 1x1 char length label without trailing dot strings.Repeat(strings.Repeat("a", 63)+".", 3) + ":123", // 3x63 char length labels with trailing dot "[::2]:65535", "111.111.111.111:111", "12.34.45.64:7777", } ) // TestHostPort tests the Host and Port methods of the NetAddress type. func TestHostPort(t *testing.T) { t.Parallel() // Test valid addrs. for _, addr := range validAddrs { na := NetAddress(addr) host := na.Host() port := na.Port() expectedHost, expectedPort, err := net.SplitHostPort(addr) if err != nil { t.Fatal(err) } if host != expectedHost { t.Errorf("Host() returned unexpected host for NetAddress '%v': expected '%v', got '%v'", na, expectedHost, host) } if port != expectedPort { t.Errorf("Port() returned unexpected port for NetAddress '%v': expected '%v', got '%v'", na, expectedPort, port) } } // Test that Host / Port return "" when net.SplitHostPort errors na := NetAddress("::") host := na.Host() port := na.Port() if host != "" { t.Error("expected Host() to return blank for an un-splittable NetAddress, but it returned:", host) } if port != "" { t.Error("expected Port() to return blank for an un-splittable NetAddress, but it returned:", port) } } // TestIsLoopback tests the IsLoopback method of the NetAddress type. func TestIsLoopback(t *testing.T) { t.Parallel() testSet := []struct { query NetAddress desiredResponse bool }{ // Localhost tests. {"localhost", false}, {"localhost:1234", true}, {"127.0.0.1", false}, {"127.0.0.1:6723", true}, {"::1", false}, {"[::1]:7124", true}, // Local network tests. 
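// (Editor's note, not part of the original source: the private-range
// entries below all expect false because IsLoopback recognizes only
// "localhost" and loopback IPs such as 127.x.x.x and ::1; RFC1918 and
// fd00::/8 addresses are local but not loopback. The port-less variants
// are false for a different reason: net.SplitHostPort fails on them and
// IsLoopback returns false on error.)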
{"10.0.0.0", false}, {"10.0.0.0:1234", false}, {"10.2.2.5", false}, {"10.2.2.5:16432", false}, {"10.255.255.255", false}, {"10.255.255.255:16432", false}, {"172.16.0.0", false}, {"172.16.0.0:1234", false}, {"172.26.2.5", false}, {"172.26.2.5:16432", false}, {"172.31.255.255", false}, {"172.31.255.255:16432", false}, {"192.168.0.0", false}, {"192.168.0.0:1234", false}, {"192.168.2.5", false}, {"192.168.2.5:16432", false}, {"192.168.255.255", false}, {"192.168.255.255:16432", false}, {"1234:0000:0000:0000:0000:0000:0000:0000", false}, {"[1234:0000:0000:0000:0000:0000:0000:0000]:1234", false}, {"fc00:0000:0000:0000:0000:0000:0000:0000", false}, {"[fc00:0000:0000:0000:0000:0000:0000:0000]:1234", false}, {"fd00:0000:0000:0000:0000:0000:0000:0000", false}, {"[fd00:0000:0000:0000:0000:0000:0000:0000]:1234", false}, {"fd30:0000:0000:0000:0000:0000:0000:0000", false}, {"[fd30:0000:0000:0000:0000:0000:0000:0000]:1234", false}, {"fd00:0000:0030:0000:0000:0000:0000:0000", false}, {"[fd00:0000:0030:0000:0000:0000:0000:0000]:1234", false}, {"fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false}, {"[fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff]:1234", false}, {"fe00:0000:0000:0000:0000:0000:0000:0000", false}, {"[fe00:0000:0000:0000:0000:0000:0000:0000]:1234", false}, // Unspecified address tests. {"0.0.0.0:1234", false}, {"[::]:1234", false}, // Public name tests. {"hn.com", false}, {"hn.com:8811", false}, {"2.34.45.64", false}, {"2.34.45.64:7777", false}, {"12.34.45.64", false}, {"12.34.45.64:7777", false}, {"122.34.45.64", false}, {"122.34.45.64:7777", false}, {"197.34.45.64", false}, {"197.34.45.64:7777", false}, {"222.34.45.64", false}, {"222.34.45.64:7777", false}, // Garbage name tests. {"", false}, {"garbage", false}, {"garbage:6432", false}, {"garbage:6146:616", false}, {"::1:4646", false}, {"[::1]", false}, } for _, test := range testSet { if test.query.IsLoopback() != test.desiredResponse { t.Error("test failed:", test, test.query.IsLoopback()) } } } // TestIsValid tests that IsValid only returns nil for valid addresses. func TestIsValid(t *testing.T) { t.Parallel() for _, addr := range validAddrs { na := NetAddress(addr) if err := na.IsValid(); err != nil { t.Errorf("IsValid returned non-nil for valid NetAddress %q: %v", addr, err) } } for _, addr := range invalidAddrs { na := NetAddress(addr) if err := na.IsValid(); err == nil { t.Errorf("IsValid returned nil for an invalid NetAddress %q: %v", addr, err) } } } // TestIsLocal checks that the correct values are returned for all local IP // addresses. func TestIsLocal(t *testing.T) { t.Parallel() testSet := []struct { query NetAddress desiredResponse bool }{ // Localhost tests. {"localhost", false}, {"localhost:1234", true}, {"127.0.0.1", false}, {"127.0.0.1:6723", true}, {"::1", false}, {"[::1]:7124", true}, // Local network tests. 
{"10.0.0.0", false}, {"10.0.0.0:1234", true}, {"10.2.2.5", false}, {"10.2.2.5:16432", true}, {"10.255.255.255", false}, {"10.255.255.255:16432", true}, {"172.16.0.0", false}, {"172.16.0.0:1234", true}, {"172.26.2.5", false}, {"172.26.2.5:16432", true}, {"172.31.255.255", false}, {"172.31.255.255:16432", true}, {"192.168.0.0", false}, {"192.168.0.0:1234", true}, {"192.168.2.5", false}, {"192.168.2.5:16432", true}, {"192.168.255.255", false}, {"192.168.255.255:16432", true}, {"1234:0000:0000:0000:0000:0000:0000:0000", false}, {"[1234:0000:0000:0000:0000:0000:0000:0000]:1234", false}, {"fc00:0000:0000:0000:0000:0000:0000:0000", false}, {"[fc00:0000:0000:0000:0000:0000:0000:0000]:1234", false}, {"fd00:0000:0000:0000:0000:0000:0000:0000", false}, {"[fd00:0000:0000:0000:0000:0000:0000:0000]:1234", true}, {"fd30:0000:0000:0000:0000:0000:0000:0000", false}, {"[fd30:0000:0000:0000:0000:0000:0000:0000]:1234", true}, {"fd00:0000:0030:0000:0000:0000:0000:0000", false}, {"[fd00:0000:0030:0000:0000:0000:0000:0000]:1234", true}, {"fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false}, {"[fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff]:1234", true}, {"fe00:0000:0000:0000:0000:0000:0000:0000", false}, {"[fe00:0000:0000:0000:0000:0000:0000:0000]:1234", false}, // Unspecified address tests. {"0.0.0.0:1234", false}, {"[::]:1234", false}, // Public name tests. {"hn.com", false}, {"hn.com:8811", false}, {"2.34.45.64", false}, {"2.34.45.64:7777", false}, {"12.34.45.64", false}, {"12.34.45.64:7777", false}, {"122.34.45.64", false}, {"122.34.45.64:7777", false}, {"197.34.45.64", false}, {"197.34.45.64:7777", false}, {"222.34.45.64", false}, {"222.34.45.64:7777", false}, // Garbage name tests. {"", false}, {"garbage", false}, {"garbage:6432", false}, {"garbage:6146:616", false}, {"::1:4646", false}, {"[::1]", false}, } for _, test := range testSet { if test.query.IsLocal() != test.desiredResponse { t.Error("test failed:", test, test.query.IsLocal()) } } } Sia-1.3.0/modules/renter.go000066400000000000000000000275101313565667000155460ustar00rootroot00000000000000package modules import ( "encoding/json" "io" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/types" ) const ( // RenterDir is the name of the directory that is used to store the // renter's persistent data. RenterDir = "renter" ) // An ErasureCoder is an error-correcting encoder and decoder. type ErasureCoder interface { // NumPieces is the number of pieces returned by Encode. NumPieces() int // MinPieces is the minimum number of pieces that must be present to // recover the original data. MinPieces() int // Encode splits data into equal-length pieces, with some pieces // containing parity data. Encode(data []byte) ([][]byte, error) // Recover recovers the original data from pieces (including parity) and // writes it to w. pieces should be identical to the slice returned by // Encode (length and order must be preserved), but with missing elements // set to nil. n is the number of bytes to be written to w; this is // necessary because pieces may have been padded with zeros during // encoding. Recover(pieces [][]byte, n uint64, w io.Writer) error } // An Allowance dictates how much the Renter is allowed to spend in a given // period. Note that funds are spent on both storage and bandwidth. 
type Allowance struct { Funds types.Currency `json:"funds"` Hosts uint64 `json:"hosts"` Period types.BlockHeight `json:"period"` RenewWindow types.BlockHeight `json:"renewwindow"` } // DownloadInfo provides information about a file that has been requested for // download. type DownloadInfo struct { SiaPath string `json:"siapath"` Destination DownloadWriter `json:"destination"` Filesize uint64 `json:"filesize"` Received uint64 `json:"received"` StartTime time.Time `json:"starttime"` Error string `json:"error"` } // DownloadWriter provides the interface that all output writers must implement. type DownloadWriter interface { WriteAt(b []byte, off int64) (int, error) Destination() string } // FileUploadParams contains the information used by the Renter to upload a // file. type FileUploadParams struct { Source string SiaPath string ErasureCode ErasureCoder } // FileInfo provides information about a file. type FileInfo struct { SiaPath string `json:"siapath"` Filesize uint64 `json:"filesize"` Available bool `json:"available"` Renewing bool `json:"renewing"` Redundancy float64 `json:"redundancy"` UploadProgress float64 `json:"uploadprogress"` Expiration types.BlockHeight `json:"expiration"` } // A HostDBEntry represents one host entry in the Renter's host DB. It // aggregates the host's external settings and metrics with its public key. type HostDBEntry struct { HostExternalSettings // FirstSeen is the last block height at which this host was announced. FirstSeen types.BlockHeight `json:"firstseen"` // Measurements that have been taken on the host. The most recent // measurements are kept in full detail, historic ones are compressed into // the historic values. HistoricDowntime time.Duration `json:"historicdowntime"` HistoricUptime time.Duration `json:"historicuptime"` ScanHistory HostDBScans `json:"scanhistory"` HistoricFailedInteractions float64 `json:"historicfailedinteractions"` HistoricSuccessfulInteractions float64 `json:"historicsuccessfulinteractions"` RecentFailedInteractions float64 `json:"recentfailedinteractions"` RecentSuccessfulInteractions float64 `json:"recentsuccessfulinteractions"` LastHistoricUpdate types.BlockHeight // The public key of the host, stored separately to minimize risk of certain // MitM based vulnerabilities. PublicKey types.SiaPublicKey `json:"publickey"` } // HostDBScan represents a single scan event. type HostDBScan struct { Timestamp time.Time `json:"timestamp"` Success bool `json:"success"` } // HostScoreBreakdown provides a piece-by-piece explanation of why a host has // the score that it does. // // NOTE: Renters are free to use whatever scoring they feel appropriate for // hosts. Some renters will outright blacklist or whitelist sets of hosts. The // results provided by this struct can only be used as a guide, and may vary // significantly from machine to machine. type HostScoreBreakdown struct { Score types.Currency `json:"score"` ConversionRate float64 `json:"conversionrate"` AgeAdjustment float64 `json:"ageadjustment"` BurnAdjustment float64 `json:"burnadjustment"` CollateralAdjustment float64 `json:"collateraladjustment"` InteractionAdjustment float64 `json:"interactionadjustment"` PriceAdjustment float64 `json:"pricesmultiplier"` StorageRemainingAdjustment float64 `json:"storageremainingadjustment"` UptimeAdjustment float64 `json:"uptimeadjustment"` VersionAdjustment float64 `json:"versionadjustment"` } // RenterPriceEstimation contains a bunch of fields estimating the costs of // various operations on the network.
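// (Editor's note, not part of the original source: each estimate below is
// a types.Currency denominated in hastings; a hypothetical caller could
// display one in siacoins with, e.g.,
//
//	sc := est.DownloadTerabyte.Div(types.SiacoinPrecision)
//
// where est is a RenterPriceEstimation value.)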
type RenterPriceEstimation struct { // The cost of downloading 1 TB of data. DownloadTerabyte types.Currency `json:"downloadterabyte"` // The cost of forming a set of contracts using the defaults. FormContracts types.Currency `json:"formcontracts"` // The cost of storing 1 TB for a month, including redundancy. StorageTerabyteMonth types.Currency `json:"storageterabytemonth"` // The cost of consuming 1 TB of upload bandwidth from the host, including // redundancy. UploadTerabyte types.Currency `json:"uploadterabyte"` } // RenterSettings control the behavior of the Renter. type RenterSettings struct { Allowance Allowance `json:"allowance"` } // HostDBScans represents a sortable slice of scans. type HostDBScans []HostDBScan func (s HostDBScans) Len() int { return len(s) } func (s HostDBScans) Less(i, j int) bool { return s[i].Timestamp.Before(s[j].Timestamp) } func (s HostDBScans) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // MerkleRootSet is a set of Merkle roots, and gets encoded more efficiently. type MerkleRootSet []crypto.Hash // MarshalJSON defines a JSON encoding for a MerkleRootSet. func (mrs MerkleRootSet) MarshalJSON() ([]byte, error) { // Copy the whole array into a giant byte slice and then encode that. fullBytes := make([]byte, crypto.HashSize*len(mrs)) for i := range mrs { copy(fullBytes[i*crypto.HashSize:(i+1)*crypto.HashSize], mrs[i][:]) } return json.Marshal(fullBytes) } // UnmarshalJSON attempts to decode a MerkleRootSet, falling back on the legacy // decoding of a []crypto.Hash if that fails. func (mrs *MerkleRootSet) UnmarshalJSON(b []byte) error { // Decode the giant byte slice, and then split it into separate arrays. var fullBytes []byte err := json.Unmarshal(b, &fullBytes) if err != nil { // Encoding the byte slice has failed, try decoding it as a []crypto.Hash. var hashes []crypto.Hash err := json.Unmarshal(b, &hashes) if err != nil { return err } *mrs = MerkleRootSet(hashes) return nil } umrs := make(MerkleRootSet, len(fullBytes)/32) for i := range umrs { copy(umrs[i][:], fullBytes[i*crypto.HashSize:(i+1)*crypto.HashSize]) } *mrs = umrs return nil } // A RenterContract contains all the metadata necessary to revise or renew a // file contract. See `api.RenterContract` for field information. type RenterContract struct { FileContract types.FileContract `json:"filecontract"` HostPublicKey types.SiaPublicKey `json:"hostpublickey"` ID types.FileContractID `json:"id"` LastRevision types.FileContractRevision `json:"lastrevision"` LastRevisionTxn types.Transaction `json:"lastrevisiontxn"` MerkleRoots MerkleRootSet `json:"merkleroots"` NetAddress NetAddress `json:"netaddress"` SecretKey crypto.SecretKey `json:"secretkey"` StartHeight types.BlockHeight `json:"startheight"` DownloadSpending types.Currency `json:"downloadspending"` StorageSpending types.Currency `json:"storagespending"` UploadSpending types.Currency `json:"uploadspending"` TotalCost types.Currency `json:"totalcost"` ContractFee types.Currency `json:"contractfee"` TxnFee types.Currency `json:"txnfee"` SiafundFee types.Currency `json:"siafundfee"` // GoodForUpload indicates whether the contract should be used to upload new // data or not, and GoodForRenew indicates whether or not the contract // should be renewed. GoodForRenew bool GoodForUpload bool } // EndHeight returns the height at which the host is no longer obligated to // store contract data. 
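// (Editor's note, not part of the original source: NewWindowStart is the
// first block of the contract's storage-proof window, which is why the
// renter treats it as the end of the host's storage obligation.)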
func (rc *RenterContract) EndHeight() types.BlockHeight { return rc.LastRevision.NewWindowStart } // RenterFunds returns the funds remaining in the contract's Renter payout as // of the most recent revision. func (rc *RenterContract) RenterFunds() types.Currency { if len(rc.LastRevision.NewValidProofOutputs) < 2 { build.Critical("malformed RenterContract:", rc) return types.ZeroCurrency } return rc.LastRevision.NewValidProofOutputs[0].Value } // A Renter uploads, tracks, repairs, and downloads a set of files for the // user. type Renter interface { // ActiveHosts provides the list of hosts that the renter is selecting, // sorted by preference. ActiveHosts() []HostDBEntry // AllHosts returns the full list of hosts known to the renter. AllHosts() []HostDBEntry // Close closes the Renter. Close() error // Contracts returns the contracts formed by the renter. Contracts() []RenterContract // CurrentPeriod returns the height at which the current allowance period // began. CurrentPeriod() types.BlockHeight // DeleteFile deletes a file entry from the renter. DeleteFile(path string) error // Download performs a download according to the parameters passed, including // downloads of `offset` and `length` type. Download(params RenterDownloadParameters) error // DownloadQueue lists all the files that have been scheduled for download. DownloadQueue() []DownloadInfo // FileList returns information on all of the files stored by the renter. FileList() []FileInfo // Host provides the DB entry and score breakdown for the requested host. Host(pk types.SiaPublicKey) (HostDBEntry, bool) // LoadSharedFiles loads a '.sia' file into the renter. A .sia file may // contain multiple files. The paths of the added files are returned. LoadSharedFiles(source string) ([]string, error) // LoadSharedFilesAscii loads an ASCII-encoded '.sia' file into the // renter. LoadSharedFilesAscii(asciiSia string) ([]string, error) // PriceEstimation estimates the cost in siacoins of performing various // storage and data operations. PriceEstimation() RenterPriceEstimation // RenameFile changes the path of a file. RenameFile(path, newPath string) error // EstimateHostScore will return the score for a host with the provided // settings, assuming perfect age and uptime adjustments EstimateHostScore(entry HostDBEntry) HostScoreBreakdown // ScoreBreakdown will return the score for a host db entry using the // hostdb's weighting algorithm. ScoreBreakdown(entry HostDBEntry) HostScoreBreakdown // Settings returns the Renter's current settings. Settings() RenterSettings // SetSettings sets the Renter's settings. SetSettings(RenterSettings) error // ShareFiles creates a '.sia' file that can be shared with others. ShareFiles(paths []string, shareDest string) error // ShareFilesAscii creates an ASCII-encoded '.sia' file. ShareFilesAscii(paths []string) (asciiSia string, err error) // Upload uploads a file using the input parameters. Upload(FileUploadParams) error } // RenterDownloadParameters defines the parameters passed to the Renter's // Download method. type RenterDownloadParameters struct { Async bool Httpwriter io.Writer Length uint64 Offset uint64 Siapath string Destination string } Sia-1.3.0/modules/renter/000077500000000000000000000000001313565667000152125ustar00rootroot00000000000000Sia-1.3.0/modules/renter/consts.go000066400000000000000000000024611313565667000170550ustar00rootroot00000000000000package renter import ( "time" "github.com/NebulousLabs/Sia/build" ) var ( // Prime to avoid intersecting with regular events. 
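// (Editor's note, not part of the original source: 61 is prime so the
// cooldown drifts relative to other fixed-interval loops instead of
// firing in lockstep with them. If, as the maxConsecutivePenalty comment
// below suggests, the cooldown doubles per consecutive failure, the
// worst-case standard-network cooldown is 61s << 10, about 17.4 hours.)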
uploadFailureCooldown = build.Select(build.Var{ Dev: time.Second * 7, Standard: time.Second * 61, Testing: time.Second, }).(time.Duration) // Limit the number of doublings to prevent overflows. maxConsecutivePenalty = build.Select(build.Var{ Dev: 4, Standard: 10, Testing: 3, }).(int) // Minimum number of pieces that need to be repaired before the renter will // initiate a repair. minPiecesRepair = build.Select(build.Var{ Dev: 2, Standard: 5, Testing: 3, }).(int) repairQueueInterval = build.Select(build.Var{ Dev: 30 * time.Second, Standard: time.Minute * 15, Testing: 10 * time.Second, }).(time.Duration) // maxChunkCacheSize determines the maximum number of chunks that will be // cached in memory. maxChunkCacheSize = build.Select(build.Var{ Dev: 50, Standard: 30, Testing: 60, }).(int) // chunkDownloadTimeout defines the maximum amount of time to wait for a // chunk download to finish before returning in the download-to-upload repair // loop chunkDownloadTimeout = build.Select(build.Var{ Dev: 15 * time.Minute, Standard: 15 * time.Minute, Testing: 40 * time.Second, }).(time.Duration) ) Sia-1.3.0/modules/renter/contractor/000077500000000000000000000000001313565667000173705ustar00rootroot00000000000000Sia-1.3.0/modules/renter/contractor/allowance.go000066400000000000000000000100401313565667000216570ustar00rootroot00000000000000package contractor import ( "errors" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) var ( errAllowanceNoHosts = errors.New("hosts must be non-zero") errAllowanceZeroPeriod = errors.New("period must be non-zero") errAllowanceWindowSize = errors.New("renew window must be less than period") errAllowanceNotSynced = errors.New("you must be synced to set an allowance") // ErrAllowanceZeroWindow is returned when the caller requests a // zero-length renewal window. This will happen if the caller sets the // period to 1 block, since RenewWindow := period / 2. ErrAllowanceZeroWindow = errors.New("renew window must be non-zero") ) // SetAllowance sets the amount of money the Contractor is allowed to spend on // contracts over a given time period, divided among the number of hosts // specified. Note that Contractor can start forming contracts as soon as // SetAllowance is called; that is, it may block. // // In most cases, SetAllowance will renew existing contracts instead of // forming new ones. This preserves the data on those hosts. When this occurs, // the renewed contracts will atomically replace their previous versions. If // SetAllowance is interrupted, renewed contracts may be lost, though the // allocated funds will eventually be returned. // // If a is the empty allowance, SetAllowance will archive the current contract // set. The contracts cannot be used to create Editors or Downloads, and will // not be renewed. // // TODO: can an Editor or Downloader be used across renewals? // TODO: will hosts allow renewing the same contract twice? // // NOTE: At this time, transaction fees are not counted towards the allowance. // This means the contractor may spend more than allowance.Funds. 
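// (Editor's illustrative sketch, not part of the original source: typical
// use is to build an allowance and check the sentinel errors declared
// above, e.g.
//
//	err := c.SetAllowance(modules.Allowance{
//		Funds:       types.SiacoinPrecision.Mul64(500),
//		Hosts:       50,
//		Period:      4320,
//		RenewWindow: 2160,
//	})
//	if err == ErrAllowanceZeroWindow {
//		// the caller derived RenewWindow as period/2 with a 1-block period
//	}
//
// Passing the zero-valued modules.Allowance instead cancels the allowance
// and archives the current contract set, as described above.)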
func (c *Contractor) SetAllowance(a modules.Allowance) error { if a.Funds.IsZero() && a.Hosts == 0 && a.Period == 0 && a.RenewWindow == 0 { return c.managedCancelAllowance(a) } // sanity checks if a.Hosts == 0 { return errAllowanceNoHosts } else if a.Period == 0 { return errAllowanceZeroPeriod } else if a.RenewWindow == 0 { return ErrAllowanceZeroWindow } else if a.RenewWindow >= a.Period { return errAllowanceWindowSize } else if !c.cs.Synced() { return errAllowanceNotSynced } // calculate the maximum sectors this allowance can store max, err := maxSectors(a, c.hdb, c.tpool) if err != nil { return err } // Only allocate half as many sectors as the max. This leaves some leeway // for replacing contracts, transaction fees, etc. numSectors := max / 2 // check that this is sufficient to store at least one sector if numSectors == 0 { return ErrInsufficientAllowance } c.log.Println("INFO: setting allowance to", a) c.mu.Lock() c.allowance = a err = c.saveSync() c.mu.Unlock() if err != nil { c.log.Println("Unable to save contractor after setting allowance:", err) } // Initiate maintenance on the contracts, and then return. go c.threadedContractMaintenance() return nil } // managedCancelAllowance handles the special case where the allowance is empty. func (c *Contractor) managedCancelAllowance(a modules.Allowance) error { c.log.Println("INFO: canceling allowance") // first need to invalidate any active editors/downloaders // NOTE: this code is the same as in managedRenewContracts var ids []types.FileContractID c.mu.Lock() for id := range c.contracts { ids = append(ids, id) // we aren't renewing, but we don't want new editors or downloaders to // be created c.renewing[id] = true } c.mu.Unlock() defer func() { c.mu.Lock() for _, id := range ids { delete(c.renewing, id) } c.mu.Unlock() }() for _, id := range ids { c.mu.RLock() e, eok := c.editors[id] d, dok := c.downloaders[id] c.mu.RUnlock() if eok { e.invalidate() } if dok { d.invalidate() } } // reset currentPeriod and archive all contracts c.mu.Lock() c.allowance = a c.currentPeriod = 0 for id, contract := range c.contracts { c.oldContracts[id] = contract } c.contracts = make(map[types.FileContractID]modules.RenterContract) err := c.saveSync() c.mu.Unlock() return err } Sia-1.3.0/modules/renter/contractor/consts.go000066400000000000000000000052351313565667000212350ustar00rootroot00000000000000package contractor import ( "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // Constants related to fees and fee estimation. const ( // estimatedFileContractTransactionSize provides the estimated size of // the average file contract in bytes. estimatedFileContractTransactionSize = 1200 ) // Constants related to contract formation parameters. var ( // To alleviate potential block propagation issues, the contractor sleeps // between each contract formation. contractFormationInterval = build.Select(build.Var{ Dev: 10 * time.Second, Standard: 60 * time.Second, Testing: 10 * time.Millisecond, }).(time.Duration) // minHostsForEstimations describes the minimum number of hosts that // are needed to make broad estimations such as the number of sectors // that you can store on the network for a given allowance. minHostsForEstimations = build.Select(build.Var{ // The number is set lower than standard so that it can // be reached/exceeded easily within development // environments, but set high enough that it's also // easy to fall short within the development // environments. 
Dev: 5, // Hosts can have a lot of variance. Selecting too many // hosts will high-ball the price estimation, but users // shouldn't be selecting fewer hosts, and if there are // too few hosts being selected for estimation there is // a risk of underestimating the actual price, which is // something we'd rather avoid. Standard: 10, // Testing tries to happen as fast as possible, // therefore tends to run with a lot fewer hosts. Testing: 4, }).(int) // minScoreHostBuffer defines how many extra hosts are queried when trying // to figure out an appropriate minimum score for the hosts that we have. minScoreHostBuffer = build.Select(build.Var{ Dev: 2, Standard: 10, Testing: 1, }).(int) ) // Constants related to the safety values for when the contractor is forming // contracts. var ( maxCollateral = types.SiacoinPrecision.Mul64(1e3) // 1k SC maxDownloadPrice = maxStoragePrice.Mul64(3 * 4320) maxStoragePrice = types.SiacoinPrecision.Mul64(30e3).Div(modules.BlockBytesPerMonthTerabyte) // 30k SC / TB / Month maxUploadPrice = maxStoragePrice.Mul64(4320) // scoreLeeway defines the factor by which a host can miss the goal score // for a set of hosts. To determine the goal score, a new set of hosts is // queried from the hostdb and the lowest scoring among them is selected. // That score is then divided by scoreLeeway to get the minimum score that a // host is allowed to have before being marked as !GoodForUpload. scoreLeeway = types.NewCurrency64(25) ) Sia-1.3.0/modules/renter/contractor/contractor.go000066400000000000000000000163621313565667000221050ustar00rootroot00000000000000package contractor import ( "errors" "fmt" "os" "path/filepath" "sync" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" siasync "github.com/NebulousLabs/Sia/sync" "github.com/NebulousLabs/Sia/types" ) var ( errNilCS = errors.New("cannot create contractor with nil consensus set") errNilWallet = errors.New("cannot create contractor with nil wallet") errNilTpool = errors.New("cannot create contractor with nil transaction pool") // COMPATv1.0.4-lts // metricsContractID identifies a special contract that contains aggregate // financial metrics from older contractors metricsContractID = types.FileContractID{'m', 'e', 't', 'r', 'i', 'c', 's'} ) // A cachedRevision contains changes that would be applied to a RenterContract // if a contract revision succeeded. The contractor must cache these changes // as a safeguard against desynchronizing with the host. // TODO: save a diff of the Merkle roots instead of all of them. type cachedRevision struct { Revision types.FileContractRevision `json:"revision"` MerkleRoots modules.MerkleRootSet `json:"merkleroots"` } // A Contractor negotiates, revises, renews, and provides access to file // contracts. type Contractor struct { // dependencies cs consensusSet hdb hostDB log *persist.Logger persist persister mu sync.RWMutex tg siasync.ThreadGroup tpool transactionPool wallet wallet // Only one thread should be performing contract maintenance at a time.
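// (Editor's note, not part of the original source: a TryMutex lets a
// maintenance pass bail out immediately when another pass already holds
// the lock, rather than queueing a redundant run behind it.)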
maintenanceLock siasync.TryMutex allowance modules.Allowance blockHeight types.BlockHeight currentPeriod types.BlockHeight lastChange modules.ConsensusChangeID downloaders map[types.FileContractID]*hostDownloader editors map[types.FileContractID]*hostEditor renewing map[types.FileContractID]bool // prevent revising during renewal revising map[types.FileContractID]bool // prevent overlapping revisions cachedRevisions map[types.FileContractID]cachedRevision contracts map[types.FileContractID]modules.RenterContract oldContracts map[types.FileContractID]modules.RenterContract renewedIDs map[types.FileContractID]types.FileContractID } // Allowance returns the current allowance. func (c *Contractor) Allowance() modules.Allowance { c.mu.RLock() defer c.mu.RUnlock() return c.allowance } // Contract returns the latest contract formed with the specified host. func (c *Contractor) Contract(hostAddr modules.NetAddress) (modules.RenterContract, bool) { c.mu.RLock() defer c.mu.RUnlock() for _, c := range c.contracts { if c.NetAddress == hostAddr { return c, true } } return modules.RenterContract{}, false } // ContractByID returns the contract with the id specified, if it exists. func (c *Contractor) ContractByID(id types.FileContractID) (modules.RenterContract, bool) { c.mu.RLock() defer c.mu.RUnlock() contract, exists := c.contracts[id] return contract, exists } // Contracts returns the contracts formed by the contractor in the current // allowance period. Only contracts formed with currently online hosts are // returned. func (c *Contractor) Contracts() (cs []modules.RenterContract) { c.mu.RLock() defer c.mu.RUnlock() return c.onlineContracts() } // AllContracts returns the contracts formed by the contractor in the current // allowance period. func (c *Contractor) AllContracts() (cs []modules.RenterContract) { c.mu.RLock() defer c.mu.RUnlock() for _, contract := range c.contracts { cs = append(cs, contract) } // COMPATv1.0.4-lts // also return the special metrics contract (see persist.go) if contract, ok := c.oldContracts[metricsContractID]; ok { cs = append(cs, contract) } return } // CurrentPeriod returns the height at which the current allowance period // began. func (c *Contractor) CurrentPeriod() types.BlockHeight { c.mu.RLock() defer c.mu.RUnlock() return c.currentPeriod } // ResolveID returns the ID of the most recent renewal of id. func (c *Contractor) ResolveID(id types.FileContractID) types.FileContractID { c.mu.RLock() defer c.mu.RUnlock() newID, exists := c.renewedIDs[id] for exists { id = newID newID, exists = c.renewedIDs[id] } return id } // Close closes the Contractor. func (c *Contractor) Close() error { return c.tg.Stop() } // GoodForRenew indicates whether a contract is intended to be renewed. func (c *Contractor) GoodForRenew(id types.FileContractID) bool { c.mu.RLock() defer c.mu.RUnlock() contract, exists := c.contracts[id] if !exists { return false } return contract.GoodForRenew } // New returns a new Contractor. func New(cs consensusSet, wallet walletShim, tpool transactionPool, hdb hostDB, persistDir string) (*Contractor, error) { // Check for nil inputs. if cs == nil { return nil, errNilCS } if wallet == nil { return nil, errNilWallet } if tpool == nil { return nil, errNilTpool } // Create the persist directory if it does not yet exist. err := os.MkdirAll(persistDir, 0700) if err != nil { return nil, err } // Create the logger. 
logger, err := persist.NewFileLogger(filepath.Join(persistDir, "contractor.log")) if err != nil { return nil, err } // Create Contractor using production dependencies. return newContractor(cs, &walletBridge{w: wallet}, tpool, hdb, newPersist(persistDir), logger) } // newContractor creates a Contractor using the provided dependencies. func newContractor(cs consensusSet, w wallet, tp transactionPool, hdb hostDB, p persister, l *persist.Logger) (*Contractor, error) { // Create the Contractor object. c := &Contractor{ cs: cs, hdb: hdb, log: l, persist: p, tpool: tp, wallet: w, cachedRevisions: make(map[types.FileContractID]cachedRevision), contracts: make(map[types.FileContractID]modules.RenterContract), downloaders: make(map[types.FileContractID]*hostDownloader), editors: make(map[types.FileContractID]*hostEditor), oldContracts: make(map[types.FileContractID]modules.RenterContract), renewedIDs: make(map[types.FileContractID]types.FileContractID), renewing: make(map[types.FileContractID]bool), revising: make(map[types.FileContractID]bool), } // Close the logger (provided as a dependency) upon shutdown. c.tg.AfterStop(func() { if err := c.log.Close(); err != nil { fmt.Println("Failed to close the contractor logger:", err) } }) // Load the prior persistence structures. err := c.load() if err != nil && !os.IsNotExist(err) { return nil, err } // Close the persist (provided as a dependency) upon shutdown. c.tg.AfterStop(func() { if err := c.persist.Close(); err != nil { c.log.Println("Failed to close contractor persist:", err) } }) // Subscribe to the consensus set. err = cs.ConsensusSetSubscribe(c, c.lastChange) if err == modules.ErrInvalidConsensusChangeID { // Reset the contractor consensus variables and try rescanning. c.blockHeight = 0 c.lastChange = modules.ConsensusChangeBeginning err = cs.ConsensusSetSubscribe(c, c.lastChange) } if err != nil { return nil, errors.New("contractor subscription failed: " + err.Error()) } // Unsubscribe from the consensus set upon shutdown. c.tg.OnStop(func() { cs.Unsubscribe(c) }) // We may have upgraded persist or resubscribed. Save now so that we don't // lose our work. err = c.save() if err != nil { return nil, err } return c, nil } Sia-1.3.0/modules/renter/contractor/contractor_test.go000066400000000000000000000255671313565667000231530ustar00rootroot00000000000000package contractor import ( "errors" "os" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // newStub is used to test the New function. It implements all of the contractor's // dependencies. 
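// (Editor's note, not part of the original source: newStub satisfies the
// contractor's consensusSet, walletShim, transactionPool, and hostDB
// dependency interfaces with zero-valued methods, letting TestNew build a
// real Contractor without running live modules -- the same pattern
// stubHostDB uses further down.)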
type newStub struct{} // consensus set stubs func (newStub) ConsensusSetSubscribe(modules.ConsensusSetSubscriber, modules.ConsensusChangeID) error { return nil } func (newStub) Synced() bool { return true } func (newStub) Unsubscribe(modules.ConsensusSetSubscriber) { return } // wallet stubs func (newStub) NextAddress() (uc types.UnlockConditions, err error) { return } func (newStub) StartTransaction() modules.TransactionBuilder { return nil } // transaction pool stubs func (newStub) AcceptTransactionSet([]types.Transaction) error { return nil } func (newStub) FeeEstimation() (a types.Currency, b types.Currency) { return } // hdb stubs func (newStub) AllHosts() []modules.HostDBEntry { return nil } func (newStub) ActiveHosts() []modules.HostDBEntry { return nil } func (newStub) Host(types.SiaPublicKey) (settings modules.HostDBEntry, ok bool) { return } func (newStub) IncrementSuccessfulInteractions(key types.SiaPublicKey) { return } func (newStub) IncrementFailedInteractions(key types.SiaPublicKey) { return } func (newStub) RandomHosts(int, []types.SiaPublicKey) []modules.HostDBEntry { return nil } func (newStub) ScoreBreakdown(modules.HostDBEntry) modules.HostScoreBreakdown { return modules.HostScoreBreakdown{} } // TestNew tests the New function. func TestNew(t *testing.T) { // Using a stub implementation of the dependencies is fine, as long as it's // non-nil. var stub newStub dir := build.TempDir("contractor", t.Name()) // Sane values. _, err := New(stub, stub, stub, stub, dir) if err != nil { t.Fatalf("expected nil, got %v", err) } // Nil consensus set. _, err = New(nil, stub, stub, stub, dir) if err != errNilCS { t.Fatalf("expected %v, got %v", errNilCS, err) } // Nil wallet. _, err = New(stub, nil, stub, stub, dir) if err != errNilWallet { t.Fatalf("expected %v, got %v", errNilWallet, err) } // Nil transaction pool. _, err = New(stub, stub, nil, stub, dir) if err != errNilTpool { t.Fatalf("expected %v, got %v", errNilTpool, err) } // Bad persistDir. _, err = New(stub, stub, stub, stub, "") if !os.IsNotExist(err) { t.Fatalf("expected invalid directory, got %v", err) } } // TestContract tests the Contract method. func TestContract(t *testing.T) { c := &Contractor{ contracts: map[types.FileContractID]modules.RenterContract{ {1}: {ID: types.FileContractID{1}, NetAddress: "foo"}, {2}: {ID: types.FileContractID{2}, NetAddress: "bar"}, {3}: {ID: types.FileContractID{3}, NetAddress: "baz"}, }, } tests := []struct { addr modules.NetAddress exists bool contractID types.FileContractID }{ {"foo", true, types.FileContractID{1}}, {"bar", true, types.FileContractID{2}}, {"baz", true, types.FileContractID{3}}, {"quux", false, types.FileContractID{}}, {"nope", false, types.FileContractID{}}, } for _, test := range tests { contract, ok := c.Contract(test.addr) if ok != test.exists { t.Errorf("%v: expected %v, got %v", test.addr, test.exists, ok) } else if contract.ID != test.contractID { t.Errorf("%v: expected %v, got %v", test.addr, test.contractID, contract.ID) } } // delete all contracts c.contracts = map[types.FileContractID]modules.RenterContract{} for _, test := range tests { _, ok := c.Contract(test.addr) if ok { t.Error("no contracts should remain") } } } // TestContracts tests the Contracts method.
func TestContracts(t *testing.T) { var stub newStub dir := build.TempDir("contractor", t.Name()) c, err := New(stub, stub, stub, stub, dir) if err != nil { t.Fatalf("expected nil, got %v", err) } c.contracts = map[types.FileContractID]modules.RenterContract{ {1}: {ID: types.FileContractID{1}, NetAddress: "foo"}, {2}: {ID: types.FileContractID{2}, NetAddress: "bar"}, {3}: {ID: types.FileContractID{3}, NetAddress: "baz"}, } for _, contract := range c.Contracts() { if exp := c.contracts[contract.ID]; exp.NetAddress != contract.NetAddress { t.Errorf("contract does not match: expected %v, got %v", exp.NetAddress, contract.NetAddress) } } } // TestResolveID tests the ResolveID method. func TestResolveID(t *testing.T) { c := &Contractor{ renewedIDs: map[types.FileContractID]types.FileContractID{ {1}: {2}, {2}: {3}, {3}: {4}, {5}: {6}, }, } tests := []struct { id types.FileContractID resolved types.FileContractID }{ {types.FileContractID{0}, types.FileContractID{0}}, {types.FileContractID{1}, types.FileContractID{4}}, {types.FileContractID{2}, types.FileContractID{4}}, {types.FileContractID{3}, types.FileContractID{4}}, {types.FileContractID{4}, types.FileContractID{4}}, {types.FileContractID{5}, types.FileContractID{6}}, } for _, test := range tests { if r := c.ResolveID(test.id); r != test.resolved { t.Errorf("expected %v -> %v, got %v", test.id, test.resolved, r) } } } // TestAllowance tests the Allowance method. func TestAllowance(t *testing.T) { c := &Contractor{ allowance: modules.Allowance{ Funds: types.NewCurrency64(1), Period: 2, Hosts: 3, }, } a := c.Allowance() if a.Funds.Cmp(c.allowance.Funds) != 0 || a.Period != c.allowance.Period || a.Hosts != c.allowance.Hosts { t.Fatal("Allowance did not return correct allowance:", a, c.allowance) } } // stubHostDB mocks the hostDB dependency using zero-valued implementations of // its methods. type stubHostDB struct{} func (stubHostDB) AllHosts() (hs []modules.HostDBEntry) { return } func (stubHostDB) ActiveHosts() (hs []modules.HostDBEntry) { return } func (stubHostDB) Host(types.SiaPublicKey) (h modules.HostDBEntry, ok bool) { return } func (stubHostDB) IncrementSuccessfulInteractions(key types.SiaPublicKey) { return } func (stubHostDB) IncrementFailedInteractions(key types.SiaPublicKey) { return } func (stubHostDB) PublicKey() (spk types.SiaPublicKey) { return } func (stubHostDB) RandomHosts(int, []types.SiaPublicKey) (hs []modules.HostDBEntry) { return } func (stubHostDB) ScoreBreakdown(modules.HostDBEntry) modules.HostScoreBreakdown { return modules.HostScoreBreakdown{} } // TestIntegrationSetAllowance tests the SetAllowance method. 
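// It walks the allowance validation errors in order -- a zero period, then a // zero renew window, then a renew window that is not smaller than the period -- // before exercising contract formation and renewal with valid values.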
func TestIntegrationSetAllowance(t *testing.T) { if testing.Short() { t.SkipNow() } // create testing trio _, c, m, err := newTestingTrio(t.Name()) if err != nil { t.Fatal(err) } // this test requires two hosts: create another one h, err := newTestingHost(build.TempDir("hostdata", ""), c.cs.(modules.ConsensusSet), c.tpool.(modules.TransactionPool)) if err != nil { t.Fatal(err) } // announce the extra host err = h.Announce() if err != nil { t.Fatal(err) } // mine a block, processing the announcement m.AddBlock() // wait for hostdb to scan host for i := 0; i < 100 && len(c.hdb.RandomHosts(1, nil)) == 0; i++ { time.Sleep(time.Millisecond * 50) } // cancel allowance var a modules.Allowance err = c.SetAllowance(a) if err != nil { t.Fatal(err) } // bad args a.Hosts = 1 err = c.SetAllowance(a) if err != errAllowanceZeroPeriod { t.Errorf("expected %q, got %q", errAllowanceZeroPeriod, err) } a.Period = 20 err = c.SetAllowance(a) if err != ErrAllowanceZeroWindow { t.Errorf("expected %q, got %q", ErrAllowanceZeroWindow, err) } a.RenewWindow = 20 err = c.SetAllowance(a) if err != errAllowanceWindowSize { t.Errorf("expected %q, got %q", errAllowanceWindowSize, err) } // reasonable values; should succeed a.Funds = types.SiacoinPrecision.Mul64(100) a.RenewWindow = 10 err = c.SetAllowance(a) if err != nil { t.Fatal(err) } err = build.Retry(50, 100*time.Millisecond, func() error { if len(c.Contracts()) != 1 { return errors.New("allowance forming seems to have failed") } return nil }) if err != nil { t.Error(err) } // set same allowance; should no-op err = c.SetAllowance(a) if err != nil { t.Fatal(err) } c.mu.Lock() clen := len(c.contracts) c.mu.Unlock() if clen != 1 { t.Fatal("expected 1 contract, got", len(c.contracts)) } m.AddBlock() // set allowance with Hosts = 2; should only form one new contract a.Hosts = 2 err = c.SetAllowance(a) if err != nil { t.Fatal(err) } err = build.Retry(50, 100*time.Millisecond, func() error { if len(c.Contracts()) != 2 { return errors.New("allowance forming seems to have failed") } return nil }) if err != nil { t.Fatal(err) } // set allowance with Funds*2; should trigger renewal of both contracts a.Funds = a.Funds.Mul64(2) err = c.SetAllowance(a) if err != nil { t.Fatal(err) } err = build.Retry(50, 100*time.Millisecond, func() error { if len(c.Contracts()) != 2 { return errors.New("allowance forming seems to have failed") } return nil }) if err != nil { t.Error(err) } // delete one of the contracts and set allowance with Funds*2; should // trigger 1 renewal and 1 new contract c.mu.Lock() for id := range c.contracts { delete(c.contracts, id) break } c.mu.Unlock() a.Funds = a.Funds.Mul64(2) err = c.SetAllowance(a) if err != nil { t.Fatal(err) } err = build.Retry(50, 100*time.Millisecond, func() error { if len(c.Contracts()) != 2 { return errors.New("allowance forming seems to have failed") } return nil }) if err != nil { t.Fatal(err) } // make one of the contracts un-renewable and set allowance with Funds*2; should // trigger 1 renewal failure and 2 new contracts c.mu.Lock() for id, contract := range c.contracts { contract.NetAddress = "foo" c.contracts[id] = contract break } c.mu.Unlock() a.Funds = a.Funds.Mul64(2) err = c.SetAllowance(a) if err != nil { t.Fatal(err) } c.mu.Lock() clen = len(c.contracts) c.mu.Unlock() if clen != 2 { t.Fatal("expected 2 contracts, got", len(c.contracts)) } } // testWalletShim is used to test the walletBridge type. 
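// (The bridge is needed because modules.Wallet's StartTransaction returns a // modules.TransactionBuilder, while the contractor's internal wallet interface // returns the narrower transactionBuilder; see walletBridge in dependencies.go.)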
type testWalletShim struct { nextAddressCalled bool startTxnCalled bool } // These stub implementations for the walletShim interface set their respective // booleans to true, allowing tests to verify that they have been called. func (ws *testWalletShim) NextAddress() (types.UnlockConditions, error) { ws.nextAddressCalled = true return types.UnlockConditions{}, nil } func (ws *testWalletShim) StartTransaction() modules.TransactionBuilder { ws.startTxnCalled = true return nil } // TestWalletBridge tests the walletBridge type. func TestWalletBridge(t *testing.T) { shim := new(testWalletShim) bridge := walletBridge{shim} bridge.NextAddress() if !shim.nextAddressCalled { t.Error("NextAddress was not called on the shim") } bridge.StartTransaction() if !shim.startTxnCalled { t.Error("StartTransaction was not called on the shim") } } Sia-1.3.0/modules/renter/contractor/contracts.go000066400000000000000000000415111313565667000217210ustar00rootroot00000000000000package contractor // contracts.go handles forming and renewing contracts for the contractor. This // includes deciding when new contracts need to be formed, when contracts need // to be renewed, and if contracts need to be blacklisted. import ( "errors" "fmt" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/renter/proto" "github.com/NebulousLabs/Sia/types" ) var ( // ErrInsufficientAllowance indicates that the renter's allowance is less // than the amount necessary to store at least one sector ErrInsufficientAllowance = errors.New("allowance is not large enough to cover fees of contract creation") errTooExpensive = errors.New("host price was too high") ) // maxSectors is the estimated maximum number of sectors that the allowance // can support. func maxSectors(a modules.Allowance, hdb hostDB, tp transactionPool) (uint64, error) { if a.Hosts <= 0 || a.Period <= 0 { return 0, errors.New("invalid allowance") } // Sample at least 10 hosts. nRandomHosts := int(a.Hosts) if nRandomHosts < minHostsForEstimations { nRandomHosts = minHostsForEstimations } hosts := hdb.RandomHosts(nRandomHosts, nil) if len(hosts) < int(a.Hosts) { return 0, fmt.Errorf("not enough hosts in hostdb for sector calculation, got %v but needed %v", len(hosts), int(a.Hosts)) } // Calculate cost of creating contracts with each host, and the cost of // storing sectors on each host. var sectorSum types.Currency var contractCostSum types.Currency for _, h := range hosts { sectorSum = sectorSum.Add(h.StoragePrice) contractCostSum = contractCostSum.Add(h.ContractPrice) } averageSectorPrice := sectorSum.Div64(uint64(len(hosts))) averageContractPrice := contractCostSum.Div64(uint64(len(hosts))) costPerSector := averageSectorPrice.Mul64(a.Hosts).Mul64(modules.SectorSize).Mul64(uint64(a.Period)) costForContracts := averageContractPrice.Mul64(a.Hosts) // Subtract fees for creating the file contracts from the allowance. _, feeEstimation := tp.FeeEstimation() costForTxnFees := types.NewCurrency64(estimatedFileContractTransactionSize).Mul(feeEstimation).Mul64(a.Hosts) // Check for potential divide by zero if a.Funds.Cmp(costForTxnFees.Add(costForContracts)) <= 0 { return 0, ErrInsufficientAllowance } sectorFunds := a.Funds.Sub(costForTxnFees).Sub(costForContracts) // Divide total funds by cost per sector. 
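// (Illustrative arithmetic with made-up prices: if sectorFunds is 90 SC and // storing one sector on every host for the full period costs 3 SC, the division // below yields 30 sectors.)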
numSectors, err := sectorFunds.Div(costPerSector).Uint64() if err != nil { return 0, errors.New("error when totaling number of sectors that can be bought with an allowance: " + err.Error()) } return numSectors, nil } // contractEndHeight returns the height at which the Contractor's contracts // end. If there are no contracts, it returns zero. // // TODO: The contract end height should be picked based on the current period // start plus the period duration, not based on the end heights of the existing // contracts. func (c *Contractor) contractEndHeight() types.BlockHeight { var endHeight types.BlockHeight for _, contract := range c.contracts { endHeight = contract.EndHeight() break } return endHeight } // managedMarkContractsUtility checks every active contract in the contractor and // figures out whether the contract is useful for uploading, and whether the // contract should be renewed. func (c *Contractor) managedMarkContractsUtility() { // Pull a new set of hosts from the hostdb that could be used as a new set // to match the allowance. The lowest scoring host of these new hosts will // be used as a baseline for determining whether our existing contracts are // worthwhile. c.mu.RLock() hostCount := int(c.allowance.Hosts) c.mu.RUnlock() if hostCount <= 0 { return } hosts := c.hdb.RandomHosts(hostCount+minScoreHostBuffer, nil) if len(hosts) <= 0 { return } // Find the lowest score of this batch of hosts. lowestScore := c.hdb.ScoreBreakdown(hosts[0]).Score for i := 1; i < len(hosts); i++ { score := c.hdb.ScoreBreakdown(hosts[i]).Score if score.Cmp(lowestScore) < 0 { lowestScore = score } } // Set the minimum acceptable score to a factor of the lowest score. minScore := lowestScore.Div(scoreLeeway) // Pull together the set of contracts. c.mu.RLock() contracts := make([]modules.RenterContract, 0, len(c.contracts)) for _, contract := range c.contracts { contracts = append(contracts, contract) } c.mu.RUnlock() // Go through and figure out if the utility fields need to be changed. for i := 0; i < len(contracts); i++ { // Start the contract in good standing. contracts[i].GoodForUpload = true contracts[i].GoodForRenew = true host, exists := c.hdb.Host(contracts[i].HostPublicKey) // Contract has no utility if the host is not in the database. if !exists { contracts[i].GoodForUpload = false contracts[i].GoodForRenew = false continue } // Contract has no utility if the score is poor. if c.hdb.ScoreBreakdown(host).Score.Cmp(minScore) < 0 { contracts[i].GoodForUpload = false contracts[i].GoodForRenew = false continue } // Contract has no utility if the host is offline. c.mu.Lock() offline := c.isOffline(contracts[i].ID) c.mu.Unlock() if offline { contracts[i].GoodForUpload = false contracts[i].GoodForRenew = false continue } // Contract has no utility if renew has already completed. (grab some // extra values while we have the mutex) c.mu.RLock() blockHeight := c.blockHeight renewWindow := c.allowance.RenewWindow _, renewedPreviously := c.renewedIDs[contracts[i].ID] c.mu.RUnlock() if renewedPreviously { contracts[i].GoodForUpload = false contracts[i].GoodForRenew = false continue } // Contract should not be used for upload if the number of Merkle roots // exceeds 25e3 - this is in place because the current hosts do not // really perform well beyond this number of sectors in a single // contract. Future updates will fix this, at which point this limit // will change and also have to switch based on host version.
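// (At the 4 MiB sector size used by Sia, 25e3 sectors is roughly 100 GiB of // data in a single contract.)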
if len(contracts[i].MerkleRoots) > 25e3 { // Contract is still fine to be renewed, we just shouldn't keep // adding data to this contract. contracts[i].GoodForUpload = false continue } // Contract should not be used for uploading if the time has come to // renew the contract. if blockHeight+renewWindow >= contracts[i].EndHeight() { contracts[i].GoodForUpload = false continue } } // Update the contractor to reflect the new state for each of the contracts. c.mu.Lock() for i := 0; i < len(contracts); i++ { contract, exists := c.contracts[contracts[i].ID] if !exists { continue } contract.GoodForUpload = contracts[i].GoodForUpload contract.GoodForRenew = contracts[i].GoodForRenew c.contracts[contracts[i].ID] = contract } c.mu.Unlock() } // managedNewContract negotiates an initial file contract with the specified // host, saves it, and returns it. func (c *Contractor) managedNewContract(host modules.HostDBEntry, numSectors uint64, endHeight types.BlockHeight) (modules.RenterContract, error) { // reject hosts that are too expensive if host.StoragePrice.Cmp(maxStoragePrice) > 0 { return modules.RenterContract{}, errTooExpensive } // cap host.MaxCollateral if host.MaxCollateral.Cmp(maxCollateral) > 0 { host.MaxCollateral = maxCollateral } // get an address to use for negotiation uc, err := c.wallet.NextAddress() if err != nil { return modules.RenterContract{}, err } // create contract params c.mu.RLock() params := proto.ContractParams{ Host: host, Filesize: numSectors * modules.SectorSize, StartHeight: c.blockHeight, EndHeight: endHeight, RefundAddress: uc.UnlockHash(), } c.mu.RUnlock() // create transaction builder txnBuilder := c.wallet.StartTransaction() contract, err := proto.FormContract(params, txnBuilder, c.tpool, c.hdb, c.tg.StopChan()) if err != nil { txnBuilder.Drop() return modules.RenterContract{}, err } contractValue := contract.RenterFunds() c.log.Printf("Formed contract with %v for %v SC", host.NetAddress, contractValue.Div(types.SiacoinPrecision)) return contract, nil } // managedRenew negotiates a new contract for data already stored with a host. // It returns the new contract. This is a blocking call that performs network // I/O. func (c *Contractor) managedRenew(contract modules.RenterContract, numSectors uint64, newEndHeight types.BlockHeight) (modules.RenterContract, error) { host, ok := c.hdb.Host(contract.HostPublicKey) if !ok { return modules.RenterContract{}, errors.New("no record of that host") } else if host.StoragePrice.Cmp(maxStoragePrice) > 0 { return modules.RenterContract{}, errTooExpensive } // cap host.MaxCollateral if host.MaxCollateral.Cmp(maxCollateral) > 0 { host.MaxCollateral = maxCollateral } // Set the net address of the contract to the most recent net address for // the host. 
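// (Hosts can re-announce under a new address between renewals; copying the // hostdb's current entry keeps the renewed contract pointing at a dialable // address.)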
contract.NetAddress = host.NetAddress // get an address to use for negotiation uc, err := c.wallet.NextAddress() if err != nil { return modules.RenterContract{}, err } // create contract params c.mu.RLock() params := proto.ContractParams{ Host: host, Filesize: numSectors * modules.SectorSize, StartHeight: c.blockHeight, EndHeight: newEndHeight, RefundAddress: uc.UnlockHash(), } c.mu.RUnlock() // execute negotiation protocol txnBuilder := c.wallet.StartTransaction() newContract, err := proto.Renew(contract, params, txnBuilder, c.tpool, c.hdb, c.tg.StopChan()) if proto.IsRevisionMismatch(err) { // return unused outputs to wallet txnBuilder.Drop() // try again with the cached revision c.mu.RLock() cached, ok := c.cachedRevisions[contract.ID] c.mu.RUnlock() if !ok { // nothing we can do; return original error c.log.Printf("wanted to recover contract %v with host %v, but no revision was cached", contract.ID, contract.NetAddress) return modules.RenterContract{}, err } c.log.Printf("host %v has different revision for %v; retrying with cached revision", contract.NetAddress, contract.ID) contract.LastRevision = cached.Revision // need to start a new transaction txnBuilder = c.wallet.StartTransaction() newContract, err = proto.Renew(contract, params, txnBuilder, c.tpool, c.hdb, c.tg.StopChan()) } if err != nil { txnBuilder.Drop() // return unused outputs to wallet return modules.RenterContract{}, err } return newContract, nil } // threadedContractMaintenance checks the set of contracts that the contractor // has against the allowance, renewing any contracts that need to be renewed, // dropping contracts which are no longer worthwhile, and adding contracts if // there are not enough. func (c *Contractor) threadedContractMaintenance() { // Threading protection. err := c.tg.Add() if err != nil { return } defer c.tg.Done() // Nothing to do if there are no hosts. c.mu.RLock() wantedHosts := c.allowance.Hosts c.mu.RUnlock() if wantedHosts <= 0 { return } // Only one instance of this thread should be running at a time. Under // normal conditions, it is fine to return early if another thread is // already doing maintenance; the next block will trigger another round. // Under testing, however, that is insufficient, because the maintenance // loop must be guaranteed to run, so we block instead. if build.Release == "testing" { c.maintenanceLock.Lock() } else { if !c.maintenanceLock.TryLock() { return } } defer c.maintenanceLock.Unlock() // Update the utility fields for each contract based on the most recent // hostdb. c.managedMarkContractsUtility() // Renew any contracts that need to be renewed. c.mu.RLock() var renewSet []types.FileContractID for _, contract := range c.contracts { if contract.GoodForRenew && c.blockHeight+c.allowance.RenewWindow >= contract.EndHeight() { renewSet = append(renewSet, contract.ID) } } c.mu.RUnlock() if len(renewSet) != 0 { c.log.Printf("renewing %v contracts", len(renewSet)) } // TODO: Need some loop somewhere that renews contracts which haven't gone // through the full period yet, but are out of money (yet the allowance // still has room to refill some contracts) // Figure out the end height and target sector count for the contracts being // renewed. // // TODO: EndHeight should be global, and it should be picked based on the // current period start, not based on the current height plus the allowance // period. c.mu.RLock() endHeight := c.blockHeight + c.allowance.Period max, err := maxSectors(c.allowance, c.hdb, c.tpool) c.mu.RUnlock() if err != nil { return } // Only allocate half as many sectors as the max.
This leaves some leeway // for replacing contracts, transaction fees, etc. numSectors := max / 2 // check that this is sufficient to store at least one sector if numSectors == 0 { return } // Loop through the contracts and renew them one-by-one. for _, id := range renewSet { // Renew one contract. func() { // Mark the contract as being renewed, and defer logic to unmark it // once renewing is complete. c.mu.Lock() c.renewing[id] = true c.mu.Unlock() defer func() { c.mu.Lock() delete(c.renewing, id) c.mu.Unlock() }() // Wait for any active editors and downloaders to finish for this // contract, and then grab the latest revision. c.mu.RLock() e, eok := c.editors[id] d, dok := c.downloaders[id] c.mu.RUnlock() if eok { e.invalidate() } if dok { d.invalidate() } c.mu.RLock() oldContract, ok := c.contracts[id] c.mu.RUnlock() if !ok { c.log.Println("WARN: no record of contract previously added to the renew set:", id) return } // Create the new contract. newContract, err := c.managedRenew(oldContract, numSectors, endHeight) if err != nil { c.log.Printf("WARN: failed to renew contract %v with %v: %v\n", id, oldContract.NetAddress, err) return } c.log.Printf("Renewed contract %v with %v\n", id, oldContract.NetAddress) // Update the utility values for the new contract, and for the old // contract. newContract.GoodForUpload = true newContract.GoodForRenew = true oldContract.GoodForRenew = false oldContract.GoodForUpload = false // Lock the contractor as we update it to use the new contract // instead of the old contract. c.mu.Lock() defer c.mu.Unlock() // Store the contract in the record of historic contracts. _, exists := c.contracts[oldContract.ID] if exists { c.oldContracts[oldContract.ID] = oldContract delete(c.contracts, oldContract.ID) } // Add the new contract, including a mapping from the old // contract to the new contract. c.contracts[newContract.ID] = newContract c.renewedIDs[oldContract.ID] = newContract.ID c.cachedRevisions[newContract.ID] = c.cachedRevisions[oldContract.ID] delete(c.cachedRevisions, oldContract.ID) // Save the contractor. err = c.saveSync() if err != nil { c.log.Println("Failed to save the contractor after creating a new contract.") } }() // Soft sleep for a minute to allow all of the transactions to propagate // through the network. select { case <-c.tg.StopChan(): return case <-time.After(contractFormationInterval): } } // Quit in the event of shutdown. select { case <-c.tg.StopChan(): return default: } // Count the number of contracts which are good for uploading, and then make // more as needed to fill the gap. c.mu.RLock() uploadContracts := 0 for _, contract := range c.contracts { if contract.GoodForUpload || (contract.GoodForRenew && c.blockHeight+c.allowance.RenewWindow >= contract.EndHeight()) { uploadContracts++ } } neededContracts := int(c.allowance.Hosts) - uploadContracts c.mu.RUnlock() if neededContracts <= 0 { return } // Assemble an exclusion list that includes all of the hosts that we already // have contracts with, then select a new batch of hosts to attempt contract // formation with. c.mu.RLock() var exclude []types.SiaPublicKey for _, contract := range c.contracts { exclude = append(exclude, contract.HostPublicKey) } c.mu.RUnlock() hosts := c.hdb.RandomHosts(neededContracts*2+10, exclude) // Form contracts with the hosts one at a time, until we have enough // contracts. for _, host := range hosts { // Attempt forming a contract with this host.
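// (More candidates than needed were requested above because individual // negotiations can fail; the loop exits early once neededContracts reaches // zero.)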
newContract, err := c.managedNewContract(host, numSectors, endHeight) if err != nil { c.log.Printf("Attempted to form a contract with %v, but negotiation failed: %v\n", host.NetAddress, err) continue } newContract.GoodForUpload = true newContract.GoodForRenew = true // Add this contract to the contractor and save. c.mu.Lock() c.contracts[newContract.ID] = newContract err = c.saveSync() c.mu.Unlock() if err != nil { c.log.Println("Unable to save the contractor:", err) } // Quit the loop if we've replaced all needed contracts. neededContracts-- if neededContracts <= 0 { break } // Soft sleep before making the next contract. select { case <-c.tg.StopChan(): return case <-time.After(contractFormationInterval): } } } Sia-1.3.0/modules/renter/contractor/dependencies.go000066400000000000000000000066321313565667000223540ustar00rootroot00000000000000package contractor import ( "path/filepath" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // These interfaces define the Contractor's dependencies. Using the smallest // interface possible makes it easier to mock these dependencies in testing. type ( consensusSet interface { ConsensusSetSubscribe(modules.ConsensusSetSubscriber, modules.ConsensusChangeID) error Synced() bool Unsubscribe(modules.ConsensusSetSubscriber) } // In order to restrict the modules.TransactionBuilder interface, we must // provide a shim to bridge the gap between modules.Wallet and // transactionBuilder. walletShim interface { NextAddress() (types.UnlockConditions, error) StartTransaction() modules.TransactionBuilder } wallet interface { NextAddress() (types.UnlockConditions, error) StartTransaction() transactionBuilder } transactionBuilder interface { AddArbitraryData([]byte) uint64 AddFileContract(types.FileContract) uint64 AddMinerFee(types.Currency) uint64 AddParents([]types.Transaction) AddSiacoinInput(types.SiacoinInput) uint64 AddSiacoinOutput(types.SiacoinOutput) uint64 AddTransactionSignature(types.TransactionSignature) uint64 Drop() FundSiacoins(types.Currency) error Sign(bool) ([]types.Transaction, error) View() (types.Transaction, []types.Transaction) ViewAdded() (parents, coins, funds, signatures []int) } transactionPool interface { AcceptTransactionSet([]types.Transaction) error FeeEstimation() (min types.Currency, max types.Currency) } hostDB interface { AllHosts() []modules.HostDBEntry ActiveHosts() []modules.HostDBEntry Host(types.SiaPublicKey) (modules.HostDBEntry, bool) IncrementSuccessfulInteractions(key types.SiaPublicKey) IncrementFailedInteractions(key types.SiaPublicKey) RandomHosts(n int, exclude []types.SiaPublicKey) []modules.HostDBEntry ScoreBreakdown(modules.HostDBEntry) modules.HostScoreBreakdown } persister interface { save(contractorPersist) error update(...journalUpdate) error load(*contractorPersist) error Close() error } ) // Because wallet is not directly compatible with modules.Wallet (wrong // type signature for StartTransaction), we must provide a bridge type. type walletBridge struct { w walletShim } func (ws *walletBridge) NextAddress() (types.UnlockConditions, error) { return ws.w.NextAddress() } func (ws *walletBridge) StartTransaction() transactionBuilder { return ws.w.StartTransaction() } // stdPersist implements the persister interface via the journal type. The // filename required by these functions is internal to stdPersist.
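// A sketch of typical use, mirroring how the contractor drives the persister // interface (all names are defined in this file): load opens or migrates the // journal, and save checkpoints it: // // p := newPersist(dir) // var data contractorPersist // err := p.load(&data) // err = p.save(data)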
type stdPersist struct { journal *journal filename string } func (p *stdPersist) save(data contractorPersist) error { if p.journal == nil { var err error p.journal, err = newJournal(p.filename, data) return err } return p.journal.checkpoint(data) } func (p *stdPersist) update(us ...journalUpdate) error { return p.journal.update(us) } func (p *stdPersist) load(data *contractorPersist) error { var err error p.journal, err = openJournal(p.filename, data) if err != nil { // Try loading old persist. err = loadv110persist(filepath.Dir(p.filename), data) if err != nil { return err } p.journal, err = newJournal(p.filename, *data) } return err } func (p stdPersist) Close() error { return p.journal.Close() } func newPersist(dir string) *stdPersist { return &stdPersist{ filename: filepath.Join(dir, "contractor.journal"), } } Sia-1.3.0/modules/renter/contractor/doc.go000066400000000000000000000113431313565667000204660ustar00rootroot00000000000000/* Package contractor is responsible for forming and renewing file contracts with hosts. Its goal is to manage the low-level details of the negotiation, revision, and renewal protocols, such that the renter can operate at a higher level of abstraction. Ideally, the renter should be mostly ignorant of the Sia protocol, instead focusing on file management, redundancy, and upload/download algorithms. Contract formation does not begin until the user first calls SetAllowance. An allowance dictates how much money the contractor is allowed to spend on file contracts during a given period. When the user calls SetAllowance for the first time, the call will block until contracts have been negotiated with the specified number of hosts. Upon subsequent calls, new contracts will only be formed if the allowance is sufficiently greater than the previous allowance, where "sufficiently greater" currently means "enough money to pay for at least one additional sector on every host." This allows the user to increase the amount of available storage immediately, at the cost of some complexity. The contractor forms many contracts in parallel with different hosts, and tries to keep all the contracts "consistent" -- that is, they should all have the same storage capacity, and they should all end at the same height. Hosts are selected from the HostDB; there is no support for manually specifying hosts. Contracts are automatically renewed by the contractor at a safe threshold before they are set to expire. When contracts are renewed, they are renewed with the current allowance, which may differ from the allowance that was used to form the initial contracts. In general, this means that allowance modifications only take effect upon the next "contract cycle" (the exception being "sufficiently greater" modifications, as defined above). As an example, imagine that the user first sets an allowance that will cover 10 contracts of 10 sectors each for 100 blocks. The contractor will immediately form contracts with 10 hosts, paying each host enough to cover 10 sectors for 100 blocks. Then, 20 blocks later, the user increases the allowance, such that it now covers 10 contracts of 20 sectors for 200 blocks. The contractor will immediately form contracts as follows: - 10 contracts will be formed with the current hosts, each covering 10 sectors for 80 blocks. - 10 contracts will be formed with new hosts, each covering 20 sectors for 80 blocks. Note that these newly-formed contracts are timed to expire in sync with the existing contracts.
This becomes the new "contract set," totaling 30 contracts, but only 20 hosts, with 20 sectors per host. When it comes time to renew these contracts, only one contract will be renewed per host, and the contracts will be renewed for the full 200-block duration. The new contract set will thus consist of 20 contracts, 20 hosts, 20 sectors, 200 blocks. On the other hand, if the allowance is decreased, no immediate action is taken. Why? Because the contracts have already been paid for. The new allowance will only take effect upon the next renewal. Modifications to file contracts are mediated through the Editor interface. An Editor maintains a network connection to a host, over which it sends modification requests, such as "delete sector 12." After each modification, the Editor revises the underlying file contract and saves it to disk. The primary challenge of the contractor is that it must be smart enough for the user to feel comfortable allowing it to spend their money. Because contract renewal is a background task, it is difficult to report errors to the user and defer to their decision. For example, what should the contractor do in the following scenarios? - The contract set is up for renewal, but the average host price has increased, and now the allowance is not sufficient to cover all of the user's uploaded data. - The user sets an allowance of 10 hosts. The contractor forms 5 contracts, but the rest fail, and the remaining hosts in the HostDB are too expensive. - After contract formation succeeds, 2 of 10 hosts become unresponsive. Later, another 4 become unresponsive. Waiting for user input is dangerous because if the contract period elapses, data is permanently lost. The contractor should treat this as the worst-case scenario, and take steps to prevent it, so long as the allowance is not exceeded. However, since the contractor has no concept of redundancy, it is not well-positioned to determine which sectors to sacrifice and which to preserve. The contractor also lacks the ability to reupload data; it can download sectors, but it does not know the decryption keys or erasure coding metadata required to reconstruct the original data. It follows that these responsibilities must be delegated to the renter. */ package contractor Sia-1.3.0/modules/renter/contractor/downloader.go000066400000000000000000000150161313565667000220600ustar00rootroot00000000000000package contractor import ( "errors" "sync" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/renter/proto" "github.com/NebulousLabs/Sia/types" ) var errInvalidDownloader = errors.New("downloader has been invalidated because its contract is being renewed") // A Downloader retrieves sectors from a host. It requests one sector at // a time, and revises the file contract to transfer money to the host // proportional to the data retrieved. type Downloader interface { // Sector retrieves the sector with the specified Merkle root, and revises // the underlying contract to pay the host proportionally to the data // retrieved. Sector(root crypto.Hash) ([]byte, error) // Close terminates the connection to the host. Close() error } // A hostDownloader retrieves sectors by calling the download RPC on a host. // It implements the Downloader interface. hostDownloaders are safe for use by // multiple goroutines.
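// A sketch of typical use, following the Contractor.Downloader API defined // later in this file: // // d, err := c.Downloader(id, cancel) // defer d.Close() // sector, err := d.Sector(root)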
type hostDownloader struct { clients int // safe to Close when 0 contractID types.FileContractID contractor *Contractor downloader *proto.Downloader hostSettings modules.HostExternalSettings invalid bool // true if invalidate has been called speed uint64 // Bytes per second. mu sync.Mutex } // invalidate sets the invalid flag and closes the underlying // proto.Downloader. Once invalidate returns, the hostDownloader is guaranteed // to not further revise its contract. This is used during contract renewal to // prevent a Downloader from revising a contract mid-renewal. func (hd *hostDownloader) invalidate() { hd.mu.Lock() defer hd.mu.Unlock() if !hd.invalid { hd.downloader.Close() hd.invalid = true } hd.contractor.mu.Lock() delete(hd.contractor.downloaders, hd.contractID) delete(hd.contractor.revising, hd.contractID) hd.contractor.mu.Unlock() } // HostSettings returns the settings of the host that the downloader connects // to. func (hd *hostDownloader) HostSettings() modules.HostExternalSettings { hd.mu.Lock() defer hd.mu.Unlock() return hd.hostSettings } // Sector retrieves the sector with the specified Merkle root, and revises // the underlying contract to pay the host proportionally to the data // retrieved. func (hd *hostDownloader) Sector(root crypto.Hash) ([]byte, error) { hd.mu.Lock() defer hd.mu.Unlock() if hd.invalid { return nil, errInvalidDownloader } contract, sector, err := hd.downloader.Sector(root) if err != nil { return nil, err } hd.contractor.mu.Lock() hd.contractor.contracts[contract.ID] = contract hd.contractor.persist.update(updateDownloadRevision{ NewRevisionTxn: contract.LastRevisionTxn, NewDownloadSpending: contract.DownloadSpending, }) hd.contractor.mu.Unlock() return sector, nil } // Close cleanly terminates the download loop with the host and closes the // connection. func (hd *hostDownloader) Close() error { hd.mu.Lock() defer hd.mu.Unlock() hd.clients-- // Close is a no-op if invalidate has been called, or if there are other // clients still using the hostDownloader. if hd.invalid || hd.clients > 0 { return nil } hd.invalid = true hd.contractor.mu.Lock() delete(hd.contractor.downloaders, hd.contractID) delete(hd.contractor.revising, hd.contractID) hd.contractor.mu.Unlock() return hd.downloader.Close() } // Downloader returns a Downloader object that can be used to download sectors // from a host. func (c *Contractor) Downloader(id types.FileContractID, cancel <-chan struct{}) (_ Downloader, err error) { id = c.ResolveID(id) c.mu.RLock() cachedDownloader, haveDownloader := c.downloaders[id] height := c.blockHeight contract, haveContract := c.contracts[id] renewing := c.renewing[id] c.mu.RUnlock() if renewing { return nil, errors.New("currently renewing that contract") } if haveDownloader { // increment number of clients and return cachedDownloader.mu.Lock() cachedDownloader.clients++ cachedDownloader.mu.Unlock() return cachedDownloader, nil } host, haveHost := c.hdb.Host(contract.HostPublicKey) if !haveContract { return nil, errors.New("no record of that contract") } else if height > contract.EndHeight() { return nil, errors.New("contract has already ended") } else if !haveHost { return nil, errors.New("no record of that host") } else if host.DownloadBandwidthPrice.Cmp(maxDownloadPrice) > 0 { return nil, errTooExpensive } // Update the contract to the most recent net address for the host.
contract.NetAddress = host.NetAddress // acquire revising lock c.mu.Lock() alreadyRevising := c.revising[contract.ID] if alreadyRevising { c.mu.Unlock() return nil, errors.New("already revising that contract") } c.revising[contract.ID] = true c.mu.Unlock() // release lock early if function returns an error defer func() { if err != nil { c.mu.Lock() delete(c.revising, contract.ID) c.mu.Unlock() } }() // Sanity check, unless this is a brand new contract, a cached revision // should exist. if build.DEBUG && contract.LastRevision.NewRevisionNumber > 1 { c.mu.RLock() _, exists := c.cachedRevisions[contract.ID] c.mu.RUnlock() if !exists { c.log.Critical("Cached revision does not exist for contract.") } } // create downloader d, err := proto.NewDownloader(host, contract, c.hdb, cancel) if proto.IsRevisionMismatch(err) { // try again with the cached revision c.mu.RLock() cached, ok := c.cachedRevisions[contract.ID] c.mu.RUnlock() if !ok { // nothing we can do; return original error c.log.Printf("wanted to recover contract %v with host %v, but no revision was cached", contract.ID, contract.NetAddress) return nil, err } c.log.Printf("host %v has different revision for %v; retrying with cached revision", contract.NetAddress, contract.ID) contract.LastRevision = cached.Revision d, err = proto.NewDownloader(host, contract, c.hdb, cancel) // needs to be handled separately since a revision mismatch is not automatically a failed interaction if proto.IsRevisionMismatch(err) { c.hdb.IncrementFailedInteractions(host.PublicKey) } } if err != nil { return nil, err } // supply a SaveFn that saves the revision to the contractor's persist // (the existing revision will be overwritten when SaveFn is called) d.SaveFn = c.saveDownloadRevision(contract.ID) // cache downloader hd := &hostDownloader{ clients: 1, contractID: contract.ID, contractor: c, downloader: d, hostSettings: host.HostExternalSettings, } c.mu.Lock() c.downloaders[contract.ID] = hd c.mu.Unlock() return hd, nil } Sia-1.3.0/modules/renter/contractor/editor.go000066400000000000000000000211051313565667000212040ustar00rootroot00000000000000package contractor import ( "errors" "sync" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/renter/proto" "github.com/NebulousLabs/Sia/types" ) // the contractor will cap host's MaxCollateral setting to this value var maxUploadCollateral = types.SiacoinPrecision.Mul64(1e3).Div(modules.BlockBytesPerMonthTerabyte) // 1k SC / TB / Month var errInvalidEditor = errors.New("editor has been invalidated because its contract is being renewed") // An Editor modifies a Contract by communicating with a host. It uses the // contract revision protocol to send modification requests to the host. // Editors are the means by which the renter uploads data to hosts. type Editor interface { // Upload revises the underlying contract to store the new data. It // returns the Merkle root of the data. Upload(data []byte) (root crypto.Hash, err error) // Delete removes a sector from the underlying contract. Delete(crypto.Hash) error // Modify overwrites a sector with new data. Because the Editor does not // have access to the original sector data, the new Merkle root must be // supplied by the caller. Modify(oldRoot, newRoot crypto.Hash, offset uint64, newData []byte) error // Address returns the address of the host. Address() modules.NetAddress // ContractID returns the FileContractID of the contract. 
ContractID() types.FileContractID // EndHeight returns the height at which the contract ends. EndHeight() types.BlockHeight // Close terminates the connection to the host. Close() error } // A hostEditor modifies a Contract by calling the revise RPC on a host. It // implements the Editor interface. hostEditors are safe for use by // multiple goroutines. type hostEditor struct { clients int // safe to Close when 0 contract modules.RenterContract contractor *Contractor editor *proto.Editor invalid bool // true if invalidate has been called mu sync.Mutex } // invalidate sets the invalid flag and closes the underlying proto.Editor. // Once invalidate returns, the hostEditor is guaranteed to not further revise // its contract. This is used during contract renewal to prevent an Editor // from revising a contract mid-renewal. func (he *hostEditor) invalidate() { he.mu.Lock() defer he.mu.Unlock() if !he.invalid { he.editor.Close() he.invalid = true } he.contractor.mu.Lock() delete(he.contractor.editors, he.contract.ID) delete(he.contractor.revising, he.contract.ID) he.contractor.mu.Unlock() } // Address returns the NetAddress of the host. func (he *hostEditor) Address() modules.NetAddress { return he.contract.NetAddress } // ContractID returns the ID of the contract being revised. func (he *hostEditor) ContractID() types.FileContractID { return he.contract.ID } // EndHeight returns the height at which the host is no longer obligated to // store the file. func (he *hostEditor) EndHeight() types.BlockHeight { return he.contract.EndHeight() } // Close cleanly terminates the revision loop with the host and closes the // connection. func (he *hostEditor) Close() error { he.mu.Lock() defer he.mu.Unlock() he.clients-- // Close is a no-op if invalidate has been called, or if there are other // clients still using the hostEditor. if he.invalid || he.clients > 0 { return nil } he.invalid = true he.contractor.mu.Lock() delete(he.contractor.editors, he.contract.ID) delete(he.contractor.revising, he.contract.ID) he.contractor.mu.Unlock() return he.editor.Close() } // Upload negotiates a revision that adds a sector to a file contract. func (he *hostEditor) Upload(data []byte) (_ crypto.Hash, err error) { he.mu.Lock() defer he.mu.Unlock() if he.invalid { return crypto.Hash{}, errInvalidEditor } contract, sectorRoot, err := he.editor.Upload(data) if err != nil { return crypto.Hash{}, err } he.contractor.mu.Lock() he.contractor.contracts[contract.ID] = contract he.contractor.persist.update(updateUploadRevision{ NewRevisionTxn: contract.LastRevisionTxn, NewSectorRoot: sectorRoot, NewSectorIndex: len(contract.MerkleRoots) - 1, NewUploadSpending: contract.UploadSpending, NewStorageSpending: contract.StorageSpending, }) he.contractor.mu.Unlock() he.contract = contract return sectorRoot, nil } // Delete negotiates a revision that removes a sector from a file contract. func (he *hostEditor) Delete(root crypto.Hash) (err error) { he.mu.Lock() defer he.mu.Unlock() if he.invalid { return errInvalidEditor } contract, err := he.editor.Delete(root) if err != nil { return err } he.contractor.mu.Lock() he.contractor.contracts[contract.ID] = contract he.contractor.saveSync() he.contractor.mu.Unlock() he.contract = contract return nil } // Modify negotiates a revision that edits a sector in a file contract. 
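// A sketch of how a caller derives newRoot, mirroring TestIntegrationModify // in this package: // // copy(data[offset:], newData) // newRoot := crypto.MerkleRoot(data) // err := editor.Modify(oldRoot, newRoot, offset, newData)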
func (he *hostEditor) Modify(oldRoot, newRoot crypto.Hash, offset uint64, newData []byte) (err error) { he.mu.Lock() defer he.mu.Unlock() if he.invalid { return errInvalidEditor } contract, err := he.editor.Modify(oldRoot, newRoot, offset, newData) if err != nil { return err } he.contractor.mu.Lock() he.contractor.contracts[contract.ID] = contract he.contractor.saveSync() he.contractor.mu.Unlock() he.contract = contract return nil } // Editor returns an Editor object that can be used to upload, modify, and // delete sectors on a host. func (c *Contractor) Editor(id types.FileContractID, cancel <-chan struct{}) (_ Editor, err error) { id = c.ResolveID(id) c.mu.RLock() cachedEditor, haveEditor := c.editors[id] height := c.blockHeight contract, haveContract := c.contracts[id] renewing := c.renewing[id] c.mu.RUnlock() if renewing { return nil, errors.New("currently renewing that contract") } if haveEditor { // increment number of clients and return cachedEditor.mu.Lock() cachedEditor.clients++ cachedEditor.mu.Unlock() return cachedEditor, nil } host, haveHost := c.hdb.Host(contract.HostPublicKey) if !haveContract { return nil, errors.New("no record of that contract") } else if height > contract.EndHeight() { return nil, errors.New("contract has already ended") } else if !haveHost { return nil, errors.New("no record of that host") } else if host.StoragePrice.Cmp(maxStoragePrice) > 0 { return nil, errTooExpensive } else if host.UploadBandwidthPrice.Cmp(maxUploadPrice) > 0 { return nil, errTooExpensive } else if build.VersionCmp(host.Version, "0.6.0") > 0 { // COMPATv0.6.0: don't cap host.Collateral on old hosts if host.Collateral.Cmp(maxUploadCollateral) > 0 { host.Collateral = maxUploadCollateral } } contract.NetAddress = host.NetAddress // acquire revising lock c.mu.Lock() alreadyRevising := c.revising[contract.ID] if alreadyRevising { c.mu.Unlock() return nil, errors.New("already revising that contract") } c.revising[contract.ID] = true c.mu.Unlock() // release lock early if function returns an error defer func() { if err != nil { c.mu.Lock() delete(c.revising, contract.ID) c.mu.Unlock() } }() // Sanity check, unless this is a brand new contract, a cached revision // should exist.
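// (Contract formation leaves the revision number at 1; a higher number means // the contract has been revised at least once, so a snapshot should have been // cached.)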
if build.DEBUG && contract.LastRevision.NewRevisionNumber > 1 { c.mu.RLock() _, exists := c.cachedRevisions[contract.ID] c.mu.RUnlock() if !exists { c.log.Critical("Cached revision does not exist for contract.") } } // create editor e, err := proto.NewEditor(host, contract, height, c.hdb, cancel) if proto.IsRevisionMismatch(err) { // try again with the cached revision c.mu.RLock() cached, ok := c.cachedRevisions[contract.ID] c.mu.RUnlock() if !ok { // nothing we can do; return original error c.log.Printf("wanted to recover contract %v with host %v, but no revision was cached", contract.ID, contract.NetAddress) return nil, err } c.log.Printf("host %v has different revision for %v; retrying with cached revision", contract.NetAddress, contract.ID) contract.LastRevision = cached.Revision contract.MerkleRoots = cached.MerkleRoots e, err = proto.NewEditor(host, contract, height, c.hdb, cancel) // needs to be handled separately since a revision mismatch is not automatically a failed interaction if proto.IsRevisionMismatch(err) { c.hdb.IncrementFailedInteractions(host.PublicKey) } } if err != nil { return nil, err } // supply a SaveFn that saves the revision to the contractor's persist // (the existing revision will be overwritten when SaveFn is called) e.SaveFn = c.saveUploadRevision(contract.ID) // cache editor he := &hostEditor{ clients: 1, contract: contract, contractor: c, editor: e, } c.mu.Lock() c.editors[contract.ID] = he c.mu.Unlock() return he, nil } Sia-1.3.0/modules/renter/contractor/host_integration_test.go000066400000000000000000000570661313565667000243540ustar00rootroot00000000000000package contractor import ( "bytes" "errors" "os" "path/filepath" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/consensus" "github.com/NebulousLabs/Sia/modules/gateway" "github.com/NebulousLabs/Sia/modules/host" "github.com/NebulousLabs/Sia/modules/miner" "github.com/NebulousLabs/Sia/modules/renter/hostdb" "github.com/NebulousLabs/Sia/modules/renter/proto" "github.com/NebulousLabs/Sia/modules/transactionpool" modWallet "github.com/NebulousLabs/Sia/modules/wallet" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) // newTestingWallet is a helper function that creates a ready-to-use wallet // and mines some coins into it. func newTestingWallet(testdir string, cs modules.ConsensusSet, tp modules.TransactionPool) (modules.Wallet, error) { w, err := modWallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { return nil, err } key := crypto.GenerateTwofishKey() if !w.Encrypted() { _, err = w.Encrypt(key) if err != nil { return nil, err } } err = w.Unlock(key) if err != nil { return nil, err } // give it some money m, err := miner.New(cs, tp, w, filepath.Join(testdir, modules.MinerDir)) if err != nil { return nil, err } for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ { _, err := m.AddBlock() if err != nil { return nil, err } } return w, nil } // newTestingHost is a helper function that creates a ready-to-use host. 
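// It funds a wallet, enables AcceptingContracts, and adds a 64-sector storage // folder, as the function body below shows.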
func newTestingHost(testdir string, cs modules.ConsensusSet, tp modules.TransactionPool) (modules.Host, error) { w, err := newTestingWallet(testdir, cs, tp) if err != nil { return nil, err } h, err := host.New(cs, tp, w, "localhost:0", filepath.Join(testdir, modules.HostDir)) if err != nil { return nil, err } // configure host to accept contracts settings := h.InternalSettings() settings.AcceptingContracts = true err = h.SetInternalSettings(settings) if err != nil { return nil, err } // add storage to host storageFolder := filepath.Join(testdir, "storage") err = os.MkdirAll(storageFolder, 0700) if err != nil { return nil, err } err = h.AddStorageFolder(storageFolder, modules.SectorSize*64) if err != nil { return nil, err } return h, nil } // newTestingContractor is a helper function that creates a ready-to-use // contractor. func newTestingContractor(testdir string, g modules.Gateway, cs modules.ConsensusSet, tp modules.TransactionPool) (*Contractor, error) { w, err := newTestingWallet(testdir, cs, tp) if err != nil { return nil, err } hdb, err := hostdb.New(g, cs, filepath.Join(testdir, "hostdb")) if err != nil { return nil, err } return New(cs, w, tp, hdb, filepath.Join(testdir, "contractor")) } // newTestingTrio creates a Host, Contractor, and TestMiner that can be used // for testing host/renter interactions. func newTestingTrio(name string) (modules.Host, *Contractor, modules.TestMiner, error) { testdir := build.TempDir("contractor", name) // create miner g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { return nil, nil, nil, err } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { return nil, nil, nil, err } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { return nil, nil, nil, err } w, err := modWallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { return nil, nil, nil, err } key := crypto.GenerateTwofishKey() if !w.Encrypted() { _, err = w.Encrypt(key) if err != nil { return nil, nil, nil, err } } err = w.Unlock(key) if err != nil { return nil, nil, nil, err } m, err := miner.New(cs, tp, w, filepath.Join(testdir, modules.MinerDir)) if err != nil { return nil, nil, nil, err } // create host and contractor, using same consensus set and gateway h, err := newTestingHost(filepath.Join(testdir, "Host"), cs, tp) if err != nil { return nil, nil, nil, build.ExtendErr("error creating testing host", err) } c, err := newTestingContractor(filepath.Join(testdir, "Contractor"), g, cs, tp) if err != nil { return nil, nil, nil, err } // announce the host err = h.Announce() if err != nil { return nil, nil, nil, build.ExtendErr("error announcing host", err) } // mine a block, processing the announcement m.AddBlock() // wait for hostdb to scan host for i := 0; i < 50 && len(c.hdb.ActiveHosts()) == 0; i++ { time.Sleep(time.Millisecond * 100) } if len(c.hdb.ActiveHosts()) == 0 { return nil, nil, nil, errors.New("host did not make it into the contractor hostdb in time") } return h, c, m, nil } // TestIntegrationFormContract tests that the contractor can form contracts // with the host module. 
func TestIntegrationFormContract(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() h, c, _, err := newTestingTrio(t.Name()) if err != nil { t.Fatal(err) } defer h.Close() defer c.Close() // get the host's entry from the db hostEntry, ok := c.hdb.Host(h.PublicKey()) if !ok { t.Fatal("no entry for host in db") } // form a contract with the host _, err = c.managedNewContract(hostEntry, 10, c.blockHeight+100) if err != nil { t.Fatal(err) } } // TestIntegrationReviseContract tests that the contractor can revise a // contract previously formed with a host. func TestIntegrationReviseContract(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // create testing trio h, c, _, err := newTestingTrio(t.Name()) if err != nil { t.Fatal(err) } defer h.Close() defer c.Close() // get the host's entry from the db hostEntry, ok := c.hdb.Host(h.PublicKey()) if !ok { t.Fatal("no entry for host in db") } // form a contract with the host contract, err := c.managedNewContract(hostEntry, 10, c.blockHeight+100) if err != nil { t.Fatal(err) } c.mu.Lock() c.contracts[contract.ID] = contract c.mu.Unlock() // revise the contract editor, err := c.Editor(contract.ID, nil) if err != nil { t.Fatal(err) } data := fastrand.Bytes(int(modules.SectorSize)) _, err = editor.Upload(data) if err != nil { t.Fatal(err) } err = editor.Close() if err != nil { t.Fatal(err) } } // TestIntegrationUploadDownload tests that the contractor can upload data to // a host and download it intact. func TestIntegrationUploadDownload(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // create testing trio h, c, _, err := newTestingTrio(t.Name()) if err != nil { t.Fatal(err) } defer h.Close() defer c.Close() // get the host's entry from the db hostEntry, ok := c.hdb.Host(h.PublicKey()) if !ok { t.Fatal("no entry for host in db") } // form a contract with the host contract, err := c.managedNewContract(hostEntry, 10, c.blockHeight+100) if err != nil { t.Fatal(err) } c.mu.Lock() c.contracts[contract.ID] = contract c.mu.Unlock() // revise the contract editor, err := c.Editor(contract.ID, nil) if err != nil { t.Fatal(err) } data := fastrand.Bytes(int(modules.SectorSize)) root, err := editor.Upload(data) if err != nil { t.Fatal(err) } err = editor.Close() if err != nil { t.Fatal(err) } // download the data downloader, err := c.Downloader(contract.ID, nil) if err != nil { t.Fatal(err) } retrieved, err := downloader.Sector(root) if err != nil { t.Fatal(err) } if !bytes.Equal(data, retrieved) { t.Fatal("downloaded data does not match original") } err = downloader.Close() if err != nil { t.Fatal(err) } } // TestIntegrationDelete tests that the contractor can delete a sector from a // contract previously formed with a host. 
func TestIntegrationDelete(t *testing.T) { t.Skip("deletion is deprecated") // create testing trio h, c, _, err := newTestingTrio(t.Name()) if err != nil { t.Fatal(err) } defer h.Close() defer c.Close() // get the host's entry from the db hostEntry, ok := c.hdb.Host(h.PublicKey()) if !ok { t.Fatal("no entry for host in db") } // form a contract with the host contract, err := c.managedNewContract(hostEntry, 10, c.blockHeight+100) if err != nil { t.Fatal(err) } c.mu.Lock() c.contracts[contract.ID] = contract c.mu.Unlock() // revise the contract editor, err := c.Editor(contract.ID, nil) if err != nil { t.Fatal(err) } data := fastrand.Bytes(int(modules.SectorSize)) _, err = editor.Upload(data) if err != nil { t.Fatal(err) } err = editor.Close() if err != nil { t.Fatal(err) } c.mu.Lock() contract = c.contracts[contract.ID] c.mu.Unlock() // delete the sector editor, err = c.Editor(contract.ID, nil) if err != nil { t.Fatal(err) } err = editor.Delete(contract.MerkleRoots[0]) if err != nil { t.Fatal(err) } err = editor.Close() if err != nil { t.Fatal(err) } } // TestIntegrationInsertDelete tests that the contractor can insert and delete // a sector during the same revision. func TestIntegrationInsertDelete(t *testing.T) { t.Skip("deletion is deprecated") // create testing trio h, c, _, err := newTestingTrio(t.Name()) if err != nil { t.Fatal(err) } defer h.Close() defer c.Close() // get the host's entry from the db hostEntry, ok := c.hdb.Host(h.PublicKey()) if !ok { t.Fatal("no entry for host in db") } // form a contract with the host contract, err := c.managedNewContract(hostEntry, 10, c.blockHeight+100) if err != nil { t.Fatal(err) } c.mu.Lock() c.contracts[contract.ID] = contract c.mu.Unlock() // revise the contract editor, err := c.Editor(contract.ID, nil) if err != nil { t.Fatal(err) } data := fastrand.Bytes(int(modules.SectorSize)) // insert the sector _, err = editor.Upload(data) if err != nil { t.Fatal(err) } // delete the sector err = editor.Delete(crypto.MerkleRoot(data)) if err != nil { t.Fatal(err) } err = editor.Close() if err != nil { t.Fatal(err) } // contract should have no sectors contract = c.contracts[contract.ID] if len(contract.MerkleRoots) != 0 { t.Fatal("contract should have no sectors:", contract.MerkleRoots) } } // TestIntegrationModify tests that the contractor can modify a previously- // uploaded sector. 
func TestIntegrationModify(t *testing.T) { t.Skip("modification is deprecated") // create testing trio h, c, _, err := newTestingTrio(t.Name()) if err != nil { t.Fatal(err) } defer h.Close() defer c.Close() // get the host's entry from the db hostEntry, ok := c.hdb.Host(h.PublicKey()) if !ok { t.Fatal("no entry for host in db") } // form a contract with the host contract, err := c.managedNewContract(hostEntry, 10, c.blockHeight+100) if err != nil { t.Fatal(err) } c.mu.Lock() c.contracts[contract.ID] = contract c.mu.Unlock() // revise the contract editor, err := c.Editor(contract.ID, nil) if err != nil { t.Fatal(err) } data := fastrand.Bytes(int(modules.SectorSize)) // insert the sector _, err = editor.Upload(data) if err != nil { t.Fatal(err) } err = editor.Close() if err != nil { t.Fatal(err) } // modify the sector oldRoot := crypto.MerkleRoot(data) offset, newData := uint64(10), []byte{1, 2, 3, 4, 5} copy(data[offset:], newData) newRoot := crypto.MerkleRoot(data) editor, err = c.Editor(contract.ID, nil) if err != nil { t.Fatal(err) } err = editor.Modify(oldRoot, newRoot, offset, newData) if err != nil { t.Fatal(err) } err = editor.Close() if err != nil { t.Fatal(err) } } // TestIntegrationRenew tests that the contractor can renew a previously- // formed file contract. func TestIntegrationRenew(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // create testing trio h, c, _, err := newTestingTrio(t.Name()) if err != nil { t.Fatal(err) } defer h.Close() defer c.Close() // get the host's entry from the db hostEntry, ok := c.hdb.Host(h.PublicKey()) if !ok { t.Fatal("no entry for host in db") } // form a contract with the host contract, err := c.managedNewContract(hostEntry, 10, c.blockHeight+100) if err != nil { t.Fatal(err) } c.mu.Lock() c.contracts[contract.ID] = contract c.mu.Unlock() // revise the contract editor, err := c.Editor(contract.ID, nil) if err != nil { t.Fatal(err) } data := fastrand.Bytes(int(modules.SectorSize)) // insert the sector root, err := editor.Upload(data) if err != nil { t.Fatal(err) } err = editor.Close() if err != nil { t.Fatal(err) } // renew the contract oldContract := c.contracts[contract.ID] contract, err = c.managedRenew(oldContract, modules.SectorSize*10, c.blockHeight+200) if err != nil { t.Fatal(err) } c.mu.Lock() c.contracts[contract.ID] = contract c.mu.Unlock() // check renewed contract if contract.FileContract.FileMerkleRoot != root { t.Fatal(contract.FileContract.FileMerkleRoot) } else if contract.FileContract.FileSize != modules.SectorSize { t.Fatal(contract.FileContract.FileSize) } else if contract.FileContract.RevisionNumber != 0 { t.Fatal(contract.FileContract.RevisionNumber) } else if contract.FileContract.WindowStart != c.blockHeight+200 { t.Fatal(contract.FileContract.WindowStart) } // check that Merkle roots are intact if len(contract.MerkleRoots) != len(oldContract.MerkleRoots) { t.Fatal(len(contract.MerkleRoots), len(oldContract.MerkleRoots)) } // download the renewed contract downloader, err := c.Downloader(contract.ID, nil) if err != nil { t.Fatal(err) } retrieved, err := downloader.Sector(root) if err != nil { t.Fatal(err) } if !bytes.Equal(data, retrieved) { t.Fatal("downloaded data does not match original") } err = downloader.Close() if err != nil { t.Fatal(err) } // renew to a lower height oldContract = c.contracts[contract.ID] contract, err = c.managedRenew(oldContract, modules.SectorSize*10, c.blockHeight+100) if err != nil { t.Fatal(err) } if contract.FileContract.WindowStart != c.blockHeight+100 { 
t.Fatal(contract.FileContract.WindowStart) } // check that Merkle roots are intact if len(contract.MerkleRoots) != len(oldContract.MerkleRoots) { t.Fatal(len(contract.MerkleRoots), len(oldContract.MerkleRoots)) } c.mu.Lock() c.contracts[contract.ID] = contract c.mu.Unlock() // revise the contract editor, err = c.Editor(contract.ID, nil) if err != nil { t.Fatal(err) } data = fastrand.Bytes(int(modules.SectorSize)) // insert the sector _, err = editor.Upload(data) if err != nil { t.Fatal(err) } err = editor.Close() if err != nil { t.Fatal(err) } } // TestIntegrationResync tests that the contractor can resync with a host // after being interrupted during contract revision. func TestIntegrationResync(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // create testing trio h, c, _, err := newTestingTrio(t.Name()) if err != nil { t.Fatal(err) } defer h.Close() defer c.Close() // get the host's entry from the db hostEntry, ok := c.hdb.Host(h.PublicKey()) if !ok { t.Fatal("no entry for host in db") } // form a contract with the host contract, err := c.managedNewContract(hostEntry, 10, c.blockHeight+100) if err != nil { t.Fatal(err) } c.mu.Lock() c.contracts[contract.ID] = contract c.mu.Unlock() // revise the contract editor, err := c.Editor(contract.ID, nil) if err != nil { t.Fatal(err) } data := fastrand.Bytes(int(modules.SectorSize)) root, err := editor.Upload(data) if err != nil { t.Fatal(err) } err = editor.Close() if err != nil { t.Fatal(err) } // download the data downloader, err := c.Downloader(contract.ID, nil) if err != nil { t.Fatal(err) } retrieved, err := downloader.Sector(root) if err != nil { t.Fatal(err) } if !bytes.Equal(data, retrieved) { t.Fatal("downloaded data does not match original") } err = downloader.Close() if err != nil { t.Fatal(err) } contract = c.contracts[contract.ID] // Add some corruption to the set of cached revisions. badContract := contract badContract.LastRevision.NewRevisionNumber-- badContract.LastRevisionTxn.TransactionSignatures = nil // delete signatures c.mu.Lock() cr := c.cachedRevisions[contract.ID] cr.Revision.NewRevisionNumber = 0 cr.Revision.NewRevisionNumber-- c.cachedRevisions[contract.ID] = cr c.contracts[badContract.ID] = badContract c.mu.Unlock() // Editor should fail with the bad contract _, err = c.Editor(badContract.ID, nil) if !proto.IsRevisionMismatch(err) { t.Fatal("expected revision mismatch, got", err) } // add cachedRevision cachedRev := cachedRevision{contract.LastRevision, contract.MerkleRoots} c.mu.Lock() c.cachedRevisions[contract.ID] = cachedRev c.mu.Unlock() // Editor and Downloader should now succeed after loading the cachedRevision editor, err = c.Editor(badContract.ID, nil) if err != nil { t.Fatal(err) } editor.Close() downloader, err = c.Downloader(badContract.ID, nil) if err != nil { t.Fatal(err) } downloader.Close() // Add some corruption to the set of cached revisions. 
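// Decrementing the revision number and deleting the transaction signatures
// simulates a renter that crashed after the host applied a revision but
// before the renter recorded it -- the desynchronization scenario this test
// is exercising.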
badContract = contract badContract.LastRevision.NewRevisionNumber-- badContract.LastRevisionTxn.TransactionSignatures = nil // delete signatures c.mu.Lock() cr = c.cachedRevisions[contract.ID] cr.Revision.NewRevisionNumber = 0 cr.Revision.NewRevisionNumber-- c.cachedRevisions[contract.ID] = cr c.contracts[badContract.ID] = badContract c.mu.Unlock() // Editor should fail with the bad contract _, err = c.Editor(badContract.ID, nil) if !proto.IsRevisionMismatch(err) { t.Fatal("expected revision mismatch, got", err) } // add cachedRevision c.mu.Lock() c.cachedRevisions[contract.ID] = cachedRev c.mu.Unlock() // should be able to upload after loading the cachedRevision editor, err = c.Editor(badContract.ID, nil) if err != nil { t.Fatal(err) } _, err = editor.Upload(data) if err != nil { t.Fatal(err) } editor.Close() } // TestIntegrationDownloaderCaching tests that downloaders are properly cached // by the contractor. When two downloaders are requested for the same // contract, only one underlying downloader should be created. func TestIntegrationDownloaderCaching(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // create testing trio h, c, _, err := newTestingTrio(t.Name()) if err != nil { t.Fatal(err) } defer h.Close() defer c.Close() // get the host's entry from the db hostEntry, ok := c.hdb.Host(h.PublicKey()) if !ok { t.Fatal("no entry for host in db") } // form a contract with the host contract, err := c.managedNewContract(hostEntry, 10, c.blockHeight+100) if err != nil { t.Fatal(err) } c.mu.Lock() c.contracts[contract.ID] = contract c.mu.Unlock() // create a downloader d1, err := c.Downloader(contract.ID, nil) if err != nil { t.Fatal(err) } // create another downloader d2, err := c.Downloader(contract.ID, nil) if err != nil { t.Fatal(err) } // downloaders should match if d1 != d2 { t.Fatal("downloader was not cached") } // close one of the downloaders; it should not fully close, since d1 is // still using it d2.Close() c.mu.RLock() _, ok = c.downloaders[contract.ID] c.mu.RUnlock() if !ok { t.Fatal("expected downloader to still be present") } // create another downloader d3, err := c.Downloader(contract.ID, nil) if err != nil { t.Fatal(err) } // downloaders should match if d3 != d1 { t.Fatal("closing one client should not fully close the downloader") } // close both downloaders d1.Close() d2.Close() c.mu.RLock() _, ok = c.downloaders[contract.ID] c.mu.RUnlock() if ok { t.Fatal("did not expect downloader to still be present") } // create another downloader d4, err := c.Downloader(contract.ID, nil) if err != nil { t.Fatal(err) } // downloaders should match if d4 == d1 { t.Fatal("downloader should not have been cached after all clients were closed") } d4.Close() } // TestIntegrationEditorCaching tests that editors are properly cached // by the contractor. When two editors are requested for the same // contract, only one underlying editor should be created. 
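// Editors are reference-counted: closing one client must not tear down the
// shared underlying session while another client still holds it.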
func TestIntegrationEditorCaching(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // create testing trio h, c, _, err := newTestingTrio(t.Name()) if err != nil { t.Fatal(err) } defer h.Close() defer c.Close() // get the host's entry from the db hostEntry, ok := c.hdb.Host(h.PublicKey()) if !ok { t.Fatal("no entry for host in db") } // form a contract with the host contract, err := c.managedNewContract(hostEntry, 10, c.blockHeight+100) if err != nil { t.Fatal(err) } c.mu.Lock() c.contracts[contract.ID] = contract c.mu.Unlock() // create an editor d1, err := c.Editor(contract.ID, nil) if err != nil { t.Fatal(err) } // create another editor d2, err := c.Editor(contract.ID, nil) if err != nil { t.Fatal(err) } // editors should match if d1 != d2 { t.Fatal("editor was not cached") } // close one of the editors; it should not fully close, since d1 is // still using it d2.Close() c.mu.RLock() _, ok = c.editors[contract.ID] c.mu.RUnlock() if !ok { t.Fatal("expected editor to still be present") } // create another editor d3, err := c.Editor(contract.ID, nil) if err != nil { t.Fatal(err) } // editors should match if d3 != d1 { t.Fatal("closing one client should not fully close the editor") } // close both editors d1.Close() d2.Close() c.mu.RLock() _, ok = c.editors[contract.ID] c.mu.RUnlock() if ok { t.Fatal("did not expect editor to still be present") } // create another editor d4, err := c.Editor(contract.ID, nil) if err != nil { t.Fatal(err) } // editors should match if d4 == d1 { t.Fatal("editor should not have been cached after all clients were closed") } d4.Close() } // TestIntegrationCachedRenew tests that the contractor can renew with a host // after being interrupted during contract revision. func TestIntegrationCachedRenew(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // create testing trio h, c, _, err := newTestingTrio(t.Name()) if err != nil { t.Fatal(err) } defer h.Close() defer c.Close() // get the host's entry from the db hostEntry, ok := c.hdb.Host(h.PublicKey()) if !ok { t.Fatal("no entry for host in db") } // form a contract with the host contract, err := c.managedNewContract(hostEntry, 10, c.blockHeight+100) if err != nil { t.Fatal(err) } c.mu.Lock() c.contracts[contract.ID] = contract c.mu.Unlock() // revise the contract editor, err := c.Editor(contract.ID, nil) if err != nil { t.Fatal(err) } data := fastrand.Bytes(int(modules.SectorSize)) root, err := editor.Upload(data) if err != nil { t.Fatal(err) } err = editor.Close() if err != nil { t.Fatal(err) } // download the data downloader, err := c.Downloader(contract.ID, nil) if err != nil { t.Fatal(err) } retrieved, err := downloader.Sector(root) if err != nil { t.Fatal(err) } if !bytes.Equal(data, retrieved) { t.Fatal("downloaded data does not match original") } err = downloader.Close() if err != nil { t.Fatal(err) } contract = c.contracts[contract.ID] // corrupt the contract and cachedRevision badContract := contract badContract.LastRevision.NewRevisionNumber-- badContract.LastRevisionTxn.TransactionSignatures = nil // delete signatures c.mu.Lock() cr := c.cachedRevisions[contract.ID] cr.Revision.NewRevisionNumber = 0 cr.Revision.NewRevisionNumber-- c.cachedRevisions[contract.ID] = cr c.contracts[badContract.ID] = badContract c.mu.Unlock() // Renew should fail with the bad contract + cachedRevision _, err = c.managedRenew(badContract, 20, c.blockHeight+200) if !proto.IsRevisionMismatch(err) { t.Fatal("expected revision mismatch, got", err) } // add cachedRevision cachedRev := 
cachedRevision{contract.LastRevision, contract.MerkleRoots} c.mu.Lock() c.cachedRevisions[contract.ID] = cachedRev c.mu.Unlock() // Renew should now succeed after loading the cachedRevision _, err = c.managedRenew(badContract, 20, c.blockHeight+200) if err != nil { t.Fatal(err) } } Sia-1.3.0/modules/renter/contractor/journal.go000066400000000000000000000271461313565667000214030ustar00rootroot00000000000000package contractor // The contractor achieves efficient persistence using a JSON transaction // journal. It enables efficient ACID transactions on JSON objects. // // The journal represents a single JSON object, containing all of the // contractor's persisted data. The object is serialized as an "initial // object" followed by a series of update sets, one per line. Each update // specifies a modification. // // During operation, the object is first loaded by reading the file and // applying each update to the initial object. It is subsequently modified by // appending update sets to the file, one per line. At any time, a // "checkpoint" may be created, which clears the journal and starts over with // a new initial object. This allows for compaction of the journal file. // // In the event of power failure or other serious disruption, the most recent // update set may be only partially written. Partially written update sets are // simply ignored when reading the journal. import ( "encoding/json" "errors" "fmt" "io" "os" "reflect" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" ) var journalMeta = persist.Metadata{ Header: "Contractor Journal", Version: "1.1.1", } // A journal is a log of updates to a JSON object. type journal struct { f *os.File filename string } // update applies the updateSet atomically to j. It syncs the underlying file // before returning. func (j *journal) update(us updateSet) error { if err := json.NewEncoder(j.f).Encode(us); err != nil { return err } return j.f.Sync() } // Checkpoint refreshes the journal with a new initial object. It syncs the // underlying file before returning. func (j *journal) checkpoint(data contractorPersist) error { if build.DEBUG { // Sanity check - applying the updates to the initial object should // result in a contractorPersist that matches data. var data2 contractorPersist j2, err := openJournal(j.filename, &data2) if err != nil { panic("could not open journal for sanity check: " + err.Error()) } for id, c := range data.CachedRevisions { if c2, ok := data2.CachedRevisions[id]; !ok { continue } else if !reflect.DeepEqual(c.Revision, c2.Revision) { panic("CachedRevision Revisions mismatch: " + fmt.Sprint(c.Revision, c2.Revision)) } else if !reflect.DeepEqual(c.MerkleRoots, c2.MerkleRoots) && (c.MerkleRoots == nil) == (c2.MerkleRoots == nil) { panic("CachedRevision Merkle roots mismatch: " + fmt.Sprint(len(c.MerkleRoots), len(c2.MerkleRoots))) } } for id, c := range data.Contracts { if c2, ok := data2.Contracts[id]; !ok { continue } else if !reflect.DeepEqual(c.LastRevisionTxn, c2.LastRevisionTxn) { panic("Contract Txns mismatch: " + fmt.Sprint(c.LastRevisionTxn, c2.LastRevisionTxn)) } else if !reflect.DeepEqual(c.MerkleRoots, c2.MerkleRoots) && (c.MerkleRoots == nil) == (c2.MerkleRoots == nil) { panic("Contract Merkle roots mismatch: " + fmt.Sprint(len(c.MerkleRoots), len(c2.MerkleRoots))) } } j2.Close() } // Write to a new temp file. 
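// The checkpoint is made atomic with the usual write-then-rename pattern:
// the new initial object is written and synced to a temporary file, which is
// only then renamed over the old journal. A crash before the rename leaves
// the old journal intact; a crash after it leaves the new one.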
tmp, err := os.Create(j.filename + "_tmp") if err != nil { return err } enc := json.NewEncoder(tmp) if err := enc.Encode(journalMeta); err != nil { return err } if err := enc.Encode(data); err != nil { return err } if err := tmp.Sync(); err != nil { return err } // Atomically replace the old file with the new one. if err := tmp.Close(); err != nil { return err } if err := j.f.Close(); err != nil { return err } if err := os.Rename(tmp.Name(), j.filename); err != nil { return err } // Reopen the journal. j.f, err = os.OpenFile(j.filename, os.O_RDWR|os.O_APPEND, 0) return err } // Close closes the underlying file. func (j *journal) Close() error { return j.f.Close() } // newJournal creates a new journal, using data as the initial object. func newJournal(filename string, data contractorPersist) (*journal, error) { // safely create the journal f, err := os.Create(filename) if err != nil { return nil, err } enc := json.NewEncoder(f) if err := enc.Encode(journalMeta); err != nil { return nil, err } if err := enc.Encode(data); err != nil { return nil, err } if err := f.Sync(); err != nil { return nil, err } return &journal{f: f, filename: filename}, nil } // openJournal opens the supplied journal and decodes the reconstructed // contractorPersist into data. func openJournal(filename string, data *contractorPersist) (*journal, error) { // Open file handle for reading and writing. f, err := os.OpenFile(filename, os.O_RDWR, 0) if err != nil { return nil, err } // Decode the metadata. dec := json.NewDecoder(f) var meta persist.Metadata if err = dec.Decode(&meta); err != nil { return nil, err } else if meta.Header != journalMeta.Header { return nil, fmt.Errorf("expected header %q, got %q", journalMeta.Header, meta.Header) } else if meta.Version != journalMeta.Version { return nil, fmt.Errorf("journal version (%s) is incompatible with the current version (%s)", meta.Version, journalMeta.Version) } // Decode the initial object. if err = dec.Decode(data); err != nil { return nil, err } // Make sure all maps are properly initialized. if data.CachedRevisions == nil { data.CachedRevisions = map[string]cachedRevision{} } if data.Contracts == nil { data.Contracts = map[string]modules.RenterContract{} } if data.RenewedIDs == nil { data.RenewedIDs = map[string]string{} } // Decode each set of updates and apply them to data. for { var set updateSet if err = dec.Decode(&set); err == io.EOF || err == io.ErrUnexpectedEOF { // unexpected EOF means the last update was corrupted; skip it break } else if err != nil { // skip corrupted update sets continue } for _, u := range set { u.apply(data) } } return &journal{ f: f, filename: filename, }, nil } type journalUpdate interface { apply(*contractorPersist) } type marshaledUpdate struct { Type string `json:"type"` Data json.RawMessage `json:"data"` Checksum crypto.Hash `json:"checksum"` } type updateSet []journalUpdate // MarshalJSON marshals a set of journalUpdates as an array of // marshaledUpdates. 
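// For illustration only (values hypothetical; exact formatting may differ),
// a one-element set serializes as something like:
//
//   [{"type":"cachedDownloadRevision","data":{...},"checksum":"<hash of data>"}]
//
// where the checksum is the hash of the marshaled update, allowing
// UnmarshalJSON to detect torn or corrupted writes on a per-update basis.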
func (set updateSet) MarshalJSON() ([]byte, error) { marshaledSet := make([]marshaledUpdate, len(set)) for i, u := range set { data, err := json.Marshal(u) if err != nil { build.Critical("failed to marshal known type:", err) } marshaledSet[i].Data = data marshaledSet[i].Checksum = crypto.HashBytes(data) switch u.(type) { case updateUploadRevision: marshaledSet[i].Type = "uploadRevision" case updateDownloadRevision: marshaledSet[i].Type = "downloadRevision" case updateCachedUploadRevision: marshaledSet[i].Type = "cachedUploadRevision" case updateCachedDownloadRevision: marshaledSet[i].Type = "cachedDownloadRevision" } } return json.Marshal(marshaledSet) } // UnmarshalJSON unmarshals an array of marshaledUpdates as a set of // journalUpdates. func (set *updateSet) UnmarshalJSON(b []byte) error { var marshaledSet []marshaledUpdate if err := json.Unmarshal(b, &marshaledSet); err != nil { return err } for _, u := range marshaledSet { if crypto.HashBytes(u.Data) != u.Checksum { return errors.New("bad checksum") } var err error switch u.Type { case "uploadRevision": var ur updateUploadRevision err = json.Unmarshal(u.Data, &ur) *set = append(*set, ur) case "downloadRevision": var dr updateDownloadRevision err = json.Unmarshal(u.Data, &dr) *set = append(*set, dr) case "cachedUploadRevision": var cur updateCachedUploadRevision err = json.Unmarshal(u.Data, &cur) *set = append(*set, cur) case "cachedDownloadRevision": var cdr updateCachedDownloadRevision err = json.Unmarshal(u.Data, &cdr) *set = append(*set, cdr) } if err != nil { return err } } return nil } // updateUploadRevision is a journalUpdate that records the new data // associated with uploading a sector to a host. type updateUploadRevision struct { NewRevisionTxn types.Transaction `json:"newrevisiontxn"` NewSectorRoot crypto.Hash `json:"newsectorroot"` NewSectorIndex int `json:"newsectorindex"` NewUploadSpending types.Currency `json:"newuploadspending"` NewStorageSpending types.Currency `json:"newstoragespending"` } // apply sets the LastRevision, LastRevisionTxn, UploadSpending, and // StorageSpending fields of the contract being revised. It also adds the new // Merkle root to the contract's Merkle root set. func (u updateUploadRevision) apply(data *contractorPersist) { if len(u.NewRevisionTxn.FileContractRevisions) == 0 { build.Critical("updateUploadRevision is missing its FileContractRevision") return } rev := u.NewRevisionTxn.FileContractRevisions[0] c := data.Contracts[rev.ParentID.String()] c.LastRevisionTxn = u.NewRevisionTxn c.LastRevision = rev if u.NewSectorIndex == len(c.MerkleRoots) { c.MerkleRoots = append(c.MerkleRoots, u.NewSectorRoot) } else if u.NewSectorIndex < len(c.MerkleRoots) { c.MerkleRoots[u.NewSectorIndex] = u.NewSectorRoot } else { // Shouldn't happen. TODO: Correctly handle error. } c.UploadSpending = u.NewUploadSpending c.StorageSpending = u.NewStorageSpending data.Contracts[rev.ParentID.String()] = c } // updateDownloadRevision is a journalUpdate that records the new data // associated with downloading a sector from a host. type updateDownloadRevision struct { NewRevisionTxn types.Transaction `json:"newrevisiontxn"` NewDownloadSpending types.Currency `json:"newdownloadspending"` } // apply sets the LastRevision, LastRevisionTxn, and DownloadSpending fields // of the contract being revised.
func (u updateDownloadRevision) apply(data *contractorPersist) { if len(u.NewRevisionTxn.FileContractRevisions) == 0 { build.Critical("updateDownloadRevision is missing its FileContractRevision") return } rev := u.NewRevisionTxn.FileContractRevisions[0] c := data.Contracts[rev.ParentID.String()] c.LastRevisionTxn = u.NewRevisionTxn c.LastRevision = rev c.DownloadSpending = u.NewDownloadSpending data.Contracts[rev.ParentID.String()] = c } // updateCachedUploadRevision is a journalUpdate that records the unsigned // revision sent to the host during a sector upload, along with the Merkle // root of the new sector. type updateCachedUploadRevision struct { Revision types.FileContractRevision `json:"revision"` SectorRoot crypto.Hash `json:"sectorroot"` SectorIndex int `json:"sectorindex"` } // apply sets the Revision field of the cachedRevision associated with the // contract being revised, as well as the Merkle root of the new sector. func (u updateCachedUploadRevision) apply(data *contractorPersist) { c := data.CachedRevisions[u.Revision.ParentID.String()] c.Revision = u.Revision if u.SectorIndex == len(c.MerkleRoots) { c.MerkleRoots = append(c.MerkleRoots, u.SectorRoot) } else if u.SectorIndex < len(c.MerkleRoots) { c.MerkleRoots[u.SectorIndex] = u.SectorRoot } else { // Shouldn't happen. TODO: Add correct error handling. } data.CachedRevisions[u.Revision.ParentID.String()] = c } // updateCachedDownloadRevision is a journalUpdate that records the unsigned // revision sent to the host during a sector download. type updateCachedDownloadRevision struct { Revision types.FileContractRevision `json:"revision"` } // apply sets the Revision field of the cachedRevision associated with the // contract being revised. func (u updateCachedDownloadRevision) apply(data *contractorPersist) { c := data.CachedRevisions[u.Revision.ParentID.String()] c.Revision = u.Revision data.CachedRevisions[u.Revision.ParentID.String()] = c } Sia-1.3.0/modules/renter/contractor/journal_test.go000066400000000000000000000315041313565667000224330ustar00rootroot00000000000000package contractor import ( "bytes" "encoding/json" "io/ioutil" "os" "path/filepath" "reflect" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/types" ) func tempFile(t interface { Name() string Fatal(...interface{}) }) (*os.File, func()) { f, err := os.Create(filepath.Join(build.TempDir("contractor", t.Name()))) if err != nil { t.Fatal(err) } return f, func() { f.Close() os.RemoveAll(f.Name()) } } func tempJournal(t interface { Name() string Fatal(...interface{}) }) (*journal, func()) { j, err := newJournal(filepath.Join(build.TempDir("contractor", t.Name())), contractorPersist{}) if err != nil { t.Fatal(err) } return j, func() { j.Close() os.RemoveAll(j.filename) } } func TestJournal(t *testing.T) { j, cleanup := tempJournal(t) defer cleanup() us := []journalUpdate{ updateCachedDownloadRevision{Revision: types.FileContractRevision{}}, } if err := j.update(us); err != nil { t.Fatal(err) } if err := j.Close(); err != nil { t.Fatal(err) } var data contractorPersist j2, err := openJournal(j.filename, &data) if err != nil { t.Fatal(err) } j2.Close() if len(data.CachedRevisions) != 1 { t.Fatal("openJournal applied updates incorrectly:", data) } } func TestJournalCheckpoint(t *testing.T) { j, cleanup := tempJournal(t) defer cleanup() var data contractorPersist data.BlockHeight = 777 if err := j.checkpoint(data); err != nil { t.Fatal(err) } if err := j.Close(); err != nil { t.Fatal(err) } 
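// Zero the field before reloading; openJournal should restore it from the
// checkpointed initial object rather than from any update entries.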
data.BlockHeight = 0 j2, err := openJournal(j.filename, &data) if err != nil { t.Fatal(err) } j2.Close() if data.BlockHeight != 777 { t.Fatal("checkpoint failed:", data) } } func TestJournalMalformedJSON(t *testing.T) { j, cleanup := tempJournal(t) defer cleanup() // write a valid update err := j.update(updateSet{updateCachedDownloadRevision{}}) if err != nil { t.Fatal(err) } // write a partially-malformed update j.f.WriteString(`[{"t":"cachedDownloadRevision","d":{"revision":{"parentid":"1000000000000000000000000000000000000000000000000000000000000000"`) // load log var data contractorPersist j, err = openJournal(j.filename, &data) if err != nil { t.Fatal(err) } j.Close() // the last update set should have been discarded if _, ok := data.CachedRevisions[crypto.Hash{}.String()]; !ok { t.Fatal("log was not applied correctly:", data.CachedRevisions) } } func TestJournalBadChecksum(t *testing.T) { // test bad checksum j, cleanup := tempJournal(t) defer cleanup() // write a valid update err := j.update(updateSet{updateCachedDownloadRevision{}}) if err != nil { t.Fatal(err) } // write an update with a bad checksum j.f.WriteString(`[{"t":"cachedDownloadRevision","d":{"revision":{"parentid":"2000000000000000000000000000000000000000000000000000000000000000"}},"c":"bad checksum"}]`) // load log var data contractorPersist j, err = openJournal(j.filename, &data) if err != nil { t.Fatal(err) } j.Close() // the last update set should have been discarded if _, ok := data.CachedRevisions[crypto.Hash{}.String()]; !ok { t.Fatal("log was not applied correctly:", data) } } // TestJournalLoadCompat tests that the contractor can convert the previous // persist file to a journal. func TestJournalLoadCompat(t *testing.T) { // create old persist file dir := build.TempDir("contractor", t.Name()) os.MkdirAll(dir, 0700) err := ioutil.WriteFile(filepath.Join(dir, "contractor.json"), []byte(`"Contractor Persistence" "0.5.2" { "Allowance": { "funds": "5000000000000000000000000000", "hosts": 42, "period": 12960, "renewwindow": 6480 }, "BlockHeight": 92885, "CachedRevisions": [ { "Revision": { "parentid": "85a32f6ca706298407668718703b005ab1a558694eab69a2cc33e1bc0d3fb38f", "unlockconditions": { "publickeys": [ {"algorithm": "ed25519", "key": "sU1bxlHat5zgjlAqI7UfVPGKFQp3FpcPzGWa6K9ARfk="}, {"algorithm": "ed25519", "key": "pFrpZJEoH8dF+wQMLwZ6f8N2ghYzXjSCkotoJ0vgAjo="} ], "signaturesrequired": 2 }, "newrevisionnumber": 205, "newfilesize": 792723456, "newfilemerkleroot": "1a6c3bae8f95b168188fcd86461e4b8830a0af5f895b401856e144e6ac833d4d", "newwindowstart": 101748, "newwindowend": 101892, "newvalidproofoutputs": [ {"value": "912571784802684347584", "unlockhash": "f23541eb5c5de647b56c708e1b4972d0770e802f562c521a5f3e6613bb890999fd6a72c3c4cd" }, {"value": "54974203903776165095014400", "unlockhash": "e9d49e22328ba38ecc4969cb21f777915ff9828a79438aa408a668434c557d0c119d7d02e798" } ], "newmissedproofoutputs": [ {"value": "912571784802684347584", "unlockhash": "f23541eb5c5de647b56c708e1b4972d0770e802f562c521a5f3e6613bb890999fd6a72c3c4cd"}, {"value": "53776524850119119131978612", "unlockhash": "e9d49e22328ba38ecc4969cb21f777915ff9828a79438aa408a668434c557d0c119d7d02e798"}, {"value": "1197679053657045963035788", "unlockhash": "000000000000000000000000000000000000000000000000000000000000000089eb0d6a8a69"} ], "newunlockhash": "3aa8c31a63d67d0671d924df556a6214057c9fa611fa5607b1bf5d1ec3e861b0cc78fc4e3914" }, "MerkleRoots": [ "d3c27e3e361f7ff8fbb7aedcb8b24b0613d7e413fc9d7edd8ebbbf9911134487", 
"5bc124f5dcadee196611252eec599096b5146642b6785cac3c0625ce472d863a", "ff3bf7ccbc092ce4b851b76587fa3e9decff3f8421d49d6e72069d6cfc46f382" ] } ], "Contracts": [ { "filecontract": { "filesize": 0, "filemerkleroot": "0000000000000000000000000000000000000000000000000000000000000000", "windowstart": 101748, "windowend": 101892, "payout": "1601052162572897650533988302", "validproofoutputs": [ {"value": "1258611128232554642163168302", "unlockhash": "9a712eceba9f0523522ff9f5687ef6a54e5299d27c632044cd20f207e809fcb306f8373fe12b"}, {"value": "280000000000000000000000000", "unlockhash": "f879ab09edd4b3650aed02ce6226d4f6a197409d42be84c310a5e86657879a85d9575dc51a0d"} ], "missedproofoutputs": [ {"value": "1258611128232554642163168302", "unlockhash": "9a712eceba9f0523522ff9f5687ef6a54e5299d27c632044cd20f207e809fcb306f8373fe12b"}, {"value": "280000000000000000000000000", "unlockhash": "f879ab09edd4b3650aed02ce6226d4f6a197409d42be84c310a5e86657879a85d9575dc51a0d"}, {"value": "0", "unlockhash": "000000000000000000000000000000000000000000000000000000000000000089eb0d6a8a69"} ], "unlockhash": "4440e7ad4a744a1745797f9180b08aeeb1aa42c3c12729ed063f6ef4897f2cf7599a9efe4059", "revisionnumber": 0 }, "id": "87893a702b4af71151a853229f7dd4071929b24b4bf1c39bafec551daeaf11de", "lastrevision": { "parentid": "87893a702b4af71151a853229f7dd4071929b24b4bf1c39bafec551daeaf11de", "unlockconditions": { "timelock": 0, "publickeys": [ {"algorithm": "ed25519", "key": "ux0dwMoOTt2Q+VlmSy3G59nIn5kwWPrZMUKFphJgIGM="}, {"algorithm": "ed25519", "key": "5rgAREJJuMrmHfS3vWV0TN2Y8cHZf8UU2CM8BBFX5q4="} ], "signaturesrequired": 2 }, "newrevisionnumber": 117, "newfilesize": 465567744, "newfilemerkleroot": "449af205a10e645324c9016062e843856538122e4044e18b6e93aaab960cd8e6", "newwindowstart": 101748, "newwindowend": 101892, "newvalidproofoutputs": [ {"value": "1232516258766241909102140813", "unlockhash": "9a712eceba9f0523522ff9f5687ef6a54e5299d27c632044cd20f207e809fcb306f8373fe12b"}, {"value": "306094869466312733061027489", "unlockhash": "f879ab09edd4b3650aed02ce6226d4f6a197409d42be84c310a5e86657879a85d9575dc51a0d"} ], "newmissedproofoutputs": [ {"value": "1232516258766241909102140813", "unlockhash": "9a712eceba9f0523522ff9f5687ef6a54e5299d27c632044cd20f207e809fcb306f8373fe12b"}, {"value": "278593489793967488506695049", "unlockhash": "f879ab09edd4b3650aed02ce6226d4f6a197409d42be84c310a5e86657879a85d9575dc51a0d"}, {"value": "27501379672345244554332440", "unlockhash": "000000000000000000000000000000000000000000000000000000000000000089eb0d6a8a69"} ], "newunlockhash": "4440e7ad4a744a1745797f9180b08aeeb1aa42c3c12729ed063f6ef4897f2cf7599a9efe4059" }, "lastrevisiontxn": { "filecontractrevisions": [ { "parentid": "87893a702b4af71151a853229f7dd4071929b24b4bf1c39bafec551daeaf11de", "unlockconditions": { "timelock": 0, "publickeys": [ {"algorithm": "ed25519", "key": "ux0dwMoOTt2Q+VlmSy3G59nIn5kwWPrZMUKFphJgIGM="}, {"algorithm": "ed25519", "key": "5rgAREJJuMrmHfS3vWV0TN2Y8cHZf8UU2CM8BBFX5q4="} ], "signaturesrequired": 2 }, "newrevisionnumber": 117, "newfilesize": 465567744, "newfilemerkleroot": "449af205a10e645324c9016062e843856538122e4044e18b6e93aaab960cd8e6", "newwindowstart": 101748, "newwindowend": 101892, "newvalidproofoutputs": [ {"value": "1232516258766241909102140813", "unlockhash": "9a712eceba9f0523522ff9f5687ef6a54e5299d27c632044cd20f207e809fcb306f8373fe12b"}, {"value": "306094869466312733061027489", "unlockhash": "f879ab09edd4b3650aed02ce6226d4f6a197409d42be84c310a5e86657879a85d9575dc51a0d"} ], "newmissedproofoutputs": [ {"value": 
"1232516258766241909102140813", "unlockhash": "9a712eceba9f0523522ff9f5687ef6a54e5299d27c632044cd20f207e809fcb306f8373fe12b"}, {"value": "278593489793967488506695049", "unlockhash": "f879ab09edd4b3650aed02ce6226d4f6a197409d42be84c310a5e86657879a85d9575dc51a0d"}, {"value": "27501379672345244554332440", "unlockhash": "000000000000000000000000000000000000000000000000000000000000000089eb0d6a8a69"} ], "newunlockhash": "4440e7ad4a744a1745797f9180b08aeeb1aa42c3c12729ed063f6ef4897f2cf7599a9efe4059" } ], "transactionsignatures": [ { "parentid": "87893a702b4af71151a853229f7dd4071929b24b4bf1c39bafec551daeaf11de", "publickeyindex": 0, "coveredfields": { "wholetransaction": false, "filecontractrevisions": [0] }, "signature": "zs1T+NO5sFR6jVgilYXxJx33gPhd4Y7KRjpsKAG4EFZ7cthgBidXIDkTbOknk8P9Al7bDj1Dq6PMt+Mgvb+tBg==" }, { "parentid": "87893a702b4af71151a853229f7dd4071929b24b4bf1c39bafec551daeaf11de", "publickeyindex": 1, "timelock": 0, "coveredfields": { "wholetransaction": false, "filecontractrevisions": [0] }, "signature": "5jxxxqSaKF/KXNT8oWHiesiHl6l+GHH+zDCSxe3UsQJS+LyB+NY6k+AoQ+7l8ysA5rt/MXt08Gh+iFc95StJCQ==" } ] }, "merkleroots": [ "5d8c2b8ecb23b0cbbb842f236bca90f0f9a684c0d49e5008fa356a3c75d83764", "35e9e31000bdfc6adf1eddbe13d2e584bc274f803f03b23bbf1ac3b3334b7335", "9f6b52ff2b68da078648f073e119d030a69137020792bb6ba601590aead4ab76", "c327be1fc31360c40f6ed5cd729354f20c820f31970a1093cadd914ab55bfed9", "749df474d6ff4c306f8ca8695af352e3596724a286171f495097599b6d6bda61" ], "netaddress": "88.196.244.208:5982", "secretkey": [0,0,0,0,0], "startheight": 88793, "downloadspending": "83886080000000000000000", "storagespending": "25189293437743169757705777", "uploadspending": "462296186880000000819500", "totalcost": "1351052162572897650533988302", "contractfee": "30000000000000000000000000", "txnfee": "10240000000000000000000000", "siafundfee": "62441034340343008370820000" } ], "CurrentPeriod": 88788, "LastChange": [194,19,235,129,22,141,244,238,202,1,240,253,223,37,173,182,252,119,197,154,77,226,137,98,242,231,164,201,34,102,96,194], "OldContracts": null, "RenewedIDs": {} } `), 0666) if err != nil { t.Fatal(err) } // load will fail to load journal, fall back to loading contractor.json, // and save data as a new journal p := newPersist(dir) var data contractorPersist err = p.load(&data) if err != nil { t.Fatal(err) } defer p.Close() // second load should find the journal var data2 contractorPersist p = newPersist(dir) err = p.load(&data2) if err != nil { t.Fatal(err) } defer p.Close() if !reflect.DeepEqual(data, data2) { t.Fatal("data mismatch after loading old persist:", data, data2) } } func BenchmarkUpdateJournal(b *testing.B) { j, cleanup := tempJournal(b) defer cleanup() us := updateSet{ updateCachedUploadRevision{ Revision: types.FileContractRevision{ NewValidProofOutputs: []types.SiacoinOutput{{}, {}}, NewMissedProofOutputs: []types.SiacoinOutput{{}, {}}, UnlockConditions: types.UnlockConditions{PublicKeys: []types.SiaPublicKey{{}, {}}}, }, }, updateUploadRevision{ NewRevisionTxn: types.Transaction{ FileContractRevisions: []types.FileContractRevision{{ NewValidProofOutputs: []types.SiacoinOutput{{}, {}}, NewMissedProofOutputs: []types.SiacoinOutput{{}, {}}, UnlockConditions: types.UnlockConditions{PublicKeys: []types.SiaPublicKey{{}, {}}}, }}, TransactionSignatures: []types.TransactionSignature{{}, {}}, }, NewUploadSpending: types.SiacoinPrecision, NewStorageSpending: types.SiacoinPrecision, }, } var buf bytes.Buffer json.NewEncoder(&buf).Encode(us) b.SetBytes(int64(buf.Len())) b.ResetTimer() for i := 0; i 
< b.N; i++ { if err := j.update(us); err != nil { b.Fatal(err) } } } Sia-1.3.0/modules/renter/contractor/negotiate_test.go000066400000000000000000000151171313565667000227420ustar00rootroot00000000000000package contractor import ( "path/filepath" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/consensus" "github.com/NebulousLabs/Sia/modules/gateway" "github.com/NebulousLabs/Sia/modules/miner" "github.com/NebulousLabs/Sia/modules/renter/hostdb" "github.com/NebulousLabs/Sia/modules/transactionpool" modWallet "github.com/NebulousLabs/Sia/modules/wallet" // name conflicts with type "github.com/NebulousLabs/Sia/types" ) // contractorTester contains all of the modules that are used while testing the contractor. type contractorTester struct { cs modules.ConsensusSet gateway modules.Gateway miner modules.TestMiner tpool modules.TransactionPool wallet modules.Wallet walletKey crypto.TwofishKey hdb hostDB contractor *Contractor } // Close shuts down the contractor tester. func (rt *contractorTester) Close() error { rt.wallet.Lock() rt.cs.Close() rt.gateway.Close() return nil } // newContractorTester creates a ready-to-use contractor tester with money in the // wallet. func newContractorTester(name string) (*contractorTester, error) { // Create the modules. testdir := build.TempDir("contractor", name) g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { return nil, err } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { return nil, err } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { return nil, err } w, err := modWallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { return nil, err } key := crypto.GenerateTwofishKey() _, err = w.Encrypt(key) if err != nil { return nil, err } err = w.Unlock(key) if err != nil { return nil, err } hdb, err := hostdb.New(g, cs, filepath.Join(testdir, modules.RenterDir)) if err != nil { return nil, err } m, err := miner.New(cs, tp, w, filepath.Join(testdir, modules.MinerDir)) if err != nil { return nil, err } c, err := New(cs, w, tp, hdb, filepath.Join(testdir, modules.RenterDir)) if err != nil { return nil, err } // Assemble all pieces into a contractor tester. ct := &contractorTester{ cs: cs, gateway: g, miner: m, tpool: tp, wallet: w, hdb: hdb, contractor: c, } // Mine blocks until there is money in the wallet. 
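// Block rewards take types.MaturityDelay blocks to mature, so the loop below
// mines MaturityDelay+1 blocks to guarantee a spendable output.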
for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ { _, err := ct.miner.AddBlock() if err != nil { return nil, err } } return ct, nil } func TestNegotiateContract(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() ct, err := newContractorTester(t.Name()) if err != nil { t.Fatal(err) } payout := types.NewCurrency64(1e16) fc := types.FileContract{ FileSize: 0, FileMerkleRoot: crypto.Hash{}, // no proof possible without data WindowStart: 100, WindowEnd: 1000, Payout: payout, ValidProofOutputs: []types.SiacoinOutput{ {Value: types.PostTax(ct.contractor.blockHeight, payout), UnlockHash: types.UnlockHash{}}, {Value: types.ZeroCurrency, UnlockHash: types.UnlockHash{}}, }, MissedProofOutputs: []types.SiacoinOutput{ // same as above {Value: types.PostTax(ct.contractor.blockHeight, payout), UnlockHash: types.UnlockHash{}}, // goes to the void, not the hostdb {Value: types.ZeroCurrency, UnlockHash: types.UnlockHash{}}, }, UnlockHash: types.UnlockHash{}, RevisionNumber: 0, } txnBuilder := ct.wallet.StartTransaction() err = txnBuilder.FundSiacoins(fc.Payout) if err != nil { t.Fatal(err) } txnBuilder.AddFileContract(fc) signedTxnSet, err := txnBuilder.Sign(true) if err != nil { t.Fatal(err) } err = ct.tpool.AcceptTransactionSet(signedTxnSet) if err != nil { t.Fatal(err) } } func TestReviseContract(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() ct, err := newContractorTester(t.Name()) if err != nil { t.Fatal(err) } // get an address ourAddr, err := ct.wallet.NextAddress() if err != nil { t.Fatal(err) } // generate keys sk, pk := crypto.GenerateKeyPair() renterPubKey := types.SiaPublicKey{ Algorithm: types.SignatureEd25519, Key: pk[:], } uc := types.UnlockConditions{ PublicKeys: []types.SiaPublicKey{renterPubKey, renterPubKey}, SignaturesRequired: 1, } // create file contract payout := types.NewCurrency64(1e16) fc := types.FileContract{ FileSize: 0, FileMerkleRoot: crypto.Hash{}, // no proof possible without data WindowStart: 100, WindowEnd: 1000, Payout: payout, UnlockHash: uc.UnlockHash(), RevisionNumber: 0, } // outputs need to account for tax fc.ValidProofOutputs = []types.SiacoinOutput{ {Value: types.PostTax(ct.contractor.blockHeight, payout), UnlockHash: ourAddr.UnlockHash()}, {Value: types.ZeroCurrency, UnlockHash: types.UnlockHash{}}, // no collateral } fc.MissedProofOutputs = []types.SiacoinOutput{ // same as above fc.ValidProofOutputs[0], // goes to the void, not the hostdb {Value: types.ZeroCurrency, UnlockHash: types.UnlockHash{}}, } txnBuilder := ct.wallet.StartTransaction() err = txnBuilder.FundSiacoins(fc.Payout) if err != nil { t.Fatal(err) } txnBuilder.AddFileContract(fc) signedTxnSet, err := txnBuilder.Sign(true) if err != nil { t.Fatal(err) } // submit contract err = ct.tpool.AcceptTransactionSet(signedTxnSet) if err != nil { t.Fatal(err) } // create revision fcid := signedTxnSet[len(signedTxnSet)-1].FileContractID(0) rev := types.FileContractRevision{ ParentID: fcid, UnlockConditions: uc, NewFileSize: 10, NewWindowStart: 100, NewWindowEnd: 1000, NewRevisionNumber: 1, NewValidProofOutputs: fc.ValidProofOutputs, NewMissedProofOutputs: fc.MissedProofOutputs, } // create transaction containing the revision signedTxn := types.Transaction{ FileContractRevisions: []types.FileContractRevision{rev}, TransactionSignatures: []types.TransactionSignature{{ ParentID: crypto.Hash(fcid), CoveredFields: types.CoveredFields{FileContractRevisions: []uint64{0}}, PublicKeyIndex: 0, // hostdb key is always first -- see negotiateContract }}, } // sign the transaction
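// Note that SigHash(0) covers only the declared CoveredFields (here, just
// the file contract revision) rather than the whole transaction, so the
// signature remains valid regardless of the transaction's other contents.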
encodedSig := crypto.SignHash(signedTxn.SigHash(0), sk) signedTxn.TransactionSignatures[0].Signature = encodedSig[:] err = signedTxn.StandaloneValid(ct.contractor.blockHeight) if err != nil { t.Fatal(err) } // submit revision err = ct.tpool.AcceptTransactionSet([]types.Transaction{signedTxn}) if err != nil { t.Fatal(err) } } Sia-1.3.0/modules/renter/contractor/persist.go000066400000000000000000000224201313565667000214100ustar00rootroot00000000000000package contractor import ( "path/filepath" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" ) // contractorPersist defines what Contractor data persists across sessions. type contractorPersist struct { Allowance modules.Allowance `json:"allowance"` BlockHeight types.BlockHeight `json:"blockheight"` CachedRevisions map[string]cachedRevision `json:"cachedrevisions"` Contracts map[string]modules.RenterContract `json:"contracts"` CurrentPeriod types.BlockHeight `json:"currentperiod"` LastChange modules.ConsensusChangeID `json:"lastchange"` OldContracts []modules.RenterContract `json:"oldcontracts"` RenewedIDs map[string]string `json:"renewedids"` } // persistData returns the data in the Contractor that will be saved to disk. func (c *Contractor) persistData() contractorPersist { data := contractorPersist{ Allowance: c.allowance, BlockHeight: c.blockHeight, CachedRevisions: make(map[string]cachedRevision), Contracts: make(map[string]modules.RenterContract), CurrentPeriod: c.currentPeriod, LastChange: c.lastChange, RenewedIDs: make(map[string]string), } for _, rev := range c.cachedRevisions { data.CachedRevisions[rev.Revision.ParentID.String()] = rev } for _, contract := range c.contracts { data.Contracts[contract.ID.String()] = contract } for _, contract := range c.oldContracts { contract.MerkleRoots = []crypto.Hash{} // prevent roots from being saved to disk twice data.OldContracts = append(data.OldContracts, contract) } for oldID, newID := range c.renewedIDs { data.RenewedIDs[oldID.String()] = newID.String() } return data } // load loads the Contractor persistence data from disk. func (c *Contractor) load() error { var data contractorPersist err := c.persist.load(&data) if err != nil { return err } c.allowance = data.Allowance c.blockHeight = data.BlockHeight for _, rev := range data.CachedRevisions { c.cachedRevisions[rev.Revision.ParentID] = rev } c.currentPeriod = data.CurrentPeriod if c.currentPeriod == 0 { // COMPATv1.0.4-lts // If loading old persist, current period will be unknown. Best we can // do is guess based on contracts + allowance. var highestEnd types.BlockHeight for _, contract := range data.Contracts { if h := contract.EndHeight(); h > highestEnd { highestEnd = h } } c.currentPeriod = highestEnd - c.allowance.Period } // COMPATv1.1.0 // // If loading old persist, the host's public key is unknown. We must // rescan the blockchain for a host announcement corresponding to the // contract's NetAddress. for _, contract := range data.Contracts { if len(contract.HostPublicKey.Key) == 0 { data.Contracts = addPubKeys(c.cs, data.Contracts) break // only need to rescan once } } for _, contract := range data.Contracts { // COMPATv1.0.4-lts // If loading old persist, start height of contract is unknown. Give // the contract a fake startheight so that it will be included with the // other contracts in the current period.
if contract.StartHeight == 0 { contract.StartHeight = c.currentPeriod + 1 } // COMPATv1.1.2 // Old versions calculated the TotalCost field incorrectly, omitting // the transaction fee. Recompute the TotalCost from scratch using the // original allocated funds and fees. if len(contract.FileContract.ValidProofOutputs) > 0 { contract.TotalCost = contract.FileContract.ValidProofOutputs[0].Value. Add(contract.TxnFee).Add(contract.SiafundFee).Add(contract.ContractFee) } c.contracts[contract.ID] = contract } c.lastChange = data.LastChange for _, contract := range data.OldContracts { c.oldContracts[contract.ID] = contract } for oldString, newString := range data.RenewedIDs { var oldHash, newHash crypto.Hash oldHash.LoadString(oldString) newHash.LoadString(newString) c.renewedIDs[types.FileContractID(oldHash)] = types.FileContractID(newHash) } return nil } // save saves the Contractor persistence data to disk. func (c *Contractor) save() error { return c.persist.save(c.persistData()) } // saveSync saves the Contractor persistence data to disk and then syncs to disk. func (c *Contractor) saveSync() error { return c.persist.save(c.persistData()) } // saveUploadRevision returns a function that saves an upload revision. It is // used by the Editor type to prevent desynchronizing with the host. func (c *Contractor) saveUploadRevision(id types.FileContractID) func(types.FileContractRevision, []crypto.Hash) error { return func(rev types.FileContractRevision, newRoots []crypto.Hash) error { c.mu.Lock() defer c.mu.Unlock() c.cachedRevisions[id] = cachedRevision{rev, newRoots} return c.persist.update(updateCachedUploadRevision{ Revision: rev, // only the last root is new SectorRoot: newRoots[len(newRoots)-1], SectorIndex: len(newRoots) - 1, }) } } // saveDownloadRevision returns a function that saves a download revision. It // is used by the Downloader type to prevent desynchronizing with the host. func (c *Contractor) saveDownloadRevision(id types.FileContractID) func(types.FileContractRevision, []crypto.Hash) error { return func(rev types.FileContractRevision, _ []crypto.Hash) error { c.mu.Lock() defer c.mu.Unlock() // roots have not changed cr := c.cachedRevisions[id] cr.Revision = rev c.cachedRevisions[id] = cr return c.persist.update(updateCachedDownloadRevision{ Revision: rev, }) } } // addPubKeys rescans the blockchain to fill in the HostPublicKey of // contracts, identified by their NetAddress. func addPubKeys(cs consensusSet, contracts map[string]modules.RenterContract) map[string]modules.RenterContract { pubkeys := make(pubkeyScanner) for _, c := range contracts { pubkeys[c.NetAddress] = types.SiaPublicKey{} } cs.ConsensusSetSubscribe(pubkeys, modules.ConsensusChangeBeginning) for id, c := range contracts { c.HostPublicKey = pubkeys[c.NetAddress] contracts[id] = c } cs.Unsubscribe(&pubkeys) return contracts } type pubkeyScanner map[modules.NetAddress]types.SiaPublicKey func (pubkeys pubkeyScanner) ProcessConsensusChange(cc modules.ConsensusChange) { // find announcements for _, block := range cc.AppliedBlocks { for _, txn := range block.Transactions { for _, arb := range txn.ArbitraryData { // decode announcement addr, pubKey, err := modules.DecodeAnnouncement(arb) if err != nil { continue } // For each announcement, if we recognize the addr, map it // to the announced pubkey. Note that we will overwrite // the pubkey if two announcements have the same addr.
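// Blocks are iterated in the order they were applied, so the key from the
// most recently applied announcement is the one that survives.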
if _, relevant := pubkeys[addr]; relevant { pubkeys[addr] = pubKey } } } } } // COMPATv1.1.0 func loadv110persist(dir string, data *contractorPersist) error { var oldPersist struct { Allowance modules.Allowance BlockHeight types.BlockHeight CachedRevisions []cachedRevision Contracts []modules.RenterContract CurrentPeriod types.BlockHeight LastChange modules.ConsensusChangeID OldContracts []modules.RenterContract RenewedIDs map[string]string FinancialMetrics struct { ContractSpending types.Currency DownloadSpending types.Currency StorageSpending types.Currency UploadSpending types.Currency } } err := persist.LoadJSON(persist.Metadata{ Header: "Contractor Persistence", Version: "0.5.2", }, &oldPersist, filepath.Join(dir, "contractor.json")) if err != nil { return err } cachedRevisions := make(map[string]cachedRevision) for _, rev := range oldPersist.CachedRevisions { cachedRevisions[rev.Revision.ParentID.String()] = rev } contracts := make(map[string]modules.RenterContract) for _, c := range oldPersist.Contracts { contracts[c.ID.String()] = c } // COMPATv1.0.4-lts // // If loading old persist, only aggregate metrics are known. Store these // in a special contract under a special identifier. if fm := oldPersist.FinancialMetrics; !fm.ContractSpending.Add(fm.DownloadSpending).Add(fm.StorageSpending).Add(fm.UploadSpending).IsZero() { oldPersist.OldContracts = append(oldPersist.OldContracts, modules.RenterContract{ ID: metricsContractID, TotalCost: fm.ContractSpending, DownloadSpending: fm.DownloadSpending, StorageSpending: fm.StorageSpending, UploadSpending: fm.UploadSpending, // Give the contract a fake startheight so that it will be included // with the other contracts in the current period. Note that in // update.go, the special contract is specifically deleted when a // new period begins. StartHeight: oldPersist.CurrentPeriod + 1, // We also need to add a ValidProofOutput so that the RenterFunds // method will not panic. The value should be 0, i.e. "all funds // were spent." LastRevision: types.FileContractRevision{ NewValidProofOutputs: make([]types.SiacoinOutput, 2), }, }) } *data = contractorPersist{ Allowance: oldPersist.Allowance, BlockHeight: oldPersist.BlockHeight, CachedRevisions: cachedRevisions, Contracts: contracts, CurrentPeriod: oldPersist.CurrentPeriod, LastChange: oldPersist.LastChange, OldContracts: oldPersist.OldContracts, RenewedIDs: oldPersist.RenewedIDs, } return nil } Sia-1.3.0/modules/renter/contractor/persist_test.go000066400000000000000000000171661313565667000224600ustar00rootroot00000000000000package contractor import ( "bytes" "os" "strconv" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // memPersist implements the persister interface in-memory. type memPersist contractorPersist func (m *memPersist) save(data contractorPersist) error { *m = memPersist(data); return nil } func (m *memPersist) update(...journalUpdate) error { return nil } func (m memPersist) load(data *contractorPersist) error { *data = contractorPersist(m); return nil } func (m memPersist) Close() error { return nil } // TestSaveLoad tests that the contractor can save and load itself.
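// Both persister implementations are exercised: the in-memory mock above and
// the on-disk journal-backed persister created by newPersist.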
func TestSaveLoad(t *testing.T) { // create contractor with mocked persist dependency c := &Contractor{ persist: new(memPersist), } // add some fake contracts c.contracts = map[types.FileContractID]modules.RenterContract{ {0}: {ID: types.FileContractID{0}, HostPublicKey: types.SiaPublicKey{Key: []byte("foo")}}, {1}: {ID: types.FileContractID{1}, HostPublicKey: types.SiaPublicKey{Key: []byte("bar")}}, {2}: {ID: types.FileContractID{2}, HostPublicKey: types.SiaPublicKey{Key: []byte("baz")}}, } c.renewedIDs = map[types.FileContractID]types.FileContractID{ {0}: {1}, {1}: {2}, {2}: {3}, } c.cachedRevisions = map[types.FileContractID]cachedRevision{ {0}: {Revision: types.FileContractRevision{ParentID: types.FileContractID{0}}}, {1}: {Revision: types.FileContractRevision{ParentID: types.FileContractID{1}}}, {2}: {Revision: types.FileContractRevision{ParentID: types.FileContractID{2}}}, } c.oldContracts = map[types.FileContractID]modules.RenterContract{ {0}: {ID: types.FileContractID{0}, HostPublicKey: types.SiaPublicKey{Key: []byte("foo")}}, {1}: {ID: types.FileContractID{1}, HostPublicKey: types.SiaPublicKey{Key: []byte("bar")}}, {2}: {ID: types.FileContractID{2}, HostPublicKey: types.SiaPublicKey{Key: []byte("baz")}}, } // save, clear, and reload err := c.save() if err != nil { t.Fatal(err) } c.hdb = stubHostDB{} c.contracts = make(map[types.FileContractID]modules.RenterContract) c.renewedIDs = make(map[types.FileContractID]types.FileContractID) c.cachedRevisions = make(map[types.FileContractID]cachedRevision) c.oldContracts = make(map[types.FileContractID]modules.RenterContract) err = c.load() if err != nil { t.Fatal(err) } // check that all fields were restored _, ok0 := c.contracts[types.FileContractID{0}] _, ok1 := c.contracts[types.FileContractID{1}] _, ok2 := c.contracts[types.FileContractID{2}] if !ok0 || !ok1 || !ok2 { t.Fatal("contracts were not restored properly:", c.contracts) } _, ok0 = c.renewedIDs[types.FileContractID{0}] _, ok1 = c.renewedIDs[types.FileContractID{1}] _, ok2 = c.renewedIDs[types.FileContractID{2}] if !ok0 || !ok1 || !ok2 { t.Fatal("renewed IDs were not restored properly:", c.renewedIDs) } _, ok0 = c.cachedRevisions[types.FileContractID{0}] _, ok1 = c.cachedRevisions[types.FileContractID{1}] _, ok2 = c.cachedRevisions[types.FileContractID{2}] if !ok0 || !ok1 || !ok2 { t.Fatal("cached revisions were not restored properly:", c.cachedRevisions) } _, ok0 = c.oldContracts[types.FileContractID{0}] _, ok1 = c.oldContracts[types.FileContractID{1}] _, ok2 = c.oldContracts[types.FileContractID{2}] if !ok0 || !ok1 || !ok2 { t.Fatal("oldContracts were not restored properly:", c.oldContracts) } // use stdPersist instead of mock c.persist = newPersist(build.TempDir("contractor", t.Name())) os.MkdirAll(build.TempDir("contractor", t.Name()), 0700) // save, clear, and reload err = c.save() if err != nil { t.Fatal(err) } c.contracts = make(map[types.FileContractID]modules.RenterContract) c.renewedIDs = make(map[types.FileContractID]types.FileContractID) c.cachedRevisions = make(map[types.FileContractID]cachedRevision) c.oldContracts = make(map[types.FileContractID]modules.RenterContract) err = c.load() if err != nil { t.Fatal(err) } // check that all fields were restored _, ok0 = c.contracts[types.FileContractID{0}] _, ok1 = c.contracts[types.FileContractID{1}] _, ok2 = c.contracts[types.FileContractID{2}] if !ok0 || !ok1 || !ok2 { t.Fatal("contracts were not restored properly:", c.contracts) } _, ok0 = c.renewedIDs[types.FileContractID{0}] _, ok1 = 
c.renewedIDs[types.FileContractID{1}] _, ok2 = c.renewedIDs[types.FileContractID{2}] if !ok0 || !ok1 || !ok2 { t.Fatal("renewed IDs were not restored properly:", c.renewedIDs) } _, ok0 = c.cachedRevisions[types.FileContractID{0}] _, ok1 = c.cachedRevisions[types.FileContractID{1}] _, ok2 = c.cachedRevisions[types.FileContractID{2}] if !ok0 || !ok1 || !ok2 { t.Fatal("cached revisions were not restored properly:", c.cachedRevisions) } _, ok0 = c.oldContracts[types.FileContractID{0}] _, ok1 = c.oldContracts[types.FileContractID{1}] _, ok2 = c.oldContracts[types.FileContractID{2}] if !ok0 || !ok1 || !ok2 { t.Fatal("oldContracts were not restored properly:", c.oldContracts) } } // blockCS is a consensusSet that calls ProcessConsensusChange on its blocks. type blockCS struct { blocks []types.Block } func (cs blockCS) ConsensusSetSubscribe(s modules.ConsensusSetSubscriber, _ modules.ConsensusChangeID) error { s.ProcessConsensusChange(modules.ConsensusChange{ AppliedBlocks: cs.blocks, }) return nil } func (blockCS) Synced() bool { return true } func (blockCS) Unsubscribe(modules.ConsensusSetSubscriber) { return } // TestPubKeyScanner tests that the pubkeyScanner type correctly identifies // public keys in the blockchain. func TestPubKeyScanner(t *testing.T) { // create pubkeys, announcements, and contracts contracts := make(map[types.FileContractID]modules.RenterContract) var blocks []types.Block var pubkeys []types.SiaPublicKey for i := 0; i < 3; i++ { // generate a keypair sk, pk := crypto.GenerateKeyPair() spk := types.SiaPublicKey{ Algorithm: types.SignatureEd25519, Key: pk[:], } pubkeys = append(pubkeys, spk) // create an announcement and add it to cs addr := modules.NetAddress("foo.bar:999" + strconv.Itoa(i)) ann, err := modules.CreateAnnouncement(addr, spk, sk) if err != nil { t.Fatal(err) } blocks = append(blocks, types.Block{ Transactions: []types.Transaction{{ ArbitraryData: [][]byte{ann}, }}, }) id := types.FileContractID{byte(i)} contracts[id] = modules.RenterContract{ID: id, NetAddress: addr} } // overwrite the first pubkey with a new one, using the same netaddress. // The contractor should use the newer pubkey. 
sk, pk := crypto.GenerateKeyPair() spk := types.SiaPublicKey{ Algorithm: types.SignatureEd25519, Key: pk[:], } pubkeys[0] = spk ann, err := modules.CreateAnnouncement("foo.bar:9990", spk, sk) if err != nil { t.Fatal(err) } blocks = append(blocks, types.Block{ Transactions: []types.Transaction{{ ArbitraryData: [][]byte{ann}, }}, }) // create contractor with mocked persist and cs dependencies c := &Contractor{ persist: new(memPersist), cs: blockCS{blocks}, contracts: contracts, } // save, clear, and reload err = c.save() if err != nil { t.Fatal(err) } c.contracts = make(map[types.FileContractID]modules.RenterContract) err = c.load() if err != nil { t.Fatal(err) } // check that contracts were loaded and have their pubkeys filled in for i, pk := range pubkeys { id := types.FileContractID{byte(i)} contract, ok := c.contracts[id] if !ok { t.Fatal("contracts were not restored properly:", c.contracts) } // check that pubkey was filled in if !bytes.Equal(contract.HostPublicKey.Key, pk.Key) { t.Errorf("contract has wrong pubkey: expected %q, got %q", pk.String(), contract.HostPublicKey.String()) } } } Sia-1.3.0/modules/renter/contractor/update.go000066400000000000000000000040741313565667000212060ustar00rootroot00000000000000package contractor import ( "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // ProcessConsensusChange will be called by the consensus set every time there // is a change in the blockchain. Updates will always be called in order. func (c *Contractor) ProcessConsensusChange(cc modules.ConsensusChange) { c.mu.Lock() for _, block := range cc.RevertedBlocks { if block.ID() != types.GenesisID { c.blockHeight-- } } for _, block := range cc.AppliedBlocks { if block.ID() != types.GenesisID { c.blockHeight++ } } // archive expired contracts var expired []types.FileContractID for id, contract := range c.contracts { if c.blockHeight > contract.EndHeight() { // No need to wait for extra confirmations - any processes which // depend on this contract should have taken care of any issues // already. expired = append(expired, id) // move to oldContracts c.oldContracts[id] = contract } } // delete expired contracts (can't delete while iterating) for _, id := range expired { delete(c.contracts, id) c.log.Println("INFO: archived expired contract", id) } // If we have entered the next period, update currentPeriod // NOTE: "period" refers to the duration of contracts, whereas "cycle" // refers to how frequently the period metrics are reset. // TODO: How to make this more explicit. cycleLen := c.allowance.Period - c.allowance.RenewWindow if c.blockHeight > c.currentPeriod+cycleLen { c.currentPeriod += cycleLen // COMPATv1.0.4-lts // if we were storing a special metrics contract, it will be invalid // after we enter the next period. delete(c.oldContracts, metricsContractID) } c.lastChange = cc.ID err := c.save() if err != nil { c.log.Println("Unable to save while processing a consensus change:", err) } c.mu.Unlock() // Only attempt contract formation/renewal if we are synced // (harmless if not synced, since hosts will reject our renewal attempts, // but very slow). if cc.Synced { // Perform the contract maintenance in a separate thread. 
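// NOTE (illustrative worked example, not part of the original source): for
// the period/cycle arithmetic in ProcessConsensusChange above, an allowance
// with Period = 50 and RenewWindow = 10 gives cycleLen = 40, so currentPeriod
// advances in steps of 40 blocks, and the COMPATv1.0.4-lts metrics contract
// is dropped at each such 40-block boundary.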
go c.threadedContractMaintenance() } } Sia-1.3.0/modules/renter/contractor/update_test.go000066400000000000000000000141771313565667000222500ustar00rootroot00000000000000package contractor import ( "errors" "io/ioutil" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) // TestProcessConsensusUpdate tests that contracts are removed at the expected // block height. func TestProcessConsensusUpdate(t *testing.T) { // create contractor with a contract ending at height 20 var stub newStub var rc modules.RenterContract rc.LastRevision.NewWindowStart = 20 rc.FileContract.ValidProofOutputs = []types.SiacoinOutput{{}} c := &Contractor{ cs: stub, hdb: stub, contracts: map[types.FileContractID]modules.RenterContract{ rc.ID: rc, }, oldContracts: make(map[types.FileContractID]modules.RenterContract), persist: new(memPersist), log: persist.NewLogger(ioutil.Discard), } // process 20 blocks; contract should remain cc := modules.ConsensusChange{ // just need to increment blockheight by 1 AppliedBlocks: []types.Block{{}}, } for i := 0; i < 20; i++ { c.ProcessConsensusChange(cc) } if len(c.contracts) != 1 { t.Error("expected 1 contract, got", len(c.contracts)) } // process one more block; contract should be removed c.ProcessConsensusChange(cc) if len(c.contracts) != 0 { t.Error("expected 0 contracts, got", len(c.contracts)) } } // TestIntegrationAutoRenew tests that contracts are automatically renewed at // the expected block height. func TestIntegrationAutoRenew(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // create testing trio _, c, m, err := newTestingTrio(t.Name()) if err != nil { t.Fatal(err) } // form a contract with the host a := modules.Allowance{ Funds: types.SiacoinPrecision.Mul64(100), // 100 SC Hosts: 1, Period: 50, RenewWindow: 10, } err = c.SetAllowance(a) if err != nil { t.Fatal(err) } err = build.Retry(50, 100*time.Millisecond, func() error { if len(c.Contracts()) == 0 { return errors.New("contracts were not formed") } return nil }) if err != nil { t.Fatal(err) } contract := c.Contracts()[0] // revise the contract editor, err := c.Editor(contract.ID, nil) if err != nil { t.Fatal(err) } data := fastrand.Bytes(int(modules.SectorSize)) // insert the sector root, err := editor.Upload(data) if err != nil { t.Fatal(err) } err = editor.Close() if err != nil { t.Fatal(err) } // mine until we enter the renew window renewHeight := contract.EndHeight() - c.allowance.RenewWindow for c.blockHeight < renewHeight { _, err := m.AddBlock() if err != nil { t.Fatal(err) } } // wait for goroutine in ProcessConsensusChange to finish time.Sleep(100 * time.Millisecond) c.maintenanceLock.Lock() c.maintenanceLock.Unlock() // check renewed contract contract = c.Contracts()[0] if contract.FileContract.FileMerkleRoot != root { t.Fatal("wrong merkle root:", contract.FileContract.FileMerkleRoot) } else if contract.FileContract.FileSize != modules.SectorSize { t.Fatal("wrong file size:", contract.FileContract.FileSize) } else if contract.FileContract.RevisionNumber != 0 { t.Fatal("wrong revision number:", contract.FileContract.RevisionNumber) } else if contract.FileContract.WindowStart != c.blockHeight+c.allowance.Period { t.Fatal("wrong window start:", contract.FileContract.WindowStart) } } // TestIntegrationRenewInvalidate tests that editors and downloaders are // properly invalidated when a renew is queued.
func TestIntegrationRenewInvalidate(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // create testing trio _, c, m, err := newTestingTrio(t.Name()) if err != nil { t.Fatal(err) } // form a contract with the host a := modules.Allowance{ Funds: types.SiacoinPrecision.Mul64(100), // 100 SC Hosts: 1, Period: 50, RenewWindow: 10, } err = c.SetAllowance(a) if err != nil { t.Fatal(err) } err = build.Retry(50, 100*time.Millisecond, func() error { if len(c.Contracts()) == 0 { return errors.New("contracts were not formed") } return nil }) if err != nil { t.Fatal(err) } contract := c.Contracts()[0] // revise the contract editor, err := c.Editor(contract.ID, nil) if err != nil { t.Fatal(err) } data := fastrand.Bytes(int(modules.SectorSize)) // insert the sector root, err := editor.Upload(data) if err != nil { t.Fatal(err) } // mine until we enter the renew window renewHeight := contract.EndHeight() - c.allowance.RenewWindow for c.blockHeight < renewHeight { _, err := m.AddBlock() if err != nil { t.Fatal(err) } } // wait for goroutine in ProcessConsensusChange to finish time.Sleep(100 * time.Millisecond) c.maintenanceLock.Lock() c.maintenanceLock.Unlock() // check renewed contract contract = c.Contracts()[0] if contract.FileContract.FileMerkleRoot != root { t.Error("wrong merkle root:", contract.FileContract.FileMerkleRoot) } else if contract.FileContract.FileSize != modules.SectorSize { t.Error("wrong file size:", contract.FileContract.FileSize) } else if contract.FileContract.RevisionNumber != 0 { t.Error("wrong revision number:", contract.FileContract.RevisionNumber) } else if contract.FileContract.WindowStart != c.blockHeight+c.allowance.Period { t.Error("wrong window start:", contract.FileContract.WindowStart) } // editor should have been invalidated err = editor.Delete(crypto.Hash{}) if err != errInvalidEditor { t.Error("expected invalid editor error; got", err) } editor.Close() // create a downloader downloader, err := c.Downloader(contract.ID, nil) if err != nil { t.Fatal(err) } // mine until we enter the renew window renewHeight = contract.EndHeight() - c.allowance.RenewWindow for c.blockHeight < renewHeight { _, err := m.AddBlock() if err != nil { t.Fatal(err) } } // wait for goroutine in ProcessConsensusChange to finish time.Sleep(100 * time.Millisecond) c.maintenanceLock.Lock() c.maintenanceLock.Unlock() // downloader should have been invalidated _, err = downloader.Sector(crypto.Hash{}) if err != errInvalidDownloader { t.Error("expected invalid downloader error; got", err) } downloader.Close() } Sia-1.3.0/modules/renter/contractor/upload_test.go000066400000000000000000000042741313565667000222510ustar00rootroot00000000000000package contractor import ( "testing" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // editorHostDB is used to test the Editor method. type editorHostDB struct { stubHostDB hosts map[string]modules.HostDBEntry } func (hdb editorHostDB) Host(spk types.SiaPublicKey) (modules.HostDBEntry, bool) { h, ok := hdb.hosts[string(spk.Key)] return h, ok } // TestEditor tests the failure conditions of the Editor method. The method is // more fully tested in the host integration test. 
func TestEditor(t *testing.T) { // use a mock hostdb to supply hosts hdb := &editorHostDB{ hosts: make(map[string]modules.HostDBEntry), } c := &Contractor{ hdb: hdb, revising: make(map[types.FileContractID]bool), contracts: make(map[types.FileContractID]modules.RenterContract), } // empty contract ID _, err := c.Editor(types.FileContractID{}, nil) if err == nil { t.Error("expected error, got nil") } // expired contract c.blockHeight = 3 _, err = c.Editor(types.FileContractID{}, nil) if err == nil { t.Error("expected error, got nil") } c.blockHeight = 0 // expensive host _, hostPublicKey := crypto.GenerateKeyPairDeterministic([32]byte{}) dbe := modules.HostDBEntry{ PublicKey: types.SiaPublicKey{ Algorithm: types.SignatureEd25519, Key: hostPublicKey[:], }, } dbe.AcceptingContracts = true dbe.StoragePrice = types.NewCurrency64(^uint64(0)) hdb.hosts["foo"] = dbe contract := modules.RenterContract{NetAddress: "foo"} c.contracts[contract.ID] = contract _, err = c.Editor(contract.ID, nil) if err == nil { t.Error("expected error, got nil") } // invalid contract dbe.StoragePrice = types.NewCurrency64(500) hdb.hosts["bar"] = dbe _, err = c.Editor(contract.ID, nil) if err == nil { t.Error("expected error, got nil") } // spent contract c.contracts[contract.ID] = modules.RenterContract{ NetAddress: "bar", LastRevision: types.FileContractRevision{ NewValidProofOutputs: []types.SiacoinOutput{ {Value: types.NewCurrency64(0)}, {Value: types.NewCurrency64(^uint64(0))}, }, }, } _, err = c.Editor(contract.ID, nil) if err == nil { t.Error("expected error, got nil") } } Sia-1.3.0/modules/renter/contractor/uptime.go000066400000000000000000000054721313565667000212300ustar00rootroot00000000000000package contractor import ( "sort" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // uptimeMinScans is the minimum number of scans required to judge whether a // host is offline or not. const uptimeMinScans = 3 // uptimeWindow specifies the duration in which host uptime is checked. var uptimeWindow = func() time.Duration { switch build.Release { case "dev": return 30 * time.Minute case "standard": return 7 * 24 * time.Hour // 1 week. case "testing": return 15 * time.Second } panic("undefined uptimeWindow") }() // IsOffline indicates whether a contract's host should be considered offline, // based on its scan metrics. func (c *Contractor) IsOffline(id types.FileContractID) bool { c.mu.RLock() defer c.mu.RUnlock() return c.isOffline(id) } // isOffline indicates whether a contract's host should be considered offline, // based on its scan metrics. func (c *Contractor) isOffline(id types.FileContractID) bool { // Fetch the corresponding contract in the contractor. If the most recent // contract is not in the contractor's set of active contracts, this contract // line is dead, and thus the contract should be considered 'offline'. contract, ok := c.contracts[id] if !ok { return true } host, ok := c.hdb.Host(contract.HostPublicKey) if !ok { return true } // Sanity check - ScanHistory should always be ordered from oldest to // newest.
if build.DEBUG && !sort.IsSorted(host.ScanHistory) { sort.Sort(host.ScanHistory) build.Critical("host's scan history was not sorted") } // Consider a host offline if: // 1) The host has been scanned at least three times, and // 2) The three most recent scans have all failed, and // 3) The time between the most recent scan and the last successful scan // (or first scan) is at least uptimeWindow numScans := len(host.ScanHistory) if numScans < uptimeMinScans { // Not enough data to make a fair judgment. return false } recent := host.ScanHistory[numScans-uptimeMinScans:] for _, scan := range recent { if scan.Success { // One of the scans succeeded. return false } } // Initialize window bounds. windowStart, windowEnd := host.ScanHistory[0].Timestamp, host.ScanHistory[numScans-1].Timestamp // Iterate from newest to oldest, seeking the last successful scan. for i := numScans - 1; i >= 0; i-- { if scan := host.ScanHistory[i]; scan.Success { windowStart = scan.Timestamp break } } return windowEnd.Sub(windowStart) >= uptimeWindow } // onlineContracts returns the subset of the Contractor's contracts whose // hosts are considered online. func (c *Contractor) onlineContracts() []modules.RenterContract { var cs []modules.RenterContract for _, contract := range c.contracts { if !c.isOffline(contract.ID) { cs = append(cs, contract) } } return cs } Sia-1.3.0/modules/renter/contractor/uptime_test.go000066400000000000000000000136371313565667000222710ustar00rootroot00000000000000package contractor import ( "bytes" "errors" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // offlineHostDB overrides an existing hostDB so that it returns a modified // scan history for a specific host. type offlineHostDB struct { hostDB spk types.SiaPublicKey } // Host returns the host with public key spk. If spk matches hdb.spk, the // host's scan history will be modified to make the host appear offline. func (hdb offlineHostDB) Host(spk types.SiaPublicKey) (modules.HostDBEntry, bool) { host, ok := hdb.hostDB.Host(spk) if ok && bytes.Equal(spk.Key, hdb.spk.Key) { // fake three scans over the past uptimeWindow, all of which failed badScan1 := modules.HostDBScan{Timestamp: time.Now().Add(-uptimeWindow * 2), Success: false} badScan2 := modules.HostDBScan{Timestamp: time.Now().Add(-uptimeWindow), Success: false} badScan3 := modules.HostDBScan{Timestamp: time.Now(), Success: false} host.ScanHistory = []modules.HostDBScan{badScan1, badScan2, badScan3} } return host, ok } // TestIntegrationReplaceOffline tests that when a host goes offline, its // contract is eventually replaced.
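// NOTE (illustrative worked example, not part of the original source):
// applying the isOffline rules above with the "standard" uptimeWindow of one
// week, a scan history of [success 8 days ago, fail 5 days ago, fail 2 days
// ago, fail now] marks the host offline: the three most recent scans all
// failed, and the window back to the last success spans 8 days, which is at
// least uptimeWindow. A history of [success 2 days ago, fail, fail, fail
// today] does not, because the failure window is only two days long.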
func TestIntegrationReplaceOffline(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() h, c, m, err := newTestingTrio(t.Name()) if err != nil { t.Fatal(err) } defer h.Close() // override IsOffline to always return true for h c.mu.Lock() c.hdb = offlineHostDB{c.hdb, h.PublicKey()} c.mu.Unlock() // create another host dir := build.TempDir("contractor", t.Name(), "Host2") h2, err := newTestingHost(dir, c.cs.(modules.ConsensusSet), c.tpool.(modules.TransactionPool)) if err != nil { t.Fatal(err) } // form a contract with h c.SetAllowance(modules.Allowance{ Funds: types.SiacoinPrecision.Mul64(100), Hosts: 1, Period: 100, RenewWindow: 10, }) // we should have a contract, but it will be marked as offline due to the // mocked hostDB err = build.Retry(50, 100*time.Millisecond, func() error { c.mu.Lock() lenC := len(c.contracts) c.mu.Unlock() if lenC < 1 { return errors.New("allowance forming seems to have failed") } return nil }) if err != nil { t.Log(len(c.Contracts())) t.Error(err) } c.mu.Lock() oc := len(c.onlineContracts()) c.mu.Unlock() if oc != 0 { t.Fatal("contract should not be reported as online") } // announce the second host err = h2.Announce() if err != nil { t.Fatal(err) } // mine a block, processing the announcement m.AddBlock() // wait for hostdb to scan host for i := 0; i < 100 && len(c.hdb.RandomHosts(2, nil)) != 2; i++ { time.Sleep(50 * time.Millisecond) } if len(c.hdb.RandomHosts(2, nil)) != 2 { t.Fatal("host did not make it into the contractor hostdb in time", c.hdb.RandomHosts(2, nil)) } // mine a block and wait until a new contract is formed. ProcessConsensusChange will // trigger managedFormAllowanceContracts, which should form a new contract // with h2 m.AddBlock() for i := 0; i < 100 && len(c.Contracts()) != 1; i++ { time.Sleep(100 * time.Millisecond) } if len(c.Contracts()) != 1 { t.Fatal("contract was not replaced:", len(c.Contracts())) } if c.Contracts()[0].NetAddress != h2.ExternalSettings().NetAddress { t.Fatal("contractor formed replacement contract with wrong host") } } // mapHostDB is a hostDB that implements the Host method via a simple map. type mapHostDB struct { stubHostDB hosts map[string]modules.HostDBEntry } func (m mapHostDB) Host(spk types.SiaPublicKey) (modules.HostDBEntry, bool) { h, e := m.hosts[string(spk.Key)] return h, e } // TestIsOffline tests the IsOffline method. func TestIsOffline(t *testing.T) { now := time.Now() oldBadScan := modules.HostDBScan{Timestamp: now.Add(-uptimeWindow * 2), Success: false} oldGoodScan := modules.HostDBScan{Timestamp: now.Add(-uptimeWindow * 2), Success: true} newBadScan := modules.HostDBScan{Timestamp: now.Add(-uptimeWindow / 2), Success: false} newGoodScan := modules.HostDBScan{Timestamp: now.Add(-uptimeWindow / 2), Success: true} currentBadScan := modules.HostDBScan{Timestamp: now, Success: false} currentGoodScan := modules.HostDBScan{Timestamp: now, Success: true} tests := []struct { scans []modules.HostDBScan offline bool }{ // no data {nil, false}, // not enough data {[]modules.HostDBScan{oldBadScan, newGoodScan}, false}, // data covers small range {[]modules.HostDBScan{oldBadScan, oldBadScan, oldBadScan}, false}, // data covers large range, but at least 1 scan succeeded {[]modules.HostDBScan{oldBadScan, newGoodScan, currentBadScan}, false}, // data covers large range, no scans succeeded {[]modules.HostDBScan{oldBadScan, newBadScan, currentBadScan}, true}, // old scan was good, recent scans are bad.
{[]modules.HostDBScan{oldGoodScan, newBadScan, newBadScan, currentBadScan}, true}, // recent scan was good, with many recent bad scans. {[]modules.HostDBScan{oldBadScan, newGoodScan, newBadScan, currentBadScan, currentBadScan}, false}, // recent scan was good, old scans were bad. {[]modules.HostDBScan{oldBadScan, newBadScan, currentBadScan, currentGoodScan}, false}, } for i, test := range tests { // construct a contractor with a hostdb containing the scans c := &Contractor{ contracts: map[types.FileContractID]modules.RenterContract{ {1}: {HostPublicKey: types.SiaPublicKey{Key: []byte("foo")}}, }, hdb: mapHostDB{ hosts: map[string]modules.HostDBEntry{ "foo": {ScanHistory: test.scans}, }, }, } if offline := c.IsOffline(types.FileContractID{1}); offline != test.offline { t.Errorf("IsOffline(%v) = %v, expected %v", i, offline, test.offline) } } c := &Contractor{ contracts: map[types.FileContractID]modules.RenterContract{ {1}: {HostPublicKey: types.SiaPublicKey{Key: []byte("foo")}}, }, } // should return true for an unknown contract id if !c.IsOffline(types.FileContractID{4}) { t.Fatal("IsOffline returned false for a nonexistent contract id") } } Sia-1.3.0/modules/renter/download.go000066400000000000000000000615551313565667000173640ustar00rootroot00000000000000package renter // NOTE: All chunk recovery (which involves high computation and disk syncing) // is done in the primary download loop thread. At some point this may be a // significant performance bottleneck. import ( "bytes" "errors" "io" "os" "sync" "sync/atomic" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) const ( defaultFilePerm = 0666 downloadFailureCooldown = time.Minute * 30 ) var ( errPrevErr = errors.New("download could not be completed due to a previous error") errInsufficientHosts = errors.New("insufficient hosts to recover file") errInsufficientPieces = errors.New("couldn't fetch enough pieces to recover data") // maxActiveDownloadPieces determines the maximum number of pieces that are // allowed to be concurrently downloading. More pieces means more // parallelism, but also more RAM usage. // // TODO: Allow this number to be established in the renter settings. maxActiveDownloadPieces = build.Select(build.Var{ Standard: int(60), Dev: int(10), Testing: int(5), }).(int) ) type ( // chunkDownload tracks the progress of a chunk. The chunk download object // should only be read or modified inside of the main download loop thread. chunkDownload struct { download *download index uint64 // index of the chunk within the download // completedPieces contains information about which pieces have been // successfully downloaded. // // workerAttempts contains a list of workers that are able to fetch a // piece of the chunk, mapped to an indication of whether or not they // have tried to fetch a piece of the chunk. completedPieces map[uint64][]byte workerAttempts map[types.FileContractID]bool } // A download is a file download that has been queued by the renter. download struct { // Progress variables. atomicDataReceived uint64 downloadComplete bool downloadErr error finishedChunks map[uint64]bool offset uint64 length uint64 // Timestamp information. completeTime time.Time startTime time.Time // Static information about the file - can be read without a lock. 
chunkSize uint64 destination modules.DownloadWriter erasureCode modules.ErasureCoder fileSize uint64 masterKey crypto.TwofishKey numChunks uint64 // pieceSet contains a sparse map of the chunk indices to be downloaded to // their piece data. pieceSet map[uint64]map[types.FileContractID]pieceData reportedPieceSize uint64 siapath string // Synchronization tools. downloadFinished chan struct{} mu sync.Mutex } // downloadState tracks all of the stateful information within the download // loop, primarily used to simplify the use of helper functions. There is // no thread safety with the download state, as it is only ever accessed by // the primary download loop thread. downloadState struct { // activePieces tracks the number of pieces that have been scheduled // but have not yet been written to disk as a complete chunk. // // availableWorkers tracks which workers are currently idle and ready // to receive work. // // activeWorkers indicates the list of workers which are actively // downloading a piece, and can be utilized again later but are currently // unavailable. // // incompleteChunks is a list of chunks (by index) which have had a // download fail. Repeat entries mean that multiple downloads failed. // A new worker should be assigned to the chunk for each failure, // unless no more workers exist who can download pieces for that chunk, // in which case the download has failed. // // resultChan is the channel that is used to receive completed worker // downloads. activePieces int activeWorkers map[types.FileContractID]struct{} availableWorkers []*worker incompleteChunks []*chunkDownload resultChan chan finishedDownload } ) // newSectionDownload initialises and returns a download object for the specified chunk. func (r *Renter) newSectionDownload(f *file, destination modules.DownloadWriter, offset, length uint64) *download { d := newDownload(f, destination) if length == 0 { build.Critical("download length should not be zero") d.fail(errors.New("download length should not be zero")) return d } // Settings specific to a chunk download. d.offset = offset d.length = length // Calculate chunks to download. minChunk := offset / f.chunkSize() maxChunk := (offset + length - 1) / f.chunkSize() // maxChunk is the index of the last chunk to download // mark the chunks as not being downloaded yet for i := minChunk; i <= maxChunk; i++ { d.finishedChunks[i] = false } d.initPieceSet(f, r) return d } // newDownload creates a newly initialized download. func newDownload(f *file, destination modules.DownloadWriter) *download { return &download{ startTime: time.Now(), chunkSize: f.chunkSize(), destination: destination, erasureCode: f.erasureCode, fileSize: f.size, masterKey: f.masterKey, numChunks: f.numChunks(), siapath: f.name, downloadFinished: make(chan struct{}), finishedChunks: make(map[uint64]bool), } } // initPieceSet initialises the piece set, including calculations of the total download size. func (d *download) initPieceSet(f *file, r *Renter) { // Allocate the piece size and progress bar so that the download will // finish at exactly 100%. Due to rounding error and padding, there is not // a strict mapping between 'progress' and 'bytes downloaded' - it is // actually necessary to download more bytes than the size of the file. // The effective size of the download is determined by the number of chunks // to be downloaded.
TODO: Handle variable-size last chunk - Same in downloadqueue.go numChunks := uint64(len(d.finishedChunks)) dlSize := d.length d.reportedPieceSize = dlSize / (numChunks * uint64(d.erasureCode.MinPieces())) d.atomicDataReceived = dlSize - (d.reportedPieceSize * numChunks * uint64(d.erasureCode.MinPieces())) // Assemble the piece set for the download. d.pieceSet = make(map[uint64]map[types.FileContractID]pieceData) for i := range d.finishedChunks { d.pieceSet[i] = make(map[types.FileContractID]pieceData) } f.mu.RLock() for _, contract := range f.contracts { id := r.hostContractor.ResolveID(contract.ID) for i := range contract.Pieces { // Only add pieceSet entries for chunks that are going to be downloaded. m, exists := d.pieceSet[contract.Pieces[i].Chunk] if exists { m[id] = contract.Pieces[i] } } } f.mu.RUnlock() } // Err returns the error encountered by a download, if it exists. func (d *download) Err() error { d.mu.Lock() defer d.mu.Unlock() return d.downloadErr } // fail will mark the download as complete, but with the provided error. func (d *download) fail(err error) { if d.downloadComplete { // Either the download has already succeeded or failed, nothing to do. return } d.downloadComplete = true d.downloadErr = err close(d.downloadFinished) } // recoverChunk takes a chunk that has had a sufficient number of pieces // downloaded and verifies, decrypts and decodes them into the file. func (cd *chunkDownload) recoverChunk() error { // Assemble the chunk from the download. cd.download.mu.Lock() chunk := make([][]byte, cd.download.erasureCode.NumPieces()) for pieceIndex, pieceData := range cd.completedPieces { chunk[pieceIndex] = pieceData } complete := cd.download.downloadComplete prevErr := cd.download.downloadErr cd.download.mu.Unlock() // Return early if the download has previously suffered an error. if complete { return build.ComposeErrors(errPrevErr, prevErr) } // Decrypt the chunk pieces. for i := range chunk { // Skip pieces that were not downloaded. if chunk[i] == nil { continue } // Decrypt the piece. key := deriveKey(cd.download.masterKey, cd.index, uint64(i)) decryptedPiece, err := key.DecryptBytes(chunk[i]) if err != nil { return build.ExtendErr("unable to decrypt piece", err) } chunk[i] = decryptedPiece } // Recover the chunk into a byte slice. recoverWriter := new(bytes.Buffer) recoverSize := cd.download.chunkSize if cd.index == cd.download.numChunks-1 && cd.download.fileSize%cd.download.chunkSize != 0 { recoverSize = cd.download.fileSize % cd.download.chunkSize } err := cd.download.erasureCode.Recover(chunk, recoverSize, recoverWriter) if err != nil { return build.ExtendErr("unable to recover chunk", err) } result := recoverWriter.Bytes() // Calculate the offset. If the offset is within the chunk, the // requested offset is passed, otherwise the offset of the chunk // within the overall file is passed. chunkBaseAddress := cd.index * cd.download.chunkSize chunkTopAddress := chunkBaseAddress + cd.download.chunkSize - 1 off := chunkBaseAddress lowerBound := 0 if cd.download.offset >= chunkBaseAddress && cd.download.offset <= chunkTopAddress { off = cd.download.offset offsetInBlock := off - chunkBaseAddress lowerBound = int(offsetInBlock) // If the offset is within the block, part of the block will be ignored } // Truncate the recovered data if writing the whole buffer at the specified // offset would exceed the bounds of the requested download.
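// NOTE (illustrative worked example, not part of the original source): for
// the offset arithmetic above, suppose chunkSize = 100 and the download
// requests offset = 250, length = 100. For chunk index 2 (file bytes
// 200-299), chunkBaseAddress = 200 and chunkTopAddress = 299; the requested
// offset falls inside this chunk, so off = 250 and lowerBound = 50. The
// download ends at byte 349, beyond this chunk, so upperBound below stays at
// 100 and bytes [50:100] of the recovered chunk are written at offset 250.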
upperBound := cd.download.chunkSize if chunkTopAddress > cd.download.length+cd.download.offset { diff := chunkTopAddress - (cd.download.length + cd.download.offset) upperBound -= diff + 1 } result = result[lowerBound:upperBound] // Write the bytes to the requested output. _, err = cd.download.destination.WriteAt(result, int64(off)) if err != nil { return build.ExtendErr("unable to write to download destination", err) } cd.download.mu.Lock() defer cd.download.mu.Unlock() // Update the download to signal that this chunk has completed. Only update // after the sync, so that durability is maintained. if cd.download.finishedChunks[cd.index] { build.Critical("recovering chunk when the chunk has already finished downloading") } cd.download.finishedChunks[cd.index] = true // Determine whether the download is complete. nowComplete := true for _, chunkComplete := range cd.download.finishedChunks { if !chunkComplete { nowComplete = false break } } if nowComplete { // Signal that the download is complete. cd.download.downloadComplete = true close(cd.download.downloadFinished) } return nil } // addDownloadToChunkQueue takes a download and adds all incomplete work from // the download to the renter's chunk queue. func (r *Renter) addDownloadToChunkQueue(d *download) { d.mu.Lock() defer d.mu.Unlock() // Skip this download if it has already errored out or has already finished // downloading. if d.downloadComplete { return } // Add the unfinished chunks one at a time. for i, isChunkFinished := range d.finishedChunks { // Skip chunks that have already finished downloading. if isChunkFinished { continue } // Add this chunk to the chunk queue. cd := &chunkDownload{ download: d, index: uint64(i), completedPieces: make(map[uint64][]byte), workerAttempts: make(map[types.FileContractID]bool), } for fcid := range d.pieceSet[i] { cd.workerAttempts[fcid] = false } r.chunkQueue = append(r.chunkQueue, cd) } } // managedDownloadIteration performs one iteration of the download loop. func (r *Renter) managedDownloadIteration(ds *downloadState) { // Check for sleep and break conditions. if len(ds.incompleteChunks) == 0 && len(ds.activeWorkers) == 0 && len(r.chunkQueue) == 0 { // If the above conditions are true, it should also be the case that // the number of active pieces is zero. if ds.activePieces != 0 { r.log.Critical("ERROR: the renter is idle, but tracking", ds.activePieces, "active pieces; resetting to zero") ds.activePieces = 0 } // Nothing to do. Sleep until there is something to do, or until // shutdown. select { case d := <-r.newDownloads: r.addDownloadToChunkQueue(d) case <-r.tg.StopChan(): return } } // Update the set of workers to include everyone in the worker pool. contracts := r.hostContractor.Contracts() id := r.mu.Lock() r.updateWorkerPool(contracts) ds.availableWorkers = make([]*worker, 0, len(r.workerPool)) for _, worker := range r.workerPool { // Ignore workers that are already in the active set of workers. _, exists := ds.activeWorkers[worker.contractID] if exists { continue } // Ignore workers that have had a download failure recently. if time.Since(worker.recentDownloadFailure) < downloadFailureCooldown { continue } // TODO: Prune workers that do not provide value. If bandwidth can be // saturated with fewer workers, then the more expensive ones should be // eliminated. ds.availableWorkers = append(ds.availableWorkers, worker) } r.mu.Unlock(id) // Add new chunks to the extent that resources allow. r.managedScheduleNewChunks(ds) // Check for incomplete chunks, and assign workers to them where possible.
r.managedScheduleIncompleteChunks(ds) // Wait for workers to return after downloading pieces. r.managedWaitOnDownloadWork(ds) } // managedScheduleIncompleteChunks iterates through all of the incomplete // chunks and finds workers to complete the chunks. // managedScheduleIncompleteChunks also checks whether a chunk is unable to be // completed. func (r *Renter) managedScheduleIncompleteChunks(ds *downloadState) { var newIncompleteChunks []*chunkDownload loop: for _, incompleteChunk := range ds.incompleteChunks { // Drop this chunk if the file download has failed in any way. incompleteChunk.download.mu.Lock() downloadComplete := incompleteChunk.download.downloadComplete incompleteChunk.download.mu.Unlock() if downloadComplete { // The download has most likely failed. No need to complete this // chunk. ds.activePieces-- // For the current incomplete chunk. ds.activePieces -= len(incompleteChunk.completedPieces) // For all completed pieces. // Clear the set of completed pieces so that we do not // over-subtract if the above code is run multiple times. incompleteChunk.completedPieces = make(map[uint64][]byte) continue } // Try to find a worker that is able to pick up the slack on the // incomplete download from the set of available workers. for i, worker := range ds.availableWorkers { scheduled, exists := incompleteChunk.workerAttempts[worker.contractID] if scheduled || !exists { // Either this worker does not contain a piece of this chunk, // or this worker has already been scheduled to download a // piece for this chunk. continue } piece, exists := incompleteChunk.download.pieceSet[incompleteChunk.index][worker.contractID] if !exists { continue } dw := downloadWork{ dataRoot: piece.MerkleRoot, pieceIndex: piece.Piece, chunkDownload: incompleteChunk, resultChan: ds.resultChan, } incompleteChunk.workerAttempts[worker.contractID] = true ds.availableWorkers = append(ds.availableWorkers[:i], ds.availableWorkers[i+1:]...) ds.activeWorkers[worker.contractID] = struct{}{} select { case worker.priorityDownloadChan <- dw: default: r.log.Critical("Download work not immediately received by worker") } continue loop } // Determine whether any of the workers in the set of active workers is // able to pick up the slack, indicating that the chunk can be // completed just not at this time. for fcid := range ds.activeWorkers { // Check whether a piece exists for this worker. _, exists1 := incompleteChunk.download.pieceSet[incompleteChunk.index][fcid] scheduled, exists2 := incompleteChunk.workerAttempts[fcid] if !scheduled && exists1 && exists2 { // This worker is able to complete the download for this chunk, // but is busy. Keep this chunk until the next iteration of the // download loop. newIncompleteChunks = append(newIncompleteChunks, incompleteChunk) continue loop } } // TODO: Determine whether any of the workers not in the available set // or the active set is able to pick up the slack. Verify that they are // safe to be scheduled, and then schedule them if so. // Cannot find workers to complete this download, fail the download // connected to this chunk. r.log.Println("Not enough workers to finish download:", errInsufficientHosts) incompleteChunk.download.fail(errInsufficientHosts) // Clear out the piece burden for this chunk. ds.activePieces-- // for the current incomplete chunk ds.activePieces -= len(incompleteChunk.completedPieces) // for all completed pieces // Clear the set of completed pieces so that we do not // over-subtract if the above code is run multiple times.
incompleteChunk.completedPieces = make(map[uint64][]byte) } ds.incompleteChunks = newIncompleteChunks } // managedScheduleNewChunks uses the set of available workers to schedule new // chunks if there are resources available to begin downloading them. func (r *Renter) managedScheduleNewChunks(ds *downloadState) { // Keep adding chunks until a break condition is hit. for { chunkQueueLen := len(r.chunkQueue) if chunkQueueLen == 0 { // There are no more chunks to initiate, return. return } // View the next chunk. nextChunk := r.chunkQueue[0] // Check whether there are enough resources to perform the download. if ds.activePieces+nextChunk.download.erasureCode.MinPieces() > maxActiveDownloadPieces { // There is a limited amount of RAM available, and scheduling the // next piece would consume too much RAM. return } // Chunk is set to be downloaded. Clear it from the queue. r.chunkQueue = r.chunkQueue[1:] // Check if the download has already completed. If it has, it's because // the download failed. nextChunk.download.mu.Lock() downloadComplete := nextChunk.download.downloadComplete nextChunk.download.mu.Unlock() if downloadComplete { // Download has already failed. continue } // Add an incomplete chunk entry for every piece of the download. for i := 0; i < nextChunk.download.erasureCode.MinPieces(); i++ { ds.incompleteChunks = append(ds.incompleteChunks, nextChunk) } ds.activePieces += nextChunk.download.erasureCode.MinPieces() } } // managedWaitOnDownloadWork will wait for workers to return after attempting to // download a piece. func (r *Renter) managedWaitOnDownloadWork(ds *downloadState) { // If there are no workers performing work, return early. if len(ds.activeWorkers) == 0 { return } // Wait for a piece to return. If a new download arrives while waiting, add // it to the download queue immediately. var finishedDownload finishedDownload select { case <-r.tg.StopChan(): return case d := <-r.newDownloads: r.addDownloadToChunkQueue(d) return case finishedDownload = <-ds.resultChan: } // Prepare the piece. workerID := finishedDownload.workerID delete(ds.activeWorkers, workerID) // Fetch the corresponding worker. id := r.mu.RLock() worker, exists := r.workerPool[workerID] r.mu.RUnlock(id) if !exists { ds.incompleteChunks = append(ds.incompleteChunks, finishedDownload.chunkDownload) return } // Check for an error. cd := finishedDownload.chunkDownload if finishedDownload.err != nil { r.log.Debugln("Error when downloading a piece:", finishedDownload.err) worker.recentDownloadFailure = time.Now() ds.incompleteChunks = append(ds.incompleteChunks, cd) return } // Add this returned piece to the appropriate chunk. if _, ok := cd.completedPieces[finishedDownload.pieceIndex]; ok { r.log.Debugln("Piece", finishedDownload.pieceIndex, "already added") ds.incompleteChunks = append(ds.incompleteChunks, cd) return } cd.completedPieces[finishedDownload.pieceIndex] = finishedDownload.data atomic.AddUint64(&cd.download.atomicDataReceived, cd.download.reportedPieceSize) // If the chunk has completed, perform chunk recovery. if len(cd.completedPieces) == cd.download.erasureCode.MinPieces() { err := cd.recoverChunk() ds.activePieces -= len(cd.completedPieces) cd.completedPieces = make(map[uint64][]byte) if err != nil { r.log.Println("Download failed - could not recover a chunk:", err) cd.download.mu.Lock() cd.download.fail(err) cd.download.mu.Unlock() } } } // threadedDownloadLoop utilizes the worker pool to make progress on any queued // downloads. 
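// NOTE (illustrative worked example, not part of the original source): the
// activePieces gate in managedScheduleNewChunks above charges MinPieces per
// scheduled chunk, so with the standard maxActiveDownloadPieces of 60 and a
// 10-of-30 erasure code, at most 6 chunks can be in flight at once; a lower
// limit trades download parallelism for reduced RAM usage.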
func (r *Renter) threadedDownloadLoop() { // Compile the set of available workers. id := r.mu.RLock() availableWorkers := make([]*worker, 0, len(r.workerPool)) for _, worker := range r.workerPool { availableWorkers = append(availableWorkers, worker) } r.mu.RUnlock(id) // Create the download state. ds := &downloadState{ activeWorkers: make(map[types.FileContractID]struct{}), availableWorkers: availableWorkers, incompleteChunks: make([]*chunkDownload, 0), resultChan: make(chan finishedDownload), } for { if r.tg.Add() != nil { return } r.managedDownloadIteration(ds) r.tg.Done() } } // DownloadBufferWriter is a buffer-backed implementation of DownloadWriter. type DownloadBufferWriter struct { data []byte offset int64 } // NewDownloadBufferWriter creates a new DownloadWriter that writes to a buffer. func NewDownloadBufferWriter(size uint64, offset int64) *DownloadBufferWriter { return &DownloadBufferWriter{ data: make([]byte, size), offset: offset, } } // Destination implements the Destination method of the DownloadWriter // interface and informs callers where this download writer is // being written to. func (dw *DownloadBufferWriter) Destination() string { return "buffer" } // WriteAt writes the passed bytes to the DownloadBuffer. func (dw *DownloadBufferWriter) WriteAt(bytes []byte, off int64) (int, error) { off -= dw.offset if len(bytes)+int(off) > len(dw.data) || off < 0 { return 0, errors.New("write at specified offset exceeds buffer size") } i := copy(dw.data[off:], bytes) return i, nil } // Bytes returns the underlying byte slice of the // DownloadBufferWriter. func (dw *DownloadBufferWriter) Bytes() []byte { return dw.data } // DownloadFileWriter is a file-backed implementation of DownloadWriter. type DownloadFileWriter struct { f *os.File location string offset uint64 } // NewDownloadFileWriter creates a new instance of a DownloadWriter backed by the named file. func NewDownloadFileWriter(fname string, offset, length uint64) *DownloadFileWriter { l, _ := os.OpenFile(fname, os.O_CREATE|os.O_WRONLY, defaultFilePerm) return &DownloadFileWriter{ f: l, location: fname, offset: offset, } } // Destination implements the Destination method of the DownloadWriter interface // and informs callers where this download writer is being written to. func (dw *DownloadFileWriter) Destination() string { return dw.location } // WriteAt writes the passed bytes at the specified offset. func (dw *DownloadFileWriter) WriteAt(b []byte, off int64) (int, error) { return dw.f.WriteAt(b, off-int64(dw.offset)) } // DownloadHttpWriter is an http response writer-backed implementation of // DownloadWriter. The writer writes all content that is written to the // current `offset` directly to the ResponseWriter, and buffers all content // that is written at other offsets. After every write to the ResponseWriter // the `offset` field is advanced, and buffered content is written out until // the next gap in the data is reached. type DownloadHttpWriter struct { w io.Writer offset int // The index in the original file of the last byte written to the response writer. firstByteIndex int // The index of the first byte in the original file. length int // The total size of the slice to be written. buffer map[int][]byte // Buffer used for storing the chunks until download finished. } // NewDownloadHttpWriter creates a new instance of an http.ResponseWriter-backed DownloadWriter. func NewDownloadHttpWriter(w io.Writer, offset, length uint64) *DownloadHttpWriter { return &DownloadHttpWriter{ w: w, offset: 0, // Current offset in the output file.
firstByteIndex: int(offset), // Index of first byte in original file. length: int(length), buffer: make(map[int][]byte), } } // Destination implements the Destination method of the DownloadWriter // interface and informs callers where this download writer is // being written to. func (dw *DownloadHttpWriter) Destination() string { return "httpresp" } // WriteAt buffers parts of the file until the entire file can be // flushed to the client. Returns the number of bytes written or an error. func (dw *DownloadHttpWriter) WriteAt(b []byte, off int64) (int, error) { // Write bytes to buffer. offsetInBuffer := int(off) - dw.firstByteIndex dw.buffer[offsetInBuffer] = b // Send all chunks to the client that can be sent. totalDataSent := 0 for { data, exists := dw.buffer[dw.offset] if exists { // Send data to client. dw.w.Write(data) // Remove chunk from map. delete(dw.buffer, dw.offset) // Increment offset to point to the beginning of the next chunk. dw.offset += len(data) totalDataSent += len(data) } else { break } } return totalDataSent, nil } Sia-1.3.0/modules/renter/downloadqueue.go000066400000000000000000000054651313565667000204270ustar00rootroot00000000000000package renter import ( "errors" "fmt" "path/filepath" "sync/atomic" "github.com/NebulousLabs/Sia/modules" ) // Download performs a file download using the passed parameters. func (r *Renter) Download(p modules.RenterDownloadParameters) error { // lookup the file associated with the nickname. lockID := r.mu.RLock() file, exists := r.files[p.Siapath] r.mu.RUnlock(lockID) if !exists { return fmt.Errorf("no file with that path: %s", p.Siapath) } isHttpResp := p.Httpwriter != nil // validate download parameters if p.Async && isHttpResp { return errors.New("cannot async download to http response") } if isHttpResp && p.Destination != "" { return errors.New("destination cannot be specified when downloading to http response") } if !isHttpResp && p.Destination == "" { return errors.New("destination not supplied") } if p.Destination != "" && !filepath.IsAbs(p.Destination) { return errors.New("destination must be an absolute path") } if p.Offset == file.size { return errors.New("offset equals filesize") } // Instantiate the correct DownloadWriter implementation // (e.g. content written to file or response body). var dw modules.DownloadWriter if isHttpResp { dw = NewDownloadHttpWriter(p.Httpwriter, p.Offset, p.Length) } else { dw = NewDownloadFileWriter(p.Destination, p.Offset, p.Length) } // sentinel: if length == 0, download the entire file if p.Length == 0 { p.Length = file.size - p.Offset } // Check whether offset and length are valid. if p.Offset < 0 || p.Offset+p.Length > file.size { return fmt.Errorf("offset and length combination invalid, max byte is at index %d", file.size-1) } // Create the download object and add it to the queue. d := r.newSectionDownload(file, dw, p.Offset, p.Length) lockID = r.mu.Lock() r.downloadQueue = append(r.downloadQueue, d) r.mu.Unlock(lockID) r.newDownloads <- d // Block until the download has completed. // // TODO: Eventually just return the channel to the error instead of the // error itself. select { case <-d.downloadFinished: return d.Err() case <-r.tg.StopChan(): return errors.New("download interrupted by shutdown") } } // DownloadQueue returns the list of downloads in the queue. func (r *Renter) DownloadQueue() []modules.DownloadInfo { lockID := r.mu.RLock() defer r.mu.RUnlock(lockID) // Order from most recent to least recent.
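// NOTE (illustrative worked example, not part of the original source): for
// the DownloadHttpWriter.WriteAt method above, with firstByteIndex = 100, a
// WriteAt of 50 bytes at offset 150 is only buffered (under relative key 50),
// since the writer is still waiting for relative offset 0. A later WriteAt
// of 50 bytes at offset 100 then streams both chunks to the client in order,
// empties the buffer, and returns 100 bytes sent.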
downloads := make([]modules.DownloadInfo, len(r.downloadQueue)) for i := range r.downloadQueue { d := r.downloadQueue[len(r.downloadQueue)-i-1] downloads[i] = modules.DownloadInfo{ SiaPath: d.siapath, Destination: d.destination, Filesize: d.length, StartTime: d.startTime, } downloads[i].Received = atomic.LoadUint64(&d.atomicDataReceived) if err := d.Err(); err != nil { downloads[i].Error = err.Error() } } return downloads } Sia-1.3.0/modules/renter/erasure.go000066400000000000000000000033641313565667000172150ustar00rootroot00000000000000package renter import ( "io" "github.com/klauspost/reedsolomon" "github.com/NebulousLabs/Sia/modules" ) // rsCode is a Reed-Solomon encoder/decoder. It implements the // modules.ErasureCoder interface. type rsCode struct { enc reedsolomon.Encoder numPieces int dataPieces int } // NumPieces returns the number of pieces returned by Encode. func (rs *rsCode) NumPieces() int { return rs.numPieces } // MinPieces return the minimum number of pieces that must be present to // recover the original data. func (rs *rsCode) MinPieces() int { return rs.dataPieces } // Encode splits data into equal-length pieces, some containing the original // data and some containing parity data. func (rs *rsCode) Encode(data []byte) ([][]byte, error) { pieces, err := rs.enc.Split(data) if err != nil { return nil, err } // err should not be possible if Encode is called on the result of Split, // but no harm in checking anyway. err = rs.enc.Encode(pieces) if err != nil { return nil, err } return pieces, nil } // Recover recovers the original data from pieces (including parity) and // writes it to w. pieces should be identical to the slice returned by // Encode (length and order must be preserved), but with missing elements // set to nil. func (rs *rsCode) Recover(pieces [][]byte, n uint64, w io.Writer) error { err := rs.enc.Reconstruct(pieces) if err != nil { return err } return rs.enc.Join(w, pieces, int(n)) } // NewRSCode creates a new Reed-Solomon encoder/decoder using the supplied // parameters. func NewRSCode(nData, nParity int) (modules.ErasureCoder, error) { enc, err := reedsolomon.New(nData, nParity) if err != nil { return nil, err } return &rsCode{ enc: enc, numPieces: nData + nParity, dataPieces: nData, }, nil } Sia-1.3.0/modules/renter/erasure_test.go000066400000000000000000000030321313565667000202440ustar00rootroot00000000000000package renter import ( "bytes" "io/ioutil" "testing" "github.com/NebulousLabs/fastrand" ) // TestRSEncode tests the rsCode type. 
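// NOTE (illustrative sketch, not part of the original source): typical use
// of the rsCode type defined above, mirroring the test that follows: encode
// into 10 data + 3 parity pieces, lose any three, and recover the original
// bytes.
//
//	rsc, _ := NewRSCode(10, 3)
//	pieces, _ := rsc.Encode(data)
//	pieces[0], pieces[4], pieces[12] = nil, nil, nil // drop any 3 of 13 pieces
//	buf := new(bytes.Buffer)
//	_ = rsc.Recover(pieces, uint64(len(data)), buf) // buf now holds data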
func TestRSEncode(t *testing.T) { badParams := []struct { data, parity int }{ {-1, -1}, {-1, 0}, {0, -1}, {0, 0}, {0, 1}, {1, 0}, } for _, ps := range badParams { if _, err := NewRSCode(ps.data, ps.parity); err == nil { t.Error("expected bad parameter error, got nil") } } rsc, err := NewRSCode(10, 3) if err != nil { t.Fatal(err) } data := fastrand.Bytes(777) pieces, err := rsc.Encode(data) if err != nil { t.Fatal(err) } _, err = rsc.Encode(nil) if err == nil { t.Fatal("expected nil data error, got nil") } buf := new(bytes.Buffer) err = rsc.Recover(pieces, 777, buf) if err != nil { t.Fatal(err) } err = rsc.Recover(nil, 777, buf) if err == nil { t.Fatal("expected nil pieces error, got nil") } if !bytes.Equal(data, buf.Bytes()) { t.Fatal("recovered data does not match original") } } func BenchmarkRSEncode(b *testing.B) { rsc, err := NewRSCode(80, 20) if err != nil { b.Fatal(err) } data := fastrand.Bytes(1 << 20) b.SetBytes(1 << 20) b.ResetTimer() for i := 0; i < b.N; i++ { rsc.Encode(data) } } func BenchmarkRSRecover(b *testing.B) { rsc, err := NewRSCode(50, 200) if err != nil { b.Fatal(err) } data := fastrand.Bytes(1 << 20) pieces, err := rsc.Encode(data) if err != nil { b.Fatal(err) } b.SetBytes(1 << 20) b.ResetTimer() for i := 0; i < b.N; i++ { pieces[0] = nil rsc.Recover(pieces, 1<<20, ioutil.Discard) } } Sia-1.3.0/modules/renter/files.go000066400000000000000000000201451313565667000166450ustar00rootroot00000000000000package renter import ( "errors" "os" "path/filepath" "sync" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) var ( ErrEmptyFilename = errors.New("filename must be a nonempty string") ErrUnknownPath = errors.New("no file known with that path") ErrPathOverload = errors.New("a file already exists at that location") ) // A file is a single file that has been uploaded to the network. Files are // split into equal-length chunks, which are then erasure-coded into pieces. // Each piece is separately encrypted, using a key derived from the file's // master key. The pieces are uploaded to hosts in groups, such that one file // contract covers many pieces. type file struct { name string size uint64 // Static - can be accessed without lock. contracts map[types.FileContractID]fileContract masterKey crypto.TwofishKey // Static - can be accessed without lock. erasureCode modules.ErasureCoder // Static - can be accessed without lock. pieceSize uint64 // Static - can be accessed without lock. mode uint32 // actually an os.FileMode mu sync.RWMutex } // A fileContract is a contract covering an arbitrary number of file pieces. // Chunk/Piece metadata is used to split the raw contract data appropriately. type fileContract struct { ID types.FileContractID IP modules.NetAddress Pieces []pieceData WindowStart types.BlockHeight } // pieceData contains the metadata necessary to request a piece from a // fetcher. // // TODO: Add an 'Unavailable' flag that can be set if the host loses the piece. // Some TODOs exist in 'repair.go' related to this field. type pieceData struct { Chunk uint64 // which chunk the piece belongs to Piece uint64 // the index of the piece in the chunk MerkleRoot crypto.Hash // the Merkle root of the piece } // deriveKey derives the key used to encrypt and decrypt a specific file piece. 
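// NOTE (illustrative sketch, not part of the original source): because the
// key below is a hash of the master key with the chunk and piece indices,
// every piece of every chunk is encrypted under a distinct key derived from
// one secret, and the download path in download.go can regenerate it on
// demand:
//
//	key := deriveKey(f.masterKey, chunkIndex, pieceIndex)
//	plaintext, err := key.DecryptBytes(ciphertext)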
func deriveKey(masterKey crypto.TwofishKey, chunkIndex, pieceIndex uint64) crypto.TwofishKey { return crypto.TwofishKey(crypto.HashAll(masterKey, chunkIndex, pieceIndex)) } // chunkSize returns the size of one chunk. func (f *file) chunkSize() uint64 { return f.pieceSize * uint64(f.erasureCode.MinPieces()) } // numChunks returns the number of chunks that f was split into. func (f *file) numChunks() uint64 { // empty files still need at least one chunk if f.size == 0 { return 1 } n := f.size / f.chunkSize() // last chunk will be padded, unless chunkSize divides file evenly. if f.size%f.chunkSize() != 0 { n++ } return n } // available indicates whether the file is ready to be downloaded. func (f *file) available(isOffline func(types.FileContractID) bool) bool { chunkPieces := make([]int, f.numChunks()) for _, fc := range f.contracts { if isOffline(fc.ID) { continue } for _, p := range fc.Pieces { chunkPieces[p.Chunk]++ } } for _, n := range chunkPieces { if n < f.erasureCode.MinPieces() { return false } } return true } // uploadProgress indicates what percentage of the file (plus redundancy) has // been uploaded. Note that a file may be Available long before UploadProgress // reaches 100%, and UploadProgress may report a value greater than 100%. func (f *file) uploadProgress() float64 { var uploaded uint64 for _, fc := range f.contracts { uploaded += uint64(len(fc.Pieces)) * f.pieceSize } desired := f.pieceSize * uint64(f.erasureCode.NumPieces()) * f.numChunks() return 100 * (float64(uploaded) / float64(desired)) } // redundancy returns the redundancy of the least redundant chunk. A file // becomes available when this redundancy is >= 1. Assumes that every piece is // unique within a file contract. -1 is returned if the file has size 0. It // takes one argument, a function that reports whether the host for a given // contract is offline. func (f *file) redundancy(isOffline func(types.FileContractID) bool) float64 { if f.size == 0 { return -1 } piecesPerChunk := make([]int, f.numChunks()) // If the file has non-0 size then the number of chunks should also be // non-0. Therefore the f.size == 0 conditional block above must appear // before this check. if len(piecesPerChunk) == 0 { build.Critical("cannot get redundancy of a file with 0 chunks") return -1 } for _, fc := range f.contracts { // do not count pieces from the contract if the contract is offline if isOffline(fc.ID) { continue } for _, p := range fc.Pieces { piecesPerChunk[p.Chunk]++ } } minPieces := piecesPerChunk[0] for _, numPieces := range piecesPerChunk { if numPieces < minPieces { minPieces = numPieces } } return float64(minPieces) / float64(f.erasureCode.MinPieces()) } // expiration returns the lowest height at which any of the file's contracts // will expire. func (f *file) expiration() types.BlockHeight { if len(f.contracts) == 0 { return 0 } lowest := ^types.BlockHeight(0) for _, fc := range f.contracts { if fc.WindowStart < lowest { lowest = fc.WindowStart } } return lowest } // newFile creates a new file object. func newFile(name string, code modules.ErasureCoder, pieceSize, fileSize uint64) *file { return &file{ name: name, size: fileSize, contracts: make(map[types.FileContractID]fileContract), masterKey: crypto.GenerateTwofishKey(), erasureCode: code, pieceSize: pieceSize, } } // DeleteFile removes a file entry from the renter and deletes its data from // the hosts it is stored on. // // TODO: The data is not cleared from any contracts where the host is not // immediately online.
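// NOTE (illustrative worked example, not part of the original source): for
// the chunkSize, numChunks, and redundancy logic above, a 1000-byte file
// with pieceSize 100 and a 2-of-5 erasure code has chunkSize 200 and 5
// chunks; if the weakest chunk has 3 pieces on online hosts, redundancy is
// 3/2 = 1.5 and the file is available, since every chunk has at least
// MinPieces (2) pieces.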
func (r *Renter) DeleteFile(nickname string) error { lockID := r.mu.Lock() f, exists := r.files[nickname] if !exists { r.mu.Unlock(lockID) return ErrUnknownPath } delete(r.files, nickname) delete(r.tracking, nickname) err := os.RemoveAll(filepath.Join(r.persistDir, f.name+ShareExtension)) if err != nil { r.log.Println("WARN: couldn't remove .sia file during delete:", err) } r.saveSync() r.mu.Unlock(lockID) // delete the file's associated contract data. f.mu.Lock() defer f.mu.Unlock() // TODO: delete the sectors of the file as well. return nil } // FileList returns all of the files that the renter has. func (r *Renter) FileList() []modules.FileInfo { var files []*file lockID := r.mu.RLock() for _, f := range r.files { files = append(files, f) } r.mu.RUnlock(lockID) isOffline := func(id types.FileContractID) bool { id = r.hostContractor.ResolveID(id) offline := r.hostContractor.IsOffline(id) contract, exists := r.hostContractor.ContractByID(id) if !exists { return true } return offline || !contract.GoodForRenew } var fileList []modules.FileInfo for _, f := range files { f.mu.RLock() renewing := true fileList = append(fileList, modules.FileInfo{ SiaPath: f.name, Filesize: f.size, Renewing: renewing, Available: f.available(isOffline), Redundancy: f.redundancy(isOffline), UploadProgress: f.uploadProgress(), Expiration: f.expiration(), }) f.mu.RUnlock() } return fileList } // RenameFile takes an existing file and changes the nickname. The original // file must exist, and there must not be any file that already has the // replacement nickname. func (r *Renter) RenameFile(currentName, newName string) error { lockID := r.mu.Lock() defer r.mu.Unlock(lockID) // Check that newName is nonempty. if newName == "" { return ErrEmptyFilename } // Check that currentName exists and newName doesn't. file, exists := r.files[currentName] if !exists { return ErrUnknownPath } _, exists = r.files[newName] if exists { return ErrPathOverload } // Modify the file and save it to disk. file.mu.Lock() file.name = newName err := r.saveFile(file) file.mu.Unlock() if err != nil { return err } // Update the entries in the renter. delete(r.files, currentName) r.files[newName] = file if t, ok := r.tracking[currentName]; ok { delete(r.tracking, currentName) r.tracking[newName] = t } err = r.saveSync() if err != nil { return err } // Delete the old .sia file. oldPath := filepath.Join(r.persistDir, currentName+ShareExtension) return os.RemoveAll(oldPath) } Sia-1.3.0/modules/renter/files_test.go000066400000000000000000000245111313565667000177050ustar00rootroot00000000000000package renter import ( "os" "path/filepath" "testing" "github.com/NebulousLabs/Sia/types" ) // TestFileNumChunks checks the numChunks method of the file type. func TestFileNumChunks(t *testing.T) { tests := []struct { size uint64 pieceSize uint64 piecesPerChunk int expNumChunks uint64 }{ {100, 10, 1, 10}, // evenly divides {100, 10, 2, 5}, // evenly divides {101, 10, 1, 11}, // padded {101, 10, 2, 6}, // padded {10, 100, 1, 1}, // larger piece than file {0, 10, 1, 1}, // 0-length } for _, test := range tests { rsc, _ := NewRSCode(test.piecesPerChunk, 1) // can't use 0 f := &file{size: test.size, erasureCode: rsc, pieceSize: test.pieceSize} if f.numChunks() != test.expNumChunks { t.Errorf("Test %v: expected %v, got %v", test, test.expNumChunks, f.numChunks()) } } } // TestFileAvailable probes the available method of the file type. 
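// A file counts as available only when every chunk has at least
// erasureCode.MinPieces() pieces on hosts that are currently online; with
// the 1-of-10 code used below, a single online piece per chunk suffices.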
func TestFileAvailable(t *testing.T) { rsc, _ := NewRSCode(1, 10) f := &file{ size: 1000, erasureCode: rsc, pieceSize: 100, } neverOffline := func(types.FileContractID) bool { return false } if f.available(neverOffline) { t.Error("file should not be available") } var fc fileContract for i := uint64(0); i < f.numChunks(); i++ { fc.Pieces = append(fc.Pieces, pieceData{Chunk: i, Piece: 0}) } f.contracts = map[types.FileContractID]fileContract{{}: fc} if !f.available(neverOffline) { t.Error("file should be available") } specificOffline := func(fcid types.FileContractID) bool { return fcid == fc.ID } if f.available(specificOffline) { t.Error("file should not be available") } } // TestFileRedundancy tests that redundancy is correctly calculated for files // with varying number of filecontracts and erasure code settings. func TestFileRedundancy(t *testing.T) { nDatas := []int{1, 2, 10} neverOffline := func(types.FileContractID) bool { return false } for _, nData := range nDatas { rsc, _ := NewRSCode(nData, 10) f := &file{ size: 1000, pieceSize: 100, contracts: make(map[types.FileContractID]fileContract), erasureCode: rsc, } // Test that an empty file has 0 redundancy. if r := f.redundancy(neverOffline); r != 0 { t.Error("expected 0 redundancy, got", r) } // Test that a file with 1 filecontract that has a piece for every chunk but // one chunk still has a redundancy of 0. fc := fileContract{ ID: types.FileContractID{0}, } for i := uint64(0); i < f.numChunks()-1; i++ { pd := pieceData{ Chunk: i, Piece: 0, } fc.Pieces = append(fc.Pieces, pd) } f.contracts[fc.ID] = fc if r := f.redundancy(neverOffline); r != 0 { t.Error("expected 0 redundancy, got", r) } // Test that adding another filecontract with a piece for every chunk but one // chunk still results in a file with redundancy 0. fc = fileContract{ ID: types.FileContractID{1}, } for i := uint64(0); i < f.numChunks()-1; i++ { pd := pieceData{ Chunk: i, Piece: 1, } fc.Pieces = append(fc.Pieces, pd) } f.contracts[fc.ID] = fc if r := f.redundancy(neverOffline); r != 0 { t.Error("expected 0 redundancy, got", r) } // Test that adding a file contract with a piece for the missing chunk // results in a file with redundancy > 0 && <= 1. fc = fileContract{ ID: types.FileContractID{2}, } pd := pieceData{ Chunk: f.numChunks() - 1, Piece: 0, } fc.Pieces = append(fc.Pieces, pd) f.contracts[fc.ID] = fc // 1.0 / MinPieces because the chunk with the least number of pieces has 1 piece. expectedR := 1.0 / float64(f.erasureCode.MinPieces()) if r := f.redundancy(neverOffline); r != expectedR { t.Errorf("expected %f redundancy, got %f", expectedR, r) } // Test that adding a file contract that has erasureCode.MinPieces() pieces // per chunk for all chunks results in a file with redundancy > 1. fc = fileContract{ ID: types.FileContractID{3}, } for iChunk := uint64(0); iChunk < f.numChunks(); iChunk++ { for iPiece := uint64(0); iPiece < uint64(f.erasureCode.MinPieces()); iPiece++ { fc.Pieces = append(fc.Pieces, pieceData{ Chunk: iChunk, Piece: iPiece, }) } } f.contracts[fc.ID] = fc // 1+MinPieces / MinPieces because the chunk with the least number of pieces has 1+MinPieces pieces. 
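// For example, with nData = 1 (MinPieces = 1) the expected redundancy is
// (1+1)/1 = 2.0, and with nData = 10 it is 11/10 = 1.1 (illustrative
// arithmetic only).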
expectedR = float64(1+f.erasureCode.MinPieces()) / float64(f.erasureCode.MinPieces()) if r := f.redundancy(neverOffline); r != expectedR { t.Errorf("expected %f redundancy, got %f", expectedR, r) } // verify offline file contracts are not counted in the redundancy fc = fileContract{ ID: types.FileContractID{4}, } for iChunk := uint64(0); iChunk < f.numChunks(); iChunk++ { for iPiece := uint64(0); iPiece < uint64(f.erasureCode.MinPieces()); iPiece++ { fc.Pieces = append(fc.Pieces, pieceData{ Chunk: iChunk, Piece: iPiece, }) } } f.contracts[fc.ID] = fc specificOffline := func(fcid types.FileContractID) bool { return fcid == fc.ID } if r := f.redundancy(specificOffline); r != expectedR { t.Errorf("expected redundancy to ignore offline file contracts, wanted %f got %f", expectedR, r) } } } // TestFileExpiration probes the expiration method of the file type. func TestFileExpiration(t *testing.T) { f := &file{ contracts: make(map[types.FileContractID]fileContract), } if f.expiration() != 0 { t.Error("file with no pieces should report as having no time remaining") } // Add a contract. fc := fileContract{} fc.WindowStart = 100 f.contracts[types.FileContractID{0}] = fc if f.expiration() != 100 { t.Error("file did not report lowest WindowStart") } // Add a contract with a lower WindowStart. fc.WindowStart = 50 f.contracts[types.FileContractID{1}] = fc if f.expiration() != 50 { t.Error("file did not report lowest WindowStart") } // Add a contract with a higher WindowStart. fc.WindowStart = 75 f.contracts[types.FileContractID{2}] = fc if f.expiration() != 50 { t.Error("file did not report lowest WindowStart") } } // TestRenterDeleteFile probes the DeleteFile method of the renter type. func TestRenterDeleteFile(t *testing.T) { if testing.Short() { t.SkipNow() } rt, err := newRenterTester(t.Name()) if err != nil { t.Fatal(err) } defer rt.Close() // Delete a file from an empty renter. err = rt.renter.DeleteFile("dne") if err != ErrUnknownPath { t.Error("Expected ErrUnknownPath:", err) } // Put a file in the renter. rt.renter.files["1"] = &file{ name: "one", } // Delete a different file. err = rt.renter.DeleteFile("one") if err != ErrUnknownPath { t.Error("Expected ErrUnknownPath, got", err) } // Delete the file. err = rt.renter.DeleteFile("1") if err != nil { t.Error(err) } if len(rt.renter.FileList()) != 0 { t.Error("file was deleted, but is still reported in FileList") } // Put a file in the renter, then rename it. f := newTestingFile() f.name = "1" rt.renter.files[f.name] = f rt.renter.RenameFile(f.name, "one") // Call delete on the previous name. err = rt.renter.DeleteFile("1") if err != ErrUnknownPath { t.Error("Expected ErrUnknownPath, got", err) } // Call delete on the new name. err = rt.renter.DeleteFile("one") if err != nil { t.Error(err) } // Check that all .sia files have been deleted. var walkStr string filepath.Walk(rt.renter.persistDir, func(path string, _ os.FileInfo, _ error) error { // capture only .sia files if filepath.Ext(path) == ".sia" { rel, _ := filepath.Rel(rt.renter.persistDir, path) // strip testdir prefix walkStr += rel } return nil }) expWalkStr := "" if walkStr != expWalkStr { t.Fatalf("Bad walk string: expected %q, got %q", expWalkStr, walkStr) } } // TestRenterFileList probes the FileList method of the renter type. func TestRenterFileList(t *testing.T) { if testing.Short() { t.SkipNow() } rt, err := newRenterTester(t.Name()) if err != nil { t.Fatal(err) } defer rt.Close() // Get the file list of an empty renter. 
if len(rt.renter.FileList()) != 0 { t.Error("FileList has non-zero length for empty renter?") } // Put a file in the renter. rsc, _ := NewRSCode(1, 1) rt.renter.files["1"] = &file{ name: "one", erasureCode: rsc, pieceSize: 1, } if len(rt.renter.FileList()) != 1 { t.Error("FileList is not returning the only file in the renter") } if rt.renter.FileList()[0].SiaPath != "one" { t.Error("FileList is not returning the correct filename for the only file") } // Put multiple files in the renter. rt.renter.files["2"] = &file{ name: "two", erasureCode: rsc, pieceSize: 1, } if len(rt.renter.FileList()) != 2 { t.Error("FileList is not returning both files in the renter") } files := rt.renter.FileList() if !((files[0].SiaPath == "one" || files[0].SiaPath == "two") && (files[1].SiaPath == "one" || files[1].SiaPath == "two") && (files[0].SiaPath != files[1].SiaPath)) { t.Error("FileList is returning wrong names for the files:", files[0].SiaPath, files[1].SiaPath) } } // TestRenterRenameFile probes the rename method of the renter. func TestRenterRenameFile(t *testing.T) { if testing.Short() { t.SkipNow() } rt, err := newRenterTester(t.Name()) if err != nil { t.Fatal(err) } defer rt.Close() // Rename a file that doesn't exist. err = rt.renter.RenameFile("1", "1a") if err != ErrUnknownPath { t.Error("Expecting ErrUnknownPath:", err) } // Rename a file that does exist. f := newTestingFile() f.name = "1" rt.renter.files["1"] = f err = rt.renter.RenameFile("1", "1a") if err != nil { t.Fatal(err) } files := rt.renter.FileList() if len(files) != 1 { t.Fatal("FileList has unexpected number of files:", len(files)) } if files[0].SiaPath != "1a" { t.Errorf("RenameFile failed: expected 1a, got %v", files[0].SiaPath) } // Rename a file to an existing name. f2 := newTestingFile() f2.name = "1" rt.renter.files["1"] = f2 err = rt.renter.RenameFile("1", "1a") if err != ErrPathOverload { t.Error("Expecting ErrPathOverload, got", err) } // Rename a file to the same name. err = rt.renter.RenameFile("1", "1") if err != ErrPathOverload { t.Error("Expecting ErrPathOverload, got", err) } // Renaming should also update the tracking set rt.renter.tracking["1"] = trackedFile{"foo"} err = rt.renter.RenameFile("1", "1b") if err != nil { t.Fatal(err) } _, oldexists := rt.renter.tracking["1"] _, newexists := rt.renter.tracking["1b"] if oldexists || !newexists { t.Error("renaming should have updated the entry in the tracking set") } } Sia-1.3.0/modules/renter/hostdb/000077500000000000000000000000001313565667000164755ustar00rootroot00000000000000Sia-1.3.0/modules/renter/hostdb/consts.go000066400000000000000000000057061313565667000203430ustar00rootroot00000000000000package hostdb import ( "time" "github.com/NebulousLabs/Sia/build" ) const ( // historicInteractionDecay defines the decay of the HistoricSuccessfulInteractions // and HistoricFailedInteractions after every block for a host entry. historicInteractionDecay = 0.9995 // historicInteractionDecayLimit defines the number of historic // interactions required before decay is applied. historicInteractionDecayLimit = 500 // hostRequestTimeout indicates how long a host has to respond to a dial. hostRequestTimeout = 2 * time.Minute // hostScanDeadline indicates how long a host has to complete an entire // scan. hostScanDeadline = 4 * time.Minute // maxHostDowntime specifies the maximum amount of time that a host is // allowed to be offline while still being in the hostdb.
maxHostDowntime = 10 * 24 * time.Hour // maxSettingsLen indicates how long in bytes the host settings field is // allowed to be before being ignored as a DoS attempt. maxSettingsLen = 10e3 // minScans specifies the number of scans that a host should have before the // scans start getting compressed. minScans = 12 // recentInteractionWeightLimit caps the number of recent interactions as a // percentage of the historic interactions, to be certain that a large // amount of activity in a short period of time does not overwhelm the // score for a host. // // Non-stop heavy interactions for half a day can result in gaining more // than half the total weight at this limit. recentInteractionWeightLimit = 0.01 // saveFrequency defines how frequently the hostdb will save to disk. Hostdb // will also save immediately prior to shutdown. saveFrequency = 2 * time.Minute ) var ( // hostCheckupQuantity specifies the number of hosts that get scanned every // time there is a regular scanning operation. hostCheckupQuantity = build.Select(build.Var{ Standard: int(200), Dev: int(6), Testing: int(5), }).(int) // scanningThreads is the number of threads that will be probing hosts for // their settings and checking for reliability. scanningThreads = build.Select(build.Var{ Standard: int(20), Dev: int(4), Testing: int(3), }).(int) ) var ( // defaultScanSleep is the amount of time that the hostdb will sleep if it // cannot successfully get a random number. defaultScanSleep = build.Select(build.Var{ Standard: time.Hour + time.Minute*37, Dev: time.Minute * 5, Testing: time.Second * 15, }).(time.Duration) // maxScanSleep is the maximum amount of time that the hostdb will sleep // between performing scans of the hosts. maxScanSleep = build.Select(build.Var{ Standard: time.Hour * 8, Dev: time.Minute * 10, Testing: time.Second * 15, }).(time.Duration) // minScanSleep is the minimum amount of time that the hostdb will sleep // between performing scans of the hosts. minScanSleep = build.Select(build.Var{ Standard: time.Hour + time.Minute*20, Dev: time.Minute * 3, Testing: time.Second * 14, }).(time.Duration) ) Sia-1.3.0/modules/renter/hostdb/dependencies.go000066400000000000000000000022131313565667000214500ustar00rootroot00000000000000package hostdb import ( "net" "time" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" ) // These interfaces define the HostDB's dependencies. Using the smallest // interface possible makes it easier to mock these dependencies in testing. 
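// A test stub can embed prodDependencies and override a single method; a
// hypothetical sketch (not part of this package):
//
//	type offlineDeps struct{ prodDependencies }
//
//	func (offlineDeps) dialTimeout(modules.NetAddress, time.Duration) (net.Conn, error) {
//		return nil, errors.New("network disabled for testing")
//	}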
type ( dependencies interface { dialTimeout(modules.NetAddress, time.Duration) (net.Conn, error) disrupt(string) bool loadFile(persist.Metadata, interface{}, string) error saveFileSync(persist.Metadata, interface{}, string) error sleep(time.Duration) } ) type prodDependencies struct{} func (prodDependencies) dialTimeout(addr modules.NetAddress, timeout time.Duration) (net.Conn, error) { return net.DialTimeout("tcp", string(addr), timeout) } func (prodDependencies) disrupt(string) bool { return false } func (prodDependencies) loadFile(meta persist.Metadata, data interface{}, filename string) error { return persist.LoadJSON(meta, data, filename) } func (prodDependencies) saveFileSync(meta persist.Metadata, data interface{}, filename string) error { return persist.SaveJSON(meta, data, filename) } func (prodDependencies) sleep(d time.Duration) { time.Sleep(d) } Sia-1.3.0/modules/renter/hostdb/hostdb.go000066400000000000000000000162161313565667000203150ustar00rootroot00000000000000// Package hostdb provides a HostDB object that implements the renter.hostDB // interface. The blockchain is scanned for host announcements and hosts that // are found get added to the host database. The database continually scans the // set of hosts it has found and updates who is online. package hostdb import ( "errors" "fmt" "os" "path/filepath" "sync" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/renter/hostdb/hosttree" "github.com/NebulousLabs/Sia/persist" siasync "github.com/NebulousLabs/Sia/sync" "github.com/NebulousLabs/Sia/types" ) var ( errNilCS = errors.New("cannot create hostdb with nil consensus set") errNilGateway = errors.New("cannot create hostdb with nil gateway") ) // The HostDB is a database of potential hosts. It assigns a weight to each // host based on their hosting parameters, and then can select hosts at random // for uploading files. type HostDB struct { // dependencies cs modules.ConsensusSet deps dependencies gateway modules.Gateway log *persist.Logger mu sync.RWMutex persistDir string tg siasync.ThreadGroup // The hostTree is the root node of the tree that organizes hosts by // weight. The tree is necessary for selecting weighted hosts at // random. hostTree *hosttree.HostTree // the scanPool is a set of hosts that need to be scanned. There are a // handful of goroutines constantly waiting on the channel for hosts to // scan. The scan map is used to prevent duplicates from entering the scan // pool. scanList []modules.HostDBEntry scanMap map[string]struct{} scanPool chan modules.HostDBEntry scanWait bool online bool blockHeight types.BlockHeight lastChange modules.ConsensusChangeID } // New returns a new HostDB. func New(g modules.Gateway, cs modules.ConsensusSet, persistDir string) (*HostDB, error) { // Check for nil inputs. if g == nil { return nil, errNilGateway } if cs == nil { return nil, errNilCS } // Create HostDB using production dependencies. return newHostDB(g, cs, persistDir, prodDependencies{}) } // newHostDB creates a HostDB using the provided dependencies. It loads the old // persistence data, spawns the HostDB's scanning threads, and subscribes it to // the consensusSet. func newHostDB(g modules.Gateway, cs modules.ConsensusSet, persistDir string, deps dependencies) (*HostDB, error) { // Create the HostDB object. 
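// Note that scanPool is created unbuffered, so queued hosts are handed
// directly to the scanning threads spawned later in this function.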
hdb := &HostDB{ cs: cs, deps: deps, gateway: g, persistDir: persistDir, scanMap: make(map[string]struct{}), scanPool: make(chan modules.HostDBEntry), } // Create the persist directory if it does not yet exist. err := os.MkdirAll(persistDir, 0700) if err != nil { return nil, err } // Create the logger. logger, err := persist.NewFileLogger(filepath.Join(persistDir, "hostdb.log")) if err != nil { return nil, err } hdb.log = logger hdb.tg.AfterStop(func() { if err := hdb.log.Close(); err != nil { // Resort to println as the logger is in an uncertain state. fmt.Println("Failed to close the hostdb logger:", err) } }) // The host tree is used to manage hosts and query them at random. hdb.hostTree = hosttree.New(hdb.calculateHostWeight) // Load the prior persistence structures. hdb.mu.Lock() err = hdb.load() hdb.mu.Unlock() if err != nil && !os.IsNotExist(err) { return nil, err } hdb.tg.AfterStop(func() { hdb.mu.Lock() err := hdb.saveSync() hdb.mu.Unlock() if err != nil { hdb.log.Println("Unable to save the hostdb:", err) } }) // Loading is complete, establish the save loop. go hdb.threadedSaveLoop() // Don't perform the remaining startup in the presence of a quitAfterLoad // disruption. if hdb.deps.disrupt("quitAfterLoad") { return hdb, nil } // COMPATv1.1.0 // // If the block height has loaded as zero, the most recent consensus change // needs to be set to perform a full rescan. This will also help the hostdb // to pick up any hosts that it has incorrectly dropped in the past. hdb.mu.Lock() if hdb.blockHeight == 0 { hdb.lastChange = modules.ConsensusChangeBeginning } hdb.mu.Unlock() err = cs.ConsensusSetSubscribe(hdb, hdb.lastChange) if err == modules.ErrInvalidConsensusChangeID { // Subscribe again using the new ID. This will cause a triggered scan // on all of the hosts, but that should be acceptable. hdb.mu.Lock() hdb.blockHeight = 0 hdb.lastChange = modules.ConsensusChangeBeginning hdb.mu.Unlock() err = cs.ConsensusSetSubscribe(hdb, hdb.lastChange) } if err != nil { return nil, errors.New("hostdb subscription failed: " + err.Error()) } hdb.tg.OnStop(func() { cs.Unsubscribe(hdb) }) // Spin up the host scanning processes. if build.Release == "standard" { go hdb.threadedOnlineCheck() } else { // During testing, the hostdb is just always assumed to be online, since // the online check of having nonlocal peers will always fail. hdb.mu.Lock() hdb.online = true hdb.mu.Unlock() } for i := 0; i < scanningThreads; i++ { go hdb.threadedProbeHosts() } // Spawn the scan loop during production, but allow it to be disrupted // during testing. Primary reason is so that we can fill the hostdb with // fake hosts and not have them marked as offline as the scanloop operates. if !hdb.deps.disrupt("disableScanLoop") { go hdb.threadedScan() } return hdb, nil } // ActiveHosts returns a list of hosts that are currently online, sorted by // weight. func (hdb *HostDB) ActiveHosts() (activeHosts []modules.HostDBEntry) { allHosts := hdb.hostTree.All() for _, entry := range allHosts { if len(entry.ScanHistory) == 0 { continue } if !entry.ScanHistory[len(entry.ScanHistory)-1].Success { continue } if !entry.AcceptingContracts { continue } activeHosts = append(activeHosts, entry) } return activeHosts } // AllHosts returns all of the hosts known to the hostdb, including the // inactive ones. func (hdb *HostDB) AllHosts() (allHosts []modules.HostDBEntry) { return hdb.hostTree.All() } // AverageContractPrice returns the average price of a host. 
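// The average is computed over a weighted random sample of up to 32 hosts;
// for example, sampling two hosts priced at 100 and 300 yields an average
// of 200 (see TestAverageContractPrice).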
func (hdb *HostDB) AverageContractPrice() (totalPrice types.Currency) { sampleSize := 32 hosts := hdb.hostTree.SelectRandom(sampleSize, nil) if len(hosts) == 0 { return totalPrice } for _, host := range hosts { totalPrice = totalPrice.Add(host.ContractPrice) } return totalPrice.Div64(uint64(len(hosts))) } // Close closes the hostdb, terminating its scanning threads. func (hdb *HostDB) Close() error { return hdb.tg.Stop() } // Host returns the HostDBEntry associated with the specified public key. If // no matching host is found, Host returns false. func (hdb *HostDB) Host(spk types.SiaPublicKey) (modules.HostDBEntry, bool) { host, exists := hdb.hostTree.Select(spk) if !exists { return host, exists } hdb.mu.RLock() updateHostHistoricInteractions(&host, hdb.blockHeight) hdb.mu.RUnlock() return host, exists } // RandomHosts implements the HostDB interface's RandomHosts() method. It takes // a number of hosts to return, and a slice of public keys to ignore, and // returns a slice of entries. func (hdb *HostDB) RandomHosts(n int, excludeKeys []types.SiaPublicKey) []modules.HostDBEntry { return hdb.hostTree.SelectRandom(n, excludeKeys) } Sia-1.3.0/modules/renter/hostdb/hostdb_test.go000066400000000000000000000403641313565667000213550ustar00rootroot00000000000000package hostdb import ( "io/ioutil" "math" "os" "path/filepath" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/consensus" "github.com/NebulousLabs/Sia/modules/gateway" "github.com/NebulousLabs/Sia/modules/miner" "github.com/NebulousLabs/Sia/modules/renter/hostdb/hosttree" "github.com/NebulousLabs/Sia/modules/transactionpool" "github.com/NebulousLabs/Sia/modules/wallet" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" ) // hdbTester contains a hostdb and all dependencies. type hdbTester struct { cs modules.ConsensusSet gateway modules.Gateway miner modules.TestMiner tpool modules.TransactionPool wallet modules.Wallet walletKey crypto.TwofishKey hdb *HostDB persistDir string } // bareHostDB returns a HostDB with its fields initialized, but without any // dependencies or scanning threads. It is only intended for use in unit tests. func bareHostDB() *HostDB { hdb := &HostDB{ log: persist.NewLogger(ioutil.Discard), scanPool: make(chan modules.HostDBEntry), } hdb.hostTree = hosttree.New(hdb.calculateHostWeight) return hdb } // makeHostDBEntry makes a new host entry with a random public key func makeHostDBEntry() modules.HostDBEntry { dbe := modules.HostDBEntry{} _, pk := crypto.GenerateKeyPair() dbe.AcceptingContracts = true dbe.PublicKey = types.Ed25519PublicKey(pk) dbe.ScanHistory = modules.HostDBScans{{ Timestamp: time.Now(), Success: true, }} return dbe } // newHDBTester returns a tester object wrapping a HostDB and some extra // information for testing. func newHDBTester(name string) (*hdbTester, error) { return newHDBTesterDeps(name, prodDependencies{}) } // newHDBTesterDeps returns a tester object wrapping a HostDB and some extra // information for testing, using the provided dependencies for the hostdb.
func newHDBTesterDeps(name string, deps dependencies) (*hdbTester, error) { if testing.Short() { panic("should not be calling newHDBTester during short tests") } testDir := build.TempDir("HostDB", name) g, err := gateway.New("localhost:0", false, filepath.Join(testDir, modules.GatewayDir)) if err != nil { return nil, err } cs, err := consensus.New(g, false, filepath.Join(testDir, modules.ConsensusDir)) if err != nil { return nil, err } tp, err := transactionpool.New(cs, g, filepath.Join(testDir, modules.TransactionPoolDir)) if err != nil { return nil, err } w, err := wallet.New(cs, tp, filepath.Join(testDir, modules.WalletDir)) if err != nil { return nil, err } m, err := miner.New(cs, tp, w, filepath.Join(testDir, modules.MinerDir)) if err != nil { return nil, err } hdb, err := newHostDB(g, cs, filepath.Join(testDir, modules.RenterDir), deps) if err != nil { return nil, err } hdbt := &hdbTester{ cs: cs, gateway: g, miner: m, tpool: tp, wallet: w, hdb: hdb, persistDir: testDir, } err = hdbt.initWallet() if err != nil { return nil, err } return hdbt, nil } // initWallet creates a wallet key, then initializes and unlocks the wallet. func (hdbt *hdbTester) initWallet() error { hdbt.walletKey = crypto.GenerateTwofishKey() _, err := hdbt.wallet.Encrypt(hdbt.walletKey) if err != nil { return err } err = hdbt.wallet.Unlock(hdbt.walletKey) if err != nil { return err } return nil } // TestAverageContractPrice tests the AverageContractPrice method, which also depends on the // randomHosts method. func TestAverageContractPrice(t *testing.T) { hdb := bareHostDB() // empty if avg := hdb.AverageContractPrice(); !avg.IsZero() { t.Error("average of empty hostdb should be zero:", avg) } // with one host h1 := makeHostDBEntry() h1.ContractPrice = types.NewCurrency64(100) hdb.hostTree.Insert(h1) if avg := hdb.AverageContractPrice(); avg.Cmp(h1.ContractPrice) != 0 { t.Error("average of one host should be that host's price:", avg) } // with two hosts h2 := makeHostDBEntry() h2.ContractPrice = types.NewCurrency64(300) hdb.hostTree.Insert(h2) if avg := hdb.AverageContractPrice(); avg.Cmp64(200) != 0 { t.Error("average of two hosts should be their sum/2:", avg) } } // TestNew tests the New function. func TestNew(t *testing.T) { if testing.Short() { t.SkipNow() } testDir := build.TempDir("HostDB", t.Name()) g, err := gateway.New("localhost:0", false, filepath.Join(testDir, modules.GatewayDir)) if err != nil { t.Fatal(err) } cs, err := consensus.New(g, false, filepath.Join(testDir, modules.ConsensusDir)) if err != nil { t.Fatal(err) } // Vanilla HDB, nothing should go wrong. hdbName := filepath.Join(testDir, modules.RenterDir) _, err = New(g, cs, hdbName+"1") if err != nil { t.Fatal(err) } // Nil gateway. _, err = New(nil, cs, hdbName+"2") if err != errNilGateway { t.Fatalf("expected %v, got %v", errNilGateway, err) } // Nil consensus set. _, err = New(g, nil, hdbName+"3") if err != errNilCS { t.Fatalf("expected %v, got %v", errNilCS, err) } // Bad persistDir. _, err = New(g, cs, "") if !os.IsNotExist(err) { t.Fatalf("expected invalid directory, got %v", err) } } // disableScanLoopDeps is a set of dependencies that disables the scan loop in // newHostDB. type disableScanLoopDeps struct { prodDependencies } // disrupt returns true for the "disableScanLoop" signal, preventing the scan // loop from being started. func (disableScanLoopDeps) disrupt(s string) bool { if s == "disableScanLoop" { return true } return false } // TestRandomHosts tests the hostdb's exported RandomHosts method.
func TestRandomHosts(t *testing.T) { if testing.Short() { t.SkipNow() } hdbt, err := newHDBTesterDeps(t.Name(), disableScanLoopDeps{}) if err != nil { t.Fatal(err) } entries := make(map[string]modules.HostDBEntry) nEntries := int(1e3) for i := 0; i < nEntries; i++ { entry := makeHostDBEntry() entries[string(entry.PublicKey.Key)] = entry err := hdbt.hdb.hostTree.Insert(entry) if err != nil { t.Error(err) } } // Check that all hosts can be queried. for i := 0; i < 25; i++ { hosts := hdbt.hdb.RandomHosts(nEntries, nil) if len(hosts) != nEntries { t.Errorf("RandomHosts returned few entries. got %v wanted %v\n", len(hosts), nEntries) } dupCheck := make(map[string]modules.HostDBEntry) for _, host := range hosts { _, exists := entries[string(host.PublicKey.Key)] if !exists { t.Error("hostdb returning host that doesn't exist.") } _, exists = dupCheck[string(host.PublicKey.Key)] if exists { t.Error("RandomHosts returning duplicates") } dupCheck[string(host.PublicKey.Key)] = host } } // Base case, fill out a map exposing hosts from a single RH query. dupCheck1 := make(map[string]modules.HostDBEntry) hosts := hdbt.hdb.RandomHosts(nEntries/2, nil) if len(hosts) != nEntries/2 { t.Fatalf("RandomHosts returned few entries. got %v wanted %v\n", len(hosts), nEntries/2) } for _, host := range hosts { _, exists := entries[string(host.PublicKey.Key)] if !exists { t.Error("hostdb returning host that doesn't exist.") } _, exists = dupCheck1[string(host.PublicKey.Key)] if exists { t.Error("RandomHosts returning duplicates") } dupCheck1[string(host.PublicKey.Key)] = host } // Iterative case. Check that every time you query for random hosts, you // get different responses. for i := 0; i < 10; i++ { dupCheck2 := make(map[string]modules.HostDBEntry) var overlap, disjoint bool hosts = hdbt.hdb.RandomHosts(nEntries/2, nil) if len(hosts) != nEntries/2 { t.Fatalf("RandomHosts returned few entries. got %v wanted %v\n", len(hosts), nEntries/2) } for _, host := range hosts { _, exists := entries[string(host.PublicKey.Key)] if !exists { t.Error("hostdb returning host that doesn't exist.") } _, exists = dupCheck2[string(host.PublicKey.Key)] if exists { t.Error("RandomHosts returning duplicates") } _, exists = dupCheck1[string(host.PublicKey.Key)] if exists { overlap = true } else { disjoint = true } dupCheck2[string(host.PublicKey.Key)] = host } if !overlap || !disjoint { t.Error("Random hosts does not seem to be random") } dupCheck1 = dupCheck2 } // Try exclude list by excluding every host except for the last one, and // doing a random select. for i := 0; i < 25; i++ { hosts := hdbt.hdb.RandomHosts(nEntries, nil) var exclude []types.SiaPublicKey for j := 1; j < len(hosts); j++ { exclude = append(exclude, hosts[j].PublicKey) } rand := hdbt.hdb.RandomHosts(1, exclude) if len(rand) != 1 { t.Fatal("wrong number of hosts returned") } if string(rand[0].PublicKey.Key) != string(hosts[0].PublicKey.Key) { t.Error("exclude list seems to be excluding the wrong hosts.") } // Try again but request more hosts than are available. rand = hdbt.hdb.RandomHosts(5, exclude) if len(rand) != 1 { t.Fatal("wrong number of hosts returned") } if string(rand[0].PublicKey.Key) != string(hosts[0].PublicKey.Key) { t.Error("exclude list seems to be excluding the wrong hosts.") } // Create an include map, and decrease the number of excluded hosts. // Make sure all hosts returned by rand function are in the include // map. 
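// Since exclude was built from hosts[1:], slicing it at [49:] leaves only
// hosts[50:] excluded, so exactly the 50 hosts captured in includeMap
// remain eligible for selection.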
includeMap := make(map[string]struct{}) for j := 0; j < 50; j++ { includeMap[string(hosts[j].PublicKey.Key)] = struct{}{} } exclude = exclude[49:] // Select only 20 hosts. dupCheck := make(map[string]struct{}) rand = hdbt.hdb.RandomHosts(20, exclude) if len(rand) != 20 { t.Error("random hosts is returning the wrong number of hosts") } for _, host := range rand { _, exists := dupCheck[string(host.PublicKey.Key)] if exists { t.Error("RandomHosts is selecting duplicates") } dupCheck[string(host.PublicKey.Key)] = struct{}{} _, exists = includeMap[string(host.PublicKey.Key)] if !exists { t.Error("RandomHosts returning excluded hosts") } } // Select exactly 50 hosts. dupCheck = make(map[string]struct{}) rand = hdbt.hdb.RandomHosts(50, exclude) if len(rand) != 50 { t.Error("random hosts is returning the wrong number of hosts") } for _, host := range rand { _, exists := dupCheck[string(host.PublicKey.Key)] if exists { t.Error("RandomHosts is selecting duplicates") } dupCheck[string(host.PublicKey.Key)] = struct{}{} _, exists = includeMap[string(host.PublicKey.Key)] if !exists { t.Error("RandomHosts returning excluded hosts") } } // Ask for 100 hosts; only the 50 eligible hosts should be returned. dupCheck = make(map[string]struct{}) rand = hdbt.hdb.RandomHosts(100, exclude) if len(rand) != 50 { t.Error("random hosts is returning the wrong number of hosts") } for _, host := range rand { _, exists := dupCheck[string(host.PublicKey.Key)] if exists { t.Error("RandomHosts is selecting duplicates") } dupCheck[string(host.PublicKey.Key)] = struct{}{} _, exists = includeMap[string(host.PublicKey.Key)] if !exists { t.Error("RandomHosts returning excluded hosts") } } } } // TestRemoveNonexistingHostFromHostTree checks that the host tree interface // correctly responds to having a nonexisting host removed from the host tree. func TestRemoveNonexistingHostFromHostTree(t *testing.T) { if testing.Short() { t.SkipNow() } hdbt, err := newHDBTester(t.Name()) if err != nil { t.Fatal(err) } // Remove a host that doesn't exist from the tree. err = hdbt.hdb.hostTree.Remove(types.SiaPublicKey{}) if err == nil { t.Fatal("There should be an error, but not a panic:", err) } } // TestUpdateHistoricInteractions is a simple check to ensure that incrementing // the recent and historic host interactions works. func TestUpdateHistoricInteractions(t *testing.T) { if testing.Short() { t.SkipNow() } // create a HostDB tester without scanloop to be able to manually increment // the interactions without interference.
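// (disableScanLoopDeps fires the "disableScanLoop" disrupt in newHostDB,
// which prevents threadedScan from being started.)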
hdbt, err := newHDBTesterDeps(t.Name(), disableScanLoopDeps{}) if err != nil { t.Fatal(err) } // create a HostDBEntry and add it to the tree host := makeHostDBEntry() err = hdbt.hdb.hostTree.Insert(host) if err != nil { t.Error(err) } // increment successful and failed interactions by 100 interactions := 100.0 for i := 0.0; i < interactions; i++ { hdbt.hdb.IncrementSuccessfulInteractions(host.PublicKey) hdbt.hdb.IncrementFailedInteractions(host.PublicKey) } // get updated host from hostdb host, ok := hdbt.hdb.Host(host.PublicKey) if !ok { t.Fatal("Modified host not found in hostdb") } // check that recent interactions are exactly 100 and historic interactions are 0 if host.RecentFailedInteractions != interactions || host.RecentSuccessfulInteractions != interactions { t.Errorf("Interactions should be %v but were %v and %v", interactions, host.RecentFailedInteractions, host.RecentSuccessfulInteractions) } if host.HistoricFailedInteractions != 0 || host.HistoricSuccessfulInteractions != 0 { t.Errorf("Historic Interactions should be %v but were %v and %v", 0, host.HistoricFailedInteractions, host.HistoricSuccessfulInteractions) } // add single block to consensus _, err = hdbt.miner.AddBlock() if err != nil { t.Fatal(err) } // increment interactions again by 100 for i := 0.0; i < interactions; i++ { hdbt.hdb.IncrementSuccessfulInteractions(host.PublicKey) hdbt.hdb.IncrementFailedInteractions(host.PublicKey) } // get updated host from hostdb host, ok = hdbt.hdb.Host(host.PublicKey) if !ok { t.Fatal("Modified host not found in hostdb") } // historic actions should have incremented slightly, due to the clamp the // full interactions should not have made it into the historic group. if host.RecentFailedInteractions != interactions || host.RecentSuccessfulInteractions != interactions { t.Errorf("Interactions should be %v but were %v and %v", interactions, host.RecentFailedInteractions, host.RecentSuccessfulInteractions) } if host.HistoricFailedInteractions == 0 || host.HistoricSuccessfulInteractions == 0 { t.Error("historic actions should have updated") } // add 200 blocks to consensus, adding large numbers of historic actions // each time, so that the clamp does not need to be in effect anymore. for i := 0; i < 200; i++ { for j := uint64(0); j < 10; j++ { hdbt.hdb.IncrementSuccessfulInteractions(host.PublicKey) hdbt.hdb.IncrementFailedInteractions(host.PublicKey) } _, err = hdbt.miner.AddBlock() if err != nil { t.Fatal(err) } } // Add five interactions for i := 0; i < 5; i++ { hdbt.hdb.IncrementSuccessfulInteractions(host.PublicKey) hdbt.hdb.IncrementFailedInteractions(host.PublicKey) } // get updated host from hostdb host, ok = hdbt.hdb.Host(host.PublicKey) if !ok { t.Fatal("Modified host not found in hostdb") } // check that recent interactions are exactly 5. Save the historic actions // to check that decay is being handled correctly, and that the recent // interactions are moved over correctly. if host.RecentFailedInteractions != 5 || host.RecentSuccessfulInteractions != 5 { t.Errorf("Interactions should be %v but were %v and %v", interactions, host.RecentFailedInteractions, host.RecentSuccessfulInteractions) } historicFailed := host.HistoricFailedInteractions if host.HistoricFailedInteractions != host.HistoricSuccessfulInteractions { t.Error("historic failed and successful should have the same values") } // Add a single block to apply one round of decay. 
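// After a single block of decay the expected historic value is
// historic*historicInteractionDecay^1 + recent, which is what the
// assertion below computes via math.Pow.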
_, err = hdbt.miner.AddBlock() if err != nil { t.Fatal(err) } host, ok = hdbt.hdb.Host(host.PublicKey) if !ok { t.Fatal("Modified host not found in hostdb") } // Get the historic successful and failed interactions, and see that they // are decaying properly. expected := historicFailed*math.Pow(historicInteractionDecay, 1) + 5 if host.HistoricFailedInteractions != expected || host.HistoricSuccessfulInteractions != expected { t.Errorf("Historic Interactions should be %v but were %v and %v", expected, host.HistoricFailedInteractions, host.HistoricSuccessfulInteractions) } // Add 10 more blocks and check the decay again, make sure it's being // applied correctly. for i := 0; i < 10; i++ { _, err := hdbt.miner.AddBlock() if err != nil { t.Fatal(err) } } host, ok = hdbt.hdb.Host(host.PublicKey) if !ok { t.Fatal("Modified host not found in hostdb") } expected = expected * math.Pow(historicInteractionDecay, 10) if host.HistoricFailedInteractions != expected || host.HistoricSuccessfulInteractions != expected { t.Errorf("Historic Interactions should be %v but were %v and %v", expected, host.HistoricFailedInteractions, host.HistoricSuccessfulInteractions) } } Sia-1.3.0/modules/renter/hostdb/hostentry.go000066400000000000000000000072341313565667000210670ustar00rootroot00000000000000package hostdb import ( "math" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // updateHostHistoricInteractions updates a HostDBEntry's historic interactions // if more than one block has passed since the last update. This should be // called every time before the recent interactions are updated. If passedTime // is e.g. 10, the recent interactions were last updated 10 blocks ago but // never since, so we apply the decay of 1 block before appending the recent // interactions from 10 blocks ago, and then apply the decay of the 9 remaining // blocks, in which the recent interactions have been 0. func updateHostHistoricInteractions(host *modules.HostDBEntry, bh types.BlockHeight) { // Check that the last historic update was not in the future. if host.LastHistoricUpdate >= bh { // The hostdb may be performing a rescan, or maybe no time has passed // since the last update, so there is nothing to do. return } passedTime := bh - host.LastHistoricUpdate // tmp float64 values for more accurate decay hsi := host.HistoricSuccessfulInteractions hfi := host.HistoricFailedInteractions // Apply the decay of a single block. decay := historicInteractionDecay hsi *= decay hfi *= decay // Apply the recent interactions of that single block. Recent interactions // cannot represent more than recentInteractionWeightLimit of historic // interactions, unless there are fewer than historicInteractionDecayLimit // total interactions, in which case the recent interactions cannot count // for more than recentInteractionWeightLimit of the decay limit.
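// As an illustrative example: with 10,000 total historic interactions and
// recentInteractionWeightLimit = 0.01, at most the equivalent of 100
// recent interactions can be folded in per update.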
rsi := float64(host.RecentSuccessfulInteractions) rfi := float64(host.RecentFailedInteractions) if hsi+hfi > historicInteractionDecayLimit { if rsi+rfi > recentInteractionWeightLimit*(hsi+hfi) { adjustment := recentInteractionWeightLimit * (hsi + hfi) / (rsi + rfi) rsi *= adjustment rfi *= adjustment } } else { if rsi+rfi > recentInteractionWeightLimit*historicInteractionDecayLimit { adjustment := recentInteractionWeightLimit * historicInteractionDecayLimit / (rsi + rfi) rsi *= adjustment rfi *= adjustment } } hsi += rsi hfi += rfi // Apply the decay of the rest of the blocks if passedTime > 1 && hsi+hfi > historicInteractionDecayLimit { decay := math.Pow(historicInteractionDecay, float64(passedTime-1)) hsi *= decay hfi *= decay } // Set new values host.HistoricSuccessfulInteractions = hsi host.HistoricFailedInteractions = hfi host.RecentSuccessfulInteractions = 0 host.RecentFailedInteractions = 0 // Update the time of the last update host.LastHistoricUpdate = bh } // IncrementSuccessfulInteractions increments the number of successful // interactions with a host for a given key func (hdb *HostDB) IncrementSuccessfulInteractions(key types.SiaPublicKey) { hdb.mu.Lock() defer hdb.mu.Unlock() // Fetch the host. host, haveHost := hdb.hostTree.Select(key) if !haveHost { return } // Update historic values if necessary updateHostHistoricInteractions(&host, hdb.blockHeight) // Increment the successful interactions host.RecentSuccessfulInteractions++ hdb.hostTree.Modify(host) } // IncrementFailedInteractions increments the number of failed interactions with // a host for a given key func (hdb *HostDB) IncrementFailedInteractions(key types.SiaPublicKey) { hdb.mu.Lock() defer hdb.mu.Unlock() // Fetch the host. host, haveHost := hdb.hostTree.Select(key) if !haveHost || !hdb.online { // If we are offline it probably wasn't the host's fault return } // Update historic values if necessary updateHostHistoricInteractions(&host, hdb.blockHeight) // Increment the failed interactions host.RecentFailedInteractions++ hdb.hostTree.Modify(host) } Sia-1.3.0/modules/renter/hostdb/hosttree/000077500000000000000000000000001313565667000203325ustar00rootroot00000000000000Sia-1.3.0/modules/renter/hostdb/hosttree/hosttree.go000066400000000000000000000210321313565667000225140ustar00rootroot00000000000000package hosttree import ( "errors" "sort" "sync" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) var ( // errWeightTooHeavy is returned from a SelectRandom() call if a weight that exceeds // the total weight of the tree is requested. errWeightTooHeavy = errors.New("requested a too-heavy weight") // errNegativeWeight is returned from an Insert() call if an entry with a // negative weight is added to the tree. Entries must always have a positive // weight. errNegativeWeight = errors.New("cannot insert using a negative weight") // errNilEntry is returned if a fetch call results in a nil tree entry. nodes // should always have a non-nil entry, unless they have been Delete()ed. errNilEntry = errors.New("node has a nil entry") // errHostExists is returned if an Insert is called with a public key that // already exists in the tree. errHostExists = errors.New("host already exists in the tree") // errNoSuchHost is returned if Remove is called with a public key that does // not exist in the tree. 
errNoSuchHost = errors.New("no host with specified public key") ) type ( // WeightFunc is a function used to weight a given HostDBEntry in the tree. WeightFunc func(modules.HostDBEntry) types.Currency // HostTree is used to store and select host database entries. Each HostTree // is initialized with a weighting func that is able to assign a weight to // each entry. The entries can then be selected at random, weighted by the // weight func. HostTree struct { root *node // hosts is a map of public keys to nodes. hosts map[string]*node // weightFn calculates the weight of a hostEntry weightFn WeightFunc mu sync.Mutex } // hostEntry is an entry in the host tree. hostEntry struct { modules.HostDBEntry weight types.Currency } // node is a node in the tree. node struct { parent *node left *node right *node count int // cumulative count of this node and all children taken bool // `taken` indicates whether there is an active host at this node or not. weight types.Currency entry *hostEntry } ) // createNode creates a new node using the provided `parent` and `entry`. func createNode(parent *node, entry *hostEntry) *node { return &node{ parent: parent, weight: entry.weight, count: 1, taken: true, entry: entry, } } // New creates a new, empty HostTree. It takes one argument, a `WeightFunc`, // which is used to determine the weight of a node on Insert. func New(wf WeightFunc) *HostTree { return &HostTree{ root: &node{ count: 1, }, weightFn: wf, hosts: make(map[string]*node), } } // recursiveInsert inserts an entry into the appropriate place in the tree. The // running time of recursiveInsert is log(n) in the maximum number of elements // that have ever been in the tree. func (n *node) recursiveInsert(entry *hostEntry) (nodesAdded int, newnode *node) { // If there is no parent and no children, and the node is not taken, assign // this entry to this node. if n.parent == nil && n.left == nil && n.right == nil && !n.taken { n.entry = entry n.taken = true n.weight = entry.weight newnode = n return } n.weight = n.weight.Add(entry.weight) // If the current node is empty, add the entry but don't increase the // count. if !n.taken { n.taken = true n.entry = entry newnode = n return } // Insert the element into the least populated side. if n.left == nil { n.left = createNode(n, entry) nodesAdded = 1 newnode = n.left } else if n.right == nil { n.right = createNode(n, entry) nodesAdded = 1 newnode = n.right } else if n.left.count <= n.right.count { nodesAdded, newnode = n.left.recursiveInsert(entry) } else { nodesAdded, newnode = n.right.recursiveInsert(entry) } n.count += nodesAdded return } // nodeAtWeight grabs an element in the tree that appears at the given weight. // Though the tree has an arbitrary sorting, a sufficiently random weight will // pull a random element. The tree is searched through in a post-ordered way. func (n *node) nodeAtWeight(weight types.Currency) *node { // Sanity check - weight must be less than the total weight of the tree. if weight.Cmp(n.weight) > 0 { build.Critical("Node weight corruption") return nil } // Check if the left or right child should be returned. if n.left != nil { if weight.Cmp(n.left.weight) < 0 { return n.left.nodeAtWeight(weight) } weight = weight.Sub(n.left.weight) // Search from the 0th index of the right side. } if n.right != nil && weight.Cmp(n.right.weight) < 0 { return n.right.nodeAtWeight(weight) } // Should we panic here instead? if !n.taken { build.Critical("Node tree structure corruption") return nil } // Return the root entry.
return n } // remove takes a node and removes it from the tree by climbing through the // list of parents. remove does not delete nodes. func (n *node) remove() { n.weight = n.weight.Sub(n.entry.weight) n.taken = false current := n.parent for current != nil { current.weight = current.weight.Sub(n.entry.weight) current = current.parent } } // All returns all of the hosts in the host tree, sorted by weight. func (ht *HostTree) All() []modules.HostDBEntry { ht.mu.Lock() defer ht.mu.Unlock() var he []hostEntry for _, node := range ht.hosts { he = append(he, *node.entry) } sort.Sort(byWeight(he)) var entries []modules.HostDBEntry for _, entry := range he { entries = append(entries, entry.HostDBEntry) } return entries } // Insert inserts the entry provided to `entry` into the host tree. Insert will // return an error if the input host already exists. func (ht *HostTree) Insert(hdbe modules.HostDBEntry) error { ht.mu.Lock() defer ht.mu.Unlock() entry := &hostEntry{ HostDBEntry: hdbe, weight: ht.weightFn(hdbe), } if _, exists := ht.hosts[string(entry.PublicKey.Key)]; exists { return errHostExists } _, node := ht.root.recursiveInsert(entry) ht.hosts[string(entry.PublicKey.Key)] = node return nil } // Remove removes the host with the public key provided by `pk`. func (ht *HostTree) Remove(pk types.SiaPublicKey) error { ht.mu.Lock() defer ht.mu.Unlock() node, exists := ht.hosts[string(pk.Key)] if !exists { return errNoSuchHost } node.remove() delete(ht.hosts, string(pk.Key)) return nil } // Modify updates a host entry at the given public key, replacing the old entry // with the entry provided by `newEntry`. func (ht *HostTree) Modify(hdbe modules.HostDBEntry) error { ht.mu.Lock() defer ht.mu.Unlock() node, exists := ht.hosts[string(hdbe.PublicKey.Key)] if !exists { return errNoSuchHost } node.remove() entry := &hostEntry{ HostDBEntry: hdbe, weight: ht.weightFn(hdbe), } _, node = ht.root.recursiveInsert(entry) ht.hosts[string(entry.PublicKey.Key)] = node return nil } // Select returns the host with the provided public key, should the host exist. func (ht *HostTree) Select(spk types.SiaPublicKey) (modules.HostDBEntry, bool) { ht.mu.Lock() defer ht.mu.Unlock() node, exists := ht.hosts[string(spk.Key)] if !exists { return modules.HostDBEntry{}, false } return node.entry.HostDBEntry, true } // SelectRandom grabs a random n hosts from the tree. There will be no repeats, but // the length of the slice returned may be less than n, and may even be zero. // The hosts that are returned first have the higher priority. Hosts passed to // 'ignore' will not be considered; pass `nil` if no blacklist is desired. func (ht *HostTree) SelectRandom(n int, ignore []types.SiaPublicKey) []modules.HostDBEntry { ht.mu.Lock() defer ht.mu.Unlock() var hosts []modules.HostDBEntry var removedEntries []*hostEntry for _, pubkey := range ignore { node, exists := ht.hosts[string(pubkey.Key)] if !exists { continue } node.remove() delete(ht.hosts, string(pubkey.Key)) removedEntries = append(removedEntries, node.entry) } for len(hosts) < n && len(ht.hosts) > 0 { randWeight := fastrand.BigIntn(ht.root.weight.Big()) node := ht.root.nodeAtWeight(types.NewCurrency(randWeight)) if node.entry.AcceptingContracts && len(node.entry.ScanHistory) > 0 && node.entry.ScanHistory[len(node.entry.ScanHistory)-1].Success { // The host must be online and accepting contracts to be returned // by the random function. 
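// Both returned and filtered-out nodes are removed below so the same host
// cannot be drawn twice; every removed entry is reinserted after the
// selection loop finishes.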
hosts = append(hosts, node.entry.HostDBEntry) } removedEntries = append(removedEntries, node.entry) node.remove() delete(ht.hosts, string(node.entry.PublicKey.Key)) } for _, entry := range removedEntries { _, node := ht.root.recursiveInsert(entry) ht.hosts[string(entry.PublicKey.Key)] = node } return hosts } Sia-1.3.0/modules/renter/hostdb/hosttree/hosttree_test.go000066400000000000000000000276321313565667000235670ustar00rootroot00000000000000package hosttree import ( "errors" "fmt" "strconv" "sync" "testing" "time" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" siasync "github.com/NebulousLabs/Sia/sync" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) func verifyTree(tree *HostTree, nentries int) error { expectedWeight := tree.root.entry.weight.Mul64(uint64(nentries)) if tree.root.weight.Cmp(expectedWeight) != 0 { return fmt.Errorf("expected weight is incorrect: got %v wanted %v\n", tree.root.weight, expectedWeight) } // Check that the length of activeHosts and the count of hostTree are // consistent. if len(tree.hosts) != nentries { return fmt.Errorf("unexpected number of hosts: got %v wanted %v\n", len(tree.hosts), nentries) } // Select many random hosts and do naive statistical analysis on the // results. if !testing.Short() { // Pull a bunch of random hosts and count how many times we pull each // host. selectionMap := make(map[string]int) expected := 100 for i := 0; i < expected*nentries; i++ { entries := tree.SelectRandom(1, nil) if len(entries) == 0 { return errors.New("no hosts") } selectionMap[string(entries[0].PublicKey.Key)]++ } // See if each host was selected enough times. errorBound := 64 // Pretty large, but will still detect if something is seriously wrong. for _, count := range selectionMap { if count < expected-errorBound || count > expected+errorBound { return errors.New("error bound was breached") } } } // Try removing an re-adding all hosts. var removedEntries []*hostEntry for { if tree.root.weight.IsZero() { break } randWeight := fastrand.BigIntn(tree.root.weight.Big()) node := tree.root.nodeAtWeight(types.NewCurrency(randWeight)) node.remove() delete(tree.hosts, string(node.entry.PublicKey.Key)) // remove the entry from the hostdb so it won't be selected as a // repeat removedEntries = append(removedEntries, node.entry) } for _, entry := range removedEntries { tree.Insert(entry.HostDBEntry) } return nil } // makeHostDBEntry makes a new host entry with a random public key and the weight // provided to `weight`. func makeHostDBEntry() modules.HostDBEntry { dbe := modules.HostDBEntry{} _, pk := crypto.GenerateKeyPair() dbe.AcceptingContracts = true dbe.PublicKey = types.Ed25519PublicKey(pk) dbe.ScanHistory = modules.HostDBScans{{ Timestamp: time.Now(), Success: true, }} return dbe } func TestHostTree(t *testing.T) { tree := New(func(hdbe modules.HostDBEntry) types.Currency { return types.NewCurrency64(20) }) // Create a bunch of host entries of equal weight. firstInsertions := 64 var keys []types.SiaPublicKey for i := 0; i < firstInsertions; i++ { entry := makeHostDBEntry() keys = append(keys, entry.PublicKey) err := tree.Insert(entry) if err != nil { t.Fatal(err) } } err := verifyTree(tree, firstInsertions) if err != nil { t.Error(err) } var removed []types.SiaPublicKey // Randomly remove hosts from the tree and check that it is still in order. 
for _, key := range keys { if fastrand.Intn(2) == 0 { // remove each host with 50% probability err := tree.Remove(key) if err != nil { t.Fatal(err) } removed = append(removed, key) } } err = verifyTree(tree, firstInsertions-len(removed)) if err != nil { t.Error(err) } // Do some more insertions. secondInsertions := 64 for i := firstInsertions; i < firstInsertions+secondInsertions; i++ { entry := makeHostDBEntry() tree.Insert(entry) } err = verifyTree(tree, firstInsertions-len(removed)+secondInsertions) if err != nil { t.Error(err) } } // Verify that inserting, fetching, deleting, and modifying in parallel from // the hosttree does not cause inconsistency. func TestHostTreeParallel(t *testing.T) { if testing.Short() { t.SkipNow() } tree := New(func(dbe modules.HostDBEntry) types.Currency { return types.NewCurrency64(10) }) // spin up 100 goroutines all randomly inserting, removing, modifying, and // fetching nodes from the tree. var tg siasync.ThreadGroup nthreads := 100 nelements := 0 var mu sync.Mutex for i := 0; i < nthreads; i++ { go func() { tg.Add() defer tg.Done() inserted := make(map[string]modules.HostDBEntry) randEntry := func() *modules.HostDBEntry { for _, entry := range inserted { return &entry } return nil } for { select { case <-tg.StopChan(): return default: switch fastrand.Intn(4) { // INSERT case 0: entry := makeHostDBEntry() err := tree.Insert(entry) if err != nil { t.Error(err) } inserted[string(entry.PublicKey.Key)] = entry mu.Lock() nelements++ mu.Unlock() // REMOVE case 1: entry := randEntry() if entry == nil { continue } err := tree.Remove(entry.PublicKey) if err != nil { t.Error(err) } delete(inserted, string(entry.PublicKey.Key)) mu.Lock() nelements-- mu.Unlock() // MODIFY case 2: entry := randEntry() if entry == nil { continue } newentry := makeHostDBEntry() newentry.PublicKey = entry.PublicKey newentry.NetAddress = "127.0.0.1:31337" err := tree.Modify(newentry) if err != nil { t.Error(err) } inserted[string(entry.PublicKey.Key)] = newentry // FETCH case 3: tree.SelectRandom(3, nil) } } } }() } // let these goroutines operate on the tree for 5 seconds time.Sleep(time.Second * 5) // stop the goroutines tg.Stop() // verify the consistency of the tree err := verifyTree(tree, int(nelements)) if err != nil { t.Fatal(err) } } func TestHostTreeModify(t *testing.T) { tree := New(func(dbe modules.HostDBEntry) types.Currency { return types.NewCurrency64(10) }) treeSize := 100 var keys []types.SiaPublicKey for i := 0; i < treeSize; i++ { entry := makeHostDBEntry() keys = append(keys, entry.PublicKey) err := tree.Insert(entry) if err != nil { t.Fatal(err) } } // should fail with a nonexistent key err := tree.Modify(modules.HostDBEntry{}) if err != errNoSuchHost { t.Fatalf("modify should fail with ErrNoSuchHost when provided a nonexistent public key. Got error: %v\n", err) } targetKey := keys[fastrand.Intn(treeSize)] oldEntry := tree.hosts[string(targetKey.Key)].entry newEntry := makeHostDBEntry() newEntry.AcceptingContracts = false newEntry.PublicKey = oldEntry.PublicKey err = tree.Modify(newEntry) if err != nil { t.Fatal(err) } if tree.hosts[string(targetKey.Key)].entry.AcceptingContracts { t.Fatal("modify did not update host entry") } } // TestVariedWeights runs broad statistical tests on selecting hosts with // multiple different weights. func TestVariedWeights(t *testing.T) { if testing.Short() { t.SkipNow() } // insert i hosts with the weights 0, 1, ..., i-1. 10e3 selections will be made // per weight added to the tree; the total number of selections necessary // will be tallied up as hosts are created.
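// Host 0 carries zero weight and must never be selected; every other host
// should be drawn in proportion to its weight, within the acceptableError
// tolerance checked at the end of the test.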
i := 0 tree := New(func(dbe modules.HostDBEntry) types.Currency { return types.NewCurrency64(uint64(i)) }) hostCount := 5 expectedPerWeight := int(10e3) selections := 0 for i = 0; i < hostCount; i++ { entry := makeHostDBEntry() tree.Insert(entry) selections += i * expectedPerWeight } // Perform many random selections, noting which host was selected each // time. selectionMap := make(map[string]int) for i := 0; i < selections; i++ { randEntry := tree.SelectRandom(1, nil) if len(randEntry) == 0 { t.Fatal("no hosts!") } node, exists := tree.hosts[string(randEntry[0].PublicKey.Key)] if !exists { t.Fatal("can't find randomly selected node in tree") } selectionMap[node.entry.weight.String()]++ } // Check that each host was selected an expected number of times. An error // will be reported if the host of 0 weight is ever selected. acceptableError := 0.2 for weight, timesSelected := range selectionMap { intWeight, err := strconv.Atoi(weight) if err != nil { t.Fatal(err) } expectedSelected := float64(intWeight * expectedPerWeight) if expectedSelected*acceptableError > float64(timesSelected) || expectedSelected/acceptableError < float64(timesSelected) { t.Error("weighted list not selecting in a uniform distribution based on weight") t.Error(expectedSelected) t.Error(timesSelected) } } } // TestRepeatInsert inserts 2 hosts with the same public key. func TestRepeatInsert(t *testing.T) { if testing.Short() { t.SkipNow() } tree := New(func(dbe modules.HostDBEntry) types.Currency { return types.NewCurrency64(10) }) entry1 := makeHostDBEntry() entry2 := entry1 tree.Insert(entry1) tree.Insert(entry2) if len(tree.hosts) != 1 { t.Error("inserting the same entry twice should result in only 1 entry") } } // TestNodeAtWeight tests the nodeAtWeight method. func TestNodeAtWeight(t *testing.T) { weight := types.NewCurrency64(10) // create hostTree tree := New(func(dbe modules.HostDBEntry) types.Currency { return weight }) entry := makeHostDBEntry() err := tree.Insert(entry) if err != nil { t.Fatal(err) } h := tree.root.nodeAtWeight(weight) if string(h.entry.HostDBEntry.PublicKey.Key) != string(entry.PublicKey.Key) { t.Errorf("nodeAtWeight returned wrong node: expected %v, got %v", entry, h.entry) } } // TestRandomHosts probes the SelectRandom method. func TestRandomHosts(t *testing.T) { calls := 0 // Create the tree. tree := New(func(dbe modules.HostDBEntry) types.Currency { calls++ return types.NewCurrency64(uint64(calls)) }) // Empty. hosts := tree.SelectRandom(1, nil) if len(hosts) != 0 { t.Errorf("empty hostdb returns %v hosts: %v", len(hosts), hosts) } // Insert 3 hosts to be selected. entry1 := makeHostDBEntry() entry2 := makeHostDBEntry() entry3 := makeHostDBEntry() if err := tree.Insert(entry1); err != nil { t.Fatal(err) } if err := tree.Insert(entry2); err != nil { t.Fatal(err) } if err := tree.Insert(entry3); err != nil { t.Fatal(err) } if len(tree.hosts) != 3 { t.Error("wrong number of hosts") } if tree.root.weight.Cmp(types.NewCurrency64(6)) != 0 { t.Error("unexpected weight at initialization") t.Error(tree.root.weight) } // Grab 1 random host. randHosts := tree.SelectRandom(1, nil) if len(randHosts) != 1 { t.Error("didn't get 1 host") } // Grab 2 random hosts. randHosts = tree.SelectRandom(2, nil) if len(randHosts) != 2 { t.Error("didn't get 2 hosts") } if randHosts[0].PublicKey.String() == randHosts[1].PublicKey.String() { t.Error("doubled up") } // Grab 3 random hosts.
randHosts = tree.SelectRandom(3, nil) if len(randHosts) != 3 { t.Error("didn't get 3 hosts") } if randHosts[0].PublicKey.String() == randHosts[1].PublicKey.String() || randHosts[0].PublicKey.String() == randHosts[2].PublicKey.String() || randHosts[1].PublicKey.String() == randHosts[2].PublicKey.String() { t.Error("doubled up") } // Grab 4 random hosts. 3 should be returned. randHosts = tree.SelectRandom(4, nil) if len(randHosts) != 3 { t.Error("didn't get 3 hosts") } if randHosts[0].PublicKey.String() == randHosts[1].PublicKey.String() || randHosts[0].PublicKey.String() == randHosts[2].PublicKey.String() || randHosts[1].PublicKey.String() == randHosts[2].PublicKey.String() { t.Error("doubled up") } // Ask for 3 hosts that are not in randHosts. No hosts should be // returned. uniqueHosts := tree.SelectRandom(3, []types.SiaPublicKey{ randHosts[0].PublicKey, randHosts[1].PublicKey, randHosts[2].PublicKey, }) if len(uniqueHosts) != 0 { t.Error("didn't get 0 hosts") } // Ask for 3 hosts, blacklisting non-existent hosts. 3 should be returned. randHosts = tree.SelectRandom(3, []types.SiaPublicKey{{}, {}, {}}) if len(randHosts) != 3 { t.Error("didn't get 3 hosts") } if randHosts[0].PublicKey.String() == randHosts[1].PublicKey.String() || randHosts[0].PublicKey.String() == randHosts[2].PublicKey.String() || randHosts[1].PublicKey.String() == randHosts[2].PublicKey.String() { t.Error("doubled up") } } Sia-1.3.0/modules/renter/hostdb/hosttree/sort.go000066400000000000000000000004031313565667000216450ustar00rootroot00000000000000package hosttree type byWeight []hostEntry func (he byWeight) Len() int { return len(he) } func (he byWeight) Less(i, j int) bool { return he[i].weight.Cmp(he[j].weight) < 0 } func (he byWeight) Swap(i, j int) { he[i], he[j] = he[j], he[i] } Sia-1.3.0/modules/renter/hostdb/hostweight.go000066400000000000000000000402611313565667000212140ustar00rootroot00000000000000package hostdb import ( "math" "math/big" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) var ( // Because most weights would otherwise be fractional, we set the base // weight to be very large. baseWeight = types.NewCurrency(new(big.Int).Exp(big.NewInt(10), big.NewInt(80), nil)) // tbMonth is the number of bytes in a terabyte times the number of blocks // in a month. tbMonth = uint64(4032) * uint64(1e12) // collateralExponentiation is the number of times that the collateral is // multiplied into the price. collateralExponentiation = 1 // priceDivNormalization reduces the raw value of the price so that not so // many digits are needed when operating on the weight. This also allows the // base weight to be a lot lower. priceDivNormalization = types.SiacoinPrecision.Div64(10e3).Div64(tbMonth) // minCollateral is the amount of collateral we weight all hosts as having, // even if they do not have any collateral. This is to temporarily prop up // weak / cheap hosts on the network while the network is bootstrapping. minCollateral = types.SiacoinPrecision.Mul64(5).Div64(tbMonth) // Set a minimum price, below which setting lower prices will no longer put // this host at an advantage. This price is considered the bar for // 'essentially free', and is kept to a minimum to prevent certain Sybil // attack vectors. // // NOTE: This needs to be intelligently adjusted down as the practical price // of storage changes, and as the price of the siacoin changes.
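// Worked example (illustrative): Mul64(25).Div64(tbMonth) below sets the
// floor at 25 SC per terabyte-month, so a host whose adjusted total price
// works out to, say, 10 SC per terabyte-month is scored as though it
// charged the 25 SC floor.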
minTotalPrice = types.SiacoinPrecision.Mul64(25).Div64(tbMonth) // priceExponentiation is the number of times that the weight is divided by // the price. priceExponentiation = 5 // requiredStorage indicates the amount of storage that the host must be // offering in order to be considered a valuable/worthwhile host. requiredStorage = build.Select(build.Var{ Standard: uint64(20e9), Dev: uint64(1e6), Testing: uint64(1e3), }).(uint64) ) // collateralAdjustments improves the host's weight according to the amount of // collateral that it has provided. func (hdb *HostDB) collateralAdjustments(entry modules.HostDBEntry) float64 { // Sanity checks - the constant values need to have certain relationships // to each other if build.DEBUG { // If minCollateral is not much larger than the divNormalization, // there will be problems with granularity after the divNormalization is // applied. if minCollateral.Div64(1e3).Cmp(priceDivNormalization) < 0 { build.Critical("Maladjusted minCollateral and divNormalization constants in hostdb package") } } // Set a minimum on the collateral, then normalize to a sane precision. usedCollateral := entry.Collateral if entry.Collateral.Cmp(minCollateral) < 0 { usedCollateral = minCollateral } baseU64, err := minCollateral.Div(priceDivNormalization).Uint64() if err != nil { baseU64 = math.MaxUint64 } actualU64, err := usedCollateral.Div(priceDivNormalization).Uint64() if err != nil { actualU64 = math.MaxUint64 } base := float64(baseU64) actual := float64(actualU64) // Exponentiate the results. weight := float64(1) for i := 0; i < collateralExponentiation; i++ { weight *= actual / base } return weight } // interactionAdjustments determines the penalty to be applied to a host for // its historic and current interactions. This function focuses on historic // interactions and ignores recent interactions. func (hdb *HostDB) interactionAdjustments(entry modules.HostDBEntry) float64 { // Give the host a baseline of 30 successful interactions and 1 failed // interaction. This baseline helps when we've had few interactions with // the host; the 1 failed interaction becomes irrelevant after sufficient // interactions with the host. hsi := entry.HistoricSuccessfulInteractions + 30 hfi := entry.HistoricFailedInteractions + 1 // Determine the interaction ratio based on the historic interactions. ratio := float64(hsi) / float64(hsi+hfi) // Raise the ratio to the 15th power and return that. The exponentiation is // very high because the renter will already intentionally avoid hosts that // do not have many successful interactions, meaning that the bad points do // not rack up very quickly. We want to signal a bad score for the host // nonetheless. return math.Pow(ratio, 15) } // priceAdjustments will adjust the weight of the entry according to the prices // that it has set. func (hdb *HostDB) priceAdjustments(entry modules.HostDBEntry) float64 { // Sanity checks - the constant values need to have certain relationships // to each other if build.DEBUG { // If the minTotalPrice is not much larger than the divNormalization, // there will be problems with granularity after the divNormalization is // applied.
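// Worked example of the granularity concern (derived from the constants
// above): minTotalPrice / priceDivNormalization = 25 * 10e3 = 250e3, so
// even after the Div64(1e3) in the check below a few significant digits
// remain and the debug check passes.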
if minTotalPrice.Div64(1e3).Cmp(priceDivNormalization) < 0 { build.Critical("Maladjusted minTotalPrice and divNormalization constants in hostdb package") } } // Prices are tiered as follows: // - the storage price is presented as 'per block per byte' // - the contract price is presented as a flat rate // - the upload bandwidth price is per byte // - the download bandwidth price is per byte // // The hostdb will naively assume the following for now: // - each contract covers 6 weeks of storage (default is 12 weeks, but // renewals occur at midpoint) - 6048 blocks - and 25GB of storage. // - uploads happen once per 24 weeks (the 24192-block divisor below) // - downloads happen once per 12 weeks (files are on average downloaded once throughout lifetime) // // In the future, the renter should be able to track average user behavior // and adjust accordingly. This flexibility will be added later. adjustedContractPrice := entry.ContractPrice.Div64(6048).Div64(25e9) // Adjust contract price to match 25GB for 6 weeks. adjustedUploadPrice := entry.UploadBandwidthPrice.Div64(24192) // Adjust upload price to match a single upload over 24 weeks. adjustedDownloadPrice := entry.DownloadBandwidthPrice.Div64(12096).Div64(3) // Adjust download price to match one download over 12 weeks, 1 redundancy. siafundFee := adjustedContractPrice.Add(adjustedUploadPrice).Add(adjustedDownloadPrice).Add(entry.Collateral).MulTax() totalPrice := entry.StoragePrice.Add(adjustedContractPrice).Add(adjustedUploadPrice).Add(adjustedDownloadPrice).Add(siafundFee) // Set a minimum on the price, then normalize to a sane precision. if totalPrice.Cmp(minTotalPrice) < 0 { totalPrice = minTotalPrice } baseU64, err := minTotalPrice.Div(priceDivNormalization).Uint64() if err != nil { baseU64 = math.MaxUint64 } actualU64, err := totalPrice.Div(priceDivNormalization).Uint64() if err != nil { actualU64 = math.MaxUint64 } base := float64(baseU64) actual := float64(actualU64) weight := float64(1) for i := 0; i < priceExponentiation; i++ { weight *= base / actual } return weight } // storageRemainingAdjustments adjusts the weight of the entry according to how // much storage it has remaining. func storageRemainingAdjustments(entry modules.HostDBEntry) float64 { base := float64(1) if entry.RemainingStorage < 200*requiredStorage { base = base / 2 // 2x total penalty } if entry.RemainingStorage < 150*requiredStorage { base = base / 2 // 4x total penalty } if entry.RemainingStorage < 100*requiredStorage { base = base / 2 // 8x total penalty } if entry.RemainingStorage < 80*requiredStorage { base = base / 2 // 16x total penalty } if entry.RemainingStorage < 40*requiredStorage { base = base / 2 // 32x total penalty } if entry.RemainingStorage < 20*requiredStorage { base = base / 2 // 64x total penalty } if entry.RemainingStorage < 15*requiredStorage { base = base / 2 // 128x total penalty } if entry.RemainingStorage < 10*requiredStorage { base = base / 2 // 256x total penalty } if entry.RemainingStorage < 5*requiredStorage { base = base / 2 // 512x total penalty } if entry.RemainingStorage < 3*requiredStorage { base = base / 2 // 1024x total penalty } if entry.RemainingStorage < 2*requiredStorage { base = base / 2 // 2048x total penalty } if entry.RemainingStorage < requiredStorage { base = base / 2 // 4096x total penalty } return base } // versionAdjustments will adjust the weight of the entry according to the siad // version reported by the host.
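// Illustrative example (not from the original comment): a host reporting
// version 1.2.2 falls below only the 1.4.0 and 1.3.0 thresholds, giving a
// multiplier of about 0.99999 * 0.9 = 0.9, while a host below v1.0.0
// compounds every penalty for a cumulative cut of more than 30,000x.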
func versionAdjustments(entry modules.HostDBEntry) float64 { base := float64(1) if build.VersionCmp(entry.Version, "1.4.0") < 0 { base = base * 0.99999 // Safety value to make sure we update the version penalties every time we update the host. } if build.VersionCmp(entry.Version, "1.3.0") < 0 { base = base * 0.9 } if build.VersionCmp(entry.Version, "1.2.2") < 0 { base = base * 0.9 } if build.VersionCmp(entry.Version, "1.2.1") < 0 { base = base / 2 // 2x total penalty. } if build.VersionCmp(entry.Version, "1.2.0") < 0 { base = base / 2 // 4x total penalty. } if build.VersionCmp(entry.Version, "1.1.2") < 0 { base = base / 2 // 8x total penalty. } if build.VersionCmp(entry.Version, "1.1.1") < 0 { base = base / 2 // 16x total penalty. } if build.VersionCmp(entry.Version, "1.0.3") < 0 { base = base / 2 // 32x total penalty. } if build.VersionCmp(entry.Version, "1.0.0") < 0 { base = base / 1000 // 32,000x total penalty. } return base } // lifetimeAdjustments will adjust the weight of the host according to the total // amount of time that has passed since the host's original announcement. func (hdb *HostDB) lifetimeAdjustments(entry modules.HostDBEntry) float64 { base := float64(1) if hdb.blockHeight >= entry.FirstSeen { age := hdb.blockHeight - entry.FirstSeen if age < 6000 { base = base / 2 // 2x total } if age < 4000 { base = base / 2 // 4x total } if age < 2000 { base = base / 2 // 8x total } if age < 1000 { base = base / 2 // 16x total } if age < 576 { base = base / 2 // 32x total } if age < 288 { base = base / 2 // 64x total } if age < 144 { base = base / 2 // 128x total } } return base } // uptimeAdjustments penalizes the host for having poor uptime, and for being // offline. // // CAUTION: The function 'updateEntry' will manually fill out two scans for a // new host to give the host some initial uptime or downtime. Modification of // this function needs to be made paying attention to the structure of that // function. func (hdb *HostDB) uptimeAdjustments(entry modules.HostDBEntry) float64 { // Special case: if we have scanned the host twice or fewer, don't perform // uptime math. if len(entry.ScanHistory) == 0 { return 0.25 } if len(entry.ScanHistory) == 1 { if entry.ScanHistory[0].Success { return 0.75 } return 0.25 } if len(entry.ScanHistory) == 2 { if entry.ScanHistory[0].Success && entry.ScanHistory[1].Success { return 0.85 } if entry.ScanHistory[0].Success || entry.ScanHistory[1].Success { return 0.50 } return 0.05 } // Compute the total measured uptime and total measured downtime for this // host. downtime := entry.HistoricDowntime uptime := entry.HistoricUptime recentTime := entry.ScanHistory[0].Timestamp recentSuccess := entry.ScanHistory[0].Success for _, scan := range entry.ScanHistory[1:] { if recentTime.After(scan.Timestamp) { hdb.log.Critical("Host entry scan history not sorted.") // Ignore the unsorted scan entry. continue } if recentSuccess { uptime += scan.Timestamp.Sub(recentTime) } else { downtime += scan.Timestamp.Sub(recentTime) } recentTime = scan.Timestamp recentSuccess = scan.Success } // Sanity check against 0 total time. if uptime == 0 && downtime == 0 { return 0.001 // Shouldn't happen. } // Compute the uptime ratio, but shift by 0.02 to acknowledge fully that // 98% uptime and 100% uptime is valued the same. uptimeRatio := float64(uptime) / float64(uptime+downtime) if uptimeRatio > 0.98 { uptimeRatio = 0.98 } uptimeRatio += 0.02 // Cap the total amount of downtime allowed based on the total number of // scans that have happened. 
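// Worked example: with 10 recorded scans the cap below allows
// 0.03 * 10 = 30% downtime, so a host measured at 50% uptime is treated as
// having 70% uptime; with a long history (say 200 scans) the cap no longer
// binds and the measured ratio is used directly.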
allowedDowntime := 0.03 * float64(len(entry.ScanHistory)) if uptimeRatio < 1-allowedDowntime { uptimeRatio = 1 - allowedDowntime } // Calculate the penalty for low uptime. Penalties increase extremely // quickly as uptime falls away from 95%. // // 100% uptime = 1 // 98% uptime = 1 // 95% uptime = 0.91 // 90% uptime = 0.51 // 85% uptime = 0.16 // 80% uptime = 0.03 // 75% uptime = 0.005 // 70% uptime = 0.001 // 50% uptime = 0.000002 exp := 100 * math.Min(1-uptimeRatio, 0.20) return math.Pow(uptimeRatio, exp) } // calculateHostWeight returns the weight of a host according to the settings of // the host database entry. func (hdb *HostDB) calculateHostWeight(entry modules.HostDBEntry) types.Currency { collateralReward := hdb.collateralAdjustments(entry) interactionPenalty := hdb.interactionAdjustments(entry) lifetimePenalty := hdb.lifetimeAdjustments(entry) pricePenalty := hdb.priceAdjustments(entry) storageRemainingPenalty := storageRemainingAdjustments(entry) uptimePenalty := hdb.uptimeAdjustments(entry) versionPenalty := versionAdjustments(entry) // Combine the adjustments. fullPenalty := collateralReward * interactionPenalty * lifetimePenalty * pricePenalty * storageRemainingPenalty * uptimePenalty * versionPenalty // Return a types.Currency. weight := baseWeight.MulFloat(fullPenalty) if weight.IsZero() { // A weight of zero is problematic for the host tree. return types.NewCurrency64(1) } return weight } // calculateConversionRate calculates the conversion rate of the provided // host score, comparing it to the hosts in the database and returning what // percentage of contracts it is likely to participate in. func (hdb *HostDB) calculateConversionRate(score types.Currency) float64 { var totalScore types.Currency for _, h := range hdb.ActiveHosts() { totalScore = totalScore.Add(hdb.calculateHostWeight(h)) } if totalScore.IsZero() { totalScore = types.NewCurrency64(1) } conversionRate, _ := big.NewRat(0, 1).SetFrac(score.Mul64(50).Big(), totalScore.Big()).Float64() if conversionRate > 100 { conversionRate = 100 } return conversionRate } // EstimateHostScore takes a HostDBEntry and returns the estimated score of // that host in the hostdb, assuming no penalties for age or uptime. func (hdb *HostDB) EstimateHostScore(entry modules.HostDBEntry) modules.HostScoreBreakdown { // Grab the adjustments. Age and uptime penalties are set to '1' to assume // best behavior from the host. collateralReward := hdb.collateralAdjustments(entry) pricePenalty := hdb.priceAdjustments(entry) storageRemainingPenalty := storageRemainingAdjustments(entry) versionPenalty := versionAdjustments(entry) // Combine into a full penalty, then determine the resulting estimated // score. fullPenalty := collateralReward * pricePenalty * storageRemainingPenalty * versionPenalty estimatedScore := baseWeight.MulFloat(fullPenalty) if estimatedScore.IsZero() { estimatedScore = types.NewCurrency64(1) } // Compile the estimates into a host score breakdown. return modules.HostScoreBreakdown{ Score: estimatedScore, ConversionRate: hdb.calculateConversionRate(estimatedScore), AgeAdjustment: 1, BurnAdjustment: 1, CollateralAdjustment: collateralReward, PriceAdjustment: pricePenalty, StorageRemainingAdjustment: storageRemainingPenalty, UptimeAdjustment: 1, VersionAdjustment: versionPenalty, } } // ScoreBreakdown provides a detailed set of scalars and bools indicating // elements of the host's overall score.
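// Illustrative composition (adjustment values assumed for the example): if
// a host scored collateral 1.0, interaction 0.9, lifetime 0.5, price 0.8,
// storage 0.5, uptime 0.91, and version 0.9, then calculateHostWeight above
// returns baseWeight * (0.9 * 0.5 * 0.8 * 0.5 * 0.91 * 0.9), which is about
// baseWeight * 0.15.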
func (hdb *HostDB) ScoreBreakdown(entry modules.HostDBEntry) modules.HostScoreBreakdown { hdb.mu.Lock() defer hdb.mu.Unlock() score := hdb.calculateHostWeight(entry) return modules.HostScoreBreakdown{ Score: score, ConversionRate: hdb.calculateConversionRate(score), AgeAdjustment: hdb.lifetimeAdjustments(entry), BurnAdjustment: 1, CollateralAdjustment: hdb.collateralAdjustments(entry), InteractionAdjustment: hdb.interactionAdjustments(entry), PriceAdjustment: hdb.priceAdjustments(entry), StorageRemainingAdjustment: storageRemainingAdjustments(entry), UptimeAdjustment: hdb.uptimeAdjustments(entry), VersionAdjustment: versionAdjustments(entry), } } Sia-1.3.0/modules/renter/hostdb/hostweight_test.go000066400000000000000000000215501313565667000222530ustar00rootroot00000000000000package hostdb import ( "testing" "time" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) func calculateWeightFromUInt64Price(price uint64) (weight types.Currency) { hdb := bareHostDB() hdb.blockHeight = 0 var entry modules.HostDBEntry entry.RemainingStorage = 250e3 entry.StoragePrice = types.NewCurrency64(price).Mul(types.SiacoinPrecision).Div64(4032).Div64(1e9) return hdb.calculateHostWeight(entry) } func TestHostWeightDistinctPrices(t *testing.T) { if testing.Short() { t.SkipNow() } weight1 := calculateWeightFromUInt64Price(300) weight2 := calculateWeightFromUInt64Price(301) if weight1.Cmp(weight2) <= 0 { t.Log(weight1) t.Log(weight2) t.Error("Weight of expensive host is not the correct value.") } } func TestHostWeightIdenticalPrices(t *testing.T) { if testing.Short() { t.SkipNow() } weight1 := calculateWeightFromUInt64Price(42) weight2 := calculateWeightFromUInt64Price(42) if weight1.Cmp(weight2) != 0 { t.Error("Weight of identically priced hosts should be equal.") } } func TestHostWeightWithOnePricedZero(t *testing.T) { if testing.Short() { t.SkipNow() } weight1 := calculateWeightFromUInt64Price(5) weight2 := calculateWeightFromUInt64Price(0) if weight1.Cmp(weight2) >= 0 { t.Error("Zero-priced host should have higher weight than nonzero-priced host.") } } func TestHostWeightWithBothPricesZero(t *testing.T) { if testing.Short() { t.SkipNow() } weight1 := calculateWeightFromUInt64Price(0) weight2 := calculateWeightFromUInt64Price(0) if weight1.Cmp(weight2) != 0 { t.Error("Weight of two zero-priced hosts should be equal.") } } func TestHostWeightCollateralDifferences(t *testing.T) { if testing.Short() { t.SkipNow() } hdb := bareHostDB() var entry modules.HostDBEntry entry.RemainingStorage = 250e3 entry.StoragePrice = types.NewCurrency64(1000).Mul(types.SiacoinPrecision) entry.Collateral = types.NewCurrency64(1000).Mul(types.SiacoinPrecision) entry2 := entry entry2.Collateral = types.NewCurrency64(500).Mul(types.SiacoinPrecision) w1 := hdb.calculateHostWeight(entry) w2 := hdb.calculateHostWeight(entry2) if w1.Cmp(w2) < 0 { t.Error("Larger collateral should have more weight") } } func TestHostWeightStorageRemainingDifferences(t *testing.T) { if testing.Short() { t.SkipNow() } hdb := bareHostDB() var entry modules.HostDBEntry entry.RemainingStorage = 250e3 entry.StoragePrice = types.NewCurrency64(1000).Mul(types.SiacoinPrecision) entry.Collateral = types.NewCurrency64(1000).Mul(types.SiacoinPrecision) entry2 := entry entry2.RemainingStorage = 50e3 w1 := hdb.calculateHostWeight(entry) w2 := hdb.calculateHostWeight(entry2) if w1.Cmp(w2) < 0 { t.Error("Larger storage remaining should have more weight") } } func TestHostWeightVersionDifferences(t *testing.T) { if testing.Short() { t.SkipNow() } hdb 
:= bareHostDB() var entry modules.HostDBEntry entry.RemainingStorage = 250e3 entry.StoragePrice = types.NewCurrency64(1000).Mul(types.SiacoinPrecision) entry.Collateral = types.NewCurrency64(1000).Mul(types.SiacoinPrecision) entry.Version = "v1.0.4" entry2 := entry entry2.Version = "v1.0.3" w1 := hdb.calculateHostWeight(entry) w2 := hdb.calculateHostWeight(entry2) if w1.Cmp(w2) < 0 { t.Error("Higher version should have more weight") } } func TestHostWeightLifetimeDifferences(t *testing.T) { if testing.Short() { t.SkipNow() } hdb := bareHostDB() hdb.blockHeight = 10000 var entry modules.HostDBEntry entry.RemainingStorage = 250e3 entry.StoragePrice = types.NewCurrency64(1000).Mul(types.SiacoinPrecision) entry.Collateral = types.NewCurrency64(1000).Mul(types.SiacoinPrecision) entry.Version = "v1.0.4" entry2 := entry entry2.FirstSeen = 8100 w1 := hdb.calculateHostWeight(entry) w2 := hdb.calculateHostWeight(entry2) if w1.Cmp(w2) < 0 { t.Error("Been around longer should have more weight") } } func TestHostWeightUptimeDifferences(t *testing.T) { if testing.Short() { t.SkipNow() } hdb := bareHostDB() hdb.blockHeight = 10000 var entry modules.HostDBEntry entry.RemainingStorage = 250e3 entry.StoragePrice = types.NewCurrency64(1000).Mul(types.SiacoinPrecision) entry.Collateral = types.NewCurrency64(1000).Mul(types.SiacoinPrecision) entry.Version = "v1.0.4" entry.ScanHistory = modules.HostDBScans{ {Timestamp: time.Now().Add(time.Hour * -100), Success: true}, {Timestamp: time.Now().Add(time.Hour * -80), Success: true}, {Timestamp: time.Now().Add(time.Hour * -60), Success: true}, {Timestamp: time.Now().Add(time.Hour * -40), Success: true}, {Timestamp: time.Now().Add(time.Hour * -20), Success: true}, } entry2 := entry entry2.ScanHistory = modules.HostDBScans{ {Timestamp: time.Now().Add(time.Hour * -100), Success: true}, {Timestamp: time.Now().Add(time.Hour * -80), Success: true}, {Timestamp: time.Now().Add(time.Hour * -60), Success: true}, {Timestamp: time.Now().Add(time.Hour * -40), Success: true}, {Timestamp: time.Now().Add(time.Hour * -20), Success: false}, } w1 := hdb.calculateHostWeight(entry) w2 := hdb.calculateHostWeight(entry2) if w1.Cmp(w2) < 0 { t.Error("Host with better uptime should have more weight") } } func TestHostWeightUptimeDifferences2(t *testing.T) { if testing.Short() { t.SkipNow() } hdb := bareHostDB() hdb.blockHeight = 10000 var entry modules.HostDBEntry entry.RemainingStorage = 250e3 entry.StoragePrice = types.NewCurrency64(1000).Mul(types.SiacoinPrecision) entry.Collateral = types.NewCurrency64(1000).Mul(types.SiacoinPrecision) entry.Version = "v1.0.4" entry.ScanHistory = modules.HostDBScans{ {Timestamp: time.Now().Add(time.Hour * -100), Success: true}, {Timestamp: time.Now().Add(time.Hour * -80), Success: false}, {Timestamp: time.Now().Add(time.Hour * -60), Success: true}, {Timestamp: time.Now().Add(time.Hour * -40), Success: true}, {Timestamp: time.Now().Add(time.Hour * -20), Success: true}, } entry2 := entry entry2.ScanHistory = modules.HostDBScans{ {Timestamp: time.Now().Add(time.Hour * -100), Success: true}, {Timestamp: time.Now().Add(time.Hour * -80), Success: true}, {Timestamp: time.Now().Add(time.Hour * -60), Success: true}, {Timestamp: time.Now().Add(time.Hour * -40), Success: false}, {Timestamp: time.Now().Add(time.Hour * -20), Success: true}, } w1 := hdb.calculateHostWeight(entry) w2 := hdb.calculateHostWeight(entry2) if w1.Cmp(w2) < 0 { t.Errorf("Host with the older failed scan should have more weight\n\t%v\n\t%v", w1, w2) } } func TestHostWeightUptimeDifferences3(t *testing.T) { if 
testing.Short() { t.SkipNow() } hdb := bareHostDB() hdb.blockHeight = 10000 var entry modules.HostDBEntry entry.RemainingStorage = 250e3 entry.StoragePrice = types.NewCurrency64(1000).Mul(types.SiacoinPrecision) entry.Collateral = types.NewCurrency64(1000).Mul(types.SiacoinPrecision) entry.Version = "v1.0.4" entry.ScanHistory = modules.HostDBScans{ {Timestamp: time.Now().Add(time.Hour * -100), Success: true}, {Timestamp: time.Now().Add(time.Hour * -80), Success: false}, {Timestamp: time.Now().Add(time.Hour * -60), Success: true}, {Timestamp: time.Now().Add(time.Hour * -40), Success: true}, {Timestamp: time.Now().Add(time.Hour * -20), Success: true}, } entry2 := entry entry2.ScanHistory = modules.HostDBScans{ {Timestamp: time.Now().Add(time.Hour * -100), Success: true}, {Timestamp: time.Now().Add(time.Hour * -80), Success: false}, {Timestamp: time.Now().Add(time.Hour * -60), Success: false}, {Timestamp: time.Now().Add(time.Hour * -40), Success: true}, {Timestamp: time.Now().Add(time.Hour * -20), Success: true}, } w1 := hdb.calculateHostWeight(entry) w2 := hdb.calculateHostWeight(entry2) if w1.Cmp(w2) < 0 { t.Error("Host with fewer failed scans should have more weight") } } func TestHostWeightUptimeDifferences4(t *testing.T) { if testing.Short() { t.SkipNow() } hdb := bareHostDB() hdb.blockHeight = 10000 var entry modules.HostDBEntry entry.RemainingStorage = 250e3 entry.StoragePrice = types.NewCurrency64(1000).Mul(types.SiacoinPrecision) entry.Collateral = types.NewCurrency64(1000).Mul(types.SiacoinPrecision) entry.Version = "v1.0.4" entry.ScanHistory = modules.HostDBScans{ {Timestamp: time.Now().Add(time.Hour * -100), Success: true}, {Timestamp: time.Now().Add(time.Hour * -80), Success: true}, {Timestamp: time.Now().Add(time.Hour * -60), Success: true}, {Timestamp: time.Now().Add(time.Hour * -40), Success: true}, {Timestamp: time.Now().Add(time.Hour * -20), Success: false}, } entry2 := entry entry2.ScanHistory = modules.HostDBScans{ {Timestamp: time.Now().Add(time.Hour * -100), Success: true}, {Timestamp: time.Now().Add(time.Hour * -80), Success: true}, {Timestamp: time.Now().Add(time.Hour * -60), Success: true}, {Timestamp: time.Now().Add(time.Hour * -40), Success: false}, {Timestamp: time.Now().Add(time.Hour * -20), Success: false}, } w1 := hdb.calculateHostWeight(entry) w2 := hdb.calculateHostWeight(entry2) if w1.Cmp(w2) < 0 { t.Error("Host with fewer failed scans should have more weight") } } Sia-1.3.0/modules/renter/hostdb/online.go000066400000000000000000000014641313565667000203150ustar00rootroot00000000000000package hostdb import ( "time" ) // online.go performs regular checks against the gateway to see if there is // internet connectivity. The check is performed by looking for at least one // non-local peer in the peer list. If there is no internet connectivity, scans // must be stopped lest we penalize otherwise online hosts. func (hdb *HostDB) threadedOnlineCheck() { err := hdb.tg.Add() if err != nil { return } defer hdb.tg.Done() for { // Every 30 seconds, check the online status and update the online // field.
peers := hdb.gateway.Peers() hdb.mu.Lock() hdb.online = false for _, peer := range peers { if !peer.Local { hdb.online = true break } } hdb.mu.Unlock() select { case <-time.After(time.Second * 30): continue case <-hdb.tg.StopChan(): return } } } Sia-1.3.0/modules/renter/hostdb/persist.go000066400000000000000000000050711313565667000205200ustar00rootroot00000000000000package hostdb import ( "path/filepath" "time" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" ) var ( // persistFilename defines the name of the file that holds the hostdb's // persistence. persistFilename = "hostdb.json" // persistMetadata defines the metadata that tags along with the most recent // version of the hostdb persistence file. persistMetadata = persist.Metadata{ Header: "HostDB Persistence", Version: "0.5", } ) // hdbPersist defines what HostDB data persists across sessions. type hdbPersist struct { AllHosts []modules.HostDBEntry BlockHeight types.BlockHeight LastChange modules.ConsensusChangeID } // persistData returns the data in the hostdb that will be saved to disk. func (hdb *HostDB) persistData() (data hdbPersist) { data.AllHosts = hdb.hostTree.All() data.BlockHeight = hdb.blockHeight data.LastChange = hdb.lastChange return data } // saveSync saves the hostdb persistence data to disk and then syncs to disk. func (hdb *HostDB) saveSync() error { return hdb.deps.saveFileSync(persistMetadata, hdb.persistData(), filepath.Join(hdb.persistDir, persistFilename)) } // load loads the hostdb persistence data from disk. func (hdb *HostDB) load() error { // Fetch the data from the file. var data hdbPersist err := hdb.deps.loadFile(persistMetadata, &data, filepath.Join(hdb.persistDir, persistFilename)) if err != nil { return err } // Set the hostdb internal values. hdb.blockHeight = data.BlockHeight hdb.lastChange = data.LastChange // Load each of the hosts into the host tree. for _, host := range data.AllHosts { // COMPATv1.1.0 // // The host did not always track its block height correctly, meaning // that previously the FirstSeen values and the blockHeight values // could get out of sync. if hdb.blockHeight < host.FirstSeen { host.FirstSeen = hdb.blockHeight } err := hdb.hostTree.Insert(host) if err != nil { hdb.log.Debugln("ERROR: could not insert host while loading:", host.NetAddress) } // Make sure that all hosts have gone through the initial scanning. if len(host.ScanHistory) < 2 { hdb.queueScan(host) } } return nil } // threadedSaveLoop saves the hostdb to disk every 2 minutes, also saving when // given the shutdown signal. func (hdb *HostDB) threadedSaveLoop() { for { select { case <-hdb.tg.StopChan(): return case <-time.After(saveFrequency): hdb.mu.Lock() err := hdb.saveSync() hdb.mu.Unlock() if err != nil { hdb.log.Println("Difficulties saving the hostdb:", err) } } } } Sia-1.3.0/modules/renter/hostdb/persist_test.go000066400000000000000000000057001313565667000215560ustar00rootroot00000000000000package hostdb import ( "path/filepath" "testing" "github.com/NebulousLabs/Sia/modules" ) // quitAfterLoadDeps will quit startup in newHostDB type quitAfterLoadDeps struct { prodDependencies } // Send a disrupt signal to the quitAfterLoad codebreak. func (quitAfterLoadDeps) disrupt(s string) bool { if s == "quitAfterLoad" { return true } return false } // TestSaveLoad tests that the hostdb can save and load itself. 
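// For orientation (a sketch based on hdbPersist and persistMetadata above,
// not a byte-exact file layout): the saved hostdb.json carries the metadata
// header "HostDB Persistence" and version "0.5" plus the three persisted
// fields AllHosts, BlockHeight, and LastChange, which is exactly the state
// the save/load round-trip below exercises.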
func TestSaveLoad(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() hdbt, err := newHDBTester(t.Name()) if err != nil { t.Fatal(err) } // Mine two blocks to put the hdb height at 2. for i := 0; i < 2; i++ { _, err := hdbt.miner.AddBlock() if err != nil { t.Fatal(err) } } // Verify that the hdb height is 2. if hdbt.hdb.blockHeight != 2 { t.Fatal("test setup incorrect, hdb height needs to be 2 for remainder of test") } // Add fake hosts and a fake consensus change. The fake consensus change // would normally be detected and routed around, but we stunt the loading // process to only load the persistent fields. var host1, host2, host3 modules.HostDBEntry host1.FirstSeen = 1 host2.FirstSeen = 2 host3.FirstSeen = 3 host1.PublicKey.Key = []byte("foo") host2.PublicKey.Key = []byte("bar") host3.PublicKey.Key = []byte("baz") hdbt.hdb.hostTree.Insert(host1) hdbt.hdb.hostTree.Insert(host2) hdbt.hdb.hostTree.Insert(host3) // Save, close, and reload. hdbt.hdb.mu.Lock() hdbt.hdb.lastChange = modules.ConsensusChangeID{1, 2, 3} stashedLC := hdbt.hdb.lastChange err = hdbt.hdb.saveSync() hdbt.hdb.mu.Unlock() if err != nil { t.Fatal(err) } err = hdbt.hdb.Close() if err != nil { t.Fatal(err) } hdbt.hdb, err = newHostDB(hdbt.gateway, hdbt.cs, filepath.Join(hdbt.persistDir, modules.RenterDir), quitAfterLoadDeps{}) if err != nil { t.Fatal(err) } // Last change should have been reloaded. hdbt.hdb.mu.Lock() lastChange := hdbt.hdb.lastChange hdbt.hdb.mu.Unlock() if lastChange != stashedLC { t.Error("wrong consensus change ID was loaded:", hdbt.hdb.lastChange) } // Check that AllHosts was loaded. h1, ok0 := hdbt.hdb.hostTree.Select(host1.PublicKey) h2, ok1 := hdbt.hdb.hostTree.Select(host2.PublicKey) h3, ok2 := hdbt.hdb.hostTree.Select(host3.PublicKey) if !ok0 || !ok1 || !ok2 || len(hdbt.hdb.hostTree.All()) != 3 { t.Error("allHosts was not restored properly", ok0, ok1, ok2, len(hdbt.hdb.hostTree.All())) } if h1.FirstSeen != 1 { t.Error("h1 FirstSeen loaded incorrectly") } if h2.FirstSeen != 2 { t.Error("h2 FirstSeen loaded incorrectly") } // h3's FirstSeen of 3 exceeds the hdb height of 2, so load() should have // capped it to the hdb height. if h3.FirstSeen != 2 { t.Error("h3 FirstSeen was not capped to the hdb height on load") } } // TestRescan tests that the hostdb will rescan the blockchain properly, picking // up new hosts which appear in an alternate past. func TestRescan(t *testing.T) { if testing.Short() { t.SkipNow() } _, err := newHDBTester(t.Name()) if err != nil { t.Fatal(err) } t.Skip("create two consensus sets with blocks + announcements") } Sia-1.3.0/modules/renter/hostdb/scan.go000066400000000000000000000267671313565667000177610ustar00rootroot00000000000000package hostdb // scan.go contains the functions which periodically scan the list of all hosts // to see which hosts are online or offline, and to get any updates to the // settings of the hosts. import ( "net" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/fastrand" ) // queueScan will add a host to the queue to be scanned. func (hdb *HostDB) queueScan(entry modules.HostDBEntry) { // If this entry is already in the scan pool, we can return immediately. _, exists := hdb.scanMap[entry.PublicKey.String()] if exists { return } // Add the entry to a waitlist, then check if any thread is currently // emptying the waitlist. If not, spawn a thread to empty the waitlist.
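// Sketch of the bookkeeping used below (no new behavior, names as in the
// code): scanMap is a dedupe set keyed by public key, scanList is the FIFO
// queue of entries to scan, and scanWait records whether a drainer
// goroutine is already running:
//
//	hdb.scanMap[pk] = struct{}{}               // dedupe set
//	hdb.scanList = append(hdb.scanList, entry) // FIFO queue
//	if !hdb.scanWait { /* spawn the drainer */ }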
hdb.scanMap[entry.PublicKey.String()] = struct{}{} hdb.scanList = append(hdb.scanList, entry) if hdb.scanWait { // Another thread is emptying the scan list, nothing to worry about. return } // Sanity check - the scan map and the scan list should have the same // length. if build.DEBUG && len(hdb.scanMap) > len(hdb.scanList)+scanningThreads { hdb.log.Critical("The hostdb scan map has seemingly grown too large:", len(hdb.scanMap), len(hdb.scanList), scanningThreads) } hdb.scanWait = true go func() { // Nobody is emptying the scan list, volunteer. if hdb.tg.Add() != nil { // Hostdb is shutting down, don't spin up another thread. It is // okay to leave scanWait set to true as that will not affect // shutdown. return } defer hdb.tg.Done() for { hdb.mu.Lock() if len(hdb.scanList) == 0 { // Scan list is empty, can exit. Let the world know that nobody // is emptying the scan list anymore. hdb.scanWait = false hdb.mu.Unlock() return } // Get the next host, shrink the scan list. entry := hdb.scanList[0] hdb.scanList = hdb.scanList[1:] delete(hdb.scanMap, entry.PublicKey.String()) scansRemaining := len(hdb.scanList) // Grab the most recent entry for this host. recentEntry, exists := hdb.hostTree.Select(entry.PublicKey) if exists { entry = recentEntry } hdb.mu.Unlock() // Block while waiting for an opening in the scan pool. hdb.log.Debugf("Sending host %v for scan, %v hosts remain", entry.PublicKey.String(), scansRemaining) select { case hdb.scanPool <- entry: // iterate again case <-hdb.tg.StopChan(): // quit return } } }() } // updateEntry updates an entry in the hostdb after a scan has taken place. // // CAUTION: This function will automatically add multiple entries to a new host // to give that host some base uptime. This makes this function co-dependent // with the host weight functions. Adjustment of the host weight functions need // to keep this function in mind, and vice-versa. func (hdb *HostDB) updateEntry(entry modules.HostDBEntry, netErr error) { // If the scan failed because we don't have Internet access, toss out this update. if netErr != nil && !hdb.online { return } // Grab the host from the host tree. newEntry, exists := hdb.hostTree.Select(entry.PublicKey) if exists { newEntry.HostExternalSettings = entry.HostExternalSettings } else { newEntry = entry } // Add the datapoints for the scan. if len(newEntry.ScanHistory) < 2 { // Add two scans to the scan history. Two are needed because the scans // are forward looking, but we want this first scan to represent as // much as one week of uptime or downtime. earliestStartTime := time.Now().Add(time.Hour * 7 * 24 * -1) // Permit up to a week of starting uptime or downtime. suggestedStartTime := time.Now().Add(time.Minute * 10 * time.Duration(hdb.blockHeight-entry.FirstSeen+1) * -1) // Add one to the FirstSeen in case FirstSeen is this block, guarantees incrementing order. if suggestedStartTime.Before(earliestStartTime) { suggestedStartTime = earliestStartTime } newEntry.ScanHistory = modules.HostDBScans{ {Timestamp: suggestedStartTime, Success: netErr == nil}, {Timestamp: time.Now(), Success: netErr == nil}, } } else { if newEntry.ScanHistory[len(newEntry.ScanHistory)-1].Success && netErr != nil { hdb.log.Debugf("Host %v is being downgraded from an online host to an offline host: %v\n", newEntry.PublicKey.String(), netErr) } // Make sure that the current time is after the timestamp of the // previous scan. It may not be if the system clock has changed. This // will prevent the sort-check sanity checks from triggering. 
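// Illustrative example: if the system clock moved backwards so that
// time.Now() is not after the last recorded scan, the new scan below is
// stamped one second after the previous timestamp instead, keeping
// ScanHistory strictly ordered.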
newTimestamp := time.Now() prevTimestamp := newEntry.ScanHistory[len(newEntry.ScanHistory)-1].Timestamp if !newTimestamp.After(prevTimestamp) { newTimestamp = prevTimestamp.Add(time.Second) } // Before appending, make sure that the scan we just performed is // timestamped after the previous scan performed. It may not be if the // system clock has changed. newEntry.ScanHistory = append(newEntry.ScanHistory, modules.HostDBScan{Timestamp: newTimestamp, Success: netErr == nil}) } // Check whether any of the recent scans demonstrate uptime. The pruning and // compression of the history ensure that there are only relatively recent // scans represented. var recentUptime bool for _, scan := range newEntry.ScanHistory { if scan.Success { recentUptime = true } } // If the host has been offline for too long, delete the host from the // hostdb. Only delete if there have been enough scans over a long enough // period to be confident that the host really is offline for good. if time.Now().Sub(newEntry.ScanHistory[0].Timestamp) > maxHostDowntime && !recentUptime && len(newEntry.ScanHistory) >= minScans { err := hdb.hostTree.Remove(newEntry.PublicKey) if err != nil { hdb.log.Println("ERROR: unable to remove host entry which has had a ton of downtime:", err) } // The function should terminate here as no more interaction is needed // with this host. return } // Compress any old scans into the historic values. for len(newEntry.ScanHistory) > minScans && time.Now().Sub(newEntry.ScanHistory[0].Timestamp) > maxHostDowntime { timePassed := newEntry.ScanHistory[1].Timestamp.Sub(newEntry.ScanHistory[0].Timestamp) if newEntry.ScanHistory[0].Success { newEntry.HistoricUptime += timePassed } else { newEntry.HistoricDowntime += timePassed } newEntry.ScanHistory = newEntry.ScanHistory[1:] } // Add the updated entry if !exists { err := hdb.hostTree.Insert(newEntry) if err != nil { hdb.log.Println("ERROR: unable to insert entry which was thought to be new:", err) } else { hdb.log.Debugf("Adding host %v to the hostdb. Net error: %v\n", newEntry.PublicKey.String(), netErr) } } else { err := hdb.hostTree.Modify(newEntry) if err != nil { hdb.log.Println("ERROR: unable to modify entry which is thought to exist:", err) } else { hdb.log.Debugf("Updating host %v in the hostdb. Net error: %v\n", newEntry.PublicKey.String(), netErr) } } } // managedScanHost will connect to a host and grab the settings, verifying // uptime and updating to the host's preferences. func (hdb *HostDB) managedScanHost(entry modules.HostDBEntry) { // Request settings from the queued host entry.
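// The scan below follows a simple request/response shape (summarized as a
// sketch; hostRequestTimeout, hostScanDeadline, and maxSettingsLen are
// package constants defined elsewhere):
//
//	1. dial the host's TCP address
//	2. write the modules.RPCSettings identifier
//	3. read back HostExternalSettings signed by the host's public key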
netAddr := entry.NetAddress pubKey := entry.PublicKey hdb.log.Debugf("Scanning host %v at %v", pubKey, netAddr) // Update historic interactions of entry if necessary hdb.mu.RLock() updateHostHistoricInteractions(&entry, hdb.blockHeight) hdb.mu.RUnlock() var settings modules.HostExternalSettings err := func() error { dialer := &net.Dialer{ Cancel: hdb.tg.StopChan(), Timeout: hostRequestTimeout, } conn, err := dialer.Dial("tcp", string(netAddr)) if err != nil { return err } connCloseChan := make(chan struct{}) go func() { select { case <-hdb.tg.StopChan(): case <-connCloseChan: } conn.Close() }() defer close(connCloseChan) conn.SetDeadline(time.Now().Add(hostScanDeadline)) err = encoding.WriteObject(conn, modules.RPCSettings) if err != nil { return err } var pubkey crypto.PublicKey copy(pubkey[:], pubKey.Key) return crypto.ReadSignedObject(conn, &settings, maxSettingsLen, pubkey) }() if err != nil { hdb.log.Debugf("Scan of host at %v failed: %v", netAddr, err) if hdb.online { // Increment failed host interactions entry.RecentFailedInteractions++ } } else { hdb.log.Debugf("Scan of host at %v succeeded.", netAddr) entry.HostExternalSettings = settings // Increment successful host interactions entry.RecentSuccessfulInteractions++ } // Update the host tree to have a new entry, including the new error. Then // delete the entry from the scan map as the scan is complete. hdb.mu.Lock() hdb.updateEntry(entry, err) hdb.mu.Unlock() } // threadedProbeHosts pulls hosts from the scan pool and runs a scan on them. func (hdb *HostDB) threadedProbeHosts() { err := hdb.tg.Add() if err != nil { return } defer hdb.tg.Done() for { select { case <-hdb.tg.StopChan(): return case hostEntry := <-hdb.scanPool: // Block the scan until the host is online. for { hdb.mu.RLock() online := hdb.online hdb.mu.RUnlock() if online { break } // Check again in 30 seconds. select { case <-time.After(time.Second * 30): continue case <-hdb.tg.StopChan(): return } } // There appears to be internet connectivity, continue with the // scan. hdb.managedScanHost(hostEntry) } } } // threadedScan is an ongoing function which will query the full set of hosts // every few hours to see who is online and available for uploading. func (hdb *HostDB) threadedScan() { err := hdb.tg.Add() if err != nil { return } defer hdb.tg.Done() for { // Set up a scan for the hostCheckupQuantity most valuable hosts in the // hostdb. Hosts that fail their scans will be docked significantly, // pushing them further back in the hierarchy, ensuring that for the // most part only online hosts are getting scanned unless there are // fewer than hostCheckupQuantity of them. // Grab a set of hosts to scan, grab hosts that are active, inactive, // and offline to get high diversity. var onlineHosts, offlineHosts []modules.HostDBEntry allHosts := hdb.hostTree.All() for i := len(allHosts) - 1; i >= 0; i-- { if len(onlineHosts) >= hostCheckupQuantity && len(offlineHosts) >= hostCheckupQuantity { break } // Figure out if the host is online or offline. host := allHosts[i] online := len(host.ScanHistory) > 0 && host.ScanHistory[len(host.ScanHistory)-1].Success if online && len(onlineHosts) < hostCheckupQuantity { onlineHosts = append(onlineHosts, host) } else if !online && len(offlineHosts) < hostCheckupQuantity { offlineHosts = append(offlineHosts, host) } } // Queue the scans for each host.
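// Illustrative note: a host counts as online when its most recent scan
// succeeded, so the loop above gathers up to hostCheckupQuantity hosts of
// each class (the constant's value lives in the package constants and is
// not shown here).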
hdb.log.Println("Performing scan on", len(onlineHosts), "online hosts and", len(offlineHosts), "offline hosts.") hdb.mu.Lock() for _, host := range onlineHosts { hdb.queueScan(host) } for _, host := range offlineHosts { hdb.queueScan(host) } hdb.mu.Unlock() // Sleep for a random amount of time before doing another round of // scanning. The minimums and maximums keep the scan time reasonable, // while the randomness prevents the scanning from always happening at // the same time of day or week. sleepTime := defaultScanSleep sleepRange := int(maxScanSleep - minScanSleep) sleepTime = minScanSleep + time.Duration(fastrand.Intn(sleepRange)) // Sleep until it's time for the next scan cycle. select { case <-hdb.tg.StopChan(): return case <-time.After(sleepTime): } } } Sia-1.3.0/modules/renter/hostdb/scan_test.go000066400000000000000000000152601313565667000210130ustar00rootroot00000000000000package hostdb import ( "errors" "testing" "time" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // TestUpdateEntry checks that the various components of updateEntry are // working correctly. func TestUpdateEntry(t *testing.T) { if testing.Short() { t.SkipNow() } hdbt, err := newHDBTesterDeps(t.Name(), disableScanLoopDeps{}) if err != nil { t.Fatal(err) } // Test 1: try calling updateEntry with a blank host. Result should be a // host with len 2 scan history. someErr := errors.New("testing err") entry1 := modules.HostDBEntry{ PublicKey: types.SiaPublicKey{ Key: []byte{1}, }, } entry2 := modules.HostDBEntry{ PublicKey: types.SiaPublicKey{ Key: []byte{2}, }, } // Try inserting the first entry. Result in the host tree should be a host // with a scan history length of two. hdbt.hdb.updateEntry(entry1, nil) updatedEntry, exists := hdbt.hdb.hostTree.Select(entry1.PublicKey) if !exists { t.Fatal("Entry did not get inserted into the host tree") } if len(updatedEntry.ScanHistory) != 2 { t.Fatal("new entry was not given two scanning history entries") } if !updatedEntry.ScanHistory[0].Timestamp.Before(updatedEntry.ScanHistory[1].Timestamp) { t.Error("new entry was not provided with a sorted scanning history") } if !updatedEntry.ScanHistory[0].Success || !updatedEntry.ScanHistory[1].Success { t.Error("new entry was not given success values despite a successful scan") } // Try inserting the second entry, but with an error. Results should largely // be the same. hdbt.hdb.updateEntry(entry2, someErr) updatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey) if !exists { t.Fatal("Entry did not get inserted into the host tree") } if len(updatedEntry.ScanHistory) != 2 { t.Fatal("new entry was not given two scanning history entries") } if !updatedEntry.ScanHistory[0].Timestamp.Before(updatedEntry.ScanHistory[1].Timestamp) { t.Error("new entry was not provided with a sorted scanning history") } if updatedEntry.ScanHistory[0].Success || updatedEntry.ScanHistory[1].Success { t.Error("new entry was not given success values despite a successful scan") } // Insert the first entry twice more, with no error. There should be 4 // entries, and the timestamps should be strictly increasing. 
hdbt.hdb.updateEntry(entry1, nil) hdbt.hdb.updateEntry(entry1, nil) updatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey) if !exists { t.Fatal("Entry did not get inserted into the host tree") } if len(updatedEntry.ScanHistory) != 4 { t.Fatal("entry does not have four scanning history entries") } if !updatedEntry.ScanHistory[1].Timestamp.Before(updatedEntry.ScanHistory[2].Timestamp) { t.Error("new entry was not provided with a sorted scanning history") } if !updatedEntry.ScanHistory[2].Timestamp.Before(updatedEntry.ScanHistory[3].Timestamp) { t.Error("new entry was not provided with a sorted scanning history") } if !updatedEntry.ScanHistory[2].Success || !updatedEntry.ScanHistory[3].Success { t.Error("new scans were not recorded as successful despite successful scans") } // Add a non-successful scan and verify that it is registered properly. hdbt.hdb.updateEntry(entry1, someErr) updatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey) if !exists { t.Fatal("Entry did not get inserted into the host tree") } if len(updatedEntry.ScanHistory) != 5 { t.Fatal("entry does not have five scanning history entries") } if !updatedEntry.ScanHistory[3].Success || updatedEntry.ScanHistory[4].Success { t.Error("the failed scan was not recorded correctly") } // Prefix entry2 with a scan whose zero-value timestamp is from more than // maxHostDowntime ago. At fewer than minScans total, the host should not be // deleted upon update. updatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey) if !exists { t.Fatal("Entry did not get inserted into the host tree") } updatedEntry.ScanHistory = append([]modules.HostDBScan{{}}, updatedEntry.ScanHistory...) err = hdbt.hdb.hostTree.Modify(updatedEntry) if err != nil { t.Fatal(err) } // Entry should still exist. updatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey) if !exists { t.Fatal("Entry did not get inserted into the host tree") } // Add enough entries to get to minScans total length. When that length is // reached, the entry should be deleted. for i := len(updatedEntry.ScanHistory); i < minScans; i++ { hdbt.hdb.updateEntry(entry2, someErr) } // The entry should no longer exist in the hostdb, wiped for being offline. updatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey) if exists { t.Fatal("entry should have been purged for being offline for too long") } // Trigger compression on entry1 by adding a past scan and then adding // unsuccessful scans until compression happens. updatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey) if !exists { t.Fatal("Entry did not get inserted into the host tree") } updatedEntry.ScanHistory = append([]modules.HostDBScan{{Timestamp: time.Now().Add(maxHostDowntime * -1).Add(time.Hour * -1)}}, updatedEntry.ScanHistory...) err = hdbt.hdb.hostTree.Modify(updatedEntry) if err != nil { t.Fatal(err) } for i := len(updatedEntry.ScanHistory); i <= minScans; i++ { hdbt.hdb.updateEntry(entry1, someErr) } // The result should be compression, and not the entry getting deleted. updatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey) if !exists { t.Fatal("entry should not have been purged for being offline for too long") } if len(updatedEntry.ScanHistory) != minScans { t.Error("expecting a different number of scans", len(updatedEntry.ScanHistory)) } if updatedEntry.HistoricDowntime == 0 { t.Error("host is not reporting historic downtime") } if updatedEntry.HistoricUptime != 0 { t.Error("host is reporting historic uptime unexpectedly") } // Repeat triggering compression, but with uptime this time.
updatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey) if !exists { t.Fatal("Entry did not get inserted into the host tree") } updatedEntry.ScanHistory = append([]modules.HostDBScan{{Success: true, Timestamp: time.Now().Add(time.Hour * 24 * 11 * -1)}}, updatedEntry.ScanHistory...) err = hdbt.hdb.hostTree.Modify(updatedEntry) if err != nil { t.Fatal(err) } hdbt.hdb.updateEntry(entry1, someErr) // The result should be compression, and not the entry getting deleted. updatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey) if !exists { t.Fatal("entry should not have been purged for being offline for too long") } if len(updatedEntry.ScanHistory) != minScans+1 { t.Error("expecting a different number of scans") } if updatedEntry.HistoricUptime == 0 { t.Error("host not reporting historic uptime?") } } Sia-1.3.0/modules/renter/hostdb/update.go000066400000000000000000000103431313565667000203070ustar00rootroot00000000000000package hostdb import ( "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // findHostAnnouncements returns a list of the host announcements found within // a given block. No check is made to see that the ip address found in the // announcement is actually a valid ip address. func findHostAnnouncements(b types.Block) (announcements []modules.HostDBEntry) { for _, t := range b.Transactions { // the HostAnnouncement must be prefaced by the standard host // announcement string for _, arb := range t.ArbitraryData { addr, pubKey, err := modules.DecodeAnnouncement(arb) if err != nil { continue } // Add the announcement to the slice being returned. var host modules.HostDBEntry host.NetAddress = addr host.PublicKey = pubKey announcements = append(announcements, host) } } return } // insertBlockchainHost adds a host entry to the state. The host will be inserted // into the set of all hosts, and if it is online and responding to requests it // will be put into the list of active hosts. func (hdb *HostDB) insertBlockchainHost(host modules.HostDBEntry) { // Remove garbage hosts and local hosts (but allow local hosts in testing). if err := host.NetAddress.IsValid(); err != nil { hdb.log.Debugf("WARN: host '%v' has an invalid NetAddress: %v", host.NetAddress, err) return } // Ignore all local hosts announced through the blockchain. if build.Release == "standard" && host.NetAddress.IsLocal() { return } // Make sure the host gets into the host tree so it does not get dropped if // shutdown occurs before a scan can be performed. oldEntry, exists := hdb.hostTree.Select(host.PublicKey) if exists { // Replace the netaddress with the most recently announced netaddress. // Also replace the FirstSeen value with the current block height if // the first seen value has been set to zero (no hosts actually have a // first seen height of zero, but due to rescans hosts can end up with // a zero-value FirstSeen field. oldEntry.NetAddress = host.NetAddress if oldEntry.FirstSeen == 0 { oldEntry.FirstSeen = hdb.blockHeight } err := hdb.hostTree.Modify(oldEntry) if err != nil { hdb.log.Println("ERROR: unable to modify host entry of host tree after a blockchain scan:", err) } } else { host.FirstSeen = hdb.blockHeight err := hdb.hostTree.Insert(host) if err != nil { hdb.log.Println("ERROR: unable to insert host entry into host tree after a blockchain scan:", err) } } // Add the host to the scan queue. hdb.queueScan(host) } // ProcessConsensusChange will be called by the consensus set every time there // is a change in the blockchain. 
Updates will always be called in order. func (hdb *HostDB) ProcessConsensusChange(cc modules.ConsensusChange) { hdb.mu.Lock() defer hdb.mu.Unlock() // Update the hostdb's understanding of the block height. for _, block := range cc.RevertedBlocks { // Only doing the block check if the height is above zero saves hashing // and saves a nontrivial amount of time during IBD. if hdb.blockHeight > 0 || block.ID() != types.GenesisID { hdb.blockHeight-- } else if hdb.blockHeight != 0 { // Sanity check - if the current block is the genesis block, the // hostdb height should be set to zero. hdb.log.Critical("Hostdb has detected a genesis block, but the height of the hostdb is set to ", hdb.blockHeight) hdb.blockHeight = 0 } } for _, block := range cc.AppliedBlocks { // Only doing the block check if the height is above zero saves hashing // and saves a nontrivial amount of time during IBD. if hdb.blockHeight > 0 || block.ID() != types.GenesisID { hdb.blockHeight++ } else if hdb.blockHeight != 0 { // Sanity check - if the current block is the genesis block, the // hostdb height should be set to zero. hdb.log.Critical("Hostdb has detected a genesis block, but the height of the hostdb is set to ", hdb.blockHeight) hdb.blockHeight = 0 } } // Add hosts announced in blocks that were applied. for _, block := range cc.AppliedBlocks { for _, host := range findHostAnnouncements(block) { hdb.log.Debugln("Found a host in a host announcement:", host.NetAddress, host.PublicKey) hdb.insertBlockchainHost(host) } } hdb.lastChange = cc.ID } Sia-1.3.0/modules/renter/hostdb/update_test.go000066400000000000000000000027201313565667000213460ustar00rootroot00000000000000package hostdb import ( "testing" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // makeSignedAnnouncement creates a []byte that contains an encoded and signed // host announcement for the given net address. func makeSignedAnnouncement(na modules.NetAddress) ([]byte, error) { sk, pk := crypto.GenerateKeyPair() spk := types.SiaPublicKey{ Algorithm: types.SignatureEd25519, Key: pk[:], } return modules.CreateAnnouncement(na, spk, sk) } // TestFindHostAnnouncements probes the findHostAnnouncements function func TestFindHostAnnouncements(t *testing.T) { annBytes, err := makeSignedAnnouncement("foo.com:1234") if err != nil { t.Fatal(err) } b := types.Block{ Transactions: []types.Transaction{ { ArbitraryData: [][]byte{annBytes}, }, }, } announcements := findHostAnnouncements(b) if len(announcements) != 1 { t.Error("host announcement not found in block") } // Try with an altered prefix b.Transactions[0].ArbitraryData[0][0]++ announcements = findHostAnnouncements(b) if len(announcements) != 0 { t.Error("host announcement found when there was an invalid prefix") } b.Transactions[0].ArbitraryData[0][0]-- // Try with an invalid host encoding. 
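// (Index 17 sits just past the 16-byte announcement prefix, so the prefix
// check still passes and the corruption has to be caught while decoding
// the announcement body itself; the byte-0 mutation above exercised the
// prefix path instead.)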
b.Transactions[0].ArbitraryData[0][17]++ announcements = findHostAnnouncements(b) if len(announcements) != 0 { t.Error("host announcement found when there was an invalid encoding of a host announcement") } } Sia-1.3.0/modules/renter/persist.go000066400000000000000000000235511313565667000172400ustar00rootroot00000000000000package renter import ( "bytes" "compress/gzip" "encoding/base64" "errors" "io" "os" "path/filepath" "strconv" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" ) const ( PersistFilename = "renter.json" ShareExtension = ".sia" logFile = modules.RenterDir + ".log" ) var ( ErrNoNicknames = errors.New("at least one nickname must be supplied") ErrNonShareSuffix = errors.New("suffix of file must be " + ShareExtension) ErrBadFile = errors.New("not a .sia file") ErrIncompatible = errors.New("file is not compatible with current version") shareHeader = [15]byte{'S', 'i', 'a', ' ', 'S', 'h', 'a', 'r', 'e', 'd', ' ', 'F', 'i', 'l', 'e'} shareVersion = "0.4" saveMetadata = persist.Metadata{ Header: "Renter Persistence", Version: "0.4", } ) // MarshalSia implements the encoding.SiaMarshaller interface, writing the // file data to w. func (f *file) MarshalSia(w io.Writer) error { enc := encoding.NewEncoder(w) // encode easy fields err := enc.EncodeAll( f.name, f.size, f.masterKey, f.pieceSize, f.mode, ) if err != nil { return err } // COMPATv0.4.3 - encode the bytesUploaded and chunksUploaded fields // TODO: the resulting .sia file may confuse old clients. err = enc.EncodeAll(f.pieceSize*f.numChunks()*uint64(f.erasureCode.NumPieces()), f.numChunks()) if err != nil { return err } // encode erasureCode switch code := f.erasureCode.(type) { case *rsCode: err = enc.EncodeAll( "Reed-Solomon", uint64(code.dataPieces), uint64(code.numPieces-code.dataPieces), ) if err != nil { return err } default: if build.DEBUG { panic("unknown erasure code") } return errors.New("unknown erasure code") } // encode contracts if err := enc.Encode(uint64(len(f.contracts))); err != nil { return err } for _, c := range f.contracts { if err := enc.Encode(c); err != nil { return err } } return nil } // UnmarshalSia implements the encoding.SiaUnmarshaller interface, // reconstructing a file from the encoded bytes read from r. func (f *file) UnmarshalSia(r io.Reader) error { dec := encoding.NewDecoder(r) // COMPATv0.4.3 - decode bytesUploaded and chunksUploaded into dummy vars. var bytesUploaded, chunksUploaded uint64 // Decode easy fields. err := dec.DecodeAll( &f.name, &f.size, &f.masterKey, &f.pieceSize, &f.mode, &bytesUploaded, &chunksUploaded, ) if err != nil { return err } // Decode erasure coder. var codeType string if err := dec.Decode(&codeType); err != nil { return err } switch codeType { case "Reed-Solomon": var nData, nParity uint64 err = dec.DecodeAll( &nData, &nParity, ) if err != nil { return err } rsc, err := NewRSCode(int(nData), int(nParity)) if err != nil { return err } f.erasureCode = rsc default: return errors.New("unrecognized erasure code type: " + codeType) } // Decode contracts. var nContracts uint64 if err := dec.Decode(&nContracts); err != nil { return err } f.contracts = make(map[types.FileContractID]fileContract) var contract fileContract for i := uint64(0); i < nContracts; i++ { if err := dec.Decode(&contract); err != nil { return err } f.contracts[contract.ID] = contract } return nil } // saveFile saves a file to the renter directory. 
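// The write below goes through persist.NewSafeFile, which stages the data
// in a temporary file that only replaces the real .sia file when
// CommitSync is called, so a crash mid-write should not clobber an
// existing share. A sketch of a typical (hypothetical) call site,
// assuming a populated *file f:
//
//	if err := r.saveFile(f); err != nil {
//		r.log.Println("WARN: could not save file:", err)
//	}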
func (r *Renter) saveFile(f *file) error { // Create directory structure specified in nickname. fullPath := filepath.Join(r.persistDir, f.name+ShareExtension) err := os.MkdirAll(filepath.Dir(fullPath), 0700) if err != nil { return err } // Open SafeFile handle. handle, err := persist.NewSafeFile(fullPath) if err != nil { return err } defer handle.Close() // Write file data. err = shareFiles([]*file{f}, handle) if err != nil { return err } // Commit the SafeFile. return handle.CommitSync() } // saveSync stores the current renter data to disk and then syncs to disk. func (r *Renter) saveSync() error { data := struct { Tracking map[string]trackedFile }{r.tracking} return persist.SaveJSON(saveMetadata, data, filepath.Join(r.persistDir, PersistFilename)) } // load fetches the saved renter data from disk. func (r *Renter) load() error { // Recursively load all files found in renter directory. Errors // encountered during loading are logged, but are not considered fatal. err := filepath.Walk(r.persistDir, func(path string, info os.FileInfo, err error) error { // This error is non-nil if filepath.Walk couldn't stat a file or // folder. if err != nil { r.log.Println("WARN: could not stat file or folder during walk:", err) return nil } // Skip folders and non-sia files. if info.IsDir() || filepath.Ext(path) != ShareExtension { return nil } // Open the file. file, err := os.Open(path) if err != nil { r.log.Println("ERROR: could not open .sia file:", err) return nil } defer file.Close() // Load the file contents into the renter. _, err = r.loadSharedFiles(file) if err != nil { r.log.Println("ERROR: could not load .sia file:", err) return nil } return nil }) if err != nil { return err } // Load the tracking set from the persist file. (Repairing is decoded // only for COMPATv0.4.8.) data := struct { Tracking map[string]trackedFile Repairing map[string]string // COMPATv0.4.8 }{} err = persist.LoadJSON(saveMetadata, &data, filepath.Join(r.persistDir, PersistFilename)) if err != nil { return err } if data.Tracking != nil { r.tracking = data.Tracking } return nil } // shareFiles writes the specified files to w. First a header is written, // followed by the gzipped concatenation of each file. func shareFiles(files []*file, w io.Writer) error { // Write header. err := encoding.NewEncoder(w).EncodeAll( shareHeader, shareVersion, uint64(len(files)), ) if err != nil { return err } // Create compressor. zip, _ := gzip.NewWriterLevel(w, gzip.BestSpeed) enc := encoding.NewEncoder(zip) // Encode each file. for _, f := range files { err = enc.Encode(f) if err != nil { return err } } return zip.Close() } // ShareFiles saves the specified files to shareDest. func (r *Renter) ShareFiles(nicknames []string, shareDest string) error { lockID := r.mu.RLock() defer r.mu.RUnlock(lockID) // TODO: consider just appending the proper extension. if filepath.Ext(shareDest) != ShareExtension { return ErrNonShareSuffix } handle, err := os.Create(shareDest) if err != nil { return err } defer handle.Close() // Load files from renter. files := make([]*file, len(nicknames)) for i, name := range nicknames { f, exists := r.files[name] if !exists { return ErrUnknownPath } files[i] = f } err = shareFiles(files, handle) if err != nil { os.Remove(shareDest) return err } return nil } // ShareFilesAscii returns the specified files in ASCII format. func (r *Renter) ShareFilesAscii(nicknames []string) (string, error) { lockID := r.mu.RLock() defer r.mu.RUnlock(lockID) // Load files from renter.
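// (As in ShareFiles above, any unknown nickname aborts the whole share
// with ErrUnknownPath rather than emitting a partial set.)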
files := make([]*file, len(nicknames)) for i, name := range nicknames { f, exists := r.files[name] if !exists { return "", ErrUnknownPath } files[i] = f } buf := new(bytes.Buffer) err := shareFiles(files, base64.NewEncoder(base64.URLEncoding, buf)) if err != nil { return "", err } return buf.String(), nil } // loadSharedFiles reads .sia data from reader and registers the contained // files in the renter. It returns the nicknames of the loaded files. func (r *Renter) loadSharedFiles(reader io.Reader) ([]string, error) { // read header var header [15]byte var version string var numFiles uint64 err := encoding.NewDecoder(reader).DecodeAll( &header, &version, &numFiles, ) if err != nil { return nil, err } else if header != shareHeader { return nil, ErrBadFile } else if version != shareVersion { return nil, ErrIncompatible } // Create decompressor. unzip, err := gzip.NewReader(reader) if err != nil { return nil, err } dec := encoding.NewDecoder(unzip) // Read each file. files := make([]*file, numFiles) for i := range files { files[i] = new(file) err := dec.Decode(files[i]) if err != nil { return nil, err } // Make sure the file's name does not conflict with existing files. dupCount := 0 origName := files[i].name for { _, exists := r.files[files[i].name] if !exists { break } dupCount++ files[i].name = origName + "_" + strconv.Itoa(dupCount) } } // Add files to renter. names := make([]string, numFiles) for i, f := range files { r.files[f.name] = f names[i] = f.name } // Save the files. for _, f := range files { r.saveFile(f) } return names, nil } // initPersist handles all of the persistence initialization, such as creating // the persistence directory and starting the logger. func (r *Renter) initPersist() error { // Create the persist directory if it does not yet exist. err := os.MkdirAll(r.persistDir, 0700) if err != nil { return err } // Initialize the logger. r.log, err = persist.NewFileLogger(filepath.Join(r.persistDir, logFile)) if err != nil { return err } // Load the prior persistence structures. err = r.load() if err != nil && !os.IsNotExist(err) { return err } return nil } // LoadSharedFiles loads a .sia file into the renter. It returns the nicknames // of the loaded files. func (r *Renter) LoadSharedFiles(filename string) ([]string, error) { lockID := r.mu.Lock() defer r.mu.Unlock(lockID) file, err := os.Open(filename) if err != nil { return nil, err } defer file.Close() return r.loadSharedFiles(file) } // LoadSharedFilesAscii loads an ASCII-encoded .sia file into the renter. It // returns the nicknames of the loaded files. func (r *Renter) LoadSharedFilesAscii(asciiSia string) ([]string, error) { lockID := r.mu.Lock() defer r.mu.Unlock(lockID) dec := base64.NewDecoder(base64.URLEncoding, bytes.NewBufferString(asciiSia)) return r.loadSharedFiles(dec) } Sia-1.3.0/modules/renter/persist_test.go000066400000000000000000000176161313565667000203020ustar00rootroot00000000000000package renter import ( "bytes" "fmt" "os" "path/filepath" "strconv" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/fastrand" ) // newTestingFile initializes a file object with random parameters.
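// The randomness is deliberately small: the name is derived from a single
// byte, so two generated files can collide on name, and tests that need
// distinct names must retry (as TestRenterSaveLoad does below). The
// Reed-Solomon parameters land in the 1..10 range purely to keep the
// tests fast.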
func newTestingFile() *file { data := fastrand.Bytes(8) nData := fastrand.Intn(10) nParity := fastrand.Intn(10) rsc, _ := NewRSCode(nData+1, nParity+1) return &file{ name: "testfile-" + strconv.Itoa(int(data[0])), size: encoding.DecUint64(data[1:5]), masterKey: crypto.GenerateTwofishKey(), erasureCode: rsc, pieceSize: encoding.DecUint64(data[6:8]), } } // equalFiles is a helper function that compares two files for equality. func equalFiles(f1, f2 *file) error { if f1 == nil || f2 == nil { return fmt.Errorf("one or both files are nil") } if f1.name != f2.name { return fmt.Errorf("names do not match: %v %v", f1.name, f2.name) } if f1.size != f2.size { return fmt.Errorf("sizes do not match: %v %v", f1.size, f2.size) } if f1.masterKey != f2.masterKey { return fmt.Errorf("keys do not match: %v %v", f1.masterKey, f2.masterKey) } if f1.pieceSize != f2.pieceSize { return fmt.Errorf("pieceSizes do not match: %v %v", f1.pieceSize, f2.pieceSize) } return nil } // TestFileMarshalling tests the MarshalSia and UnmarshalSia functions of the // file type. func TestFileMarshalling(t *testing.T) { savedFile := newTestingFile() buf := new(bytes.Buffer) if err := savedFile.MarshalSia(buf); err != nil { t.Fatal(err) } loadedFile := new(file) err := loadedFile.UnmarshalSia(buf) if err != nil { t.Fatal(err) } err = equalFiles(savedFile, loadedFile) if err != nil { t.Fatal(err) } } // TestFileShareLoad tests the sharing/loading functions of the renter. func TestFileShareLoad(t *testing.T) { if testing.Short() { t.SkipNow() } rt, err := newRenterTester(t.Name()) if err != nil { t.Fatal(err) } defer rt.Close() // Create a file and add it to the renter. savedFile := newTestingFile() id := rt.renter.mu.Lock() rt.renter.files[savedFile.name] = savedFile rt.renter.mu.Unlock(id) // Share .sia file to disk. path := filepath.Join(build.SiaTestingDir, "renter", t.Name(), "test.sia") err = rt.renter.ShareFiles([]string{savedFile.name}, path) if err != nil { t.Fatal(err) } // Remove the file from the renter. delete(rt.renter.files, savedFile.name) // Load the .sia file back into the renter. names, err := rt.renter.LoadSharedFiles(path) if err != nil { t.Fatal(err) } if len(names) != 1 || names[0] != savedFile.name { t.Fatal("nickname not loaded properly:", names) } err = equalFiles(rt.renter.files[savedFile.name], savedFile) if err != nil { t.Fatal(err) } // Share and load multiple files. savedFile2 := newTestingFile() rt.renter.files[savedFile2.name] = savedFile2 path = filepath.Join(build.SiaTestingDir, "renter", t.Name(), "test2.sia") err = rt.renter.ShareFiles([]string{savedFile.name, savedFile2.name}, path) if err != nil { t.Fatal(err) } // Remove the files from the renter. delete(rt.renter.files, savedFile.name) delete(rt.renter.files, savedFile2.name) names, err = rt.renter.LoadSharedFiles(path) if err != nil { t.Fatal(err) } if len(names) != 2 || (names[0] != savedFile2.name && names[1] != savedFile2.name) { t.Fatal("nicknames not loaded properly:", names) } err = equalFiles(rt.renter.files[savedFile.name], savedFile) if err != nil { t.Fatal(err) } err = equalFiles(rt.renter.files[savedFile2.name], savedFile2) if err != nil { t.Fatal(err) } } // TestFileShareLoadASCII tests the ASCII sharing/loading functions. func TestFileShareLoadASCII(t *testing.T) { if testing.Short() { t.SkipNow() } rt, err := newRenterTester(t.Name()) if err != nil { t.Fatal(err) } defer rt.Close() // Create a file and add it to the renter.
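// (This mirrors TestFileShareLoad above, but round-trips through the
// ASCII endpoints instead of a file on disk: ShareFilesAscii gzips the
// encoded files and wraps them in URL-safe base64, and
// LoadSharedFilesAscii must undo both layers.)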
savedFile := newTestingFile() id := rt.renter.mu.Lock() rt.renter.files[savedFile.name] = savedFile rt.renter.mu.Unlock(id) ascii, err := rt.renter.ShareFilesAscii([]string{savedFile.name}) if err != nil { t.Fatal(err) } // Remove the file from the renter. delete(rt.renter.files, savedFile.name) names, err := rt.renter.LoadSharedFilesAscii(ascii) if err != nil { t.Fatal(err) } if len(names) != 1 || names[0] != savedFile.name { t.Fatal("nickname not loaded properly") } err = equalFiles(rt.renter.files[savedFile.name], savedFile) if err != nil { t.Fatal(err) } } // TestRenterSaveLoad probes the save and load methods of the renter type. func TestRenterSaveLoad(t *testing.T) { if testing.Short() { t.SkipNow() } rt, err := newRenterTester(t.Name()) if err != nil { t.Fatal(err) } defer rt.Close() // Create and save some files var f1, f2, f3 *file f1 = newTestingFile() f2 = newTestingFile() f3 = newTestingFile() // names must not conflict for f2.name == f1.name || f2.name == f3.name { f2 = newTestingFile() } for f3.name == f1.name || f3.name == f2.name { f3 = newTestingFile() } rt.renter.saveFile(f1) rt.renter.saveFile(f2) rt.renter.saveFile(f3) err = rt.renter.saveSync() // save metadata if err != nil { t.Fatal(err) } // load should now load the files into memory. id := rt.renter.mu.Lock() err = rt.renter.load() rt.renter.mu.Unlock(id) if err != nil && !os.IsNotExist(err) { t.Fatal(err) } if err := equalFiles(f1, rt.renter.files[f1.name]); err != nil { t.Fatal(err) } if err := equalFiles(f2, rt.renter.files[f2.name]); err != nil { t.Fatal(err) } if err := equalFiles(f3, rt.renter.files[f3.name]); err != nil { t.Fatal(err) } } // TestRenterPaths checks that the renter properly handles nicknames // containing the path separator ("/"). func TestRenterPaths(t *testing.T) { if testing.Short() { t.SkipNow() } rt, err := newRenterTester(t.Name()) if err != nil { t.Fatal(err) } defer rt.Close() // Create and save some files. // The result of saving these files should be a directory containing: // foo.sia // foo/bar.sia // foo/bar/baz.sia f1 := newTestingFile() f1.name = "foo" f2 := newTestingFile() f2.name = "foo/bar" f3 := newTestingFile() f3.name = "foo/bar/baz" rt.renter.saveFile(f1) rt.renter.saveFile(f2) rt.renter.saveFile(f3) // Load the files into the renter. id := rt.renter.mu.Lock() err = rt.renter.load() rt.renter.mu.Unlock(id) if err != nil && !os.IsNotExist(err) { t.Fatal(err) } // Check that the files were loaded properly. if err := equalFiles(f1, rt.renter.files[f1.name]); err != nil { t.Fatal(err) } if err := equalFiles(f2, rt.renter.files[f2.name]); err != nil { t.Fatal(err) } if err := equalFiles(f3, rt.renter.files[f3.name]); err != nil { t.Fatal(err) } // To confirm that the file structure was preserved, we walk the renter // folder and emit the name of each .sia file encountered (filepath.Walk // is deterministic; it orders the files lexically). var walkStr string filepath.Walk(rt.renter.persistDir, func(path string, _ os.FileInfo, _ error) error { // capture only .sia files if filepath.Ext(path) != ".sia" { return nil } rel, _ := filepath.Rel(rt.renter.persistDir, path) // strip testdir prefix walkStr += rel return nil }) // walk will descend into foo/bar/, reading baz, bar, and finally foo expWalkStr := (f3.name + ".sia") + (f2.name + ".sia") + (f1.name + ".sia") if filepath.ToSlash(walkStr) != expWalkStr { t.Fatalf("Bad walk string: expected %v, got %v", expWalkStr, walkStr) } } // TestSiafileCompatibility tests that the renter is able to load v0.4.8 .sia files. 
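// The fixture also exercises the COMPATv0.4.3 branch of UnmarshalSia,
// which decodes the legacy bytesUploaded/chunksUploaded fields into dummy
// variables and discards them.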
func TestSiafileCompatibility(t *testing.T) { if testing.Short() { t.SkipNow() } rt, err := newRenterTester(t.Name()) if err != nil { t.Fatal(err) } defer rt.Close() // Load the compatibility file into the renter. path := filepath.Join("..", "..", "compatibility", "siafile_v0.4.8.sia") names, err := rt.renter.LoadSharedFiles(path) if err != nil { t.Fatal(err) } if len(names) != 1 || names[0] != "testfile-183" { t.Fatal("nickname not loaded properly:", names) } } Sia-1.3.0/modules/renter/proto/000077500000000000000000000000001313565667000163555ustar00rootroot00000000000000Sia-1.3.0/modules/renter/proto/consts.go000066400000000000000000000005111313565667000202120ustar00rootroot00000000000000package proto import ( "time" "github.com/NebulousLabs/Sia/build" ) var ( // connTimeout determines how long the dialer will wait // for a connect to complete connTimeout = build.Select(build.Var{ Dev: 10 * time.Second, Standard: 60 * time.Second, Testing: 5 * time.Second, }).(time.Duration) ) Sia-1.3.0/modules/renter/proto/downloader.go000066400000000000000000000150501313565667000210430ustar00rootroot00000000000000package proto import ( "errors" "net" "sync" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" ) // A Downloader retrieves sectors by calling the download RPC on a host. // Downloaders are NOT thread-safe; calls to Sector must be serialized. type Downloader struct { host modules.HostDBEntry contract modules.RenterContract // updated after each revision conn net.Conn closeChan chan struct{} once sync.Once hdb hostDB SaveFn revisionSaver } // Sector retrieves the sector with the specified Merkle root, and revises // the underlying contract to pay the host proportionally to the data // retrieved. func (hd *Downloader) Sector(root crypto.Hash) (_ modules.RenterContract, _ []byte, err error) { defer extendDeadline(hd.conn, time.Hour) // reset deadline when finished // calculate price sectorPrice := hd.host.DownloadBandwidthPrice.Mul64(modules.SectorSize) if hd.contract.RenterFunds().Cmp(sectorPrice) < 0 { return modules.RenterContract{}, nil, errors.New("contract has insufficient funds to support download") } // to mitigate small errors (e.g. differing block heights), fudge the // price and collateral by 0.2%. This is only applied to hosts above // v1.0.1; older hosts use stricter math. if build.VersionCmp(hd.host.Version, "1.0.1") > 0 { sectorPrice = sectorPrice.MulFloat(1 + hostPriceLeeway) } // create the download revision rev := newDownloadRevision(hd.contract.LastRevision, sectorPrice) // initiate download by confirming host settings extendDeadline(hd.conn, modules.NegotiateSettingsTime) if err := startDownload(hd.conn, hd.host); err != nil { return modules.RenterContract{}, nil, err } // Before we continue, save the revision. Unexpected termination (e.g. // power failure) during the signature transfer leaves us in an ambiguous // state: the host may or may not have received the signature, and thus // may report either revision as being the most recent. To mitigate this, // we save the old revision as a fallback.
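// (A sketch of the recovery this enables, assuming the caller wired up
// SaveFn: if the process dies during the exchange below, the next
// NewDownloader call fails verifyRecentRevision with a
// recentRevisionError, IsRevisionMismatch reports true, and the caller
// can retry using the revision and Merkle roots that SaveFn persisted.)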
if hd.SaveFn != nil { if err := hd.SaveFn(rev, hd.contract.MerkleRoots); err != nil { return modules.RenterContract{}, nil, err } } // send download action extendDeadline(hd.conn, 2*time.Minute) err = encoding.WriteObject(hd.conn, []modules.DownloadAction{{ MerkleRoot: root, Offset: 0, Length: modules.SectorSize, }}) if err != nil { return modules.RenterContract{}, nil, err } // Increase Successful/Failed interactions accordingly defer func() { if err != nil { hd.hdb.IncrementFailedInteractions(hd.contract.HostPublicKey) } else { hd.hdb.IncrementSuccessfulInteractions(hd.contract.HostPublicKey) } }() // send the revision to the host for approval extendDeadline(hd.conn, 2*time.Minute) signedTxn, err := negotiateRevision(hd.conn, rev, hd.contract.SecretKey) if err == modules.ErrStopResponse { // if host gracefully closed, close our connection as well; this will // cause the next download to fail. However, we must delay closing // until we've finished downloading the sector. defer hd.conn.Close() } else if err != nil { return modules.RenterContract{}, nil, err } // read sector data, completing one iteration of the download loop extendDeadline(hd.conn, modules.NegotiateDownloadTime) var sectors [][]byte if err := encoding.ReadObject(hd.conn, &sectors, modules.SectorSize+16); err != nil { return modules.RenterContract{}, nil, err } else if len(sectors) != 1 { return modules.RenterContract{}, nil, errors.New("host did not send enough sectors") } sector := sectors[0] if uint64(len(sector)) != modules.SectorSize { return modules.RenterContract{}, nil, errors.New("host did not send enough sector data") } else if crypto.MerkleRoot(sector) != root { return modules.RenterContract{}, nil, errors.New("host sent bad sector data") } // update contract and metrics hd.contract.LastRevision = rev hd.contract.LastRevisionTxn = signedTxn hd.contract.DownloadSpending = hd.contract.DownloadSpending.Add(sectorPrice) return hd.contract, sector, nil } // shutdown terminates the revision loop and signals the goroutine spawned in // NewDownloader to return. func (hd *Downloader) shutdown() { extendDeadline(hd.conn, modules.NegotiateSettingsTime) // don't care about these errors _, _ = verifySettings(hd.conn, hd.host) _ = modules.WriteNegotiationStop(hd.conn) close(hd.closeChan) } // Close cleanly terminates the download loop with the host and closes the // connection. func (hd *Downloader) Close() error { // using once ensures that Close is idempotent hd.once.Do(hd.shutdown) return hd.conn.Close() } // NewDownloader initiates the download request loop with a host, and returns a // Downloader. func NewDownloader(host modules.HostDBEntry, contract modules.RenterContract, hdb hostDB, cancel <-chan struct{}) (_ *Downloader, err error) { // check that contract has enough value to support a download if len(contract.LastRevision.NewValidProofOutputs) != 2 { return nil, errors.New("invalid contract") } sectorPrice := host.DownloadBandwidthPrice.Mul64(modules.SectorSize) if contract.RenterFunds().Cmp(sectorPrice) < 0 { return nil, errors.New("contract has insufficient funds to support download") } // Increase Successful/Failed interactions accordingly defer func() { // A revision mismatch might not be the host's fault.
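// For example, the renter itself may hold a stale revision after an
// unclean shutdown mid-negotiation, so mismatches are kept out of the
// host's failed-interaction tally.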
if err != nil && !IsRevisionMismatch(err) { hdb.IncrementFailedInteractions(contract.HostPublicKey) } else { hdb.IncrementSuccessfulInteractions(contract.HostPublicKey) } }() // initiate download loop conn, err := (&net.Dialer{ Cancel: cancel, Timeout: 15 * time.Second, }).Dial("tcp", string(contract.NetAddress)) if err != nil { return nil, err } closeChan := make(chan struct{}) go func() { select { case <-cancel: conn.Close() case <-closeChan: } }() // allot 2 minutes for RPC request + revision exchange extendDeadline(conn, modules.NegotiateRecentRevisionTime) defer extendDeadline(conn, time.Hour) if err := encoding.WriteObject(conn, modules.RPCDownload); err != nil { conn.Close() close(closeChan) return nil, errors.New("couldn't initiate RPC: " + err.Error()) } if err := verifyRecentRevision(conn, contract, host.Version); err != nil { conn.Close() // TODO: close gracefully if host has entered revision loop close(closeChan) return nil, err } // the host is now ready to accept revisions return &Downloader{ contract: contract, host: host, conn: conn, closeChan: closeChan, hdb: hdb, }, nil } Sia-1.3.0/modules/renter/proto/editor.go000066400000000000000000000240241313565667000201740ustar00rootroot00000000000000package proto import ( "errors" "net" "sync" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) var hostPriceLeeway = build.Select(build.Var{ Dev: 0.05, Standard: 0.002, Testing: 0.002, }).(float64) var ( // sectorHeight is the height of a Merkle tree that covers a single // sector. It is log2(modules.SectorSize / crypto.SegmentSize) sectorHeight = func() uint64 { height := uint64(0) for 1<<height < (modules.SectorSize / crypto.SegmentSize) { height++ } return height }() ) // to mitigate small errors (e.g. differing block heights), fudge the // price and collateral by 0.2%. This is only applied to hosts above // v1.0.1; older hosts use stricter math. if build.VersionCmp(he.host.Version, "1.0.1") > 0 { sectorStoragePrice = sectorStoragePrice.MulFloat(1 + hostPriceLeeway) sectorBandwidthPrice = sectorBandwidthPrice.MulFloat(1 + hostPriceLeeway) sectorCollateral = sectorCollateral.MulFloat(1 - hostPriceLeeway) } sectorPrice := sectorStoragePrice.Add(sectorBandwidthPrice) if he.contract.RenterFunds().Cmp(sectorPrice) < 0 { return modules.RenterContract{}, crypto.Hash{}, errors.New("contract has insufficient funds to support upload") } if he.contract.LastRevision.NewMissedProofOutputs[1].Value.Cmp(sectorCollateral) < 0 { return modules.RenterContract{}, crypto.Hash{}, errors.New("contract has insufficient collateral to support upload") } // calculate the new Merkle root sectorRoot := crypto.MerkleRoot(data) newRoots := append(he.contract.MerkleRoots, sectorRoot) merkleRoot := cachedMerkleRoot(newRoots) // create the action and revision actions := []modules.RevisionAction{{ Type: modules.ActionInsert, SectorIndex: uint64(len(he.contract.MerkleRoots)), Data: data, }} rev := newUploadRevision(he.contract.LastRevision, merkleRoot, sectorPrice, sectorCollateral) // run the revision iteration if err := he.runRevisionIteration(actions, rev, newRoots); err != nil { return modules.RenterContract{}, crypto.Hash{}, err } // update metrics he.contract.StorageSpending = he.contract.StorageSpending.Add(sectorStoragePrice) he.contract.UploadSpending = he.contract.UploadSpending.Add(sectorBandwidthPrice) return he.contract, sectorRoot, nil } // Delete negotiates a revision that removes a sector from a file contract.
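// No money moves in a delete (the revision is built with
// types.ZeroCurrency); the call only shrinks the contract and recomputes
// the cached Merkle root without the removed sector.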
func (he *Editor) Delete(root crypto.Hash) (modules.RenterContract, error) { // calculate the new Merkle root newRoots := make([]crypto.Hash, 0, len(he.contract.MerkleRoots)) index := -1 for i, h := range he.contract.MerkleRoots { if h == root { index = i } else { newRoots = append(newRoots, h) } } if index == -1 { return modules.RenterContract{}, errors.New("no record of that sector root") } merkleRoot := cachedMerkleRoot(newRoots) // create the action and accompanying revision actions := []modules.RevisionAction{{ Type: modules.ActionDelete, SectorIndex: uint64(index), }} rev := newDeleteRevision(he.contract.LastRevision, merkleRoot) // run the revision iteration if err := he.runRevisionIteration(actions, rev, newRoots); err != nil { return modules.RenterContract{}, err } return he.contract, nil } // Modify negotiates a revision that edits a sector in a file contract. func (he *Editor) Modify(oldRoot, newRoot crypto.Hash, offset uint64, newData []byte) (modules.RenterContract, error) { // calculate price sectorBandwidthPrice := he.host.UploadBandwidthPrice.Mul64(uint64(len(newData))) if he.contract.RenterFunds().Cmp(sectorBandwidthPrice) < 0 { return modules.RenterContract{}, errors.New("contract has insufficient funds to support modification") } // calculate the new Merkle root newRoots := make([]crypto.Hash, len(he.contract.MerkleRoots)) index := -1 for i, h := range he.contract.MerkleRoots { if h == oldRoot { index = i newRoots[i] = newRoot } else { newRoots[i] = h } } if index == -1 { return modules.RenterContract{}, errors.New("no record of that sector root") } merkleRoot := cachedMerkleRoot(newRoots) // create the action and revision actions := []modules.RevisionAction{{ Type: modules.ActionModify, SectorIndex: uint64(index), Offset: offset, Data: newData, }} rev := newModifyRevision(he.contract.LastRevision, merkleRoot, sectorBandwidthPrice) // run the revision iteration if err := he.runRevisionIteration(actions, rev, newRoots); err != nil { return modules.RenterContract{}, err } // update metrics he.contract.UploadSpending = he.contract.UploadSpending.Add(sectorBandwidthPrice) return he.contract, nil } // NewEditor initiates the contract revision process with a host, and returns // an Editor. 
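// A sketch of typical (hypothetical) use, with contract and host supplied
// by the contractor and hostdb:
//
//	he, err := NewEditor(host, contract, height, hdb, cancel)
//	if err != nil {
//		// handle error
//	}
//	defer he.Close()
//	contract, root, err := he.Upload(sectorData)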
func NewEditor(host modules.HostDBEntry, contract modules.RenterContract, currentHeight types.BlockHeight, hdb hostDB, cancel <-chan struct{}) (_ *Editor, err error) { // check that contract has enough value to support an upload if len(contract.LastRevision.NewValidProofOutputs) != 2 { return nil, errors.New("invalid contract") } // Increase Successful/Failed interactions accordingly defer func() { // a revision mismatch is not necessarily the host's fault if err != nil && !IsRevisionMismatch(err) { hdb.IncrementFailedInteractions(contract.HostPublicKey) } else if err == nil { hdb.IncrementSuccessfulInteractions(contract.HostPublicKey) } }() // initiate revision loop conn, err := (&net.Dialer{ Cancel: cancel, Timeout: 15 * time.Second, }).Dial("tcp", string(contract.NetAddress)) if err != nil { return nil, err } closeChan := make(chan struct{}) go func() { select { case <-cancel: conn.Close() case <-closeChan: } }() // allot 2 minutes for RPC request + revision exchange extendDeadline(conn, modules.NegotiateRecentRevisionTime) defer extendDeadline(conn, time.Hour) if err := encoding.WriteObject(conn, modules.RPCReviseContract); err != nil { conn.Close() close(closeChan) return nil, errors.New("couldn't initiate RPC: " + err.Error()) } if err := verifyRecentRevision(conn, contract, host.Version); err != nil { conn.Close() // TODO: close gracefully if host has entered revision loop close(closeChan) return nil, err } // the host is now ready to accept revisions return &Editor{ host: host, hdb: hdb, height: currentHeight, contract: contract, conn: conn, closeChan: closeChan, }, nil } Sia-1.3.0/modules/renter/proto/formcontract.go000066400000000000000000000233771313565667000214210ustar00rootroot00000000000000package proto import ( "errors" "net" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) const ( // estTxnSize is the estimated size of an encoded file contract // transaction set. estTxnSize = 2048 ) // FormContract forms a contract with a host and submits the contract // transaction to tpool. func FormContract(params ContractParams, txnBuilder transactionBuilder, tpool transactionPool, hdb hostDB, cancel <-chan struct{}) (modules.RenterContract, error) { // Extract vars from params, for convenience. host, filesize, startHeight, endHeight, refundAddress := params.Host, params.Filesize, params.StartHeight, params.EndHeight, params.RefundAddress // Create our key. ourSK, ourPK := crypto.GenerateKeyPair() // Create unlock conditions. uc := types.UnlockConditions{ PublicKeys: []types.SiaPublicKey{ types.Ed25519PublicKey(ourPK), host.PublicKey, }, SignaturesRequired: 2, } // Calculate cost to renter and cost to host. // TODO: clarify/abstract this math storageAllocation := host.StoragePrice.Mul64(filesize).Mul64(uint64(endHeight - startHeight)) hostCollateral := host.Collateral.Mul64(filesize).Mul64(uint64(endHeight - startHeight)) if hostCollateral.Cmp(host.MaxCollateral) > 0 { // TODO: if we have to cap the collateral, it probably means we shouldn't be using this host // (ok within a factor of 2) hostCollateral = host.MaxCollateral } hostPayout := hostCollateral.Add(host.ContractPrice) payout := storageAllocation.Add(hostPayout).Mul64(10406).Div64(10000) // renter pays for siafund fee // Check for negative currency. if types.PostTax(startHeight, payout).Cmp(hostPayout) < 0 { return modules.RenterContract{}, errors.New("payout smaller than host payout") } // Create file contract. 
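// (A note on the payout math above: types.Tax claims 3.9% of the payout,
// and the 10406/10000 factor pre-inflates the payout so the renter
// absorbs that fee. 1.0406 * (1 - 0.039) is roughly 1.00002, so the
// post-tax payout comes out within a rounding error of
// storageAllocation plus hostPayout.)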
fc := types.FileContract{ FileSize: 0, FileMerkleRoot: crypto.Hash{}, // no proof possible without data WindowStart: endHeight, WindowEnd: endHeight + host.WindowSize, Payout: payout, UnlockHash: uc.UnlockHash(), RevisionNumber: 0, ValidProofOutputs: []types.SiacoinOutput{ // Outputs need to account for tax. {Value: types.PostTax(startHeight, payout).Sub(hostPayout), UnlockHash: refundAddress}, // Collateral is returned to host. {Value: hostPayout, UnlockHash: host.UnlockHash}, }, MissedProofOutputs: []types.SiacoinOutput{ // Same as above. {Value: types.PostTax(startHeight, payout).Sub(hostPayout), UnlockHash: refundAddress}, // Same as above. {Value: hostPayout, UnlockHash: host.UnlockHash}, // Once we start doing revisions, we'll move some coins to the host and some to the void. {Value: types.ZeroCurrency, UnlockHash: types.UnlockHash{}}, }, } // Calculate transaction fee. _, maxFee := tpool.FeeEstimation() txnFee := maxFee.Mul64(estTxnSize) // Build transaction containing fc, e.g. the File Contract. renterCost := payout.Sub(hostCollateral).Add(txnFee) err := txnBuilder.FundSiacoins(renterCost) if err != nil { return modules.RenterContract{}, err } txnBuilder.AddFileContract(fc) // Add miner fee. txnBuilder.AddMinerFee(txnFee) // Create initial transaction set. txn, parentTxns := txnBuilder.View() txnSet := append(parentTxns, txn) // Increase Successful/Failed interactions accordingly defer func() { if err != nil { hdb.IncrementFailedInteractions(host.PublicKey) } else { hdb.IncrementSuccessfulInteractions(host.PublicKey) } }() // Initiate connection. dialer := &net.Dialer{ Cancel: cancel, Timeout: connTimeout, } conn, err := dialer.Dial("tcp", string(host.NetAddress)) if err != nil { return modules.RenterContract{}, err } defer func() { _ = conn.Close() }() // Allot time for sending RPC ID + verifySettings. extendDeadline(conn, modules.NegotiateSettingsTime) if err = encoding.WriteObject(conn, modules.RPCFormContract); err != nil { return modules.RenterContract{}, err } // Verify the host's settings and confirm its identity. host, err = verifySettings(conn, host) if err != nil { return modules.RenterContract{}, err } if !host.AcceptingContracts { return modules.RenterContract{}, errors.New("host is not accepting contracts") } // Allot time for negotiation. extendDeadline(conn, modules.NegotiateFileContractTime) // Send acceptance, txn signed by us, and pubkey. if err = modules.WriteNegotiationAcceptance(conn); err != nil { return modules.RenterContract{}, errors.New("couldn't send initial acceptance: " + err.Error()) } if err = encoding.WriteObject(conn, txnSet); err != nil { return modules.RenterContract{}, errors.New("couldn't send the contract signed by us: " + err.Error()) } if err = encoding.WriteObject(conn, ourSK.PublicKey()); err != nil { return modules.RenterContract{}, errors.New("couldn't send our public key: " + err.Error()) } // Read acceptance and txn signed by host. if err = modules.ReadNegotiationAcceptance(conn); err != nil { return modules.RenterContract{}, errors.New("host did not accept our proposed contract: " + err.Error()) } // Host now sends any new parent transactions, inputs and outputs that // were added to the transaction. 
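// (These additions are how the host funds its side of the contract: the
// collateral it risks must come from its own siacoin inputs, with any
// change returned through its own outputs.)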
var newParents []types.Transaction var newInputs []types.SiacoinInput var newOutputs []types.SiacoinOutput if err = encoding.ReadObject(conn, &newParents, types.BlockSizeLimit); err != nil { return modules.RenterContract{}, errors.New("couldn't read the host's added parents: " + err.Error()) } if err = encoding.ReadObject(conn, &newInputs, types.BlockSizeLimit); err != nil { return modules.RenterContract{}, errors.New("couldn't read the host's added inputs: " + err.Error()) } if err = encoding.ReadObject(conn, &newOutputs, types.BlockSizeLimit); err != nil { return modules.RenterContract{}, errors.New("couldn't read the host's added outputs: " + err.Error()) } // Merge txnAdditions with txnSet. txnBuilder.AddParents(newParents) for _, input := range newInputs { txnBuilder.AddSiacoinInput(input) } for _, output := range newOutputs { txnBuilder.AddSiacoinOutput(output) } // Sign the txn. signedTxnSet, err := txnBuilder.Sign(true) if err != nil { return modules.RenterContract{}, modules.WriteNegotiationRejection(conn, errors.New("failed to sign transaction: "+err.Error())) } // Calculate signatures added by the transaction builder. var addedSignatures []types.TransactionSignature _, _, _, addedSignatureIndices := txnBuilder.ViewAdded() for _, i := range addedSignatureIndices { addedSignatures = append(addedSignatures, signedTxnSet[len(signedTxnSet)-1].TransactionSignatures[i]) } // create initial (no-op) revision, transaction, and signature initRevision := types.FileContractRevision{ ParentID: signedTxnSet[len(signedTxnSet)-1].FileContractID(0), UnlockConditions: uc, NewRevisionNumber: 1, NewFileSize: fc.FileSize, NewFileMerkleRoot: fc.FileMerkleRoot, NewWindowStart: fc.WindowStart, NewWindowEnd: fc.WindowEnd, NewValidProofOutputs: fc.ValidProofOutputs, NewMissedProofOutputs: fc.MissedProofOutputs, NewUnlockHash: fc.UnlockHash, } renterRevisionSig := types.TransactionSignature{ ParentID: crypto.Hash(initRevision.ParentID), PublicKeyIndex: 0, CoveredFields: types.CoveredFields{ FileContractRevisions: []uint64{0}, }, } revisionTxn := types.Transaction{ FileContractRevisions: []types.FileContractRevision{initRevision}, TransactionSignatures: []types.TransactionSignature{renterRevisionSig}, } encodedSig := crypto.SignHash(revisionTxn.SigHash(0), ourSK) revisionTxn.TransactionSignatures[0].Signature = encodedSig[:] // Send acceptance and signatures. if err = modules.WriteNegotiationAcceptance(conn); err != nil { return modules.RenterContract{}, errors.New("couldn't send transaction acceptance: " + err.Error()) } if err = encoding.WriteObject(conn, addedSignatures); err != nil { return modules.RenterContract{}, errors.New("couldn't send added signatures: " + err.Error()) } if err = encoding.WriteObject(conn, revisionTxn.TransactionSignatures[0]); err != nil { return modules.RenterContract{}, errors.New("couldn't send revision signature: " + err.Error()) } // Read the host acceptance and signatures. 
err = modules.ReadNegotiationAcceptance(conn) if err != nil { return modules.RenterContract{}, errors.New("host did not accept our signatures: " + err.Error()) } var hostSigs []types.TransactionSignature if err = encoding.ReadObject(conn, &hostSigs, 2e3); err != nil { return modules.RenterContract{}, errors.New("couldn't read the host's signatures: " + err.Error()) } for _, sig := range hostSigs { txnBuilder.AddTransactionSignature(sig) } var hostRevisionSig types.TransactionSignature if err = encoding.ReadObject(conn, &hostRevisionSig, 2e3); err != nil { return modules.RenterContract{}, errors.New("couldn't read the host's revision signature: " + err.Error()) } revisionTxn.TransactionSignatures = append(revisionTxn.TransactionSignatures, hostRevisionSig) // Construct the final transaction. txn, parentTxns = txnBuilder.View() txnSet = append(parentTxns, txn) // Submit to blockchain. err = tpool.AcceptTransactionSet(txnSet) if err == modules.ErrDuplicateTransactionSet { // As long as it made it into the transaction pool, we're good. err = nil } if err != nil { return modules.RenterContract{}, err } // Calculate contract ID. fcid := txn.FileContractID(0) return modules.RenterContract{ FileContract: fc, HostPublicKey: host.PublicKey, ID: fcid, LastRevision: initRevision, LastRevisionTxn: revisionTxn, NetAddress: host.NetAddress, SecretKey: ourSK, StartHeight: startHeight, TotalCost: renterCost, ContractFee: host.ContractPrice, TxnFee: txnFee, SiafundFee: types.Tax(startHeight, fc.Payout), }, nil } Sia-1.3.0/modules/renter/proto/negotiate.go000066400000000000000000000236521313565667000206730ustar00rootroot00000000000000package proto import ( "errors" "net" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // extendDeadline is a helper function for extending the connection timeout. func extendDeadline(conn net.Conn, d time.Duration) { _ = conn.SetDeadline(time.Now().Add(d)) } // startRevision is run at the beginning of each revision iteration. It reads // the host's settings confirms that the values are acceptable, and writes an acceptance. func startRevision(conn net.Conn, host modules.HostDBEntry) error { // verify the host's settings and confirm its identity _, err := verifySettings(conn, host) if err != nil { return err } return modules.WriteNegotiationAcceptance(conn) } // startDownload is run at the beginning of each download iteration. It reads // the host's settings confirms that the values are acceptable, and writes an acceptance. func startDownload(conn net.Conn, host modules.HostDBEntry) error { // verify the host's settings and confirm its identity _, err := verifySettings(conn, host) if err != nil { return err } return modules.WriteNegotiationAcceptance(conn) } // verifySettings reads a signed HostSettings object from conn, validates the // signature, and checks for discrepancies between the known settings and the // received settings. If there is a discrepancy, the hostDB is notified. The // received settings are returned. 
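// Because the settings arrive signed by the host's announced public key,
// a man in the middle cannot quote different prices or a different payout
// address on the host's behalf; at worst it can refuse to relay the
// settings at all.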
func verifySettings(conn net.Conn, host modules.HostDBEntry) (modules.HostDBEntry, error) { // convert host key (types.SiaPublicKey) to a crypto.PublicKey if host.PublicKey.Algorithm != types.SignatureEd25519 || len(host.PublicKey.Key) != crypto.PublicKeySize { build.Critical("hostdb did not filter out host with wrong signature algorithm:", host.PublicKey.Algorithm) return modules.HostDBEntry{}, errors.New("host used unsupported signature algorithm") } var pk crypto.PublicKey copy(pk[:], host.PublicKey.Key) // read signed host settings var recvSettings modules.HostExternalSettings if err := crypto.ReadSignedObject(conn, &recvSettings, modules.NegotiateMaxHostExternalSettingsLen, pk); err != nil { return modules.HostDBEntry{}, errors.New("couldn't read host's settings: " + err.Error()) } // TODO: check recvSettings against host.HostExternalSettings. If there is // a discrepancy, write the error to conn. if recvSettings.NetAddress != host.NetAddress { // for now, just overwrite the NetAddress, since we know that // host.NetAddress works (it was the one we dialed to get conn) recvSettings.NetAddress = host.NetAddress } host.HostExternalSettings = recvSettings return host, nil } // verifyRecentRevision confirms that the host and contractor agree upon the current // state of the contract being revised. func verifyRecentRevision(conn net.Conn, contract modules.RenterContract, hostVersion string) error { // send contract ID if err := encoding.WriteObject(conn, contract.ID); err != nil { return errors.New("couldn't send contract ID: " + err.Error()) } // read challenge var challenge crypto.Hash if err := encoding.ReadObject(conn, &challenge, 32); err != nil { return errors.New("couldn't read challenge: " + err.Error()) } if build.VersionCmp(hostVersion, "1.3.0") >= 0 { crypto.SecureWipe(challenge[:16]) } // sign and return sig := crypto.SignHash(challenge, contract.SecretKey) if err := encoding.WriteObject(conn, sig); err != nil { return errors.New("couldn't send challenge response: " + err.Error()) } // read acceptance if err := modules.ReadNegotiationAcceptance(conn); err != nil { return errors.New("host did not accept revision request: " + err.Error()) } // read last revision and signatures var lastRevision types.FileContractRevision var hostSignatures []types.TransactionSignature if err := encoding.ReadObject(conn, &lastRevision, 2048); err != nil { return errors.New("couldn't read last revision: " + err.Error()) } if err := encoding.ReadObject(conn, &hostSignatures, 2048); err != nil { return errors.New("couldn't read host signatures: " + err.Error()) } // Check that the unlock hashes match; if they do not, something is // seriously wrong. Otherwise, check that the revision numbers match. if lastRevision.UnlockConditions.UnlockHash() != contract.LastRevision.UnlockConditions.UnlockHash() { return errors.New("unlock conditions do not match") } else if lastRevision.NewRevisionNumber != contract.LastRevision.NewRevisionNumber { return &recentRevisionError{contract.LastRevision.NewRevisionNumber, lastRevision.NewRevisionNumber} } // NOTE: we can fake the blockheight here because it doesn't affect // verification; it just needs to be above the fork height and below the // contract expiration (which was checked earlier). return modules.VerifyFileContractRevisionTransactionSignatures(lastRevision, hostSignatures, contract.FileContract.WindowStart-1) } // negotiateRevision sends a revision and actions to the host for approval, // completing one iteration of the revision loop. 
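// The wire exchange, for reference (renter actions on the left):
//
//	write revision          ->
//	                        <- read acceptance
//	write renter signature  ->
//	                        <- read acceptance (or ErrStopResponse)
//	                        <- read host signature
//
// The revision is only treated as committed once the fully signed
// transaction passes StandaloneValid below.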
func negotiateRevision(conn net.Conn, rev types.FileContractRevision, secretKey crypto.SecretKey) (types.Transaction, error) { // create transaction containing the revision signedTxn := types.Transaction{ FileContractRevisions: []types.FileContractRevision{rev}, TransactionSignatures: []types.TransactionSignature{{ ParentID: crypto.Hash(rev.ParentID), CoveredFields: types.CoveredFields{FileContractRevisions: []uint64{0}}, PublicKeyIndex: 0, // renter key is always first -- see formContract }}, } // sign the transaction encodedSig := crypto.SignHash(signedTxn.SigHash(0), secretKey) signedTxn.TransactionSignatures[0].Signature = encodedSig[:] // send the revision if err := encoding.WriteObject(conn, rev); err != nil { return types.Transaction{}, errors.New("couldn't send revision: " + err.Error()) } // read acceptance if err := modules.ReadNegotiationAcceptance(conn); err != nil { return types.Transaction{}, errors.New("host did not accept revision: " + err.Error()) } // send the new transaction signature if err := encoding.WriteObject(conn, signedTxn.TransactionSignatures[0]); err != nil { return types.Transaction{}, errors.New("couldn't send transaction signature: " + err.Error()) } // read the host's acceptance and transaction signature // NOTE: if the host sends ErrStopResponse, we should continue processing // the revision, but return the error anyway. responseErr := modules.ReadNegotiationAcceptance(conn) if responseErr != nil && responseErr != modules.ErrStopResponse { return types.Transaction{}, errors.New("host did not accept transaction signature: " + responseErr.Error()) } var hostSig types.TransactionSignature if err := encoding.ReadObject(conn, &hostSig, 16e3); err != nil { return types.Transaction{}, errors.New("couldn't read host's signature: " + err.Error()) } // add the signature to the transaction and verify it // NOTE: we can fake the blockheight here because it doesn't affect // verification; it just needs to be above the fork height and below the // contract expiration (which was checked earlier). verificationHeight := rev.NewWindowStart - 1 signedTxn.TransactionSignatures = append(signedTxn.TransactionSignatures, hostSig) if err := signedTxn.StandaloneValid(verificationHeight); err != nil { return types.Transaction{}, err } // if the host sent ErrStopResponse, return it return signedTxn, responseErr } // newRevision creates a copy of current with its revision number incremented, // and with cost transferred from the renter to the host. func newRevision(current types.FileContractRevision, cost types.Currency) types.FileContractRevision { rev := current // need to manually copy slice memory rev.NewValidProofOutputs = make([]types.SiacoinOutput, 2) rev.NewMissedProofOutputs = make([]types.SiacoinOutput, 3) copy(rev.NewValidProofOutputs, current.NewValidProofOutputs) copy(rev.NewMissedProofOutputs, current.NewMissedProofOutputs) // move valid payout from renter to host rev.NewValidProofOutputs[0].Value = current.NewValidProofOutputs[0].Value.Sub(cost) rev.NewValidProofOutputs[1].Value = current.NewValidProofOutputs[1].Value.Add(cost) // move missed payout from renter to void rev.NewMissedProofOutputs[0].Value = current.NewMissedProofOutputs[0].Value.Sub(cost) rev.NewMissedProofOutputs[2].Value = current.NewMissedProofOutputs[2].Value.Add(cost) // increment revision number rev.NewRevisionNumber++ return rev } // newDownloadRevision revises the current revision to cover the cost of // downloading data. 
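// Downloads change no file data, so this is just newRevision: payment
// moves from the renter to the host in the valid case and from the renter
// to the void in the missed case, while the file size and Merkle root
// stay untouched.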
func newDownloadRevision(current types.FileContractRevision, downloadCost types.Currency) types.FileContractRevision { return newRevision(current, downloadCost) } // newUploadRevision revises the current revision to cover the cost of // uploading a sector. func newUploadRevision(current types.FileContractRevision, merkleRoot crypto.Hash, price, collateral types.Currency) types.FileContractRevision { rev := newRevision(current, price) // move collateral from host to void rev.NewMissedProofOutputs[1].Value = rev.NewMissedProofOutputs[1].Value.Sub(collateral) rev.NewMissedProofOutputs[2].Value = rev.NewMissedProofOutputs[2].Value.Add(collateral) // set new filesize and Merkle root rev.NewFileSize += modules.SectorSize rev.NewFileMerkleRoot = merkleRoot return rev } // newDeleteRevision revises the current revision to cover the cost of // deleting a sector. func newDeleteRevision(current types.FileContractRevision, merkleRoot crypto.Hash) types.FileContractRevision { rev := newRevision(current, types.ZeroCurrency) rev.NewFileSize -= modules.SectorSize rev.NewFileMerkleRoot = merkleRoot return rev } // newModifyRevision revises the current revision to cover the cost of // modifying a sector. func newModifyRevision(current types.FileContractRevision, merkleRoot crypto.Hash, uploadCost types.Currency) types.FileContractRevision { rev := newRevision(current, uploadCost) rev.NewFileMerkleRoot = merkleRoot return rev } Sia-1.3.0/modules/renter/proto/negotiate_test.go000066400000000000000000000043561313565667000217320ustar00rootroot00000000000000package proto import ( "errors" "net" "testing" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // TestNegotiateRevisionStopResponse tests that when the host sends // StopResponse, the renter continues processing the revision instead of // immediately terminating. func TestNegotiateRevisionStopResponse(t *testing.T) { // simulate a renter-host connection rConn, hConn := net.Pipe() // handle the host's half of the pipe go func() { defer hConn.Close() // read revision encoding.ReadObject(hConn, new(types.FileContractRevision), 1<<22) // write acceptance modules.WriteNegotiationAcceptance(hConn) // read txn signature encoding.ReadObject(hConn, new(types.TransactionSignature), 1<<22) // write StopResponse modules.WriteNegotiationStop(hConn) // write txn signature encoding.WriteObject(hConn, types.TransactionSignature{}) }() // since the host wrote StopResponse, we should proceed to validating the // transaction. This will return a known error because we are supplying an // empty revision. _, err := negotiateRevision(rConn, types.FileContractRevision{}, crypto.SecretKey{}) if err != types.ErrFileContractWindowStartViolation { t.Fatalf("expected %q, got \"%v\"", types.ErrFileContractWindowStartViolation, err) } rConn.Close() // same as above, but send an error instead of StopResponse. The error // should be returned by negotiateRevision immediately (if it is not, we // should expect to see a transaction validation error instead). 
rConn, hConn = net.Pipe() go func() { defer hConn.Close() encoding.ReadObject(hConn, new(types.FileContractRevision), 1<<22) modules.WriteNegotiationAcceptance(hConn) encoding.ReadObject(hConn, new(types.TransactionSignature), 1<<22) // write a sentinel error modules.WriteNegotiationRejection(hConn, errors.New("sentinel")) encoding.WriteObject(hConn, types.TransactionSignature{}) }() expectedErr := "host did not accept transaction signature: sentinel" _, err = negotiateRevision(rConn, types.FileContractRevision{}, crypto.SecretKey{}) if err == nil || err.Error() != expectedErr { t.Fatalf("expected %q, got \"%v\"", expectedErr, err) } rConn.Close() } Sia-1.3.0/modules/renter/proto/proto.go000066400000000000000000000037111313565667000200510ustar00rootroot00000000000000package proto import ( "fmt" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // Dependencies. type ( transactionBuilder interface { AddFileContract(types.FileContract) uint64 AddMinerFee(types.Currency) uint64 AddParents([]types.Transaction) AddSiacoinInput(types.SiacoinInput) uint64 AddSiacoinOutput(types.SiacoinOutput) uint64 AddTransactionSignature(types.TransactionSignature) uint64 FundSiacoins(types.Currency) error Sign(bool) ([]types.Transaction, error) View() (types.Transaction, []types.Transaction) ViewAdded() (parents, coins, funds, signatures []int) } transactionPool interface { AcceptTransactionSet([]types.Transaction) error FeeEstimation() (min types.Currency, max types.Currency) } hostDB interface { IncrementSuccessfulInteractions(key types.SiaPublicKey) IncrementFailedInteractions(key types.SiaPublicKey) } ) // ContractParams are supplied as an argument to FormContract. type ContractParams struct { Host modules.HostDBEntry Filesize uint64 StartHeight types.BlockHeight EndHeight types.BlockHeight RefundAddress types.UnlockHash // TODO: add optional keypair } // A revisionSaver is called just before we send our revision signature to the host; this // allows the revision and Merkle roots to be reloaded later if we desync from the host. type revisionSaver func(types.FileContractRevision, []crypto.Hash) error // A recentRevisionError occurs if the host reports a different revision // number than expected. type recentRevisionError struct { ours, theirs uint64 } func (e *recentRevisionError) Error() string { return fmt.Sprintf("our revision number (%v) does not match the host's (%v)", e.ours, e.theirs) } // IsRevisionMismatch returns true if err was caused by the host reporting a // different revision number than expected. func IsRevisionMismatch(err error) bool { _, ok := err.(*recentRevisionError) return ok } Sia-1.3.0/modules/renter/proto/renew.go000066400000000000000000000256431313565667000200360ustar00rootroot00000000000000package proto import ( "errors" "net" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // Renew negotiates a new contract for data already stored with a host, and // submits the new contract transaction to tpool. 
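// Unlike FormContract, Renew authenticates with the contract's existing
// secret key (via verifyRecentRevision) and carries the old file size and
// Merkle root into the new contract, so the host keeps storing the same
// data under fresh funding and an extended proof window.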
func Renew(contract modules.RenterContract, params ContractParams, txnBuilder transactionBuilder, tpool transactionPool, hdb hostDB, cancel <-chan struct{}) (modules.RenterContract, error) { // extract vars from params, for convenience host, filesize, startHeight, endHeight, refundAddress := params.Host, params.Filesize, params.StartHeight, params.EndHeight, params.RefundAddress ourSK := contract.SecretKey // calculate cost to renter and cost to host storageAllocation := host.StoragePrice.Mul64(filesize).Mul64(uint64(endHeight - startHeight)) hostCollateral := host.Collateral.Mul64(filesize).Mul64(uint64(endHeight - startHeight)) if hostCollateral.Cmp(host.MaxCollateral) > 0 { // TODO: if we have to cap the collateral, it probably means we shouldn't be using this host // (ok within a factor of 2) hostCollateral = host.MaxCollateral } // Calculate additional basePrice and baseCollateral. If the contract // height did not increase, basePrice and baseCollateral are zero. var basePrice, baseCollateral types.Currency if endHeight+host.WindowSize > contract.LastRevision.NewWindowEnd { timeExtension := uint64((endHeight + host.WindowSize) - contract.LastRevision.NewWindowEnd) basePrice = host.StoragePrice.Mul64(contract.LastRevision.NewFileSize).Mul64(timeExtension) // cost of data already covered by contract, i.e. lastrevision.Filesize baseCollateral = host.Collateral.Mul64(contract.LastRevision.NewFileSize).Mul64(timeExtension) // same but collateral } hostPayout := hostCollateral.Add(host.ContractPrice).Add(basePrice) payout := storageAllocation.Add(hostCollateral.Add(host.ContractPrice)).Mul64(10406).Div64(10000) // renter covers siafund fee // check for negative currency if types.PostTax(startHeight, payout).Cmp(hostPayout) < 0 { return modules.RenterContract{}, errors.New("payout smaller than host payout") } else if hostCollateral.Cmp(baseCollateral) < 0 { return modules.RenterContract{}, errors.New("new collateral smaller than old collateral") } // create file contract fc := types.FileContract{ FileSize: contract.LastRevision.NewFileSize, FileMerkleRoot: contract.LastRevision.NewFileMerkleRoot, WindowStart: endHeight, WindowEnd: endHeight + host.WindowSize, Payout: payout, UnlockHash: contract.LastRevision.NewUnlockHash, RevisionNumber: 0, ValidProofOutputs: []types.SiacoinOutput{ // renter {Value: types.PostTax(startHeight, payout).Sub(hostPayout), UnlockHash: refundAddress}, // host {Value: hostPayout, UnlockHash: host.UnlockHash}, }, MissedProofOutputs: []types.SiacoinOutput{ // renter {Value: types.PostTax(startHeight, payout).Sub(hostPayout), UnlockHash: refundAddress}, // host gets its unused collateral back, plus the contract price {Value: hostCollateral.Sub(baseCollateral).Add(host.ContractPrice), UnlockHash: host.UnlockHash}, // void gets the spent storage fees, plus the collateral being risked {Value: basePrice.Add(baseCollateral), UnlockHash: types.UnlockHash{}}, }, } // calculate transaction fee _, maxFee := tpool.FeeEstimation() txnFee := maxFee.Mul64(estTxnSize) // build transaction containing fc renterCost := payout.Sub(hostCollateral).Add(txnFee) err := txnBuilder.FundSiacoins(renterCost) if err != nil { return modules.RenterContract{}, err } txnBuilder.AddFileContract(fc) // add miner fee txnBuilder.AddMinerFee(txnFee) // create initial transaction set txn, parentTxns := txnBuilder.View() txnSet := append(parentTxns, txn) // Increase Successful/Failed interactions accordingly defer func() { // A revision mismatch might not be the host's fault. 
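// In that case neither the failed nor the successful counter is incremented, // leaving the host's interaction score unchanged.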
if err != nil && !IsRevisionMismatch(err) { hdb.IncrementFailedInteractions(contract.HostPublicKey) } else if err == nil { hdb.IncrementSuccessfulInteractions(contract.HostPublicKey) } }() // initiate connection dialer := &net.Dialer{ Cancel: cancel, Timeout: connTimeout, } conn, err := dialer.Dial("tcp", string(host.NetAddress)) if err != nil { return modules.RenterContract{}, err } defer func() { _ = conn.Close() }() // allot time for sending RPC ID, verifyRecentRevision, and verifySettings extendDeadline(conn, modules.NegotiateRecentRevisionTime+modules.NegotiateSettingsTime) if err = encoding.WriteObject(conn, modules.RPCRenewContract); err != nil { return modules.RenterContract{}, errors.New("couldn't initiate RPC: " + err.Error()) } // verify that both parties are renewing the same contract if err = verifyRecentRevision(conn, contract, host.Version); err != nil { // don't add context; want to preserve the original error type so that // callers can check using IsRevisionMismatch return modules.RenterContract{}, err } // verify the host's settings and confirm its identity host, err = verifySettings(conn, host) if err != nil { return modules.RenterContract{}, errors.New("settings exchange failed: " + err.Error()) } if !host.AcceptingContracts { return modules.RenterContract{}, errors.New("host is not accepting contracts") } // allot time for negotiation extendDeadline(conn, modules.NegotiateRenewContractTime) // send acceptance, txn signed by us, and pubkey if err = modules.WriteNegotiationAcceptance(conn); err != nil { return modules.RenterContract{}, errors.New("couldn't send initial acceptance: " + err.Error()) } if err = encoding.WriteObject(conn, txnSet); err != nil { return modules.RenterContract{}, errors.New("couldn't send the contract signed by us: " + err.Error()) } if err = encoding.WriteObject(conn, ourSK.PublicKey()); err != nil { return modules.RenterContract{}, errors.New("couldn't send our public key: " + err.Error()) } // read acceptance and txn signed by host if err = modules.ReadNegotiationAcceptance(conn); err != nil { return modules.RenterContract{}, errors.New("host did not accept our proposed contract: " + err.Error()) } // host now sends any new parent transactions, inputs and outputs that // were added to the transaction var newParents []types.Transaction var newInputs []types.SiacoinInput var newOutputs []types.SiacoinOutput if err = encoding.ReadObject(conn, &newParents, types.BlockSizeLimit); err != nil { return modules.RenterContract{}, errors.New("couldn't read the host's added parents: " + err.Error()) } if err = encoding.ReadObject(conn, &newInputs, types.BlockSizeLimit); err != nil { return modules.RenterContract{}, errors.New("couldn't read the host's added inputs: " + err.Error()) } if err = encoding.ReadObject(conn, &newOutputs, types.BlockSizeLimit); err != nil { return modules.RenterContract{}, errors.New("couldn't read the host's added outputs: " + err.Error()) } // merge txnAdditions with txnSet txnBuilder.AddParents(newParents) for _, input := range newInputs { txnBuilder.AddSiacoinInput(input) } for _, output := range newOutputs { txnBuilder.AddSiacoinOutput(output) } // sign the txn signedTxnSet, err := txnBuilder.Sign(true) if err != nil { return modules.RenterContract{}, modules.WriteNegotiationRejection(conn, errors.New("failed to sign transaction: "+err.Error())) } // calculate signatures added by the transaction builder var addedSignatures []types.TransactionSignature _, _, _, addedSignatureIndices := txnBuilder.ViewAdded() for _, i := 
range addedSignatureIndices { addedSignatures = append(addedSignatures, signedTxnSet[len(signedTxnSet)-1].TransactionSignatures[i]) } // create initial (no-op) revision, transaction, and signature initRevision := types.FileContractRevision{ ParentID: signedTxnSet[len(signedTxnSet)-1].FileContractID(0), UnlockConditions: contract.LastRevision.UnlockConditions, NewRevisionNumber: 1, NewFileSize: fc.FileSize, NewFileMerkleRoot: fc.FileMerkleRoot, NewWindowStart: fc.WindowStart, NewWindowEnd: fc.WindowEnd, NewValidProofOutputs: fc.ValidProofOutputs, NewMissedProofOutputs: fc.MissedProofOutputs, NewUnlockHash: fc.UnlockHash, } renterRevisionSig := types.TransactionSignature{ ParentID: crypto.Hash(initRevision.ParentID), PublicKeyIndex: 0, CoveredFields: types.CoveredFields{ FileContractRevisions: []uint64{0}, }, } revisionTxn := types.Transaction{ FileContractRevisions: []types.FileContractRevision{initRevision}, TransactionSignatures: []types.TransactionSignature{renterRevisionSig}, } encodedSig := crypto.SignHash(revisionTxn.SigHash(0), ourSK) revisionTxn.TransactionSignatures[0].Signature = encodedSig[:] // Send acceptance and signatures if err = modules.WriteNegotiationAcceptance(conn); err != nil { return modules.RenterContract{}, errors.New("couldn't send transaction acceptance: " + err.Error()) } if err = encoding.WriteObject(conn, addedSignatures); err != nil { return modules.RenterContract{}, errors.New("couldn't send added signatures: " + err.Error()) } if err = encoding.WriteObject(conn, revisionTxn.TransactionSignatures[0]); err != nil { return modules.RenterContract{}, errors.New("couldn't send revision signature: " + err.Error()) } // Read the host acceptance and signatures. err = modules.ReadNegotiationAcceptance(conn) if err != nil { return modules.RenterContract{}, errors.New("host did not accept our signatures: " + err.Error()) } var hostSigs []types.TransactionSignature if err = encoding.ReadObject(conn, &hostSigs, 2e3); err != nil { return modules.RenterContract{}, errors.New("couldn't read the host's signatures: " + err.Error()) } for _, sig := range hostSigs { txnBuilder.AddTransactionSignature(sig) } var hostRevisionSig types.TransactionSignature if err = encoding.ReadObject(conn, &hostRevisionSig, 2e3); err != nil { return modules.RenterContract{}, errors.New("couldn't read the host's revision signature: " + err.Error()) } revisionTxn.TransactionSignatures = append(revisionTxn.TransactionSignatures, hostRevisionSig) // Construct the final transaction. txn, parentTxns = txnBuilder.View() txnSet = append(parentTxns, txn) // Submit to blockchain. 
err = tpool.AcceptTransactionSet(txnSet) if err == modules.ErrDuplicateTransactionSet { // as long as it made it into the transaction pool, we're good err = nil } if err != nil { return modules.RenterContract{}, err } // calculate contract ID fcid := txn.FileContractID(0) return modules.RenterContract{ FileContract: fc, HostPublicKey: host.PublicKey, ID: fcid, LastRevision: initRevision, LastRevisionTxn: revisionTxn, MerkleRoots: contract.MerkleRoots, NetAddress: host.NetAddress, SecretKey: ourSK, StartHeight: startHeight, TotalCost: renterCost, ContractFee: host.ContractPrice, TxnFee: txnFee, SiafundFee: types.Tax(startHeight, fc.Payout), }, nil } Sia-1.3.0/modules/renter/renter.go000066400000000000000000000272001313565667000170410ustar00rootroot00000000000000package renter // TODO: Change the upload loop to have an upload state, and make it so that // instead of occasionally rebuilding the whole file matrix it has just a // single matrix that it's constantly pulling chunks from. Have a separate loop // which goes through the files and adds them to the matrix. Have the loop // listen on the channel for new files, so that they can go directly into the // matrix. import ( "errors" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/renter/contractor" "github.com/NebulousLabs/Sia/modules/renter/hostdb" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/sync" "github.com/NebulousLabs/Sia/types" ) var ( errNilContractor = errors.New("cannot create renter with nil contractor") errNilCS = errors.New("cannot create renter with nil consensus set") errNilTpool = errors.New("cannot create renter with nil transaction pool") errNilHdb = errors.New("cannot create renter with nil hostdb") ) var ( // priceEstimationScope is the number of hosts that get queried by the // renter when providing price estimates. Especially for the 'Standard' // variable, there should be congruence with the number of contracts being // used in the renter allowance. priceEstimationScope = build.Select(build.Var{ Standard: int(50), Dev: int(12), Testing: int(4), }).(int) ) // A hostDB is a database of hosts that the renter can use for figuring out who // to upload to, and download from. type hostDB interface { // ActiveHosts returns the list of hosts that are actively being selected // from. ActiveHosts() []modules.HostDBEntry // AllHosts returns the full list of hosts known to the hostdb, sorted in // order of preference. AllHosts() []modules.HostDBEntry // AverageContractPrice returns the average contract price of a host. AverageContractPrice() types.Currency // Close closes the hostdb. Close() error // Host returns the HostDBEntry for a given host. Host(types.SiaPublicKey) (modules.HostDBEntry, bool) // RandomHosts returns a set of random hosts, weighted by their estimated // usefulness / attractiveness to the renter. RandomHosts will not return // any offline or inactive hosts. RandomHosts(int, []types.SiaPublicKey) []modules.HostDBEntry // ScoreBreakdown returns a detailed explanation of the various properties // of the host. ScoreBreakdown(modules.HostDBEntry) modules.HostScoreBreakdown // EstimateHostScore returns the estimated score breakdown of a host with the // provided settings. EstimateHostScore(modules.HostDBEntry) modules.HostScoreBreakdown } // A hostContractor negotiates, revises, renews, and provides access to file // contracts. 
type hostContractor interface { // SetAllowance sets the amount of money the contractor is allowed to // spend on contracts over a given time period, divided among the number // of hosts specified. Note that contractor can start forming contracts as // soon as SetAllowance is called; that is, it may block. SetAllowance(modules.Allowance) error // Allowance returns the current allowance Allowance() modules.Allowance // Close closes the hostContractor. Close() error // Contract returns the latest contract formed with the specified host. Contract(modules.NetAddress) (modules.RenterContract, bool) // Contracts returns the contracts formed by the contractor. Contracts() []modules.RenterContract // ContractByID returns the contract associated with the file contract id. ContractByID(types.FileContractID) (modules.RenterContract, bool) // CurrentPeriod returns the height at which the current allowance period // began. CurrentPeriod() types.BlockHeight // Editor creates an Editor from the specified contract ID, allowing the // insertion, deletion, and modification of sectors. Editor(types.FileContractID, <-chan struct{}) (contractor.Editor, error) // GoodForRenew indicates whether the contract line of the provided contract // is actively being renewed. GoodForRenew(types.FileContractID) bool // IsOffline reports whether the specified host is considered offline. IsOffline(types.FileContractID) bool // Downloader creates a Downloader from the specified contract ID, // allowing the retrieval of sectors. Downloader(types.FileContractID, <-chan struct{}) (contractor.Downloader, error) // ResolveID returns the most recent renewal of the specified ID. ResolveID(types.FileContractID) types.FileContractID } // A trackedFile contains metadata about files being tracked by the Renter. // Tracked files are actively repaired by the Renter. By default, files // uploaded by the user are tracked, and files that are added (via loading a // .sia file) are not. type trackedFile struct { // location of original file on disk RepairPath string } // A Renter is responsible for tracking all of the files that a user has // uploaded to Sia, as well as the locations and health of these files. type Renter struct { // File management. // // tracking contains a list of files that the user intends to maintain. By // default, files loaded through sharing are not maintained by the user. files map[string]*file tracking map[string]trackedFile // map from nickname to metadata // Work management. // // chunkQueue contains a list of incomplete work that the download loop acts // upon. The chunkQueue is only ever modified by the main download loop // thread, which means it can be accessed and updated without locks. // // downloadQueue contains a complete history of work that has been // submitted to the download loop. chunkQueue []*chunkDownload // Accessed without locks. downloadQueue []*download newDownloads chan *download newRepairs chan *file workerPool map[types.FileContractID]*worker // Utilities. cs modules.ConsensusSet hostContractor hostContractor hostDB hostDB log *persist.Logger persistDir string mu *sync.RWMutex tg *sync.ThreadGroup tpool modules.TransactionPool } // New returns an initialized renter. 
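// New wires up the production hostdb and contractor; tests that need to // inject mocks call newRenter directly instead.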
func New(g modules.Gateway, cs modules.ConsensusSet, wallet modules.Wallet, tpool modules.TransactionPool, persistDir string) (*Renter, error) { hdb, err := hostdb.New(g, cs, persistDir) if err != nil { return nil, err } hc, err := contractor.New(cs, wallet, tpool, hdb, persistDir) if err != nil { return nil, err } return newRenter(cs, tpool, hdb, hc, persistDir) } // newRenter initializes a renter and returns it. func newRenter(cs modules.ConsensusSet, tpool modules.TransactionPool, hdb hostDB, hc hostContractor, persistDir string) (*Renter, error) { if cs == nil { return nil, errNilCS } if tpool == nil { return nil, errNilTpool } if hc == nil { return nil, errNilContractor } if hdb == nil { // Nil hdb currently allowed for testing purposes. :( // return nil, errNilHdb } r := &Renter{ newRepairs: make(chan *file), files: make(map[string]*file), tracking: make(map[string]trackedFile), newDownloads: make(chan *download), workerPool: make(map[types.FileContractID]*worker), cs: cs, hostDB: hdb, hostContractor: hc, persistDir: persistDir, mu: sync.New(modules.SafeMutexDelay, 1), tg: new(sync.ThreadGroup), tpool: tpool, } if err := r.initPersist(); err != nil { return nil, err } // Spin up the workers for the work pool. contracts := r.hostContractor.Contracts() r.updateWorkerPool(contracts) go r.threadedRepairLoop() go r.threadedDownloadLoop() go r.threadedQueueRepairs() // Kill workers on shutdown. r.tg.OnStop(func() { id := r.mu.RLock() for _, worker := range r.workerPool { close(worker.killChan) } r.mu.RUnlock(id) }) return r, nil } // Close closes the Renter and its dependencies func (r *Renter) Close() error { r.tg.Stop() r.hostDB.Close() return r.hostContractor.Close() } // PriceEstimation estimates the cost in siacoins of performing various storage // and data operations. // // TODO: Make this function line up with the actual settings in the renter. // Perhaps even make it so it uses the renter's actual contracts if it has any. func (r *Renter) PriceEstimation() modules.RenterPriceEstimation { // Grab hosts to perform the estimation. hosts := r.hostDB.RandomHosts(priceEstimationScope, nil) // Check if there are zero hosts, which means no estimation can be made. if len(hosts) == 0 { return modules.RenterPriceEstimation{} } // Add up the costs for each host. var totalContractCost types.Currency var totalDownloadCost types.Currency var totalStorageCost types.Currency var totalUploadCost types.Currency for _, host := range hosts { totalContractCost = totalContractCost.Add(host.ContractPrice) totalDownloadCost = totalDownloadCost.Add(host.DownloadBandwidthPrice) totalStorageCost = totalStorageCost.Add(host.StoragePrice) totalUploadCost = totalUploadCost.Add(host.UploadBandwidthPrice) } // Convert values to being human-scale. totalDownloadCost = totalDownloadCost.Mul(modules.BytesPerTerabyte) totalStorageCost = totalStorageCost.Mul(modules.BlockBytesPerMonthTerabyte) totalUploadCost = totalUploadCost.Mul(modules.BytesPerTerabyte) // Factor in redundancy. totalStorageCost = totalStorageCost.Mul64(3) // TODO: follow file settings? totalUploadCost = totalUploadCost.Mul64(3) // TODO: follow file settings? // Perform averages. totalContractCost = totalContractCost.Div64(uint64(len(hosts))) totalDownloadCost = totalDownloadCost.Div64(uint64(len(hosts))) totalStorageCost = totalStorageCost.Div64(uint64(len(hosts))) totalUploadCost = totalUploadCost.Div64(uint64(len(hosts))) // Take the average of the host set to estimate the overall cost of the // contract forming. 
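// As an illustration (the numbers are hypothetical): with the standard // estimation scope of 50 hosts and an average contract price of 5 SC, the // base estimate below comes to 50 * 5 = 250 SC, before adding fees for one // assumed ~1000-byte contract transaction per host.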
totalContractCost = totalContractCost.Mul64(uint64(priceEstimationScope)) // Add the cost of paying the transaction fees for the first contract. _, feePerByte := r.tpool.FeeEstimation() totalContractCost = totalContractCost.Add(feePerByte.Mul64(1000).Mul64(uint64(priceEstimationScope))) return modules.RenterPriceEstimation{ FormContracts: totalContractCost, DownloadTerabyte: totalDownloadCost, StorageTerabyteMonth: totalStorageCost, UploadTerabyte: totalUploadCost, } } // SetSettings will update the settings for the renter. func (r *Renter) SetSettings(s modules.RenterSettings) error { err := r.hostContractor.SetAllowance(s.Allowance) if err != nil { return err } contracts := r.hostContractor.Contracts() id := r.mu.Lock() r.updateWorkerPool(contracts) r.mu.Unlock(id) return nil } // hostdb passthroughs func (r *Renter) ActiveHosts() []modules.HostDBEntry { return r.hostDB.ActiveHosts() } func (r *Renter) AllHosts() []modules.HostDBEntry { return r.hostDB.AllHosts() } func (r *Renter) Host(spk types.SiaPublicKey) (modules.HostDBEntry, bool) { return r.hostDB.Host(spk) } func (r *Renter) ScoreBreakdown(e modules.HostDBEntry) modules.HostScoreBreakdown { return r.hostDB.ScoreBreakdown(e) } func (r *Renter) EstimateHostScore(e modules.HostDBEntry) modules.HostScoreBreakdown { return r.hostDB.EstimateHostScore(e) } // contractor passthroughs func (r *Renter) Contracts() []modules.RenterContract { return r.hostContractor.Contracts() } func (r *Renter) CurrentPeriod() types.BlockHeight { return r.hostContractor.CurrentPeriod() } func (r *Renter) Settings() modules.RenterSettings { return modules.RenterSettings{ Allowance: r.hostContractor.Allowance(), } } func (r *Renter) AllContracts() []modules.RenterContract { return r.hostContractor.(interface { AllContracts() []modules.RenterContract }).AllContracts() } // Enforce that Renter satisfies the modules.Renter interface. var _ modules.Renter = (*Renter)(nil) Sia-1.3.0/modules/renter/renter_test.go000066400000000000000000000124011313565667000200750ustar00rootroot00000000000000package renter import ( "path/filepath" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/consensus" "github.com/NebulousLabs/Sia/modules/gateway" "github.com/NebulousLabs/Sia/modules/miner" "github.com/NebulousLabs/Sia/modules/renter/contractor" "github.com/NebulousLabs/Sia/modules/transactionpool" "github.com/NebulousLabs/Sia/modules/wallet" "github.com/NebulousLabs/Sia/types" ) // renterTester contains all of the modules that are used while testing the renter. type renterTester struct { cs modules.ConsensusSet gateway modules.Gateway miner modules.TestMiner tpool modules.TransactionPool wallet modules.Wallet walletKey crypto.TwofishKey renter *Renter } // Close shuts down the renter tester. func (rt *renterTester) Close() error { rt.wallet.Lock() rt.cs.Close() rt.gateway.Close() return nil } // newRenterTester creates a ready-to-use renter tester with money in the // wallet. func newRenterTester(name string) (*renterTester, error) { // Create the modules. 
testdir := build.TempDir("renter", name) g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { return nil, err } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { return nil, err } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { return nil, err } w, err := wallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { return nil, err } key := crypto.GenerateTwofishKey() _, err = w.Encrypt(key) if err != nil { return nil, err } err = w.Unlock(key) if err != nil { return nil, err } r, err := New(g, cs, w, tp, filepath.Join(testdir, modules.RenterDir)) if err != nil { return nil, err } m, err := miner.New(cs, tp, w, filepath.Join(testdir, modules.MinerDir)) if err != nil { return nil, err } // Assemble all pieces into a renter tester. rt := &renterTester{ cs: cs, gateway: g, miner: m, tpool: tp, wallet: w, renter: r, } // Mine blocks until there is money in the wallet. for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ { _, err := rt.miner.AddBlock() if err != nil { return nil, err } } return rt, nil } // newContractorTester creates a renterTester, but with the supplied // hostContractor. func newContractorTester(name string, hdb hostDB, hc hostContractor) (*renterTester, error) { // Create the modules. testdir := build.TempDir("renter", name) g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { return nil, err } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { return nil, err } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { return nil, err } w, err := wallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { return nil, err } key := crypto.GenerateTwofishKey() _, err = w.Encrypt(key) if err != nil { return nil, err } err = w.Unlock(key) if err != nil { return nil, err } r, err := newRenter(cs, tp, hdb, hc, filepath.Join(testdir, modules.RenterDir)) if err != nil { return nil, err } m, err := miner.New(cs, tp, w, filepath.Join(testdir, modules.MinerDir)) if err != nil { return nil, err } // Assemble all pieces into a renter tester. rt := &renterTester{ cs: cs, gateway: g, miner: m, tpool: tp, wallet: w, renter: r, } // Mine blocks until there is money in the wallet. for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ { _, err := rt.miner.AddBlock() if err != nil { return nil, err } } return rt, nil } // stubHostDB is the minimal implementation of the hostDB interface. It can be // embedded in other mock hostDB types, removing the need to reimplement all // of the hostDB's methods on every mock. type stubHostDB struct{} func (stubHostDB) ActiveHosts() []modules.HostDBEntry { return nil } func (stubHostDB) AllHosts() []modules.HostDBEntry { return nil } func (stubHostDB) AverageContractPrice() types.Currency { return types.Currency{} } func (stubHostDB) Close() error { return nil } func (stubHostDB) IsOffline(modules.NetAddress) bool { return true } // stubContractor is the minimal implementation of the hostContractor // interface. 
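// Like stubHostDB, it can be embedded in other mock types so that a test only // needs to override the methods it actually exercises.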
type stubContractor struct{} func (stubContractor) SetAllowance(modules.Allowance) error { return nil } func (stubContractor) Allowance() modules.Allowance { return modules.Allowance{} } func (stubContractor) Contract(modules.NetAddress) (modules.RenterContract, bool) { return modules.RenterContract{}, false } func (stubContractor) Contracts() []modules.RenterContract { return nil } func (stubContractor) CurrentPeriod() types.BlockHeight { return 0 } func (stubContractor) IsOffline(modules.NetAddress) bool { return false } func (stubContractor) Editor(types.FileContractID) (contractor.Editor, error) { return nil, nil } func (stubContractor) Downloader(types.FileContractID) (contractor.Downloader, error) { return nil, nil } Sia-1.3.0/modules/renter/repair.go000066400000000000000000000442661313565667000170330ustar00rootroot00000000000000package renter // TODO: There are no download-to-reupload strategies implemented. // TODO: The chunkStatus stuff needs to recognize when two different contract // ids are actually a part of the same file contract. import ( "errors" "io" "os" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/types" ) var ( // errFileDeleted indicates that a chunk that is being repaired // cannot be found in the renter. errFileDeleted = errors.New("cannot repair chunk as the file is not being tracked by the renter") ) type ( // chunkStatus contains information about a chunk to assist with repairing // the chunk. chunkStatus struct { // contracts is the set of file contracts which are already storing // pieces for the chunk. // // pieces contains the indices of the pieces that have already been // uploaded for this chunk. // // recordedGaps indicates the value that this chunk has recorded in the // gapCounts map. activePieces int contracts map[types.FileContractID]struct{} pieces map[uint64]struct{} recordedGaps int totalPieces int } // chunkID can be used to uniquely identify a chunk within the repair // matrix. chunkID struct { index uint64 // the index of the chunk within its file. masterkey crypto.TwofishKey } // repairState tracks a bunch of chunks that are being actively repaired. repairState struct { // activeWorkers is the set of workers that are currently performing // work; they are unavailable right now but will become available soon. // // availableWorkers is a set of workers that are currently available to // perform work, and do not have any current jobs. // // gapCounts tracks how many chunks have each number of gaps. This is // used to drive uploading optimizations. // // incompleteChunks tracks a set of chunks that don't yet have full // redundancy, along with information about which pieces and contracts // aren't being used. // // downloadingChunks tracks the set of chunks that are currently being // downloaded in order to be re-uploaded. // // cachedChunks tracks the set of chunks that have recently been retrieved // from hosts. // // workerSet tracks the set of workers which can be used for uploading. activeWorkers map[types.FileContractID]*worker availableWorkers map[types.FileContractID]*worker gapCounts map[int]int incompleteChunks map[chunkID]*chunkStatus downloadingChunks map[chunkID]struct{} cachedChunks map[chunkID][]byte resultChan chan finishedUpload } ) // numGaps returns the number of gaps that a chunk has.
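// A gap is a missing piece that could actually be filled: numGaps reports the // smaller of the number of missing pieces and the number of pooled workers // whose contracts the chunk is not already using. For example (hypothetical // numbers), a chunk missing 4 pieces with only 2 unused workers in the pool // has 2 gaps.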
func (cs *chunkStatus) numGaps(rs *repairState) int { incompatContracts := 0 for contract := range cs.contracts { _, exists1 := rs.activeWorkers[contract] _, exists2 := rs.availableWorkers[contract] if exists1 || exists2 { incompatContracts++ } } contractGaps := len(rs.activeWorkers) + len(rs.availableWorkers) - incompatContracts pieceGaps := cs.totalPieces - len(cs.pieces) if contractGaps < pieceGaps { return contractGaps } return pieceGaps } // managedAddFileToRepairState will take a file and add each of the incomplete // chunks to the repair state, along with data about which pieces need // attention. func (r *Renter) managedAddFileToRepairState(rs *repairState, file *file) { // Check that the file is being tracked, and is therefore a candidate for // repair. id := r.mu.RLock() file.mu.RLock() _, exists := r.tracking[file.name] file.mu.RUnlock() r.mu.RUnlock(id) if !exists { return } // Fetch the list of potential contracts from the repair state. contracts := make([]types.FileContractID, 0) for contract := range rs.activeWorkers { contracts = append(contracts, contract) } for contract := range rs.availableWorkers { contracts = append(contracts, contract) } // Create the data structures that allow us to fill out the status for each // chunk. chunkCount := file.numChunks() availablePieces := make([]map[uint64]struct{}, chunkCount) utilizedContracts := make([]map[types.FileContractID]struct{}, chunkCount) for i := uint64(0); i < chunkCount; i++ { availablePieces[i] = make(map[uint64]struct{}) utilizedContracts[i] = make(map[types.FileContractID]struct{}) } // Iterate through each contract and figure out which pieces are available. file.mu.RLock() var fileContracts []fileContract for _, c := range file.contracts { fileContracts = append(fileContracts, c) } file.mu.RUnlock() for _, contract := range fileContracts { // Check whether this contract is offline. Even if the contract is // offline, we want to record that the chunk has attempted to use this // contract. id := r.hostContractor.ResolveID(contract.ID) stable := !r.hostContractor.IsOffline(id) && r.hostContractor.GoodForRenew(id) // Scan all of the pieces of the contract. for _, piece := range contract.Pieces { utilizedContracts[piece.Chunk][id] = struct{}{} // Only mark the piece as complete if the piece can be recovered. if stable { availablePieces[piece.Chunk][piece.Piece] = struct{}{} } } } // Create the chunkStatus object for each chunk and add it to the set of // incomplete chunks. for i := uint64(0); i < chunkCount; i++ { // Skip this chunk if all pieces have been uploaded. if len(availablePieces[i]) >= file.erasureCode.NumPieces() { continue } // Skip this chunk if it's already in the set of incomplete chunks. cid := chunkID{i, file.masterKey} _, exists := rs.incompleteChunks[cid] if exists { continue } // Create the chunkStatus object and add it to the set of incomplete // chunks. cs := &chunkStatus{ contracts: utilizedContracts[i], pieces: availablePieces[i], totalPieces: file.erasureCode.NumPieces(), } cs.recordedGaps = cs.numGaps(rs) rs.incompleteChunks[cid] = cs rs.gapCounts[cs.recordedGaps]++ } } // managedRepairIteration does a full file repair iteration, which includes // scanning all of the files for missing pieces and attempting to repair them // by uploading new pieces. func (r *Renter) managedRepairIteration(rs *repairState) { // Wait for work if there is nothing to do.
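// The select below parks the repair loop until either a shutdown signal // arrives or a new file is queued for repair.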
if len(rs.activeWorkers) == 0 && len(rs.incompleteChunks) == 0 { select { case <-r.tg.StopChan(): return case file := <-r.newRepairs: r.managedAddFileToRepairState(rs, file) return } } // Reset the available workers. contracts := r.hostContractor.Contracts() id := r.mu.Lock() r.updateWorkerPool(contracts) rs.availableWorkers = make(map[types.FileContractID]*worker) for id, worker := range r.workerPool { // Ignore the workers that are not good for uploading. if !worker.contract.GoodForUpload { continue } // Ignore workers that are already in the active set of workers. _, exists := rs.activeWorkers[worker.contractID] if exists { continue } // Ignore workers that have had an upload failure recently. The cooldown // time scales exponentially as the number of consecutive failures grows, // stopping at 10 doublings, or about 17 hours total cooldown. penalty := uint64(worker.consecutiveUploadFailures) if worker.consecutiveUploadFailures > time.Duration(maxConsecutivePenalty) { penalty = uint64(maxConsecutivePenalty) } if time.Since(worker.recentUploadFailure) < uploadFailureCooldown*(1<<penalty) { continue } rs.availableWorkers[id] = worker } r.mu.Unlock(id) // Determine the maximum number of gaps that any chunk currently has. maxGaps := 0 for i, gaps := range rs.gapCounts { if i > maxGaps && gaps > 0 { maxGaps = i } } // prune the chunk cache for cid := range rs.cachedChunks { if len(rs.cachedChunks) <= maxChunkCacheSize { break } delete(rs.cachedChunks, cid) } // Scan through the chunks until a candidate for upload is found. var chunksToDelete []chunkID for chunkID, chunkStatus := range rs.incompleteChunks { // check if the chunk is currently being downloaded for recovery if _, downloading := rs.downloadingChunks[chunkID]; downloading { continue } // Update the number of gaps for this chunk. numGaps := chunkStatus.numGaps(rs) rs.gapCounts[chunkStatus.recordedGaps]-- rs.gapCounts[numGaps]++ chunkStatus.recordedGaps = numGaps // Remove this chunk from the set of incomplete chunks if it has been // completed and there are no workers still working on it. if numGaps == 0 && chunkStatus.activePieces == 0 { chunksToDelete = append(chunksToDelete, chunkID) continue } // Skip this chunk if it does not have enough gaps. if maxGaps >= minPiecesRepair && numGaps < minPiecesRepair { continue } // Determine the set of useful workers - workers that are both // available and able to repair this chunk. var usefulWorkers []types.FileContractID for workerID, worker := range rs.availableWorkers { _, exists := chunkStatus.contracts[workerID] if !exists && worker.contract.GoodForUpload { usefulWorkers = append(usefulWorkers, workerID) } } // Skip this chunk if the set of useful workers does not meet the // minimum pieces requirement. if maxGaps >= minPiecesRepair && len(usefulWorkers) < minPiecesRepair { continue } // Skip this chunk if the set of useful workers is not complete, and // the maxGaps value is less than the minPiecesRepair value. if maxGaps < minPiecesRepair && len(usefulWorkers) < numGaps { continue } // Send off the work. err := r.managedScheduleChunkRepair(rs, chunkID, chunkStatus, usefulWorkers) if err != nil { r.log.Println("Unable to repair chunk:", err) chunksToDelete = append(chunksToDelete, chunkID) continue } } for _, cid := range chunksToDelete { delete(rs.incompleteChunks, cid) } // Block until some of the workers return. r.managedWaitOnRepairWork(rs) } // managedDownloadChunkData downloads the requested chunk from Sia, for use in // the repair loop.
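// It is the fallback used by managedGetChunkData when the tracked file cannot // be opened at its RepairPath: the chunk is fetched back from the hosts so // that its missing pieces can be regenerated.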
func (r *Renter) managedDownloadChunkData(rs *repairState, file *file, offset uint64, chunkIndex uint64, chunkID chunkID) ([]byte, error) { rs.downloadingChunks[chunkID] = struct{}{} defer delete(rs.downloadingChunks, chunkID) downloadSize := file.chunkSize() if offset+downloadSize > file.size { downloadSize = file.size - offset } // create a DownloadBufferWriter for the chunk buf := NewDownloadBufferWriter(file.chunkSize(), int64(offset)) // create the download object and push it onto the download queue d := r.newSectionDownload(file, buf, offset, downloadSize) done := make(chan struct{}) defer close(done) go func() { select { case r.newDownloads <- d: case <-done: } }() // wait for the download to complete and return the data select { case <-d.downloadFinished: return buf.Bytes(), d.Err() case <-r.tg.StopChan(): return nil, errors.New("chunk download interrupted by shutdown") case <-time.After(chunkDownloadTimeout): return nil, errors.New("chunk download timed out") } } // managedGetChunkData grabs the requested `chunkID` from the file, in order to // repair the file. If the `trackedFile` can be found on disk, grab the chunk // from the file, otherwise attempt to queue a new download for only that chunk // and return the downloaded chunk. func (r *Renter) managedGetChunkData(rs *repairState, file *file, trackedFile trackedFile, chunkID chunkID) ([]byte, error) { chunkIndex := chunkID.index offset := chunkIndex * file.chunkSize() // try to read the chunk from disk f, err := os.Open(trackedFile.RepairPath) if err != nil { // if that fails, try to download the chunk return r.managedDownloadChunkData(rs, file, offset, chunkIndex, chunkID) } defer f.Close() chunkData := make([]byte, file.chunkSize()) _, err = f.ReadAt(chunkData, int64(offset)) if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { // TODO: We should be doing better error handling here. ErrUnexpectedEOF // is expected when reading the final, short chunk of a file, but if it // occurs for any other reason we will wrongly believe that the chunk // was read from disk correctly. return nil, err } return chunkData, nil } // managedScheduleChunkRepair takes a chunk and schedules some repair on that // chunk using the chunk state and a list of workers. func (r *Renter) managedScheduleChunkRepair(rs *repairState, chunkID chunkID, chunkStatus *chunkStatus, usefulWorkers []types.FileContractID) error { // Check that the file is still in the renter. id := r.mu.RLock() var file *file for _, f := range r.files { if f.masterKey == chunkID.masterkey { file = f break } } var meta trackedFile var exists bool if file != nil { meta, exists = r.tracking[file.name] } r.mu.RUnlock(id) if !exists { return errFileDeleted } // read the chunk into memory // check the cache first var chunkData []byte if cachedData, exists := rs.cachedChunks[chunkID]; exists { chunkData = cachedData } else { data, err := r.managedGetChunkData(rs, file, meta, chunkID) if err != nil { return build.ExtendErr("unable to get repair chunk:", err) } chunkData = data rs.cachedChunks[chunkID] = data } // Erasure code the pieces. pieces, err := file.erasureCode.Encode(chunkData) if err != nil { return build.ExtendErr("unable to erasure code chunk data", err) } // Get the set of pieces that are missing from the chunk. var missingPieces []uint64 for i := uint64(0); i < uint64(file.erasureCode.NumPieces()); i++ { _, exists := chunkStatus.pieces[i] if !exists { missingPieces = append(missingPieces, i) } } // Truncate the pieces so that they match the number of useful workers.
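// For example (hypothetical numbers), with 5 missing pieces but only 3 useful // workers, just the first 3 pieces are scheduled now; the remaining gaps are // picked up on a later iteration once workers free up.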
if len(usefulWorkers) < len(missingPieces) { missingPieces = missingPieces[:len(usefulWorkers)] } // Encrypt the missing pieces. for _, missingPiece := range missingPieces { key := deriveKey(file.masterKey, chunkID.index, uint64(missingPiece)) pieces[missingPiece] = key.EncryptBytes(pieces[missingPiece]) } // Give each piece to a worker in the set of useful workers. for len(usefulWorkers) > 0 && len(missingPieces) > 0 { uw := uploadWork{ chunkID: chunkID, data: pieces[missingPieces[0]], file: file, pieceIndex: missingPieces[0], resultChan: rs.resultChan, } // Grab the worker, and update the worker tracking in the repair state. worker := rs.availableWorkers[usefulWorkers[0]] rs.activeWorkers[usefulWorkers[0]] = worker delete(rs.availableWorkers, usefulWorkers[0]) chunkStatus.activePieces++ chunkStatus.contracts[usefulWorkers[0]] = struct{}{} chunkStatus.pieces[missingPieces[0]] = struct{}{} // Update the number of gaps for this chunk. numGaps := chunkStatus.numGaps(rs) rs.gapCounts[chunkStatus.recordedGaps]-- rs.gapCounts[numGaps]++ chunkStatus.recordedGaps = numGaps // Update the set of useful workers and the set of missing pieces. missingPieces = missingPieces[1:] usefulWorkers = usefulWorkers[1:] // Deliver the payload to the worker. select { case worker.uploadChan <- uw: default: r.log.Critical("Worker is supposed to be available, but upload work channel is full") worker.uploadChan <- uw } } return nil } // managedWaitOnRepairWork will block until a worker returns from an upload, // handling the results. func (r *Renter) managedWaitOnRepairWork(rs *repairState) { // If there are no active workers, return early. if len(rs.activeWorkers) == 0 { return } // Wait for an upload to finish. var finishedUpload finishedUpload select { case finishedUpload = <-rs.resultChan: case file := <-r.newRepairs: r.managedAddFileToRepairState(rs, file) return case <-r.tg.StopChan(): return } // Mark that the worker of this chunk has completed its work. if cs, ok := rs.incompleteChunks[finishedUpload.chunkID]; !ok { // The file was deleted mid-upload. Add the worker back to the set of // available workers. rs.availableWorkers[finishedUpload.workerID] = rs.activeWorkers[finishedUpload.workerID] delete(rs.activeWorkers, finishedUpload.workerID) return } else { cs.activePieces-- } // If there was no error, add the worker back to the set of // available workers and wait for the next worker. if finishedUpload.err == nil { rs.availableWorkers[finishedUpload.workerID] = rs.activeWorkers[finishedUpload.workerID] delete(rs.activeWorkers, finishedUpload.workerID) return } // Log the error and retire the worker. r.log.Debugln("Error while performing upload to", finishedUpload.workerID, "::", finishedUpload.err) delete(rs.activeWorkers, finishedUpload.workerID) // Indicate in the set of incomplete chunks that this piece was not // completed. rs.incompleteChunks[finishedUpload.chunkID].pieces[finishedUpload.pieceIndex] = struct{}{} } // threadedQueueRepairs is a goroutine that runs in the background and // continuously adds files to the repair loop, slow enough that it's not a // resource burden but fast enough that no file is ever at risk. // // NOTE: This loop is pretty naive in terms of work management. As the number // of files goes up, and as the number of chunks per file goes up, this will // become a performance bottleneck, and even inhibit repair progress. func (r *Renter) threadedQueueRepairs() { for { // Compress the set of files into a slice. 
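// The slice is built while holding the read lock, but the sends to the // newRepairs channel below happen after the lock is released, so queueing // repairs cannot block other renter operations.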
id := r.mu.RLock() var files []*file for _, file := range r.files { if _, ok := r.tracking[file.name]; ok { // Only repair files that are being tracked. files = append(files, file) } } r.mu.RUnlock(id) // Add files. for _, file := range files { // Send the file down the repair channel. select { case r.newRepairs <- file: case <-r.tg.StopChan(): return } } // Chill out for an extra 15 minutes before going through the files // again. select { case <-time.After(repairQueueInterval): case <-r.tg.StopChan(): return } } } // threadedRepairLoop improves the health of files tracked by the renter by // reuploading their missing pieces. Multiple repair attempts may be necessary // before the file reaches full redundancy. func (r *Renter) threadedRepairLoop() { rs := &repairState{ activeWorkers: make(map[types.FileContractID]*worker), availableWorkers: make(map[types.FileContractID]*worker), gapCounts: make(map[int]int), incompleteChunks: make(map[chunkID]*chunkStatus), cachedChunks: make(map[chunkID][]byte), downloadingChunks: make(map[chunkID]struct{}), resultChan: make(chan finishedUpload), } for { if r.tg.Add() != nil { return } r.managedRepairIteration(rs) r.tg.Done() } } Sia-1.3.0/modules/renter/upload.go000066400000000000000000000067371313565667000170420ustar00rootroot00000000000000package renter import ( "errors" "fmt" "os" "strings" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" ) var ( errInsufficientContracts = errors.New("not enough contracts to upload file") errUploadDirectory = errors.New("cannot upload directory") // Erasure-coded piece size pieceSize = modules.SectorSize - crypto.TwofishOverhead // defaultDataPieces is the number of data pieces per erasure-coded chunk defaultDataPieces = func() int { switch build.Release { case "dev": return 1 case "standard": return 10 case "testing": return 1 } panic("undefined defaultDataPieces") }() // defaultParityPieces is the number of parity pieces per erasure-coded // chunk defaultParityPieces = func() int { switch build.Release { case "dev": return 1 case "standard": return 20 case "testing": return 8 } panic("undefined defaultParityPieces") }() ) // validateSiapath checks that a Siapath is a legal filename. // ../ is disallowed to prevent directory traversal, // and paths must not begin with / or be empty. func validateSiapath(siapath string) error { if strings.HasPrefix(siapath, "/") || strings.HasPrefix(siapath, "./") { return errors.New("nicknames cannot begin with /") } if siapath == "" { return ErrEmptyFilename } if strings.Contains(siapath, "../") { return errors.New("directory traversal is not allowed") } if strings.Contains(siapath, "./") { return errors.New("siapath contains invalid characters") } return nil } // validateSource verifies that a sourcePath meets the // requirements for upload. func validateSource(sourcePath string) error { finfo, err := os.Stat(sourcePath) if err != nil { return err } if finfo.IsDir() { return errUploadDirectory } return nil } // Upload instructs the renter to start tracking a file. The renter will // automatically upload and repair tracked files using a background loop. func (r *Renter) Upload(up modules.FileUploadParams) error { // Enforce nickname rules. if err := validateSiapath(up.SiaPath); err != nil { return err } // Enforce source rules. if err := validateSource(up.Source); err != nil { return err } // Check for a nickname conflict. 
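// i.e. an existing file already known under the same SiaPath.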
lockID := r.mu.RLock() _, exists := r.files[up.SiaPath] r.mu.RUnlock(lockID) if exists { return ErrPathOverload } // Fill in any missing upload params with sensible defaults. fileInfo, err := os.Stat(up.Source) if err != nil { return err } if up.ErasureCode == nil { up.ErasureCode, _ = NewRSCode(defaultDataPieces, defaultParityPieces) } // Check that we have contracts to upload to. We need at least (data + // parity/2) contracts; since NumPieces = data + parity, we arrive at the // expression below. if nContracts := len(r.hostContractor.Contracts()); nContracts < (up.ErasureCode.NumPieces()+up.ErasureCode.MinPieces())/2 && build.Release != "testing" { return fmt.Errorf("not enough contracts to upload file: got %v, needed %v", nContracts, (up.ErasureCode.NumPieces()+up.ErasureCode.MinPieces())/2) } // Create file object. f := newFile(up.SiaPath, up.ErasureCode, pieceSize, uint64(fileInfo.Size())) f.mode = uint32(fileInfo.Mode()) // Add file to renter. lockID = r.mu.Lock() r.files[up.SiaPath] = f r.tracking[up.SiaPath] = trackedFile{ RepairPath: up.Source, } r.saveSync() err = r.saveFile(f) r.mu.Unlock(lockID) if err != nil { return err } // Send the upload to the repair loop. r.newRepairs <- f return nil } Sia-1.3.0/modules/renter/upload_test.go000066400000000000000000000033701313565667000200670ustar00rootroot00000000000000package renter import ( "io/ioutil" "os" "testing" "github.com/NebulousLabs/Sia/modules" ) // TestRenterSiapathValidate verifies that the validateSiapath function correctly validates SiaPaths. func TestRenterSiapathValidate(t *testing.T) { var pathtests = []struct { in string valid bool }{ {"valid/siapath", true}, {"../../../directory/traversal", false}, {"testpath", true}, {"valid/siapath/../with/directory/traversal", false}, {"validpath/test", true}, {"..validpath/..test", true}, {"./invalid/path", false}, {"test/path", true}, {"/leading/slash", false}, {"foo/./bar", false}, {"", false}, } for _, pathtest := range pathtests { err := validateSiapath(pathtest.in) if err != nil && pathtest.valid { t.Fatal("validateSiapath failed on valid path: ", pathtest.in) } if err == nil && !pathtest.valid { t.Fatal("validateSiapath succeeded on invalid path: ", pathtest.in) } } } // TestRenterUploadDirectory verifies that the renter returns an error if a // directory is provided as the source of an upload. func TestRenterUploadInode(t *testing.T) { if testing.Short() { t.SkipNow() } rt, err := newRenterTester(t.Name()) if err != nil { t.Fatal(err) } defer rt.Close() testUploadPath, err := ioutil.TempDir("", t.Name()) if err != nil { t.Fatal(err) } defer os.RemoveAll(testUploadPath) ec, err := NewRSCode(defaultDataPieces, defaultParityPieces) if err != nil { t.Fatal(err) } params := modules.FileUploadParams{ Source: testUploadPath, SiaPath: "test", ErasureCode: ec, } err = rt.renter.Upload(params) if err == nil { t.Fatal("expected Upload to fail with empty directory as source") } if err != errUploadDirectory { t.Fatal("expected errUploadDirectory, got", err) } } Sia-1.3.0/modules/renter/worker.go000066400000000000000000000162441313565667000170610ustar00rootroot00000000000000package renter // TODO: Need to make sure that we do not end up with two workers for the same // host, which could potentially happen over renewals because the contract ids // will be different. 
import ( "time" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) type ( // downloadWork contains instructions to download a piece from a host, and // a channel for returning the results. downloadWork struct { // dataRoot is the MerkleRoot of the data being requested, which serves // as an ID when requesting data from the host. dataRoot crypto.Hash pieceIndex uint64 chunkDownload *chunkDownload // resultChan is a channel that the worker will use to return the // results of the download. resultChan chan finishedDownload } // finishedDownload contains the data and error from performing a download. finishedDownload struct { chunkDownload *chunkDownload data []byte err error pieceIndex uint64 workerID types.FileContractID } // finishedUpload contains the Merkle root and error from performing an // upload. finishedUpload struct { chunkID chunkID dataRoot crypto.Hash err error pieceIndex uint64 workerID types.FileContractID } // uploadWork contains instructions to upload a piece to a host, and a // channel for returning the results. uploadWork struct { // data is the payload of the upload. chunkID chunkID data []byte file *file pieceIndex uint64 // resultChan is a channel that the worker will use to return the // results of the upload. resultChan chan finishedUpload } // A worker listens for work on a certain host. worker struct { // contractID specifies which contract the worker specifically works // with. contract modules.RenterContract contractID types.FileContractID // If there is work on all three channels, the worker will first do all // of the work in the priority download chan, then all of the work in the // download chan, and finally all of the work in the upload chan. // // A busy higher priority channel is able to entirely starve all of the // channels with lower priority. downloadChan chan downloadWork // higher priority than all uploads killChan chan struct{} // highest priority priorityDownloadChan chan downloadWork // higher priority than downloads (used for user-initiated downloads) uploadChan chan uploadWork // lowest priority // recentUploadFailure documents the most recent time that an upload // has failed. consecutiveUploadFailures time.Duration recentUploadFailure time.Time // Only modified by primary repair loop. // recentDownloadFailure documents the most recent time that a download // has failed. recentDownloadFailure time.Time // Only modified by the primary download loop. // Utilities. renter *Renter } ) // download will perform some download work. func (w *worker) download(dw downloadWork) { d, err := w.renter.hostContractor.Downloader(w.contractID, w.renter.tg.StopChan()) if err != nil { go func() { select { case dw.resultChan <- finishedDownload{dw.chunkDownload, nil, err, dw.pieceIndex, w.contractID}: case <-w.renter.tg.StopChan(): } }() return } defer d.Close() data, err := d.Sector(dw.dataRoot) go func() { select { case dw.resultChan <- finishedDownload{dw.chunkDownload, data, err, dw.pieceIndex, w.contractID}: case <-w.renter.tg.StopChan(): } }() } // upload will perform some upload work. 
func (w *worker) upload(uw uploadWork) { e, err := w.renter.hostContractor.Editor(w.contractID, w.renter.tg.StopChan()) if err != nil { w.recentUploadFailure = time.Now() w.consecutiveUploadFailures++ go func() { select { case uw.resultChan <- finishedUpload{uw.chunkID, crypto.Hash{}, err, uw.pieceIndex, w.contractID}: case <-w.renter.tg.StopChan(): } }() return } defer e.Close() root, err := e.Upload(uw.data) if err != nil { w.recentUploadFailure = time.Now() w.consecutiveUploadFailures++ go func() { select { case uw.resultChan <- finishedUpload{uw.chunkID, root, err, uw.pieceIndex, w.contractID}: case <-w.renter.tg.StopChan(): } }() return } // Success - reset the consecutive upload failures count. w.consecutiveUploadFailures = 0 // Update the renter metadata. addr := e.Address() endHeight := e.EndHeight() id := w.renter.mu.Lock() uw.file.mu.Lock() contract, exists := uw.file.contracts[w.contractID] if !exists { contract = fileContract{ ID: w.contractID, IP: addr, WindowStart: endHeight, } } contract.Pieces = append(contract.Pieces, pieceData{ Chunk: uw.chunkID.index, Piece: uw.pieceIndex, MerkleRoot: root, }) uw.file.contracts[w.contractID] = contract w.renter.saveFile(uw.file) uw.file.mu.Unlock() w.renter.mu.Unlock(id) go func() { select { case uw.resultChan <- finishedUpload{uw.chunkID, root, err, uw.pieceIndex, w.contractID}: case <-w.renter.tg.StopChan(): } }() } // work will perform one unit of work, exiting early if there is a kill signal // given before work is completed. func (w *worker) work() { // Check for priority downloads. select { case d := <-w.priorityDownloadChan: w.download(d) return default: // do nothing } // Check for standard downloads. select { case d := <-w.downloadChan: w.download(d) return default: // do nothing } // None of the priority channels have work, listen on all channels. select { case d := <-w.downloadChan: w.download(d) return case <-w.killChan: return case d := <-w.priorityDownloadChan: w.download(d) return case u := <-w.uploadChan: w.upload(u) return case <-w.renter.tg.StopChan(): return } } // threadedWorkLoop repeatedly issues work to a worker, stopping when the // thread group is closed. func (w *worker) threadedWorkLoop() { for { // Check if the worker has been killed individually. select { case <-w.killChan: return default: // do nothing } if w.renter.tg.Add() != nil { return } w.work() w.renter.tg.Done() } } // updateWorkerPool will grab the set of contracts from the contractor and // update the worker pool to match. func (r *Renter) updateWorkerPool(contracts []modules.RenterContract) { // Get a map of all the contracts in the contractor. newContracts := make(map[types.FileContractID]modules.RenterContract) for _, nc := range contracts { newContracts[nc.ID] = nc } // Add a worker for any contract that does not already have a worker. for id, contract := range newContracts { _, exists := r.workerPool[id] if !exists { worker := &worker{ contract: contract, contractID: id, downloadChan: make(chan downloadWork, 1), killChan: make(chan struct{}), priorityDownloadChan: make(chan downloadWork, 1), uploadChan: make(chan uploadWork, 1), renter: r, } r.workerPool[id] = worker go worker.threadedWorkLoop() } } // Remove a worker for any worker that is not in the set of new contracts. 
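// Closing killChan signals the worker's threadedWorkLoop (and a work() call // waiting for jobs) to exit.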
for id, worker := range r.workerPool { _, exists := newContracts[id] if !exists { delete(r.workerPool, id) close(worker.killChan) } } } Sia-1.3.0/modules/renter_test.go000066400000000000000000000121171313565667000166020ustar00rootroot00000000000000package modules import ( "encoding/json" "os" "path/filepath" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/fastrand" ) // TestMerkleRootSetCompatibility checks that the persist encoding for the // MerkleRootSet type is compatible with the previous encoding for the data, // which was a slice of type crypto.Hash. func TestMerkleRootSetCompatibility(t *testing.T) { if testing.Short() { t.SkipNow() } // Create some fake headers for the files. meta := persist.Metadata{ Header: "Test Header", Version: "1.1.1", } // Try multiple sizes of array. for i := 0; i < 10; i++ { // Create a []crypto.Hash of length i. type chStruct struct { Hashes []crypto.Hash } var chs chStruct for j := 0; j < i; j++ { var ch crypto.Hash fastrand.Read(ch[:]) chs.Hashes = append(chs.Hashes, ch) } // Save and load, check that they are the same. dir := build.TempDir("modules", t.Name()) err := os.MkdirAll(dir, 0700) if err != nil { t.Fatal(err) } filename := filepath.Join(dir, "file") err = persist.SaveJSON(meta, chs, filename) if err != nil { t.Fatal(err) } // Load and verify equivalence. var loadCHS chStruct err = persist.LoadJSON(meta, &loadCHS, filename) if err != nil { t.Fatal(err) } if len(chs.Hashes) != len(loadCHS.Hashes) { t.Fatal("arrays should be the same size") } for j := range chs.Hashes { if chs.Hashes[j] != loadCHS.Hashes[j] { t.Error("loading failed", i, j) } } // Load into MerkleRootSet and verify equivalence. type mrStruct struct { Hashes MerkleRootSet } var loadMRS mrStruct err = persist.LoadJSON(meta, &loadMRS, filename) if err != nil { t.Fatal(err) } if len(chs.Hashes) != len(loadMRS.Hashes) { t.Fatal("arrays should be the same size") } for j := range chs.Hashes { if chs.Hashes[j] != loadMRS.Hashes[j] { t.Error("loading failed", i, j) } } // Save as a MerkleRootSet and verify it can be loaded again. var mrs mrStruct mrs.Hashes = MerkleRootSet(chs.Hashes) err = persist.SaveJSON(meta, mrs, filename) if err != nil { t.Fatal(err) } err = persist.LoadJSON(meta, &loadMRS, filename) if err != nil { t.Fatal(err) } if len(mrs.Hashes) != len(loadMRS.Hashes) { t.Fatal("arrays should be the same size") } for j := range mrs.Hashes { if mrs.Hashes[j] != loadMRS.Hashes[j] { t.Error("loading failed", i, j) } } } } // BenchmarkMerkleRootSetEncode clocks how fast large MerkleRootSets can be // encoded and written to disk. func BenchmarkMerkleRootSetEncode(b *testing.B) { // Create a []crypto.Hash of length i. type chStruct struct { Hashes MerkleRootSet } var chs chStruct for i := 0; i < 1e3; i++ { var ch crypto.Hash fastrand.Read(ch[:]) chs.Hashes = append(chs.Hashes, ch) } b.ResetTimer() for i := 0; i < b.N; i++ { _, err := json.Marshal(chs) if err != nil { b.Fatal(err) } } } // BenchmarkSliceCryptoHashEncode clocks how fast large []crypto.Hashes can be // encoded and written to disk. func BenchmarkSliceCryptoHashEncode(b *testing.B) { // Create a []crypto.Hash of length i. 
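// (the length here is fixed at 1e3 entries, matching the other benchmarks)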
type chStruct struct { Hashes []crypto.Hash } var chs chStruct for i := 0; i < 1e3; i++ { var ch crypto.Hash fastrand.Read(ch[:]) chs.Hashes = append(chs.Hashes, ch) } b.ResetTimer() for i := 0; i < b.N; i++ { _, err := json.Marshal(chs) if err != nil { b.Fatal(err) } } } // BenchmarkMerkleRootSetSave clocks how fast large MerkleRootSets can be // encoded and written to disk. func BenchmarkMerkleRootSetSave(b *testing.B) { // Create some fake headers for the files. meta := persist.Metadata{ Header: "Bench Header", Version: "1.1.1", } // Create a MerkleRootSet of length 1e3. type chStruct struct { Hashes MerkleRootSet } var chs chStruct for i := 0; i < 1e3; i++ { var ch crypto.Hash fastrand.Read(ch[:]) chs.Hashes = append(chs.Hashes, ch) } // Save through the persist. dir := build.TempDir("modules", "BenchmarkMerkleRootSetSave") err := os.MkdirAll(dir, 0700) if err != nil { b.Fatal(err) } filename := filepath.Join(dir, "file") b.ResetTimer() for i := 0; i < b.N; i++ { err = persist.SaveJSON(meta, chs, filename) if err != nil { b.Fatal(err) } } } // BenchmarkSliceCryptoHashSave clocks how fast large []crypto.Hashes can be // encoded and written to disk. func BenchmarkSliceCryptoHashSave(b *testing.B) { // Create some fake headers for the files. meta := persist.Metadata{ Header: "Bench Header", Version: "1.1.1", } // Create a []crypto.Hash of length 1e3. type chStruct struct { Hashes []crypto.Hash } var chs chStruct for i := 0; i < 1e3; i++ { var ch crypto.Hash fastrand.Read(ch[:]) chs.Hashes = append(chs.Hashes, ch) } // Save through the persist. dir := build.TempDir("modules", "BenchmarkSliceCryptoHashSave") err := os.MkdirAll(dir, 0700) if err != nil { b.Fatal(err) } filename := filepath.Join(dir, "file") b.ResetTimer() for i := 0; i < b.N; i++ { err = persist.SaveJSON(meta, chs, filename) if err != nil { b.Fatal(err) } } } Sia-1.3.0/modules/storagemanager.go000066400000000000000000000134101313565667000172400ustar00rootroot00000000000000package modules import ( "github.com/NebulousLabs/Sia/crypto" ) const ( // ContractManagerDir is the standard name used for the directory that // contains all files directly related to the contract manager. ContractManagerDir = "contractmanager" // StorageManagerDir is the standard name used for the directory that contains // all of the storage manager files. StorageManagerDir = "storagemanager" ) type ( // StorageFolderMetadata contains metadata about a storage folder that is // tracked by the storage folder manager. StorageFolderMetadata struct { Capacity uint64 `json:"capacity"` // bytes CapacityRemaining uint64 `json:"capacityremaining"` // bytes Index uint16 `json:"index"` Path string `json:"path"` // Below are statistics about the filesystem. FailedReads and // FailedWrites are only incremented if the filesystem is returning // errors when operations are being performed. A large number of // FailedWrites can indicate that more space has been allocated on a // drive than is physically available. A high number of failures can // also indicate disk trouble. FailedReads uint64 `json:"failedreads"` FailedWrites uint64 `json:"failedwrites"` SuccessfulReads uint64 `json:"successfulreads"` SuccessfulWrites uint64 `json:"successfulwrites"` // Certain operations on a storage folder can take a long time (Add, // Remove, and Resize). The fields below indicate the progress of any // long running operations that might be under way in the storage // folder. Progress is always reported in bytes.
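//
// Aside (illustrative sketch, not part of the original file): a caller can
// turn the two progress fields below into a percentage, guarding against a
// zero denominator while no long-running operation is active:
//
//	func progressPct(m StorageFolderMetadata) float64 {
//		if m.ProgressDenominator == 0 {
//			return 0
//		}
//		return 100 * float64(m.ProgressNumerator) / float64(m.ProgressDenominator)
//	}
//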
ProgressNumerator uint64 ProgressDenominator uint64 } // A StorageManager is responsible for managing storage folders and // sectors. Sectors are the base unit of storage that gets moved between // renters and hosts, and primarily is stored on the hosts. StorageManager interface { // AddSector will add a sector to the storage manager. If the sector // already exists, a virtual sector will be added, meaning that the // 'sectorData' will be ignored and no new disk space will be consumed. // The expiry height is used to track what height the sector can be // safely deleted at, though typically the host will manually delete // the sector before the expiry height. The same sector can be added // multiple times at different expiry heights, and the storage manager // is expected to only store the data once. AddSector(sectorRoot crypto.Hash, sectorData []byte) error // AddSectorBatch is a performance optimization over AddSector when // adding a bunch of virtual sectors. It is necessary because otherwise // potentially thousands or even tens-of-thousands of fsync calls would // need to be made in serial, which would prevent renters from ever // successfully renewing. AddSectorBatch(sectorRoots []crypto.Hash) error // AddStorageFolder adds a storage folder to the manager. The manager // may not check that there is enough space available on-disk to // support as much storage as requested, though the manager should // gracefully handle running out of storage unexpectedly. AddStorageFolder(path string, size uint64) error // The storage manager needs to be able to shut down. Close() error // DeleteSector deletes a sector, meaning that the manager will be // unable to upload that sector and be unable to provide a storage // proof on that sector. DeleteSector is for removing the data // entirely, and will remove instances of the sector appearing at all // heights. The primary purpose of DeleteSector is to comply with legal // requests to remove data. DeleteSector(sectorRoot crypto.Hash) error // ReadSector will read a sector from the storage manager, returning the // bytes that match the input sector root. ReadSector(sectorRoot crypto.Hash) ([]byte, error) // RemoveSector will remove a sector from the storage manager. The // height at which the sector expires should be provided, so that the // auto-expiry information for that sector can be properly updated. RemoveSector(sectorRoot crypto.Hash) error // RemoveSectorBatch is a non-ACID performance optimization to remove a // ton of sectors from the storage manager all at once. This is // necessary when clearing out an entire contract from the host. RemoveSectorBatch(sectorRoots []crypto.Hash) error // RemoveStorageFolder will remove a storage folder from the manager. // All storage on the folder will be moved to other storage folders, // meaning that no data will be lost. If the manager is unable to save // data, an error will be returned and the operation will be stopped. If // the force flag is set to true, errors will be ignored and the remove // operation will be completed, meaning that data will be lost. RemoveStorageFolder(index uint16, force bool) error // ResetStorageFolderHealth will reset the health statistics on a // storage folder. ResetStorageFolderHealth(index uint16) error // ResizeStorageFolder will grow or shrink a storage folder in the // manager. The manager may not check that there is enough space // on-disk to support growing the storage folder, but should gracefully // handle running out of space unexpectedly. 
When shrinking a storage // folder, any data in the folder that needs to be moved will be placed // into other storage folders, meaning that no data will be lost. If // the manager is unable to migrate the data, an error will be returned // and the operation will be stopped. If the force flag is set to true, // errors will be ignored and the resize operation completed, meaning // that data will be lost. ResizeStorageFolder(index uint16, newSize uint64, force bool) error // StorageFolders will return a list of storage folders tracked by the // manager. StorageFolders() []StorageFolderMetadata } ) Sia-1.3.0/modules/transactionpool.go000066400000000000000000000154211313565667000174640ustar00rootroot00000000000000package modules import ( "errors" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/types" ) const ( // TransactionSizeLimit defines the size of the largest transaction that // will be accepted by the transaction pool according to the IsStandard // rules. TransactionSizeLimit = 32e3 // TransactionSetSizeLimit defines the largest set of dependent unconfirmed // transactions that will be accepted by the transaction pool. TransactionSetSizeLimit = 250e3 ) var ( // ErrDuplicateTransactionSet is the error that gets returned if a // duplicate transaction set is given to the transaction pool. ErrDuplicateTransactionSet = errors.New("transaction set contains only duplicate transactions") // ErrLargeTransaction is the error that gets returned if a transaction // provided to the transaction pool is larger than what is allowed by the // IsStandard rules. ErrLargeTransaction = errors.New("transaction is too large for this transaction pool") // ErrLargeTransactionSet is the error that gets returned if a transaction // set given to the transaction pool is larger than the limit placed by the // IsStandard rules of the transaction pool. ErrLargeTransactionSet = errors.New("transaction set is too large for this transaction pool") // ErrInvalidArbPrefix is the error that gets returned if a transaction is // submitted to the transaction pool which contains a prefix that is not // recognized. This helps prevent miners on old versions from mining // potentially illegal transactions in the event of a soft-fork. ErrInvalidArbPrefix = errors.New("transaction contains non-standard arbitrary data") // PrefixNonSia defines the prefix that should be appended to any // transactions that use the arbitrary data for reasons outside of the // standard Sia protocol. This will prevent these transactions from being // rejected by the IsStandard set of rules, but also means that the data // will never be used within the formal Sia protocol. PrefixNonSia = types.Specifier{'N', 'o', 'n', 'S', 'i', 'a'} // TransactionPoolDir is the name of the directory that is used to store // the transaction pool's persistent data. TransactionPoolDir = "transactionpool" ) type ( // ConsensusConflict implements the error interface, and indicates that a // transaction was rejected due to being incompatible with the current // consensus set, meaning either a double spend or a consensus rule violation - // it is unlikely that the transaction will ever be valid. ConsensusConflict string // TransactionSetID is a type-safe wrapper for a crypto.Hash that represents // the ID of an entire transaction set. TransactionSetID crypto.Hash // A TransactionPoolDiff indicates the adding or removal of a transaction set to // the transaction pool. 
The transactions in the pool are not persisted, so at // startup modules should assume an empty transaction pool. TransactionPoolDiff struct { AppliedTransactions []*UnconfirmedTransactionSet RevertedTransactions []TransactionSetID } // UnconfirmedTransactionSet defines a new unconfirmed transaction that has // been added to the transaction pool. ID is the ID of the set, IDs contains // an ID for each transaction, eliminating the need to recompute them (because // that's an expensive operation). UnconfirmedTransactionSet struct { Change *ConsensusChange ID TransactionSetID IDs []types.TransactionID Sizes []uint64 Transactions []types.Transaction } ) type ( // A TransactionPoolSubscriber receives updates about the confirmed and // unconfirmed set from the transaction pool. Generally, there is no need to // subscribe to both the consensus set and the transaction pool. TransactionPoolSubscriber interface { // ReceiveUpdatedUnconfirmedTransactions notifies subscribers of a change to the // consensus set and/or unconfirmed set, and includes the consensus change // that would result if all of the transactions made it into a block. ReceiveUpdatedUnconfirmedTransactions(*TransactionPoolDiff) } // A TransactionPool manages unconfirmed transactions. TransactionPool interface { // AcceptTransactionSet accepts a set of potentially interdependent // transactions. AcceptTransactionSet([]types.Transaction) error // Broadcast broadcasts a transaction set to all of the transaction pool's // peers. Broadcast(ts []types.Transaction) // Close is necessary for clean shutdown (e.g. during testing). Close() error // FeeEstimation returns an estimation for how high the transaction fee // needs to be per byte. The minimum recommended targets getting accepted // in ~3 blocks, and the maximum recommended targets getting accepted // immediately. Taking the average has a moderate chance of being accepted // within one block. The minimum has a strong chance of getting accepted // within 10 blocks. FeeEstimation() (minimumRecommended, maximumRecommended types.Currency) // PurgeTransactionPool is a temporary function available to the miner. In // the event that a miner mines an unacceptable block, the transaction pool // will be purged to clear out the transaction pool and get rid of the // illegal transaction. This should never happen; however, there are bugs // that make this condition necessary. PurgeTransactionPool() // TransactionList returns a list of all transactions in the transaction // pool. The transactions are provided in an order that can acceptably be // put into a block. TransactionList() []types.Transaction // TransactionPoolSubscribe adds a subscriber to the transaction pool. // Subscribers will receive all consensus set changes as well as // transaction pool changes, and should not subscribe to both. TransactionPoolSubscribe(TransactionPoolSubscriber) // Transaction returns the transaction and unconfirmed parents // corresponding to the provided transaction id. Transaction(id types.TransactionID) (txn types.Transaction, unconfirmedParents []types.Transaction, exists bool) // Unsubscribe removes a subscriber from the transaction pool. // This is necessary for clean shutdown of the miner. Unsubscribe(TransactionPoolSubscriber) } ) // NewConsensusConflict returns a consensus conflict, which implements the // error interface.
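//
// Aside (illustrative sketch, not part of the original file): a caller that
// wants a moderate chance of confirmation within one block can price a
// transaction from the FeeEstimation bounds documented above by averaging the
// two recommendations and scaling by the encoded size (tpool and txn are an
// assumed TransactionPool handle and types.Transaction):
//
//	min, max := tpool.FeeEstimation()
//	perByte := min.Add(max).Div64(2)
//	fee := perByte.Mul64(uint64(len(encoding.Marshal(txn))))
//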
func NewConsensusConflict(s string) ConsensusConflict { return ConsensusConflict("consensus conflict: " + s) } // Error implements the error interface, turning the consensus conflict into an // acceptable error type. func (cc ConsensusConflict) Error() string { return string(cc) } // CalculateFee returns the fee-per-byte of a transaction set. func CalculateFee(ts []types.Transaction) types.Currency { var sum types.Currency for _, t := range ts { for _, fee := range t.MinerFees { sum = sum.Add(fee) } } size := len(encoding.Marshal(ts)) return sum.Div64(uint64(size)) } Sia-1.3.0/modules/transactionpool/000077500000000000000000000000001313565667000171325ustar00rootroot00000000000000Sia-1.3.0/modules/transactionpool/accept.go000066400000000000000000000304661313565667000207310ustar00rootroot00000000000000package transactionpool // TODO: It seems like the transaction pool is not properly detecting conflicts // between a file contract revision and a file contract. import ( "errors" "fmt" "math" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) var ( errObjectConflict = errors.New("transaction set conflicts with an existing transaction set") errFullTransactionPool = errors.New("transaction pool cannot accept more transactions") errLowMinerFees = errors.New("transaction set needs more miner fees to be accepted") errEmptySet = errors.New("transaction set is empty") ) // relatedObjectIDs determines all of the object ids related to a transaction. func relatedObjectIDs(ts []types.Transaction) []ObjectID { oidMap := make(map[ObjectID]struct{}) for _, t := range ts { for _, sci := range t.SiacoinInputs { oidMap[ObjectID(sci.ParentID)] = struct{}{} } for i := range t.SiacoinOutputs { oidMap[ObjectID(t.SiacoinOutputID(uint64(i)))] = struct{}{} } for i := range t.FileContracts { oidMap[ObjectID(t.FileContractID(uint64(i)))] = struct{}{} } for _, fcr := range t.FileContractRevisions { oidMap[ObjectID(fcr.ParentID)] = struct{}{} } for _, sp := range t.StorageProofs { oidMap[ObjectID(sp.ParentID)] = struct{}{} } for _, sfi := range t.SiafundInputs { oidMap[ObjectID(sfi.ParentID)] = struct{}{} } for i := range t.SiafundOutputs { oidMap[ObjectID(t.SiafundOutputID(uint64(i)))] = struct{}{} } } var oids []ObjectID for oid := range oidMap { oids = append(oids, oid) } return oids } // requiredFeesToExtendTpool returns the amount of fees required to extend the // transaction pool to fit another transaction set. The amount returned has the // unit 'currency per byte'. func (tp *TransactionPool) requiredFeesToExtendTpool() types.Currency { // If the transaction pool is nearly empty, it can be extended even if there // are no fees. if tp.transactionListSize < TransactionPoolSizeForFee { return types.ZeroCurrency } // Calculate the fee required to bump out the size of the transaction pool. ratioToTarget := float64(tp.transactionListSize) / TransactionPoolSizeTarget feeFactor := math.Pow(ratioToTarget, TransactionPoolExponentiation) return types.SiacoinPrecision.MulFloat(feeFactor).Div64(1000) // Divide by 1000 to get SC / kb } // checkTransactionSetComposition checks if the transaction set is valid given // the state of the pool. It does not check that each individual transaction // would be legal in the next block, but does check things like miner fees and // IsStandard. 
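//
// Aside (illustrative sketch, not part of the original file): CalculateFee
// above returns a fee density in currency per byte, which can be compared
// directly against the pool's own minimum estimate before submitting a set
// (tpool is an assumed TransactionPool handle):
//
//	density := modules.CalculateFee(ts)
//	minRec, _ := tpool.FeeEstimation()
//	if density.Cmp(minRec) < 0 {
//		// the set is unlikely to be accepted during congestion
//	}
//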
func (tp *TransactionPool) checkTransactionSetComposition(ts []types.Transaction) (uint64, error) { // Check that the transaction set is not already known. setID := TransactionSetID(crypto.HashObject(ts)) _, exists := tp.transactionSets[setID] if exists { return 0, modules.ErrDuplicateTransactionSet } // All checks after this are expensive. // // TODO: There is no DoS prevention mechanism in place to prevent repeated // expensive verifications of invalid transactions that are created on the // fly. // Check that all transactions follow 'Standard.md' guidelines. setSize, err := isStandardTransactionSet(ts) if err != nil { return 0, err } return setSize, nil } // handleConflicts detects whether the conflicts in the transaction pool are // legal children of the new transaction pool set or not. func (tp *TransactionPool) handleConflicts(ts []types.Transaction, conflicts []TransactionSetID, txnFn func([]types.Transaction) (modules.ConsensusChange, error)) error { // Create a list of all the transaction ids that compose the set of // conflicts. conflictMap := make(map[types.TransactionID]TransactionSetID) for _, conflict := range conflicts { conflictSet := tp.transactionSets[conflict] for _, conflictTxn := range conflictSet { conflictMap[conflictTxn.ID()] = conflict } } // Discard all duplicate transactions from the input transaction set. var dedupSet []types.Transaction for _, t := range ts { _, exists := conflictMap[t.ID()] if exists { continue } dedupSet = append(dedupSet, t) } if len(dedupSet) == 0 { return modules.ErrDuplicateTransactionSet } // If transactions were pruned, it's possible that the set of // dependencies/conflicts has also reduced. To minimize computational load // on the consensus set, we want to prune out all of the conflicts that are // no longer relevant. As an example, consider the transaction set {A}, the // set {B}, and the new set {A, C}, where C is dependent on B. {A} and {B} // are both conflicts, but after deduplication {A} is no longer a conflict. // This is recursive, but it is guaranteed to run only once as the first // deduplication is guaranteed to be complete. if len(dedupSet) < len(ts) { oids := relatedObjectIDs(dedupSet) var conflicts []TransactionSetID for _, oid := range oids { conflict, exists := tp.knownObjects[oid] if exists { conflicts = append(conflicts, conflict) } } return tp.handleConflicts(dedupSet, conflicts, txnFn) } // Merge all of the conflict sets with the input set (input set goes last // to preserve dependency ordering), and see if the set as a whole is both // small enough to be legal and valid as a set. If no, return an error. If // yes, add the new set to the pool, and eliminate the old set. The output // diff objects can be repeated, (no need to remove those). Just need to // remove the conflicts from tp.transactionSets. var superset []types.Transaction supersetMap := make(map[TransactionSetID]struct{}) for _, conflict := range conflictMap { supersetMap[conflict] = struct{}{} } for conflict := range supersetMap { superset = append(superset, tp.transactionSets[conflict]...) } superset = append(superset, dedupSet...) // Check the composition of the transaction set, including fees and // IsStandard rules (this is a new set, the rules must be rechecked). setSize, err := tp.checkTransactionSetComposition(superset) if err != nil { return err } // Check that the transaction set has enough fees to justify adding it to // the transaction list. 
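//
// Aside (illustrative worked example, not part of the original file): with
// TransactionPoolExponentiation = 3 and TransactionPoolSizeTarget = 3e6, a
// pool already holding 6e6 bytes has ratioToTarget = 2, so the per-byte
// requirement from requiredFeesToExtendTpool is
//
//	types.SiacoinPrecision.MulFloat(math.Pow(2, 3)).Div64(1000)
//
// i.e. 8 SC per 1000 bytes, and a 2 kB set would need roughly 16 SC in total
// miner fees to be admitted.
//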
requiredFees := tp.requiredFeesToExtendTpool().Mul64(setSize) var setFees types.Currency for _, txn := range superset { for _, fee := range txn.MinerFees { setFees = setFees.Add(fee) } } if requiredFees.Cmp(setFees) > 0 { // TODO: check if there is an existing set with lower fees that we can // kick out. return errLowMinerFees } // Check that the transaction set is valid. cc, err := txnFn(superset) if err != nil { return modules.NewConsensusConflict("provided transaction set has prereqs, but is still invalid: " + err.Error()) } // Remove the conflicts from the transaction pool. for conflict := range supersetMap { conflictSet := tp.transactionSets[conflict] tp.transactionListSize -= len(encoding.Marshal(conflictSet)) delete(tp.transactionSets, conflict) delete(tp.transactionSetDiffs, conflict) } // Add the transaction set to the pool. setID := TransactionSetID(crypto.HashObject(superset)) tp.transactionSets[setID] = superset for _, diff := range cc.SiacoinOutputDiffs { tp.knownObjects[ObjectID(diff.ID)] = setID } for _, diff := range cc.FileContractDiffs { tp.knownObjects[ObjectID(diff.ID)] = setID } for _, diff := range cc.SiafundOutputDiffs { tp.knownObjects[ObjectID(diff.ID)] = setID } tp.transactionSetDiffs[setID] = &cc tsetSize := len(encoding.Marshal(superset)) tp.transactionListSize += tsetSize // debug logging if build.DEBUG { txLogs := "" for i, t := range superset { txLogs += fmt.Sprintf("superset transaction %v size: %vB\n", i, len(encoding.Marshal(t))) } tp.log.Debugf("accepted transaction superset %v, size: %vB\ntpool size is %vB after accepting transaction superset\ntransactions: \n%v\n", setID, tsetSize, tp.transactionListSize, txLogs) } return nil } // acceptTransactionSet verifies that a transaction set is allowed to be in the // transaction pool, and then adds it to the transaction pool. func (tp *TransactionPool) acceptTransactionSet(ts []types.Transaction, txnFn func([]types.Transaction) (modules.ConsensusChange, error)) error { if len(ts) == 0 { return errEmptySet } // Remove from the set any transactions that have already been confirmed. oldTS := ts ts = []types.Transaction{} for _, txn := range oldTS { if !tp.transactionConfirmed(tp.dbTx, txn.ID()) { ts = append(ts, txn) } } // If no transactions remain, return a duplicate error. if len(ts) == 0 { return modules.ErrDuplicateTransactionSet } // Check the composition of the transaction set. setSize, err := tp.checkTransactionSetComposition(ts) if err != nil { return err } // Check that the transaction set has enough fees to justify adding it to // the transaction list. requiredFees := tp.requiredFeesToExtendTpool().Mul64(setSize) var setFees types.Currency for _, txn := range ts { for _, fee := range txn.MinerFees { setFees = setFees.Add(fee) } } if requiredFees.Cmp(setFees) > 0 { // TODO: check if there is an existing set with lower fees that we can // kick out. return errLowMinerFees } // Check for conflicts with other transactions, which would indicate a // double-spend. Legal children of a transaction set will also trigger the // conflict-detector.
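//
// Aside (illustrative note, not part of the original file): the conflict check
// below works because knownObjects maps every object id that an accepted set
// touches (siacoin outputs, file contracts, siafund outputs, storage proof
// parents) back to its TransactionSetID, so a double-spend surfaces as a
// second set resolving to an id that is already claimed.
//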
oids := relatedObjectIDs(ts) var conflicts []TransactionSetID for _, oid := range oids { conflict, exists := tp.knownObjects[oid] if exists { conflicts = append(conflicts, conflict) } } if len(conflicts) > 0 { return tp.handleConflicts(ts, conflicts, txnFn) } cc, err := txnFn(ts) if err != nil { return modules.NewConsensusConflict("provided transaction set is standalone and invalid: " + err.Error()) } // Add the transaction set to the pool. setID := TransactionSetID(crypto.HashObject(ts)) tp.transactionSets[setID] = ts for _, oid := range oids { tp.knownObjects[oid] = setID } tp.transactionSetDiffs[setID] = &cc tsetSize := len(encoding.Marshal(ts)) tp.transactionListSize += tsetSize for _, txn := range ts { if _, exists := tp.transactionHeights[txn.ID()]; !exists { tp.transactionHeights[txn.ID()] = tp.blockHeight } } // debug logging if build.DEBUG { txLogs := "" for i, t := range ts { txLogs += fmt.Sprintf("transaction %v size: %vB\n", i, len(encoding.Marshal(t))) } tp.log.Debugf("accepted transaction set %v, size: %vB\ntpool size is %vB after accepting transaction set\ntransactions: \n%v\n", setID, tsetSize, tp.transactionListSize, txLogs) } return nil } // AcceptTransactionSet adds a set of transactions to the unconfirmed set of // transactions. If the set is accepted, it will be relayed to // connected peers. // // TODO: Break into component sets when the set gets accepted. func (tp *TransactionPool) AcceptTransactionSet(ts []types.Transaction) error { // assert on the consensus set to get the special method cs, ok := tp.consensusSet.(interface { LockedTryTransactionSet(fn func(func(txns []types.Transaction) (modules.ConsensusChange, error)) error) error }) if !ok { return errors.New("consensus set does not support LockedTryTransactionSet method") } return cs.LockedTryTransactionSet(func(txnFn func(txns []types.Transaction) (modules.ConsensusChange, error)) error { tp.mu.Lock() defer tp.mu.Unlock() err := tp.acceptTransactionSet(ts, txnFn) if err != nil { return err } go tp.gateway.Broadcast("RelayTransactionSet", ts, tp.gateway.Peers()) // Notify subscribers of an accepted transaction set. tp.updateSubscribersTransactions() return nil }) } // relayTransactionSet is an RPC that accepts a transaction set from a peer. If // the accept is successful, the transaction will be relayed to the gateway's // other peers. func (tp *TransactionPool) relayTransactionSet(conn modules.PeerConn) error { err := conn.SetDeadline(time.Now().Add(relayTransactionSetTimeout)) if err != nil { return err } // Automatically close the connection when tg.Stop() is called. finishedChan := make(chan struct{}) defer close(finishedChan) go func() { select { case <-tp.tg.StopChan(): case <-finishedChan: } conn.Close() }() var ts []types.Transaction err = encoding.ReadObject(conn, &ts, types.BlockSizeLimit) if err != nil { return err } return tp.AcceptTransactionSet(ts) } Sia-1.3.0/modules/transactionpool/accept_test.go000066400000000000000000000530451313565667000217660ustar00rootroot00000000000000package transactionpool import ( "testing" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) // TestAcceptTransactionSet probes the AcceptTransactionSet method // of the transaction pool. func TestAcceptTransactionSet(t *testing.T) { // Create a transaction pool tester. tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() // Check that the transaction pool is empty.
if len(tpt.tpool.transactionSets) != 0 { t.Error("transaction pool is not empty") } // Create a valid transaction set using the wallet. txns, err := tpt.wallet.SendSiacoins(types.NewCurrency64(100), types.UnlockHash{}) if err != nil { t.Fatal(err) } if len(tpt.tpool.transactionSets) != 1 { t.Error("sending coins did not increase the transaction sets by 1") } // Submit the transaction set again to trigger a duplication error. err = tpt.tpool.AcceptTransactionSet(txns) if err != modules.ErrDuplicateTransactionSet { t.Error(err) } // Mine a block and check that the transaction pool gets emptied. block, _ := tpt.miner.FindBlock() err = tpt.cs.AcceptBlock(block) if err != nil { t.Fatal(err) } if len(tpt.tpool.TransactionList()) != 0 { t.Error("transaction pool was not emptied after mining a block") } // Try to resubmit the transaction set to verify that it is now rejected, // since its transactions have been confirmed. err = tpt.tpool.AcceptTransactionSet(txns) if err == nil { t.Error("transaction set was supposed to be rejected") } } // TestConflictingTransactionSets tries to add two transaction sets // to the transaction pool that are each legal individually, but double spend // an output. func TestConflictingTransactionSets(t *testing.T) { if testing.Short() { t.SkipNow() } // Create a transaction pool tester. tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() // Fund a partial transaction. fund := types.NewCurrency64(30e6) txnBuilder := tpt.wallet.StartTransaction() err = txnBuilder.FundSiacoins(fund) if err != nil { t.Fatal(err) } // wholeTransaction is set to false so that we can use the same signature // to create a double spend. txnSet, err := txnBuilder.Sign(false) if err != nil { t.Fatal(err) } txnSetDoubleSpend := make([]types.Transaction, len(txnSet)) copy(txnSetDoubleSpend, txnSet) // There are now two sets of transactions that are signed and ready to // spend the same output. Have one spend the money in a miner fee, and the // other create a siacoin output. txnIndex := len(txnSet) - 1 txnSet[txnIndex].MinerFees = append(txnSet[txnIndex].MinerFees, fund) txnSetDoubleSpend[txnIndex].SiacoinOutputs = append(txnSetDoubleSpend[txnIndex].SiacoinOutputs, types.SiacoinOutput{Value: fund}) // Add the first and then the second txn set. err = tpt.tpool.AcceptTransactionSet(txnSet) if err != nil { t.Error(err) } err = tpt.tpool.AcceptTransactionSet(txnSetDoubleSpend) if err == nil { t.Error("transaction should not have passed inspection") } // Purge and try the sets in the reverse order. tpt.tpool.PurgeTransactionPool() err = tpt.tpool.AcceptTransactionSet(txnSetDoubleSpend) if err != nil { t.Error(err) } err = tpt.tpool.AcceptTransactionSet(txnSet) if err == nil { t.Error("transaction should not have passed inspection") } } // TestCheckMinerFees probes the checkMinerFees method of the // transaction pool. func TestCheckMinerFees(t *testing.T) { if testing.Short() { t.SkipNow() } // Create a transaction pool tester. tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() // Prepare a bunch of outputs for a series of graphs to fill up the // transaction pool. graphLens := 200 // 40 kb per graph numGraphs := (int(TransactionPoolSizeTarget) * 4 / 3) / (graphLens * 206) // 206 is the size of a single input-output graph txn.
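//
// Aside (illustrative arithmetic, not part of the original file): with
// TransactionPoolSizeTarget = 3e6, the expression above works out to
// (3e6 * 4 / 3) / (200 * 206) = 4e6 / 41200 = 97 graphs of roughly 41 kB
// each, i.e. enough to overshoot the pool's target size by about a third.
//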
graphFund := types.SiacoinPrecision.Mul64(1000) var outputs []types.SiacoinOutput for i := 0; i < numGraphs+1; i++ { outputs = append(outputs, types.SiacoinOutput{ UnlockHash: types.UnlockConditions{}.UnlockHash(), Value: graphFund, }) } txns, err := tpt.wallet.SendSiacoinsMulti(outputs) if err != nil { t.Error(err) } // Mine the graph setup in the consensus set so that the graph outputs are // distinct transaction sets. _, err = tpt.miner.AddBlock() if err != nil { t.Fatal(err) } // Recommended fees at this point should be the minimum. minRec, maxRec := tpt.tpool.FeeEstimation() if minRec.Cmp(minEstimation) < 0 { t.Error("transaction pool is not respecting the sane fee minimum") } if maxRec.Cmp(minEstimation.Mul64(3)) < 0 { t.Error("transaction pool is not respecting the sane fee maximum") } // Fill the transaction pool to the fee limit. for i := 0; i < TransactionPoolSizeForFee/10e3; i++ { arbData := make([]byte, 10e3) copy(arbData, modules.PrefixNonSia[:]) fastrand.Read(arbData[100:116]) // prevents collisions with other transactions in the loop. txn := types.Transaction{ArbitraryData: [][]byte{arbData}} err := tpt.tpool.AcceptTransactionSet([]types.Transaction{txn}) if err != nil { t.Fatal(err) } } // Add another transaction, this one should fail for having too few fees. err = tpt.tpool.AcceptTransactionSet([]types.Transaction{{}}) if err != errLowMinerFees { t.Error(err) } // Add a transaction that has sufficient fees. _, err = tpt.wallet.SendSiacoins(types.SiacoinPrecision.Mul64(50), types.UnlockHash{}) if err != nil { t.Fatal(err) } // Create all of the graphs. finalTxn := txns[len(txns)-1] for i := 0; i < numGraphs; i++ { var edges []types.TransactionGraphEdge for j := 0; j < graphLens; j++ { edges = append(edges, types.TransactionGraphEdge{ Dest: j + 1, Fee: types.SiacoinPrecision, Source: j, Value: graphFund.Sub(types.SiacoinPrecision.Mul64(uint64(j + 1))), }) } graph, err := types.TransactionGraph(finalTxn.SiacoinOutputID(uint64(i)), edges) if err != nil { t.Fatal(err) } err = tpt.tpool.AcceptTransactionSet(graph) if err != nil { t.Fatal(err) } } // Try to submit a transaction with too few fees. source := finalTxn.SiacoinOutputID(uint64(numGraphs)) lowFee := types.SiacoinPrecision.Div64(3) remaining := types.SiacoinPrecision.Mul64(1000).Sub(lowFee) edge := types.TransactionGraphEdge{ Dest: 1, Fee: lowFee, Source: 0, Value: remaining, } lowFeeGraph, err := types.TransactionGraph(source, []types.TransactionGraphEdge{edge}) if err != nil { t.Fatal(err) } err = tpt.tpool.AcceptTransactionSet(lowFeeGraph) if err != errLowMinerFees { t.Fatal(err) } } // TestTransactionGraph checks that the TransactionGraph method of the types // package is able to create transactions that actually validate and can get // inserted into the tpool. func TestTransactionGraph(t *testing.T) { if testing.Short() { t.SkipNow() } // Create a transaction pool tester. tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() // Create a transaction sending money to an output that TransactionGraph can // spend (the empty UnlockConditions). txns, err := tpt.wallet.SendSiacoins(types.SiacoinPrecision.Mul64(100), types.UnlockConditions{}.UnlockHash()) if err != nil { t.Fatal(err) } // Get the output of that transaction.
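//
// Aside (illustrative arithmetic, not part of the original file): a graph
// edge spends its source output completely, so Value plus Fee must equal the
// funding amount; for the edge built below, 90 SC + 10 SC exactly consumes
// the 100 SC output funded above.
//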
graphSourceOutputID := txns[len(txns)-1].SiacoinOutputID(0) edge := types.TransactionGraphEdge{ Dest: 1, Fee: types.SiacoinPrecision.Mul64(10), Source: 0, Value: types.SiacoinPrecision.Mul64(90), } graphTxns, err := types.TransactionGraph(graphSourceOutputID, []types.TransactionGraphEdge{edge}) if err != nil { t.Fatal(err) } if len(graphTxns) != 1 { t.Fatal("wrong number of transactions produced") } err = tpt.tpool.AcceptTransactionSet(graphTxns) if err != nil { t.Fatal(err) } } // TestTransactionGraphDiamond checks that the TransactionGraph method of the // types package is able to create transactions that actually validate and can // get inserted into the tpool. func TestTransactionGraphDiamond(t *testing.T) { if testing.Short() { t.SkipNow() } // Create a transaction pool tester. tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() // Create a transaction sending money to an output that TransactionGraph can // spend (the empty UnlockConditions). txns, err := tpt.wallet.SendSiacoins(types.SiacoinPrecision.Mul64(100), types.UnlockConditions{}.UnlockHash()) if err != nil { t.Fatal(err) } // Get the output of that transaction. graphSourceOutputID := txns[len(txns)-1].SiacoinOutputID(0) var edges []types.TransactionGraphEdge sources := []int{0, 0, 1, 2} dests := []int{1, 2, 3, 3} values := []uint64{40, 40, 30, 30} fees := []uint64{10, 10, 10, 10} for i := range sources { edges = append(edges, types.TransactionGraphEdge{ Dest: dests[i], Fee: types.SiacoinPrecision.Mul64(fees[i]), Source: sources[i], Value: types.SiacoinPrecision.Mul64(values[i]), }) } graphTxns, err := types.TransactionGraph(graphSourceOutputID, edges) if err != nil { t.Fatal(err) } if len(graphTxns) != 3 { t.Fatal("wrong number of transactions produced") } err = tpt.tpool.AcceptTransactionSet(graphTxns) if err != nil { t.Fatal(err) } } // TestTransactionSuperset submits a single transaction to the network, // followed by a transaction set containing that single transaction. func TestTransactionSuperset(t *testing.T) { if testing.Short() { t.SkipNow() } // Create a transaction pool tester. tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() // Fund a partial transaction. fund := types.NewCurrency64(30e6) txnBuilder := tpt.wallet.StartTransaction() err = txnBuilder.FundSiacoins(fund) if err != nil { t.Fatal(err) } txnBuilder.AddMinerFee(fund) // wholeTransaction is set to false so that we can use the same signature // to create a double spend. txnSet, err := txnBuilder.Sign(false) if err != nil { t.Fatal(err) } if len(txnSet) <= 1 { t.Fatal("test is invalid unless the transaction set has two or more transactions") } // Check that the second transaction is dependent on the first. err = tpt.tpool.AcceptTransactionSet(txnSet[1:]) if err == nil { t.Fatal("transaction set must have dependent transactions") } // Submit the first transaction in the set to the transaction pool, and // then the superset. err = tpt.tpool.AcceptTransactionSet(txnSet[:1]) if err != nil { t.Fatal("first transaction in the transaction set was not valid?") } err = tpt.tpool.AcceptTransactionSet(txnSet) if err != nil { t.Fatal("super setting is not working:", err) } // Try resubmitting the individual transaction and the superset; a // duplication error should be returned for each case.
err = tpt.tpool.AcceptTransactionSet(txnSet[:1]) if err != modules.ErrDuplicateTransactionSet { t.Fatal(err) } err = tpt.tpool.AcceptTransactionSet(txnSet) if err != modules.ErrDuplicateTransactionSet { t.Fatal("super setting is not working:", err) } } // TestTransactionSubset submits a transaction set to the network, followed by // just a subset, expecting ErrDuplicateTransactionSet as a response. func TestTransactionSubset(t *testing.T) { if testing.Short() { t.SkipNow() } // Create a transaction pool tester. tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() // Fund a partial transaction. fund := types.NewCurrency64(30e6) txnBuilder := tpt.wallet.StartTransaction() err = txnBuilder.FundSiacoins(fund) if err != nil { t.Fatal(err) } txnBuilder.AddMinerFee(fund) // wholeTransaction is set to false so that we can use the same signature // to create a double spend. txnSet, err := txnBuilder.Sign(false) if err != nil { t.Fatal(err) } if len(txnSet) <= 1 { t.Fatal("test is invalid unless the transaction set has two or more transactions") } // Check that the second transaction is dependent on the first. err = tpt.tpool.AcceptTransactionSet(txnSet[1:]) if err == nil { t.Fatal("transaction set must have dependent transactions") } // Submit the set to the pool, followed by just the first transaction. err = tpt.tpool.AcceptTransactionSet(txnSet) if err != nil { t.Fatal("super setting is not working:", err) } err = tpt.tpool.AcceptTransactionSet(txnSet[:1]) if err != modules.ErrDuplicateTransactionSet { t.Fatal(err) } } // TestTransactionChild submits a single transaction to the network, // followed by a child transaction. func TestTransactionChild(t *testing.T) { if testing.Short() { t.SkipNow() } // Create a transaction pool tester. tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() // Fund a partial transaction. fund := types.NewCurrency64(30e6) txnBuilder := tpt.wallet.StartTransaction() err = txnBuilder.FundSiacoins(fund) if err != nil { t.Fatal(err) } txnBuilder.AddMinerFee(fund) // wholeTransaction is set to false so that we can use the same signature // to create a double spend. txnSet, err := txnBuilder.Sign(false) if err != nil { t.Fatal(err) } if len(txnSet) <= 1 { t.Fatal("test is invalid unless the transaction set has two or more transactions") } // Check that the second transaction is dependent on the first. err = tpt.tpool.AcceptTransactionSet([]types.Transaction{txnSet[1]}) if err == nil { t.Fatal("transaction set must have dependent transactions") } // Submit the first transaction in the set to the transaction pool. err = tpt.tpool.AcceptTransactionSet(txnSet[:1]) if err != nil { t.Fatal("first transaction in the transaction set was not valid?") } err = tpt.tpool.AcceptTransactionSet(txnSet[1:]) if err != nil { t.Fatal("child transaction not seen as valid") } } // TestNilAccept tries submitting a nil transaction set and a 0-len // transaction set to the transaction pool.
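//
// Aside (illustrative sketch, not part of the original file): together, the
// superset and subset tests above pin down the dedup contract of
// AcceptTransactionSet; with tp an assumed pool handle and set a dependent
// transaction set, the expected sequence of results is:
//
//	_ = tp.AcceptTransactionSet(set[:1]) // nil: new prefix is accepted
//	_ = tp.AcceptTransactionSet(set)     // nil: superset extends the prefix
//	_ = tp.AcceptTransactionSet(set[:1]) // modules.ErrDuplicateTransactionSet
//	_ = tp.AcceptTransactionSet(set)     // modules.ErrDuplicateTransactionSet
//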
func TestNilAccept(t *testing.T) { if testing.Short() { t.SkipNow() } tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() err = tpt.tpool.AcceptTransactionSet(nil) if err == nil { t.Error("no error returned when submitting nothing to the transaction pool") } err = tpt.tpool.AcceptTransactionSet([]types.Transaction{}) if err == nil { t.Error("no error returned when submitting nothing to the transaction pool") } } // TestAcceptFCAndConflictingRevision checks that the transaction pool // correctly accepts a file contract in a transaction set followed by a correct // revision to that file contract in a following transaction set, with no // block separating them. func TestAcceptFCAndConflictingRevision(t *testing.T) { if testing.Short() { t.SkipNow() } tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() // Create and fund a valid file contract. builder := tpt.wallet.StartTransaction() payout := types.NewCurrency64(1e9) err = builder.FundSiacoins(payout) if err != nil { t.Fatal(err) } builder.AddFileContract(types.FileContract{ WindowStart: tpt.cs.Height() + 2, WindowEnd: tpt.cs.Height() + 5, Payout: payout, ValidProofOutputs: []types.SiacoinOutput{{Value: types.PostTax(tpt.cs.Height(), payout)}}, MissedProofOutputs: []types.SiacoinOutput{{Value: types.PostTax(tpt.cs.Height(), payout)}}, UnlockHash: types.UnlockConditions{}.UnlockHash(), }) tSet, err := builder.Sign(true) if err != nil { t.Fatal(err) } err = tpt.tpool.AcceptTransactionSet(tSet) if err != nil { t.Fatal(err) } fcid := tSet[len(tSet)-1].FileContractID(0) // Create a file contract revision and submit it. rSet := []types.Transaction{{ FileContractRevisions: []types.FileContractRevision{{ ParentID: fcid, NewRevisionNumber: 2, NewWindowStart: tpt.cs.Height() + 2, NewWindowEnd: tpt.cs.Height() + 5, NewValidProofOutputs: []types.SiacoinOutput{{Value: types.PostTax(tpt.cs.Height(), payout)}}, NewMissedProofOutputs: []types.SiacoinOutput{{Value: types.PostTax(tpt.cs.Height(), payout)}}, }}, }} err = tpt.tpool.AcceptTransactionSet(rSet) if err != nil { t.Fatal(err) } } // TestPartialConfirmation checks that the transaction pool correctly accepts a // transaction set whose parents have been accepted by the consensus set even // though the whole set has not. func TestPartialConfirmation(t *testing.T) { if testing.Short() { t.SkipNow() } tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() // Create and fund a valid file contract. builder := tpt.wallet.StartTransaction() payout := types.NewCurrency64(1e9) err = builder.FundSiacoins(payout) if err != nil { t.Fatal(err) } builder.AddFileContract(types.FileContract{ WindowStart: tpt.cs.Height() + 2, WindowEnd: tpt.cs.Height() + 5, Payout: payout, ValidProofOutputs: []types.SiacoinOutput{{Value: types.PostTax(tpt.cs.Height(), payout)}}, MissedProofOutputs: []types.SiacoinOutput{{Value: types.PostTax(tpt.cs.Height(), payout)}}, UnlockHash: types.UnlockConditions{}.UnlockHash(), }) tSet, err := builder.Sign(true) if err != nil { t.Fatal(err) } fcid := tSet[len(tSet)-1].FileContractID(0) // Create a file contract revision.
rSet := []types.Transaction{{ FileContractRevisions: []types.FileContractRevision{{ ParentID: fcid, NewRevisionNumber: 2, NewWindowStart: tpt.cs.Height() + 2, NewWindowEnd: tpt.cs.Height() + 5, NewValidProofOutputs: []types.SiacoinOutput{{Value: types.PostTax(tpt.cs.Height(), payout)}}, NewMissedProofOutputs: []types.SiacoinOutput{{Value: types.PostTax(tpt.cs.Height(), payout)}}, }}, }} // Combine the contract and revision into a single set. fullSet := append(tSet, rSet...) // Get the tSet onto the blockchain. unsolvedBlock, target, err := tpt.miner.BlockForWork() if err != nil { t.Fatal(err) } unsolvedBlock.Transactions = append(unsolvedBlock.Transactions, tSet...) solvedBlock, solved := tpt.miner.SolveBlock(unsolvedBlock, target) if !solved { t.Fatal("Failed to solve block") } err = tpt.cs.AcceptBlock(solvedBlock) if err != nil { t.Fatal(err) } // Try to get the full set into the transaction pool. The transaction pool // should recognize that the set is partially accepted, and be able to // accept only the transactions that are new and are not yet on the // blockchain. err = tpt.tpool.AcceptTransactionSet(fullSet) if err != nil { t.Fatal(err) } } // TestPartialConfirmationWeave checks that the transaction pool correctly // accepts a transaction set whose parents have been accepted by the consensus // set even though the whole set has not, this time weaving the dependencies, // such that the first transaction is not in the consensus set, the second is, // and the third has both as dependencies. func TestPartialConfirmationWeave(t *testing.T) { if testing.Short() { t.SkipNow() } tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() // Create a transaction with a single output to a fully controlled address. emptyUH := types.UnlockConditions{}.UnlockHash() builder1 := tpt.wallet.StartTransaction() funding1 := types.NewCurrency64(1e9) err = builder1.FundSiacoins(funding1) if err != nil { t.Fatal(err) } scOutput1 := types.SiacoinOutput{ Value: funding1, UnlockHash: emptyUH, } i1 := builder1.AddSiacoinOutput(scOutput1) tSet1, err := builder1.Sign(true) if err != nil { t.Fatal(err) } // Submit to the transaction pool and mine the block, to minimize // complexity. err = tpt.tpool.AcceptTransactionSet(tSet1) if err != nil { t.Fatal(err) } _, err = tpt.miner.AddBlock() if err != nil { t.Fatal(err) } // Create a second output to the fully controlled address, to fund the // second transaction in the weave. builder2 := tpt.wallet.StartTransaction() funding2 := types.NewCurrency64(2e9) err = builder2.FundSiacoins(funding2) if err != nil { t.Fatal(err) } scOutput2 := types.SiacoinOutput{ Value: funding2, UnlockHash: emptyUH, } i2 := builder2.AddSiacoinOutput(scOutput2) tSet2, err := builder2.Sign(true) if err != nil { t.Fatal(err) } // Submit to the transaction pool and mine the block, to minimize // complexity. err = tpt.tpool.AcceptTransactionSet(tSet2) if err != nil { t.Fatal(err) } _, err = tpt.miner.AddBlock() if err != nil { t.Fatal(err) } // Create a passthrough transaction for output1 and output2, so that they // can be used as unconfirmed dependencies.
txn1 := types.Transaction{ SiacoinInputs: []types.SiacoinInput{{ ParentID: tSet1[len(tSet1)-1].SiacoinOutputID(i1), }}, SiacoinOutputs: []types.SiacoinOutput{{ Value: funding1, UnlockHash: emptyUH, }}, } txn2 := types.Transaction{ SiacoinInputs: []types.SiacoinInput{{ ParentID: tSet2[len(tSet2)-1].SiacoinOutputID(i2), }}, SiacoinOutputs: []types.SiacoinOutput{{ Value: funding2, UnlockHash: emptyUH, }}, } // Create a child transaction that depends on inputs from both txn1 and // txn2. child := types.Transaction{ SiacoinInputs: []types.SiacoinInput{ { ParentID: txn1.SiacoinOutputID(0), }, { ParentID: txn2.SiacoinOutputID(0), }, }, SiacoinOutputs: []types.SiacoinOutput{{ Value: funding1.Add(funding2), }}, } // Get txn2 accepted into the consensus set. err = tpt.tpool.AcceptTransactionSet([]types.Transaction{txn2}) if err != nil { t.Fatal(err) } _, err = tpt.miner.AddBlock() if err != nil { t.Fatal(err) } // Try to get the set of txn1, txn2, and child accepted into the // transaction pool. err = tpt.tpool.AcceptTransactionSet([]types.Transaction{txn1, txn2, child}) if err != nil { t.Fatal(err) } } Sia-1.3.0/modules/transactionpool/consts.go000066400000000000000000000055441313565667000210020ustar00rootroot00000000000000package transactionpool import ( "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" ) // Consts related to the persisting structures of the transaction pool. const ( dbFilename = "transactionpool.db" logFile = "transactionpool.log" ) // Constants related to the size and ease-of-entry of the transaction pool. const ( // maxTxnAge determines the maximum age of a transaction (in block height) // allowed before the transaction is pruned from the transaction pool. maxTxnAge = types.BlockHeight(24) // TransactionPoolExponentiation defines the polynomial rate of growth // required to keep putting transactions into the transaction pool. If the // exponentiation is 2, then doubling the size of the transaction pool // requires quadrupling the fees of the transactions being added. A higher // number makes it harder for the transaction pool to grow beyond its // default size during times of congestion. TransactionPoolExponentiation = 3 // TransactionPoolSizeTarget defines the target size of the pool when the // transactions are paying 1 SC / kb in fees. TransactionPoolSizeTarget = 3e6 // TransactionPoolSizeForFee defines how large the transaction pool needs to // be before it starts expecting fees to be on the transaction. This initial // limit is to help the network grow and provide some wiggle room for // wallets that are not yet able to operate via a fee market. TransactionPoolSizeForFee = 500e3 ) // Constants related to fee estimation. const ( // blockFeeEstimationDepth defines how far backwards in the blockchain the // fee estimator looks when using blocks to figure out the appropriate fees // to add to transactions. blockFeeEstimationDepth = 6 // maxMultiplier defines the general gap between the maximum recommended fee // and the minimum recommended fee. maxMultiplier = 3 // minExtendMultiplier defines the amount we multiply into the minimum // amount required to extend the fee pool when coming up with a min fee // recommendation. minExtendMultiplier = 1.2 ) // Variables related to the persisting structures of the transaction pool. var ( dbMetadata = persist.Metadata{ Header: "Sia Transaction Pool DB", Version: "0.6.0", } ) // Variables related to the size and ease-of-entry of the transaction pool.
var ( // minEstimation defines a sane minimum fee per byte for transactions. This // will typically be only suggested as a fee in the absence of congestion. minEstimation = types.SiacoinPrecision.Div64(100).Div64(1e3) ) // Variables related to propagating transactions through the network. var ( // relayTransactionSetTimeout establishes the timeout for a relay // transaction set call. relayTransactionSetTimeout = build.Select(build.Var{ Standard: 3 * time.Minute, Dev: 20 * time.Second, Testing: 3 * time.Second, }).(time.Duration) ) Sia-1.3.0/modules/transactionpool/database.go000066400000000000000000000104161313565667000212270ustar00rootroot00000000000000package transactionpool import ( "encoding/json" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) // database.go contains objects related to the layout of the transaction pool's // database, as well as getters and setters. Logic for interacting with the // database can be found in persist.go. // Buckets in the database. var ( // bucketBlockHeight holds the most recent block height seen by the // transaction pool. bucketBlockHeight = []byte("BlockHeight") // bucketConfirmedTransactions holds the ids of every transaction that has // been confirmed on the blockchain. bucketConfirmedTransactions = []byte("ConfirmedTransactions") // bucketFeeMedian stores all of the persist data relating to the fee // median. bucketFeeMedian = []byte("FeeMedian") // bucketRecentConsensusChange holds the most recent consensus change seen // by the transaction pool. bucketRecentConsensusChange = []byte("RecentConsensusChange") ) // Explicitly named fields in the database. var ( // fieldRecentConsensusChange is the field in bucketRecentConsensusChange // that holds the value of the most recent consensus change. fieldRecentConsensusChange = []byte("RecentConsensusChange") // fieldBlockHeight is the field in bucketBlockHeight that holds the value of // the most recent block height. fieldBlockHeight = []byte("BlockHeight") // fieldFeeMedian is the fee median persist data stored in a fee median // field. fieldFeeMedian = []byte("FeeMedian") ) // Complex objects that get stored in database fields. type ( // medianPersist is the JSON object that gets stored in the database so that // the transaction pool can persist its block-based fee estimations. medianPersist struct { RecentMedians []types.Currency RecentMedianFee types.Currency } ) // deleteTransaction deletes a transaction from the list of confirmed // transactions. func (tp *TransactionPool) deleteTransaction(tx *bolt.Tx, id types.TransactionID) error { return tx.Bucket(bucketConfirmedTransactions).Delete(id[:]) } // getBlockHeight returns the most recent block height from the database. func (tp *TransactionPool) getBlockHeight(tx *bolt.Tx) (bh types.BlockHeight, err error) { err = encoding.Unmarshal(tx.Bucket(bucketBlockHeight).Get(fieldBlockHeight), &bh) return } // getFeeMedian will get the fee median struct stored in the database.
func (tp *TransactionPool) getFeeMedian(tx *bolt.Tx) (medianPersist, error) { medianBytes := tx.Bucket(bucketFeeMedian).Get(fieldFeeMedian) if medianBytes == nil { return medianPersist{}, errNilFeeMedian } var mp medianPersist err := json.Unmarshal(medianBytes, &mp) if err != nil { return medianPersist{}, build.ExtendErr("unable to unmarshal median data:", err) } return mp, nil } // getRecentConsensusChange returns the most recent consensus change from the // database. func (tp *TransactionPool) getRecentConsensusChange(tx *bolt.Tx) (cc modules.ConsensusChangeID, err error) { ccBytes := tx.Bucket(bucketRecentConsensusChange).Get(fieldRecentConsensusChange) if ccBytes == nil { return modules.ConsensusChangeID{}, errNilConsensusChange } copy(cc[:], ccBytes) return cc, nil } // putBlockHeight updates the transaction pool's block height. func (tp *TransactionPool) putBlockHeight(tx *bolt.Tx, height types.BlockHeight) error { tp.blockHeight = height return tx.Bucket(bucketBlockHeight).Put(fieldBlockHeight, encoding.Marshal(height)) } // putFeeMedian puts a median fees object into the database. func (tp *TransactionPool) putFeeMedian(tx *bolt.Tx, mp medianPersist) error { objBytes, err := json.Marshal(mp) if err != nil { return err } return tx.Bucket(bucketFeeMedian).Put(fieldFeeMedian, objBytes) } // putRecentConsensusChange updates the most recent consensus change seen by // the transaction pool. func (tp *TransactionPool) putRecentConsensusChange(tx *bolt.Tx, cc modules.ConsensusChangeID) error { return tx.Bucket(bucketRecentConsensusChange).Put(fieldRecentConsensusChange, cc[:]) } // putTransaction adds a transaction to the list of confirmed transactions. func (tp *TransactionPool) putTransaction(tx *bolt.Tx, id types.TransactionID) error { return tx.Bucket(bucketConfirmedTransactions).Put(id[:], []byte{}) } Sia-1.3.0/modules/transactionpool/persist.go000066400000000000000000000141251313565667000211550ustar00rootroot00000000000000package transactionpool import ( "errors" "fmt" "os" "path/filepath" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) const tpoolSyncRate = time.Minute * 2 var ( // errNilConsensusChange is returned if there is no consensus change in the // database. errNilConsensusChange = errors.New("no consensus change found") // errNilFeeMedian is the error returned if the database does not contain fee // median persistence. errNilFeeMedian = errors.New("no fee median found") ) // threadedRegularSync will make sure that sync gets called on the database // every once in a while. func (tp *TransactionPool) threadedRegularSync() { if err := tp.tg.Add(); err != nil { return } defer tp.tg.Done() for { select { case <-tp.tg.StopChan(): // A queued AfterStop will close out the db properly. return case <-time.After(tpoolSyncRate): tp.mu.Lock() tp.syncDB() tp.mu.Unlock() } } } // syncDB commits the current global transaction and immediately begins a new // one. func (tp *TransactionPool) syncDB() { // Commit the existing tx. err := tp.dbTx.Commit() if err != nil { tp.log.Severe("ERROR: failed to apply database update:", err) tp.dbTx.Rollback() } tp.dbTx, err = tp.db.Begin(true) if err != nil { tp.log.Severe("ERROR: failed to initialize a db transaction:", err) } } // resetDB deletes all consensus-related persistence from the transaction pool.
func (tp *TransactionPool) resetDB(tx *bolt.Tx) error { err := tx.DeleteBucket(bucketConfirmedTransactions) if err != nil { return err } err = tp.putRecentConsensusChange(tx, modules.ConsensusChangeBeginning) if err != nil { return err } err = tp.putBlockHeight(tx, types.BlockHeight(0)) if err != nil { return err } _, err = tx.CreateBucket(bucketConfirmedTransactions) return err } // initPersist creates buckets in the database. func (tp *TransactionPool) initPersist() error { // Create the persist directory if it does not yet exist. err := os.MkdirAll(tp.persistDir, 0700) if err != nil { return err } // Create the tpool logger. tp.log, err = persist.NewFileLogger(filepath.Join(tp.persistDir, logFile)) if err != nil { return build.ExtendErr("unable to initialize the transaction pool logger", err) } tp.tg.AfterStop(func() { err := tp.log.Close() if err != nil { fmt.Println("Unable to close the transaction pool logger:", err) } }) // Open the database file. tp.db, err = persist.OpenDatabase(dbMetadata, filepath.Join(tp.persistDir, dbFilename)) if err != nil { return err } tp.tg.AfterStop(func() { err := tp.db.Close() if err != nil { tp.log.Println("Error while closing transaction pool database:", err) } }) // Create the global tpool tx that will be used for most persist actions. tp.dbTx, err = tp.db.Begin(true) if err != nil { return build.ExtendErr("unable to begin tpool dbTx", err) } tp.tg.AfterStop(func() { err := tp.dbTx.Commit() if err != nil { tp.log.Println("Unable to close transaction properly during shutdown:", err) } }) // Spin up the thread that occasionally synchronizes the database. go tp.threadedRegularSync() // Create the database and get the most recent consensus change. var cc modules.ConsensusChangeID // Create the database buckets. buckets := [][]byte{ bucketBlockHeight, bucketRecentConsensusChange, bucketConfirmedTransactions, bucketFeeMedian, } for _, bucket := range buckets { _, err := tp.dbTx.CreateBucketIfNotExists(bucket) if err != nil { return build.ExtendErr("unable to create the tpool buckets", err) } } // Get the recent consensus change. cc, err = tp.getRecentConsensusChange(tp.dbTx) if err == errNilConsensusChange { err = tp.putRecentConsensusChange(tp.dbTx, modules.ConsensusChangeBeginning) } if err != nil { return build.ExtendErr("unable to initialize the recent consensus change in the tpool", err) } // Get the most recent block height. bh, err := tp.getBlockHeight(tp.dbTx) if err != nil { tp.log.Println("Block height is reporting as zero, setting up to subscribe from the beginning.") err = tp.putBlockHeight(tp.dbTx, types.BlockHeight(0)) if err != nil { return build.ExtendErr("unable to initialize the block height in the tpool", err) } err = tp.putRecentConsensusChange(tp.dbTx, modules.ConsensusChangeBeginning) } else { tp.log.Debugln("Transaction pool is loading from height:", bh) tp.blockHeight = bh } if err != nil { return build.ExtendErr("unable to initialize the block height in the tpool", err) } // Get the fee median data. mp, err := tp.getFeeMedian(tp.dbTx) if err != nil && err != errNilFeeMedian { return build.ExtendErr("unable to load the fee median", err) } // Just leave the fields empty if no fee median was found. They will be // filled out. if err != errNilFeeMedian { tp.recentMedians = mp.RecentMedians tp.recentMedianFee = mp.RecentMedianFee } // Subscribe to the consensus set using the most recent consensus change.
err = tp.consensusSet.ConsensusSetSubscribe(tp, cc) if err == modules.ErrInvalidConsensusChangeID { tp.log.Println("Invalid consensus change loaded; resetting. This can take a while.") // Reset and rescan because the consensus set does not recognize the // provided consensus change id. resetErr := tp.resetDB(tp.dbTx) if resetErr != nil { return resetErr } freshScanErr := tp.consensusSet.ConsensusSetSubscribe(tp, modules.ConsensusChangeBeginning) if freshScanErr != nil { return freshScanErr } tp.tg.OnStop(func() { tp.consensusSet.Unsubscribe(tp) }) return nil } if err != nil { return err } tp.tg.OnStop(func() { tp.consensusSet.Unsubscribe(tp) }) return nil } // transactionConfirmed returns true if the transaction has been confirmed on // the blockchain and false otherwise. func (tp *TransactionPool) transactionConfirmed(tx *bolt.Tx, id types.TransactionID) bool { return tx.Bucket(bucketConfirmedTransactions).Get(id[:]) != nil } Sia-1.3.0/modules/transactionpool/persist_test.go000066400000000000000000000047271313565667000222170ustar00rootroot00000000000000package transactionpool import ( "os" "path/filepath" "testing" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) // TestRescan triggers a rescan in the transaction pool, verifying that the // rescan code does not cause deadlocks or crashes. func TestRescan(t *testing.T) { if testing.Short() { t.SkipNow() } tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() // Create a valid transaction set using the wallet. txns, err := tpt.wallet.SendSiacoins(types.NewCurrency64(100), types.UnlockHash{}) if err != nil { t.Fatal(err) } if len(tpt.tpool.transactionSets) != 1 { t.Error("sending coins did not increase the transaction sets by 1") } // Mine the transaction into a block, so that it's in the consensus set. _, err = tpt.miner.AddBlock() if err != nil { t.Fatal(err) } // Close the tpool, delete the persistence, then restart the tpool. The // tpool should still recognize the transaction set as a duplicate. persistDir := tpt.tpool.persistDir err = tpt.tpool.Close() if err != nil { t.Fatal(err) } err = os.RemoveAll(persistDir) if err != nil { t.Fatal(err) } tpt.tpool, err = New(tpt.cs, tpt.gateway, persistDir) if err != nil { t.Fatal(err) } err = tpt.tpool.AcceptTransactionSet(txns) if err != modules.ErrDuplicateTransactionSet { t.Fatal("expecting modules.ErrDuplicateTransactionSet, got:", err) } // Close the tpool, corrupt the database, then restart the tpool. The tpool // should still recognize the transaction set as a duplicate. err = tpt.tpool.Close() if err != nil { t.Fatal(err) } db, err := persist.OpenDatabase(dbMetadata, filepath.Join(persistDir, dbFilename)) if err != nil { t.Fatal(err) } err = db.Update(func(tx *bolt.Tx) error { ccBytes := tx.Bucket(bucketRecentConsensusChange).Get(fieldRecentConsensusChange) // copy the bytes due to bolt's mmap.
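// NOTE: bolt's Get returns a slice that points directly into the database's // memory-mapped file; it is only valid for the lifetime of the transaction // and must not be modified in place. Mutating ccBytes directly could corrupt // the database file itself, so the bytes are copied into a fresh slice // before the deliberate corruption below.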
newCCBytes := make([]byte, len(ccBytes)) copy(newCCBytes, ccBytes) newCCBytes[0]++ return tx.Bucket(bucketRecentConsensusChange).Put(fieldRecentConsensusChange, newCCBytes) }) if err != nil { t.Fatal(err) } err = db.Close() if err != nil { t.Fatal(err) } tpt.tpool, err = New(tpt.cs, tpt.gateway, persistDir) if err != nil { t.Fatal(err) } err = tpt.tpool.AcceptTransactionSet(txns) if err != modules.ErrDuplicateTransactionSet { t.Fatal("expecting modules.ErrDuplicateTransactionSet, got:", err) } } Sia-1.3.0/modules/transactionpool/standard.go000066400000000000000000000124311313565667000212620ustar00rootroot00000000000000package transactionpool import ( "errors" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // standard.go adds extra rules to transactions which help preserve network // health and provides flexibility for future soft forks and tweaks to the // network. // // Rule: Transaction size is limited // There is a DoS vector where large transactions can both contain many // signatures, and have each signature's CoveredFields object cover a // unique but large portion of the transaction. A 1 MB transaction could // force a verifier to hash very large volumes of data, which takes a long // time on nonspecialized hardware. // // Rule: Foreign signature algorithms are rejected. // There are plans to add newer, faster signature algorithms to Sia as the // project matures and the need for increased verification speed grows. // Foreign signatures are allowed into the blockchain, where they are // accepted as valid. However, if there has been a soft-fork, the foreign // signatures might actually be invalid. This rule protects legacy miners // from including potentially invalid transactions in their blocks. // // Rule: The types of allowed arbitrary data are limited // The arbitrary data field can be used to orchestrate soft-forks to Sia // that add features. Legacy miners are at risk of creating invalid blocks // if they include arbitrary data which has meanings that the legacy miner // doesn't understand. // // Rule: The transaction set size is limited. // A group of dependent transactions cannot exceed 100kb to limit how // quickly the transaction pool can be filled with new transactions. // checkUnlockConditions looks at the UnlockConditions and verifies that all // public keys are recognized. Unrecognized public keys are automatically // accepted as valid by the consensus set, but rejected by the transaction // pool. This allows new types of keys to be added via a softfork without // alienating all of the older nodes. func checkUnlockConditions(uc types.UnlockConditions) error { for _, pk := range uc.PublicKeys { if pk.Algorithm != types.SignatureEntropy && pk.Algorithm != types.SignatureEd25519 { return errors.New("unrecognized key type in transaction") } } return nil } // isStandardTransaction enforces extra rules such as a transaction size limit. // These rules can be altered without disrupting consensus. // // The size of the transaction is returned so that the transaction does not need // to be encoded multiple times. func isStandardTransaction(t types.Transaction) (uint64, error) { // Check that the size of the transaction does not exceed the standard // established in Standard.md. Larger transactions are a DoS vector, // because someone can fill a large transaction with a bunch of signatures // that require hashing the entire transaction. Several hundred megabytes // of hashing can be required of a verifier.
Enforcing this rule makes it // more difficult for attackers to exploit this DoS vector, though a miner // with sufficient power could still create unfriendly blocks. tlen := len(encoding.Marshal(t)) if tlen > modules.TransactionSizeLimit { return 0, modules.ErrLargeTransaction } // Check that all public keys are of a recognized type. Need to check all // of the UnlockConditions, which currently can appear in 3 separate fields // of the transaction. Unrecognized types are ignored because a softfork // may make certain unrecognized signatures invalid, and this node cannot // tell which signatures are the invalid ones. for _, sci := range t.SiacoinInputs { err := checkUnlockConditions(sci.UnlockConditions) if err != nil { return 0, err } } for _, fcr := range t.FileContractRevisions { err := checkUnlockConditions(fcr.UnlockConditions) if err != nil { return 0, err } } for _, sfi := range t.SiafundInputs { err := checkUnlockConditions(sfi.UnlockConditions) if err != nil { return 0, err } } // Check that all arbitrary data is prefixed using the recognized set of // prefixes. The allowed prefixes include a 'NonSia' prefix for truly // arbitrary data. Blocking all other prefixes allows arbitrary data to be // used to orchestrate more complicated soft forks in the future without // putting older nodes at risk of violating the new rules. var prefix types.Specifier for _, arb := range t.ArbitraryData { // Check for a whitelisted prefix. copy(prefix[:], arb) if prefix == modules.PrefixHostAnnouncement || prefix == modules.PrefixNonSia { continue } return 0, modules.ErrInvalidArbPrefix } return uint64(tlen), nil } // isStandardTransactionSet checks that all transactions of a set follow the // IsStandard guidelines, and that the set as a whole follows the guidelines as // well. // // The size of the transaction set is returned so that the encoding only needs // to happen once. func isStandardTransactionSet(ts []types.Transaction) (uint64, error) { // Check that each transaction is acceptable, while also making sure that // the size of the whole set is legal. var totalSize uint64 for i := range ts { tSize, err := isStandardTransaction(ts[i]) if err != nil { return 0, err } totalSize += tSize if totalSize > modules.TransactionSetSizeLimit { return 0, modules.ErrLargeTransactionSet } } return totalSize, nil } Sia-1.3.0/modules/transactionpool/standard_test.go000066400000000000000000000025701313565667000223240ustar00rootroot00000000000000package transactionpool import ( "testing" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) // TestIntegrationLargeTransactions tries to add a large transaction to the // transaction pool. func TestIntegrationLargeTransactions(t *testing.T) { if testing.Short() { t.SkipNow() } tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() // Create a large transaction and try to get it accepted. arbData := make([]byte, modules.TransactionSizeLimit) copy(arbData, modules.PrefixNonSia[:]) fastrand.Read(arbData[100:116]) // prevents collisions with other transactions in the loop. txn := types.Transaction{ArbitraryData: [][]byte{arbData}} err = tpt.tpool.AcceptTransactionSet([]types.Transaction{txn}) if err != modules.ErrLargeTransaction { t.Fatal(err) } // Create a large transaction set and try to get it accepted.
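// Each transaction below carries 10e3 bytes of arbitrary data, and the loop // runs TransactionSetSizeLimit/10e3 + 1 times (note the <=), so the // resulting set is guaranteed to exceed the set size limit even before // accounting for encoding overhead.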
var tset []types.Transaction for i := 0; i <= modules.TransactionSetSizeLimit/10e3; i++ { arbData := make([]byte, 10e3) copy(arbData, modules.PrefixNonSia[:]) fastrand.Read(arbData[100:116]) // prevents collisions with other transactions in the loop. txn := types.Transaction{ArbitraryData: [][]byte{arbData}} tset = append(tset, txn) } err = tpt.tpool.AcceptTransactionSet(tset) if err != modules.ErrLargeTransactionSet { t.Fatal(err) } } Sia-1.3.0/modules/transactionpool/subscribe.go000066400000000000000000000067701313565667000214520ustar00rootroot00000000000000package transactionpool import ( "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // updateSubscribersTransactions sends a new transaction pool update to all // subscribers. func (tp *TransactionPool) updateSubscribersTransactions() { diff := new(modules.TransactionPoolDiff) // Create all of the diffs for reverted sets. for id := range tp.subscriberSets { // The transaction set is still in the transaction pool, no need to // create an update. _, exists := tp.transactionSets[id] if exists { continue } // Report that this set has been removed. Negative diffs don't have all // fields filled out. diff.RevertedTransactions = append(diff.RevertedTransactions, modules.TransactionSetID(id)) } // Clear the subscriber sets map. for _, revert := range diff.RevertedTransactions { delete(tp.subscriberSets, TransactionSetID(revert)) } // Create all of the diffs for sets that have been recently created. for id, set := range tp.transactionSets { _, exists := tp.subscriberSets[id] if exists { // The transaction set has already been sent in an update. continue } // Report that this transaction set is new to the transaction pool. ids := make([]types.TransactionID, 0, len(set)) sizes := make([]uint64, 0, len(set)) for i := range set { encodedTxn := encoding.Marshal(set[i]) sizes = append(sizes, uint64(len(encodedTxn))) ids = append(ids, set[i].ID()) } ut := &modules.UnconfirmedTransactionSet{ Change: tp.transactionSetDiffs[id], ID: modules.TransactionSetID(id), IDs: ids, Sizes: sizes, Transactions: set, } // Add this diff to our set of subscriber diffs. tp.subscriberSets[id] = ut diff.AppliedTransactions = append(diff.AppliedTransactions, ut) } for _, subscriber := range tp.subscribers { subscriber.ReceiveUpdatedUnconfirmedTransactions(diff) } } // TransactionPoolSubscribe adds a subscriber to the transaction pool. // Subscribers will receive the full transaction set every time there is a // significant change to the transaction pool. func (tp *TransactionPool) TransactionPoolSubscribe(subscriber modules.TransactionPoolSubscriber) { tp.mu.Lock() defer tp.mu.Unlock() // Check that this subscriber is not already subscribed. for _, s := range tp.subscribers { if s == subscriber { build.Critical("refusing to double-subscribe subscriber") } } // Add the subscriber to the subscriber list. tp.subscribers = append(tp.subscribers, subscriber) // Send the new subscriber the transaction pool set. diff := new(modules.TransactionPoolDiff) diff.AppliedTransactions = make([]*modules.UnconfirmedTransactionSet, 0, len(tp.subscriberSets)) for _, ut := range tp.subscriberSets { diff.AppliedTransactions = append(diff.AppliedTransactions, ut) } subscriber.ReceiveUpdatedUnconfirmedTransactions(diff) } // Unsubscribe removes a subscriber from the transaction pool. If the // subscriber is not in tp.subscribers, Unsubscribe does nothing. If the
subscriber occurs more than once in tp.subscribers, only the earliest // occurrence is removed, so a double-subscribed subscriber effectively // remains subscribed. func (tp *TransactionPool) Unsubscribe(subscriber modules.TransactionPoolSubscriber) { tp.mu.Lock() defer tp.mu.Unlock() // Search for and remove subscriber from list of subscribers. for i := range tp.subscribers { if tp.subscribers[i] == subscriber { tp.subscribers = append(tp.subscribers[0:i], tp.subscribers[i+1:]...) break } } } Sia-1.3.0/modules/transactionpool/subscribe_test.go000066400000000000000000000051121313565667000225010ustar00rootroot00000000000000package transactionpool import ( "testing" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // mockSubscriber receives transactions from the transaction pool it is // subscribed to, retaining them in the order they were received. type mockSubscriber struct { txnMap map[modules.TransactionSetID][]types.Transaction txns []types.Transaction } // ReceiveUpdatedUnconfirmedTransactions receives transactions from the // transaction pool and stores them in the order they were received. // This method allows *mockSubscriber to satisfy the // modules.TransactionPoolSubscriber interface. func (ms *mockSubscriber) ReceiveUpdatedUnconfirmedTransactions(diff *modules.TransactionPoolDiff) { for _, revert := range diff.RevertedTransactions { delete(ms.txnMap, revert) } for _, uts := range diff.AppliedTransactions { ms.txnMap[uts.ID] = uts.Transactions } ms.txns = nil for _, txnSet := range ms.txnMap { ms.txns = append(ms.txns, txnSet...) } } // TestSubscription checks that calling Unsubscribe on a mockSubscriber // shortens the list of subscribers to the transaction pool by 1 (doesn't // actually check that the mockSubscriber was the one unsubscribed). func TestSubscription(t *testing.T) { if testing.Short() { t.Skip() } tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() // Check the transaction pool is empty when initialized. if len(tpt.tpool.transactionSets) != 0 { t.Fatal("transaction pool is not empty") } // Create a mock subscriber and subscribe it to the transaction pool. ms := mockSubscriber{ txnMap: make(map[modules.TransactionSetID][]types.Transaction), } tpt.tpool.TransactionPoolSubscribe(&ms) if len(ms.txns) != 0 { t.Fatalf("mock subscriber has received %v transactions; shouldn't have received any yet", len(ms.txns)) } // Create a valid transaction set and check that the mock subscriber's // transaction list is updated.
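// SendSiacoins submits the signed set to the transaction pool, and the pool // pushes a diff to its subscribers before returning, so ms.txns should be // populated by the time the checks below run.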
_, err = tpt.wallet.SendSiacoins(types.NewCurrency64(100), types.UnlockHash{}) if err != nil { t.Fatal(err) } if len(tpt.tpool.transactionSets) != 1 { t.Error("sending coins didn't increase the transaction sets by 1") } numTxns := 0 for _, txnSet := range tpt.tpool.transactionSets { numTxns += len(txnSet) } if len(ms.txns) != numTxns { t.Errorf("mock subscriber should've received %v transactions; received %v instead", numTxns, len(ms.txns)) } numSubscribers := len(tpt.tpool.subscribers) tpt.tpool.Unsubscribe(&ms) if len(tpt.tpool.subscribers) != numSubscribers-1 { t.Error("transaction pool failed to unsubscribe mock subscriber") } } Sia-1.3.0/modules/transactionpool/transactionpool.go000066400000000000000000000203221313565667000226770ustar00rootroot00000000000000package transactionpool import ( "errors" "github.com/NebulousLabs/bolt" "github.com/NebulousLabs/demotemutex" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/sync" "github.com/NebulousLabs/Sia/types" ) var ( errNilCS = errors.New("transaction pool cannot initialize with a nil consensus set") errNilGateway = errors.New("transaction pool cannot initialize with a nil gateway") ) type ( // ObjectIDs are the IDs of objects such as siacoin outputs and file // contracts, and are used to see if there are conflicts or overlaps within // the transaction pool. A TransactionSetID is the hash of a transaction // set. ObjectID crypto.Hash TransactionSetID crypto.Hash // The TransactionPool tracks incoming transactions, accepting them or // rejecting them based on internal criteria such as fees and unconfirmed // double spends. TransactionPool struct { // Dependencies of the transaction pool. consensusSet modules.ConsensusSet gateway modules.Gateway // To prevent double spends in the unconfirmed transaction set, the // transaction pool keeps a list of all objects that have either been // created or consumed by the current unconfirmed transaction pool. All // transactions with overlaps are rejected. This model is // over-aggressive - one transaction set may create an object that // another transaction set spends. This is done to minimize the // computation and memory load on the transaction pool. Dependent // transactions should be lumped into a single transaction set. // // transactionSetDiffs maps from a transaction set id to the set of // diffs that resulted from the transaction set. knownObjects map[ObjectID]TransactionSetID subscriberSets map[TransactionSetID]*modules.UnconfirmedTransactionSet transactionHeights map[types.TransactionID]types.BlockHeight transactionSets map[TransactionSetID][]types.Transaction transactionSetDiffs map[TransactionSetID]*modules.ConsensusChange transactionListSize int // Variables related to the blockchain. blockHeight types.BlockHeight recentMedians []types.Currency recentMedianFee types.Currency // SC per byte // The consensus change index tracks how many consensus changes have // been sent to the transaction pool. When a new subscriber joins the // transaction pool, all prior consensus changes are sent to the new // subscriber. subscribers []modules.TransactionPoolSubscriber // Utilities. db *persist.BoltDatabase dbTx *bolt.Tx log *persist.Logger mu demotemutex.DemoteMutex tg sync.ThreadGroup persistDir string } ) // New creates a transaction pool that is ready to receive transactions.
func New(cs modules.ConsensusSet, g modules.Gateway, persistDir string) (*TransactionPool, error) { // Check that the input modules are non-nil. if cs == nil { return nil, errNilCS } if g == nil { return nil, errNilGateway } // Initialize a transaction pool. tp := &TransactionPool{ consensusSet: cs, gateway: g, knownObjects: make(map[ObjectID]TransactionSetID), subscriberSets: make(map[TransactionSetID]*modules.UnconfirmedTransactionSet), transactionHeights: make(map[types.TransactionID]types.BlockHeight), transactionSets: make(map[TransactionSetID][]types.Transaction), transactionSetDiffs: make(map[TransactionSetID]*modules.ConsensusChange), persistDir: persistDir, } // Initialize the tpool persistence (directory, logger, and database). err := tp.initPersist() if err != nil { return nil, err } // Register RPCs g.RegisterRPC("RelayTransactionSet", tp.relayTransactionSet) tp.tg.OnStop(func() { tp.gateway.UnregisterRPC("RelayTransactionSet") }) return tp, nil } // Close releases any resources held by the transaction pool, stopping all of // its worker threads. func (tp *TransactionPool) Close() error { return tp.tg.Stop() } // FeeEstimation returns an estimation for what fee should be applied to // transactions. func (tp *TransactionPool) FeeEstimation() (min, max types.Currency) { err := tp.tg.Add() if err != nil { return } defer tp.tg.Done() tp.mu.Lock() defer tp.mu.Unlock() // Use three methods to determine an acceptable fee, and then take the // largest result of the three methods. The first method checks the historic // blocks, to make sure that we don't under-estimate the number of fees // needed in the event that we just purged the tpool. // // The second method looks at the existing tpool. Sudden congestion won't be // represented on the blockchain right away, but should be immediately // influencing how you set fees. Using the current tpool fullness will help // pick appropriate fees in the event of sudden congestion. // // The third method just has hardcoded minimums as a sanity check. In the // event of empty blocks, there should still be some fees being added to the // chain. // Set the minimum fee to the numbers recommended by the blockchain. min = tp.recentMedianFee max = tp.recentMedianFee.Mul64(maxMultiplier) // Method two: use 'requiredFeesToExtendTpool'. required := tp.requiredFeesToExtendTpool() requiredMin := required.MulFloat(minExtendMultiplier) // Clear the local requirement by a little bit. requiredMax := requiredMin.MulFloat(maxMultiplier) // Clear the local requirement by a lot. if min.Cmp(requiredMin) < 0 { min = requiredMin } if max.Cmp(requiredMax) < 0 { max = requiredMax } // Method three: sane minimums. if min.Cmp(minEstimation) < 0 { min = minEstimation } if max.Cmp(minEstimation.Mul64(maxMultiplier)) < 0 { max = minEstimation.Mul64(maxMultiplier) } return } // TransactionList returns a list of all transactions in the transaction pool. // The transactions are provided in an order that can acceptably be put into a // block. func (tp *TransactionPool) TransactionList() []types.Transaction { tp.mu.Lock() defer tp.mu.Unlock() var txns []types.Transaction for _, tSet := range tp.transactionSets { txns = append(txns, tSet...) } return txns } // Transaction returns the transaction with the provided txid, its parents, and // a bool indicating if it exists in the transaction pool.
func (tp *TransactionPool) Transaction(id types.TransactionID) (types.Transaction, []types.Transaction, bool) { tp.mu.Lock() defer tp.mu.Unlock() // find the transaction exists := false var txn types.Transaction var allParents []types.Transaction for _, tSet := range tp.transactionSets { for i, t := range tSet { if t.ID() == id { txn = t allParents = tSet[:i] exists = true break } } } // prune unneeded parents parentIDs := make(map[types.OutputID]struct{}) addOutputIDs := func(txn types.Transaction) { for _, input := range txn.SiacoinInputs { parentIDs[types.OutputID(input.ParentID)] = struct{}{} } for _, fcr := range txn.FileContractRevisions { parentIDs[types.OutputID(fcr.ParentID)] = struct{}{} } for _, input := range txn.SiafundInputs { parentIDs[types.OutputID(input.ParentID)] = struct{}{} } for _, proof := range txn.StorageProofs { parentIDs[types.OutputID(proof.ParentID)] = struct{}{} } for _, sig := range txn.TransactionSignatures { parentIDs[types.OutputID(sig.ParentID)] = struct{}{} } } isParent := func(t types.Transaction) bool { for i := range t.SiacoinOutputs { if _, exists := parentIDs[types.OutputID(t.SiacoinOutputID(uint64(i)))]; exists { return true } } for i := range t.FileContracts { if _, exists := parentIDs[types.OutputID(t.FileContractID(uint64(i)))]; exists { return true } } for i := range t.SiafundOutputs { if _, exists := parentIDs[types.OutputID(t.SiafundOutputID(uint64(i)))]; exists { return true } } return false } addOutputIDs(txn) var necessaryParents []types.Transaction for i := len(allParents) - 1; i >= 0; i-- { parent := allParents[i] if isParent(parent) { necessaryParents = append([]types.Transaction{parent}, necessaryParents...) addOutputIDs(parent) } } return txn, necessaryParents, exists } // Broadcast broadcasts a transaction set to all of the transaction pool's // peers. func (tp *TransactionPool) Broadcast(ts []types.Transaction) { go tp.gateway.Broadcast("RelayTransactionSet", ts, tp.gateway.Peers()) } Sia-1.3.0/modules/transactionpool/transactionpool_test.go000066400000000000000000000435201313565667000237430ustar00rootroot00000000000000package transactionpool import ( "path/filepath" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/consensus" "github.com/NebulousLabs/Sia/modules/gateway" "github.com/NebulousLabs/Sia/modules/miner" "github.com/NebulousLabs/Sia/modules/wallet" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) // A tpoolTester is used during testing to initialize a transaction pool and // useful helper modules. type tpoolTester struct { cs modules.ConsensusSet gateway modules.Gateway tpool *TransactionPool miner modules.TestMiner wallet modules.Wallet walletKey crypto.TwofishKey persistDir string } // blankTpoolTester returns a ready-to-use tpool tester, with all modules // initialized, without mining a block. func blankTpoolTester(name string) (*tpoolTester, error) { // Initialize the modules.
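// The modules are constructed in dependency order: the gateway first, then // the consensus set (which needs the gateway), the transaction pool // (consensus set + gateway), the wallet (consensus set + tpool), and finally // the miner (consensus set + tpool + wallet).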
testdir := build.TempDir(modules.TransactionPoolDir, name) g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { return nil, err } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { return nil, err } tp, err := New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { return nil, err } w, err := wallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { return nil, err } var key crypto.TwofishKey fastrand.Read(key[:]) _, err = w.Encrypt(key) if err != nil { return nil, err } err = w.Unlock(key) if err != nil { return nil, err } m, err := miner.New(cs, tp, w, filepath.Join(testdir, modules.MinerDir)) if err != nil { return nil, err } // Assemble all of the objects into a tpoolTester. return &tpoolTester{ cs: cs, gateway: g, tpool: tp, miner: m, wallet: w, walletKey: key, persistDir: testdir, }, nil } // createTpoolTester returns a ready-to-use tpool tester, with all modules // initialized. func createTpoolTester(name string) (*tpoolTester, error) { tpt, err := blankTpoolTester(name) if err != nil { return nil, err } // Mine blocks until there is money in the wallet. for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ { b, _ := tpt.miner.FindBlock() err = tpt.cs.AcceptBlock(b) if err != nil { return nil, err } } return tpt, nil } // Close safely closes the tpoolTester, panicking in the event of an error, // since there isn't a good way to check the error when deferring a Close. func (tpt *tpoolTester) Close() error { errs := []error{ tpt.cs.Close(), tpt.gateway.Close(), tpt.tpool.Close(), tpt.miner.Close(), tpt.wallet.Close(), } if err := build.JoinErrors(errs, "; "); err != nil { panic(err) } return nil } // TestIntegrationNewNilInputs tries to trigger a panic with nil inputs. func TestIntegrationNewNilInputs(t *testing.T) { // Create a gateway and consensus set. testdir := build.TempDir(modules.TransactionPoolDir, t.Name()) g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { t.Fatal(err) } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { t.Fatal(err) } tpDir := filepath.Join(testdir, modules.TransactionPoolDir) // Try all combinations of nil inputs. _, err = New(nil, nil, tpDir) if err == nil { t.Error("expected an error when both the consensus set and gateway are nil") } _, err = New(nil, g, tpDir) if err != errNilCS { t.Error(err) } _, err = New(cs, nil, tpDir) if err != errNilGateway { t.Error(err) } _, err = New(cs, g, tpDir) if err != nil { t.Error(err) } } // TestGetTransaction verifies that the transaction pool's Transaction() method // works correctly. func TestGetTransaction(t *testing.T) { if testing.Short() { t.SkipNow() } tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() value := types.NewCurrency64(35e6) fee := types.NewCurrency64(3e2) emptyUH := types.UnlockConditions{}.UnlockHash() txnBuilder := tpt.wallet.StartTransaction() err = txnBuilder.FundSiacoins(value) if err != nil { t.Fatal(err) } txnBuilder.AddMinerFee(fee) output := types.SiacoinOutput{ Value: value.Sub(fee), UnlockHash: emptyUH, } txnBuilder.AddSiacoinOutput(output) txnSet, err := txnBuilder.Sign(true) if err != nil { t.Fatal(err) } childrenSet := []types.Transaction{{ SiacoinInputs: []types.SiacoinInput{{ ParentID: txnSet[len(txnSet)-1].SiacoinOutputID(0), }}, SiacoinOutputs: []types.SiacoinOutput{{ Value: value.Sub(fee), UnlockHash: emptyUH, }}, }} superSet := append(txnSet, childrenSet...)
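// superSet is submitted as a single set so that the child transaction, which // spends an output of the final wallet transaction, arrives together with // all of its parents; Transaction() can then be asked to recover exactly // those parents.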
err = tpt.tpool.AcceptTransactionSet(superSet) if err != nil { t.Fatal(err) } targetTxn := childrenSet[0] txn, parents, exists := tpt.tpool.Transaction(targetTxn.ID()) if !exists { t.Fatal("transaction set did not exist") } if txn.ID() != targetTxn.ID() { t.Fatal("returned the wrong transaction") } if len(parents) != len(txnSet) { t.Fatal("transaction had wrong number of parents") } for i, txn := range txnSet { if parents[i].ID() != txn.ID() { t.Fatal("returned the wrong parent") } } } // TestFeeEstimation checks that the fee estimation algorithm is reasonably // on target when the tpool is relying on blockchain based fee estimation. func TestFeeEstimation(t *testing.T) { if testing.Short() || !build.VLONG { t.Skip("Tpool is too slow to run this test regularly") } tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() // Prepare a bunch of outputs for a series of graphs to fill up the // transaction pool. graphLens := 400 // 80 kb per graph numGraphs := int(types.BlockSizeLimit) * blockFeeEstimationDepth / (graphLens * 206) // Enough to fill 'estimation depth' blocks. graphFund := types.SiacoinPrecision.Mul64(1000) var outputs []types.SiacoinOutput for i := 0; i < numGraphs+1; i++ { outputs = append(outputs, types.SiacoinOutput{ UnlockHash: types.UnlockConditions{}.UnlockHash(), Value: graphFund, }) } txns, err := tpt.wallet.SendSiacoinsMulti(outputs) if err != nil { t.Error(err) } // Mine the graph setup in the consensus set so that the graph outputs are // distinct transaction sets. _, err = tpt.miner.AddBlock() if err != nil { t.Fatal(err) } // Create all of the graphs. finalTxn := txns[len(txns)-1] var graphs [][]types.Transaction for i := 0; i < numGraphs; i++ { var edges []types.TransactionGraphEdge var cumFee types.Currency for j := 0; j < graphLens; j++ { fee := types.SiacoinPrecision.Mul64(uint64(j + i + 1)).Div64(200) cumFee = cumFee.Add(fee) edges = append(edges, types.TransactionGraphEdge{ Dest: j + 1, Fee: fee, Source: j, Value: graphFund.Sub(cumFee), }) } graph, err := types.TransactionGraph(finalTxn.SiacoinOutputID(uint64(i)), edges) if err != nil { t.Fatal(err) } graphs = append(graphs, graph) } // One block at a time, add graphs to the tpool and blockchain. Then check // the median fee estimation and see that it's the right value. var prevMin types.Currency for i := 0; i < blockFeeEstimationDepth; i++ { // Insert enough graphs to fill a block. for j := 0; j < numGraphs/blockFeeEstimationDepth; j++ { err = tpt.tpool.AcceptTransactionSet(graphs[0]) if err != nil { t.Fatal(err) } graphs = graphs[1:] } // Add a block to the transaction pool. _, err = tpt.miner.AddBlock() if err != nil { t.Fatal(err) } // Check that max is always greater than min. min, max := tpt.tpool.FeeEstimation() if min.Cmp(max) > 0 { t.Error("max fee is less than min fee estimation") } // If we're over halfway through the depth, the suggested fee should // start to exceed the default. if i > blockFeeEstimationDepth/2 { if min.Cmp(minEstimation) <= 0 { t.Error("fee estimation does not seem to be increasing") } if min.Cmp(prevMin) <= 0 { t.Error("fee estimation does not seem to be increasing") } } prevMin = min // Reset the tpool to verify that the persist structures are // functioning. // // TODO: For some reason, closing and re-opening the tpool results in // incredible performance penalties.
/* err = tpt.tpool.Close() if err != nil { t.Fatal(err) } tpt.tpool, err = New(tpt.cs, tpt.gateway, tpt.persistDir) if err != nil { t.Fatal(err) } */ } // Mine a few blocks and then check that the fee estimation has returned to // minimum as congestion clears up. for i := 0; i < (blockFeeEstimationDepth/2)+1; i++ { _, err = tpt.miner.AddBlock() if err != nil { t.Fatal(err) } } min, _ := tpt.tpool.FeeEstimation() if !(min.Cmp(minEstimation) == 0) { t.Error("fee estimator does not seem to be reducing with empty blocks.") } } // TestTpoolScalability fills the whole transaction pool with complex // transactions, then mines enough blocks to empty it out. Running sequentially, // the test should take less than 250ms per MB that the transaction pool fills // up, and less than 250ms per MB to empty out - indicating linear scalability // and tolerance for a larger pool size. func TestTpoolScalability(t *testing.T) { if testing.Short() || !build.VLONG { t.Skip("Tpool is too slow to run this test regularly") } tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() // Mine a few more blocks to get some extra funding. for i := 0; i < 3; i++ { _, err := tpt.miner.AddBlock() if err != nil { t.Fatal(err) } } // Prepare a bunch of outputs for a series of graphs to fill up the // transaction pool. rows := 10 // needs to factor into exclusively '2's and '5's. graphSize := 11796 // Measured with logging. Change if 'rows' changes. numGraphs := TransactionPoolSizeTarget / graphSize // Enough to fill the transaction pool. graphFund := types.SiacoinPrecision.Mul64(2000) var outputs []types.SiacoinOutput for i := 0; i < numGraphs+1; i++ { outputs = append(outputs, types.SiacoinOutput{ UnlockHash: types.UnlockConditions{}.UnlockHash(), Value: graphFund, }) } txns, err := tpt.wallet.SendSiacoinsMulti(outputs) if err != nil { t.Error(err) } // Mine the graph setup in the consensus set so that the graph outputs are // distinct transaction sets. _, err = tpt.miner.AddBlock() if err != nil { t.Fatal(err) } // Create all of the graphs. finalTxn := txns[len(txns)-1] var graphs [][]types.Transaction for i := 0; i < numGraphs; i++ { var edges []types.TransactionGraphEdge // Create the root of the graph. feeValues := types.SiacoinPrecision firstRowValues := graphFund.Sub(feeValues.Mul64(uint64(rows))).Div64(uint64(rows)) for j := 0; j < rows; j++ { edges = append(edges, types.TransactionGraphEdge{ Dest: j + 1, Fee: types.SiacoinPrecision, Source: 0, Value: firstRowValues, }) } // Create each row of the graph. var firstNodeValue types.Currency nodeIndex := 1 for j := 0; j < rows; j++ { // Create the first node in the row, which has an increasing // balance. rowValue := firstRowValues.Sub(types.SiacoinPrecision.Mul64(uint64(j + 1))) firstNodeValue = firstNodeValue.Add(rowValue) edges = append(edges, types.TransactionGraphEdge{ Dest: nodeIndex + (rows - j), Fee: types.SiacoinPrecision, Source: nodeIndex, Value: firstNodeValue, }) nodeIndex++ // Create the remaining nodes in this row. for k := j + 1; k < rows; k++ { edges = append(edges, types.TransactionGraphEdge{ Dest: nodeIndex + (rows - (j + 1)), Fee: types.SiacoinPrecision, Source: nodeIndex, Value: rowValue, }) nodeIndex++ } } // Build the graph and add it to the stack of graphs. graph, err := types.TransactionGraph(finalTxn.SiacoinOutputID(uint64(i)), edges) if err != nil { t.Fatal(err) } graphs = append(graphs, graph) } // Add all of the root transactions to the blockchain to throw off the // parent math for the transaction pool.
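// Because each root is mined into the blockchain here, the remaining graph // transactions added below will reference outputs that are already confirmed // rather than parents sitting in the tpool, exercising the pool's handling // of confirmed dependencies.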
for _, graph := range graphs { err := tpt.tpool.AcceptTransactionSet([]types.Transaction{graph[0]}) if err != nil { t.Fatal(err) } } _, err = tpt.miner.AddBlock() if err != nil { t.Fatal(err) } // Add all of the transactions in each graph into the tpool, one transaction // at a time, interweaved, chaotically. for i := 1; i < len(graphs[0]); i++ { for j := 0; j < len(graphs); j++ { err := tpt.tpool.AcceptTransactionSet([]types.Transaction{graphs[j][i]}) if err != nil { t.Fatal(err, i, j) } } } // Mine blocks until the tpool is gone. for tpt.tpool.transactionListSize > 0 { _, err := tpt.miner.AddBlock() if err != nil { t.Fatal(err) } } } // TestHeapFees creates a large number of transaction graphs with increasing fee // value. Then it checks that those sets with higher value transaction fees are // prioritized for placement in blocks. func TestHeapFees(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() // Mine a few more blocks to get some extra funding. for i := 0; i < 4; i++ { _, err := tpt.miner.AddBlock() if err != nil { t.Fatal(err) } } // Create transaction graph setup. coinFrac := types.SiacoinPrecision numGraphs := 110 graphFund := coinFrac.Mul64(12210) var outputs []types.SiacoinOutput for i := 0; i < numGraphs; i++ { outputs = append(outputs, types.SiacoinOutput{ UnlockHash: types.UnlockConditions{}.UnlockHash(), Value: graphFund, }) } txns, err := tpt.wallet.SendSiacoinsMulti(outputs) if err != nil { t.Error(err) } // Mine the graph setup in the consensus set so that the graph outputs are // transaction sets. This guarantees that the parent of every graph will be // its own output. _, err = tpt.miner.AddBlock() if err != nil { t.Fatal(err) } finalTxn := txns[len(txns)-1] // For each output, create a large graph of transactions. var graphs [][]types.Transaction for i := 0; i < numGraphs; i++ { var edges []types.TransactionGraphEdge var cumFee types.Currency for j := 0; j < numGraphs; j++ { fee := coinFrac.Mul64(uint64((j + 1))) cumFee = cumFee.Add(fee) edges = append(edges, types.TransactionGraphEdge{ Dest: j + 1, Fee: fee, Source: 0, Value: fee, }) } for k := 0; k < numGraphs; k++ { fee := coinFrac.Mul64(uint64(k + 1)).Div64(2) cumFee = cumFee.Add(fee) edges = append(edges, types.TransactionGraphEdge{ Dest: k + 251, Fee: fee, Source: k + 1, Value: fee, }) } graph, err := types.TransactionGraph(finalTxn.SiacoinOutputID(uint64(i)), edges) if err != nil { t.Fatal(err) } graphs = append(graphs, graph) } // Accept the parent node of each graph so that we can test spending its // outputs after mining the next block. for _, graph := range graphs { err := tpt.tpool.AcceptTransactionSet([]types.Transaction{graph[0]}) if err != nil { t.Fatal(err) } } block, err := tpt.miner.AddBlock() if err != nil { t.Fatal(err) } // Now accept all the other nodes of each graph. for _, graph := range graphs { for _, txn := range graph[1:] { err := tpt.tpool.AcceptTransactionSet([]types.Transaction{txn}) if err != nil { t.Fatal(err) } } } // Now we mine 2 blocks in sequence and check that higher fee transactions // show up in the first block.
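// The miner fills blocks with the highest fee-per-byte sets first, so the // first block mined below should absorb the expensive edges and the second // block should be left with the cheaper remainder; the hardcoded expected // totals below encode that split.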
block, err = tpt.miner.AddBlock() if err != nil { t.Fatal(err) } var totalFee1 types.Currency expectedFee1 := coinFrac.Mul64(321915) // Add up total fees numTxns1 := 0 maxFee1 := types.SiacoinPrecision.Div64(1000000) minFee1 := types.SiacoinPrecision.Mul64(1000000) for _, txn := range block.Transactions { for _, fee := range txn.MinerFees { if fee.Cmp(maxFee1) >= 0 { maxFee1 = fee } if fee.Cmp(minFee1) <= 0 { minFee1 = fee } totalFee1 = totalFee1.Add(fee) numTxns1++ } } avgFee1 := totalFee1.Div64(uint64(numTxns1)) if totalFee1.Cmp(expectedFee1) != 0 { t.Error("totalFee1 differs from the expected fee.", totalFee1.String(), expectedFee1.String()) //t.Log(totalFee1.Sub(expectedFee1).HumanString()) } // Mine the next block so we can check the transactions inside block, err = tpt.miner.AddBlock() if err != nil { t.Fatal(err) } var totalFee2 types.Currency expectedFee2 := coinFrac.Mul64(13860) // Add up total fees numTxns2 := 0 maxFee2 := types.SiacoinPrecision.Div64(1000000) minFee2 := types.SiacoinPrecision.Mul64(1000000) for _, txn := range block.Transactions { for _, fee := range txn.MinerFees { if fee.Cmp(maxFee2) >= 0 { maxFee2 = fee } if fee.Cmp(minFee2) <= 0 { minFee2 = fee } totalFee2 = totalFee2.Add(fee) numTxns2++ } } avgFee2 := totalFee2.Div64(uint64(numTxns2)) if totalFee2.Cmp(expectedFee2) != 0 { t.Error("totalFee2 differs from the expected fee.", totalFee2.String(), expectedFee2.String()) //t.Log(totalFee2.Sub(expectedFee2).HumanString()) } if avgFee1.Cmp(avgFee2) <= 0 { t.Error("Expected average fee from first block to be greater than average fee from second block.") } if totalFee1.Cmp(totalFee2) <= 0 { t.Error("Expected total fee from first block to be greater than total fee from second block.") } if numTxns1 < numTxns2 { t.Error("Expected more transactions in the first block than second block.") } if maxFee1.Cmp(maxFee2) <= 0 { t.Error("Expected highest fee from first block to be greater than highest fee from second block.") } if minFee1.Cmp(maxFee2) < 0 { t.Error("Expected lowest fee from first block to be greater than or equal to the highest fee from second block.") } if maxFee1.Cmp(minFee1) <= 0 { t.Error("Expected highest fee from first block to be greater than lowest fee from first block.") } if maxFee2.Cmp(minFee2) <= 0 { t.Error("Expected highest fee from second block to be greater than lowest fee from second block.") } } Sia-1.3.0/modules/transactionpool/update.go000066400000000000000000000327301313565667000207500ustar00rootroot00000000000000package transactionpool import ( "bytes" "sort" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // findSets takes a bunch of transactions (presumably from a block) and finds // all of the separate transaction sets within it. findSets does not check for // conflicts. // // The algorithm goes through one transaction at a time. All of the outputs of // that transaction are added to the objMap, pointing to the transaction to // indicate that the transaction contains those outputs. The transaction is // assigned an integer id (each transaction will have a unique id) and added to // the txMap. // // The transaction's inputs are then checked against the objMap to see if there // are any parents of the transaction in the graph. If there are, the // transaction is added to the parent set instead of its own set. If not, the // transaction is added as its own set. // // The forwards map contains a list of ints indicating when a transaction has // been merged with a set.
When a transaction gets merged with a parent set, its // integer id gets added to the forwards map, indicating that the transaction is // no longer in its own set, but instead has been merged with other sets. // // Some transactions will have parents from multiple distinct sets. If a // transaction has parents in multiple distinct sets, those sets get merged // together and the transaction gets added to the result. One of the sets is // nominated (arbitrarily) as the official set, and the integer id of the other // set and the new transaction get forwarded to the official set. // // TODO: Set merging currently occurs any time that there is a child. But // really, it should only occur if the child increases the average fee value of // the set that it is merging with (which it will if and only if it has a higher // average fee than that set). If the child has multiple parent sets, it should // be compared with the parent set that has the lowest fee value. Then, after it // is merged with that parent, the result should be merged with the next // lowest-fee parent set if and only if the new set has a higher average fee // than the parent set. And this continues until either all of the sets have // been merged, or until the remaining parent sets have higher values. func findSets(ts []types.Transaction) [][]types.Transaction { // txMap marks what set each transaction is in. If two sets get combined, // this number will not be updated. The 'forwards' map defined further on // will help to discover which sets have been combined. txMap := make(map[types.TransactionID]int) setMap := make(map[int][]types.Transaction) objMap := make(map[ObjectID]types.TransactionID) forwards := make(map[int]int) // Define a function to follow and collapse any update chain. forward := func(prev int) (ret int) { ret = prev next, exists := forwards[prev] for exists { ret = next forwards[prev] = next // collapse the forwards function to prevent quadratic runtime of findSets. next, exists = forwards[next] } return ret } // Add the transactions to the set map one-by-one, merging them into parent // sets where they belong. for i, t := range ts { // Check if the inputs depend on any previous transaction outputs. tid := t.ID() parentSets := make(map[int]struct{}) for _, obj := range t.SiacoinInputs { txid, exists := objMap[ObjectID(obj.ParentID)] if exists { parentSet := forward(txMap[txid]) parentSets[parentSet] = struct{}{} } } for _, obj := range t.FileContractRevisions { txid, exists := objMap[ObjectID(obj.ParentID)] if exists { parentSet := forward(txMap[txid]) parentSets[parentSet] = struct{}{} } } for _, obj := range t.StorageProofs { txid, exists := objMap[ObjectID(obj.ParentID)] if exists { parentSet := forward(txMap[txid]) parentSets[parentSet] = struct{}{} } } for _, obj := range t.SiafundInputs { txid, exists := objMap[ObjectID(obj.ParentID)] if exists { parentSet := forward(txMap[txid]) parentSets[parentSet] = struct{}{} } } // Determine the new counter for this transaction. if len(parentSets) == 0 { // No parent sets. Make a new set for this transaction. txMap[tid] = i setMap[i] = []types.Transaction{t} // Don't need to add anything for the file contract outputs, storage // proof outputs, siafund claim outputs; these outputs are not // allowed to be spent until 50 confirmations. } else { // There are parent sets, pick one as the base and then merge the // rest into it.
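// Illustrative walkthrough (not from the original source): suppose a new // transaction has parents in set 2 and set 4. Set 2 is (arbitrarily) chosen // as the base, set 4's transactions are appended to set 2, forwards[4] = 2 // is recorded so that later lookups of set 4 resolve to set 2, and the new // transaction itself is appended to set 2.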
parentsSlice := make([]int, 0, len(parentSets)) for j := range parentSets { parentsSlice = append(parentsSlice, j) } base := parentsSlice[0] txMap[tid] = base for _, j := range parentsSlice[1:] { // Forward any future transactions pointing at this set to the // base set. forwards[j] = base // Combine the transactions in this set with the transactions in // the base set. setMap[base] = append(setMap[base], setMap[j]...) // Delete this set map, it has been merged with the base set. delete(setMap, j) } // Add this transaction to the base set. setMap[base] = append(setMap[base], t) } // Mark this transaction's outputs as potential inputs to future // transactions. for j := range t.SiacoinOutputs { scoid := t.SiacoinOutputID(uint64(j)) objMap[ObjectID(scoid)] = tid } for j := range t.FileContracts { fcid := t.FileContractID(uint64(j)) objMap[ObjectID(fcid)] = tid } for j := range t.FileContractRevisions { fcid := t.FileContractRevisions[j].ParentID objMap[ObjectID(fcid)] = tid } for j := range t.SiafundOutputs { sfoid := t.SiafundOutputID(uint64(j)) objMap[ObjectID(sfoid)] = tid } } // Compile the final group of sets. ret := make([][]types.Transaction, 0, len(setMap)) for _, set := range setMap { ret = append(ret, set) } return ret } // purge removes all transactions from the transaction pool. func (tp *TransactionPool) purge() { tp.knownObjects = make(map[ObjectID]TransactionSetID) tp.transactionSets = make(map[TransactionSetID][]types.Transaction) tp.transactionSetDiffs = make(map[TransactionSetID]*modules.ConsensusChange) tp.transactionListSize = 0 } // ProcessConsensusChange gets called to inform the transaction pool of changes // to the consensus set. func (tp *TransactionPool) ProcessConsensusChange(cc modules.ConsensusChange) { tp.mu.Lock() // Update the database of confirmed transactions. for _, block := range cc.RevertedBlocks { if tp.blockHeight > 0 || block.ID() != types.GenesisID { tp.blockHeight-- } for _, txn := range block.Transactions { err := tp.deleteTransaction(tp.dbTx, txn.ID()) if err != nil { tp.log.Println("ERROR: could not delete a transaction:", err) } } // Pull the transactions out of the fee summary. For estimating only // over 10 blocks, it is extremely likely that there will be more // applied blocks than reverted blocks, and if there aren't (a height // decreasing reorg), there will be more than 10 applied blocks. if len(tp.recentMedians) > 0 { // Strip out all of the transactions in this block. tp.recentMedians = tp.recentMedians[:len(tp.recentMedians)-1] } } for _, block := range cc.AppliedBlocks { if tp.blockHeight > 0 || block.ID() != types.GenesisID { tp.blockHeight++ } for _, txn := range block.Transactions { err := tp.putTransaction(tp.dbTx, txn.ID()) if err != nil { tp.log.Println("ERROR: could not add a transaction:", err) } } // Find the median transaction fee for this block. type feeSummary struct { fee types.Currency size int } var fees []feeSummary var totalSize int txnSets := findSets(block.Transactions) for _, set := range txnSets { // Compile the fees for this set. var feeSum types.Currency var sizeSum int b := new(bytes.Buffer) for _, txn := range set { txn.MarshalSia(b) sizeSum += b.Len() b.Reset() for _, fee := range txn.MinerFees { feeSum = feeSum.Add(fee) } } feeAvg := feeSum.Div64(uint64(sizeSum)) fees = append(fees, feeSummary{ fee: feeAvg, size: sizeSum, }) totalSize += sizeSum } // Add an extra zero-fee transaction for any unused block space.
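// Padding the fee list with a zero-fee entry sized to the unused block space // keeps the estimator conservative: the filler sorts first, so if a block is // less than three-quarters full the percentile scan below stops inside the // filler and records a zero fee, signaling an absence of fee pressure.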
remaining := int(types.BlockSizeLimit) - totalSize fees = append(fees, feeSummary{ fee: types.ZeroCurrency, size: remaining, // fine if remaining is zero. }) // Sort the fees by value and then scroll until the median. sort.Slice(fees, func(i, j int) bool { return fees[i].fee.Cmp(fees[j].fee) < 0 }) var progress int for i := range fees { progress += fees[i].size // Instead of grabbing the full median, look at the 75%-ile. It's // going to be cheaper than the 50%-ile, but it still got into a // block. if uint64(progress) > types.BlockSizeLimit/4 { tp.recentMedians = append(tp.recentMedians, fees[i].fee) break } } // If there are more than blockFeeEstimationDepth blocks recorded in // recentMedians, strip off the oldest blocks. for len(tp.recentMedians) > blockFeeEstimationDepth { tp.recentMedians = tp.recentMedians[1:] } } // Grab the median of the recent medians. Copy to a new slice so the sorting // doesn't screw up the slice. safeMedians := make([]types.Currency, len(tp.recentMedians)) copy(safeMedians, tp.recentMedians) sort.Slice(safeMedians, func(i, j int) bool { return safeMedians[i].Cmp(safeMedians[j]) < 0 }) tp.recentMedianFee = safeMedians[len(safeMedians)/2] // Update all the on-disk structures. err := tp.putRecentConsensusChange(tp.dbTx, cc.ID) if err != nil { tp.log.Println("ERROR: could not update the recent consensus change:", err) } err = tp.putBlockHeight(tp.dbTx, tp.blockHeight) if err != nil { tp.log.Println("ERROR: could not update the block height:", err) } err = tp.putFeeMedian(tp.dbTx, medianPersist{ RecentMedians: tp.recentMedians, RecentMedianFee: tp.recentMedianFee, }) if err != nil { tp.log.Println("ERROR: could not update the transaction pool median fee information:", err) } // Scan the applied blocks for transactions that got accepted. This will // help to determine which transactions to remove from the transaction // pool. Having this list enables both efficiency improvements and helps to // clean out transactions with no dependencies, such as arbitrary data // transactions from the host. txids := make(map[types.TransactionID]struct{}) for _, block := range cc.AppliedBlocks { for _, txn := range block.Transactions { txids[txn.ID()] = struct{}{} } } // Save all of the current unconfirmed transaction sets into a list. var unconfirmedSets [][]types.Transaction for _, tSet := range tp.transactionSets { // Compile a new transaction set that removes all transactions duplicated // in the block. Though mostly handled by the dependency manager in the // transaction pool, this should both improve efficiency and will strip // out duplicate transactions with no dependencies (arbitrary data only // transactions) var newTSet []types.Transaction for _, txn := range tSet { _, exists := txids[txn.ID()] if !exists { newTSet = append(newTSet, txn) } } unconfirmedSets = append(unconfirmedSets, newTSet) } // Purge the transaction pool. Some of the transactions sets may be invalid // after the consensus change. tp.purge() // prune transactions older than maxTxnAge. for i, tSet := range unconfirmedSets { var validTxns []types.Transaction for _, txn := range tSet { seenHeight, seen := tp.transactionHeights[txn.ID()] if tp.blockHeight-seenHeight <= maxTxnAge || !seen { validTxns = append(validTxns, txn) } else { delete(tp.transactionHeights, txn.ID()) } } unconfirmedSets[i] = validTxns } // Scan through the reverted blocks and re-add any transactions that got // reverted to the tpool.
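// cc.RevertedBlocks lists blocks from most recently reverted (the old tip) // backwards, so iterating in reverse re-applies the oldest reverted block // first. That keeps parent transactions ahead of their children, giving each // single-transaction set the best chance of being re-accepted.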
for i := len(cc.RevertedBlocks) - 1; i >= 0; i-- { block := cc.RevertedBlocks[i] for _, txn := range block.Transactions { // Check whether this transaction has already been re-added to the // consensus set by the applied blocks. _, exists := txids[txn.ID()] if exists { continue } // Try adding the transaction back into the transaction pool. tp.acceptTransactionSet([]types.Transaction{txn}, cc.TryTransactionSet) // Error is ignored. } } // Add all of the unconfirmed transaction sets back to the transaction // pool. The ones that are invalid will throw an error and will not be // re-added. // // Accepting a transaction set requires locking the consensus set (to check // validity). But, ProcessConsensusChange is only called when the consensus // set is already locked, causing a deadlock problem. Therefore, // transactions are re-added to the pool in a goroutine, so that this // function can finish and consensus can unlock. The tpool lock is held // however until the goroutine completes. // // Which means that no other modules can require a tpool lock when // processing consensus changes. Overall, the locking is pretty fragile and // more rules need to be put in place. for _, set := range unconfirmedSets { for _, txn := range set { err := tp.acceptTransactionSet([]types.Transaction{txn}, cc.TryTransactionSet) if err != nil { // The transaction is no longer valid, delete it from the // heights map to prevent a memory leak. delete(tp.transactionHeights, txn.ID()) } } } // Inform subscribers that an update has executed. tp.mu.Demote() tp.updateSubscribersTransactions() tp.mu.DemotedUnlock() } // PurgeTransactionPool deletes all transactions from the transaction pool. func (tp *TransactionPool) PurgeTransactionPool() { tp.mu.Lock() tp.purge() tp.mu.Unlock() } Sia-1.3.0/modules/transactionpool/update_test.go000066400000000000000000000243541313565667000220120ustar00rootroot00000000000000package transactionpool import ( "sort" "testing" "time" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // TestFindSets checks that the findSets function is properly parsing and // combining transactions into their minimal sets. func TestFindSets(t *testing.T) { // Create a graph which is a chain. The graph will be invalid, but we don't // need the consensus set, so no worries. graph1Size := 5 edges := make([]types.TransactionGraphEdge, 0, graph1Size) for i := 0; i < graph1Size; i++ { edges = append(edges, types.TransactionGraphEdge{ Dest: i + 1, Fee: types.NewCurrency64(5), Source: i, Value: types.NewCurrency64(100), }) } graph1, err := types.TransactionGraph(types.SiacoinOutputID{}, edges) if err != nil { t.Fatal(err) } // Split the graph using findSets. Result should be a single set with 5 // transactions. sets := findSets(graph1) if len(sets) != 1 { t.Fatal("there should be only one set") } if len(sets[0]) != graph1Size { t.Error("findSets is not grouping the transactions correctly") } // Create a second graph to check it can handle two graphs.
graph2Size := 6 edges = make([]types.TransactionGraphEdge, 0, graph2Size) for i := 0; i < graph2Size; i++ { edges = append(edges, types.TransactionGraphEdge{ Dest: i + 1, Fee: types.NewCurrency64(5), Source: i, Value: types.NewCurrency64(100), }) } graph2, err := types.TransactionGraph(types.SiacoinOutputID{1}, edges) if err != nil { t.Fatal(err) } sets = findSets(append(graph1, graph2...)) if len(sets) != 2 { t.Fatal("there should be two sets") } lens := []int{len(sets[0]), len(sets[1])} sort.Ints(lens) expected := []int{graph1Size, graph2Size} sort.Ints(expected) if lens[0] != expected[0] || lens[1] != expected[1] { t.Error("Resulting sets do not have the right lengths") } // Create a diamond graph to make sure it can handle a diamond graph. edges = make([]types.TransactionGraphEdge, 0, 5) sources := []int{0, 0, 1, 2, 3} dests := []int{1, 2, 3, 3, 4} for i := 0; i < 5; i++ { edges = append(edges, types.TransactionGraphEdge{ Dest: dests[i], Fee: types.NewCurrency64(5), Source: sources[i], Value: types.NewCurrency64(100), }) } graph3, err := types.TransactionGraph(types.SiacoinOutputID{2}, edges) graph3Size := len(graph3) if err != nil { t.Fatal(err) } sets = findSets(append(graph1, append(graph2, graph3...)...)) if len(sets) != 3 { t.Fatal("there should be three sets") } lens = []int{len(sets[0]), len(sets[1]), len(sets[2])} sort.Ints(lens) expected = []int{graph1Size, graph2Size, graph3Size} sort.Ints(expected) if lens[0] != expected[0] || lens[1] != expected[1] || lens[2] != expected[2] { t.Error("Resulting sets do not have the right lengths") } // Sporadically weave the transactions and make sure the set finder still // parses the sets correctly (sets can be assumed to be ordered, but not all // in a row). var sporadic []types.Transaction for len(graph1) > 0 || len(graph2) > 0 || len(graph3) > 0 { if len(graph1) > 0 { sporadic = append(sporadic, graph1[0]) graph1 = graph1[1:] } if len(graph2) > 0 { sporadic = append(sporadic, graph2[0]) graph2 = graph2[1:] } if len(graph3) > 0 { sporadic = append(sporadic, graph3[0]) graph3 = graph3[1:] } } if len(sporadic) != graph1Size+graph2Size+graph3Size { t.Error("sporadic block creation failed") } // Result of findSets should match previous result. sets = findSets(sporadic) if len(sets) != 3 { t.Fatal("there should be three sets") } lens = []int{len(sets[0]), len(sets[1]), len(sets[2])} sort.Ints(lens) expected = []int{graph1Size, graph2Size, graph3Size} sort.Ints(expected) if lens[0] != expected[0] || lens[1] != expected[1] || lens[2] != expected[2] { t.Error("Resulting sets do not have the right lengths") } } // TestArbDataOnly tries submitting a transaction with only arbitrary data to // the transaction pool. Then a block is mined, putting the transaction on the // blockchain. The arb data transaction should no longer be in the transaction // pool.
func TestArbDataOnly(t *testing.T) { if testing.Short() { t.SkipNow() } tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() txn := types.Transaction{ ArbitraryData: [][]byte{ append(modules.PrefixNonSia[:], []byte("arb-data")...), }, } err = tpt.tpool.AcceptTransactionSet([]types.Transaction{txn}) if err != nil { t.Fatal(err) } if len(tpt.tpool.TransactionList()) != 1 { t.Error("expecting to see a transaction in the transaction pool") } _, err = tpt.miner.AddBlock() if err != nil { t.Fatal(err) } if len(tpt.tpool.TransactionList()) != 0 { t.Error("transaction was not cleared from the transaction pool") } } // TestValidRevertedTransaction verifies that if a transaction appears in a // block's reverted transactions, it is added correctly to the pool. func TestValidRevertedTransaction(t *testing.T) { if testing.Short() { t.SkipNow() } tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() tpt2, err := blankTpoolTester(t.Name() + "-tpt2") if err != nil { t.Fatal(err) } defer tpt2.Close() // connect the testers and wait for them to have the same current block err = tpt2.gateway.Connect(tpt.gateway.Address()) if err != nil { t.Fatal(err) } success := false for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(time.Millisecond * 100) { if tpt.cs.CurrentBlock().ID() == tpt2.cs.CurrentBlock().ID() { success = true break } } if !success { t.Fatal("testers did not have the same block height after one minute") } // disconnect the testers err = tpt2.gateway.Disconnect(tpt.gateway.Address()) if err != nil { t.Fatal(err) } tpt.gateway.Disconnect(tpt2.gateway.Address()) // make some transactions on tpt var txnSets [][]types.Transaction for i := 0; i < 5; i++ { txns, err := tpt.wallet.SendSiacoins(types.SiacoinPrecision.Mul64(1000), types.UnlockHash{}) if err != nil { t.Fatal(err) } txnSets = append(txnSets, txns) } // mine some blocks to cause a re-org for i := 0; i < 3; i++ { _, err = tpt.miner.AddBlock() if err != nil { t.Fatal(err) } } // put tpt2 at a higher height for i := 0; i < 10; i++ { _, err = tpt2.miner.AddBlock() if err != nil { t.Fatal(err) } } // connect the testers and wait for them to have the same current block err = tpt.gateway.Connect(tpt2.gateway.Address()) if err != nil { t.Fatal(err) } success = false for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(time.Millisecond * 100) { if tpt.cs.CurrentBlock().ID() == tpt2.cs.CurrentBlock().ID() { success = true break } } if !success { t.Fatal("testers did not have the same block height after one minute") } // verify the transaction pool still has the reorged txns for _, txnSet := range txnSets { for _, txn := range txnSet { _, _, exists := tpt.tpool.Transaction(txn.ID()) if !exists { t.Error("Transaction was not re-added to the transaction pool after being re-orged out of the blockchain:", txn.ID()) } } } // Try to get the transactions into a block. _, err = tpt.miner.AddBlock() if err != nil { t.Fatal(err) } if len(tpt.tpool.TransactionList()) != 0 { t.Error("Does not seem that the transactions were mined into a block.") } } // TestTransactionPoolPruning verifies that the transaction pool correctly // prunes transactions older than maxTxnAge.
func TestTransactionPoolPruning(t *testing.T) { if testing.Short() { t.SkipNow() } tpt, err := createTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() tpt2, err := blankTpoolTester(t.Name() + "-tpt2") if err != nil { t.Fatal(err) } defer tpt2.Close() // connect the testers and wait for them to have the same current block err = tpt2.gateway.Connect(tpt.gateway.Address()) if err != nil { t.Fatal(err) } success := false for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(time.Millisecond * 100) { if tpt.cs.CurrentBlock().ID() == tpt2.cs.CurrentBlock().ID() { success = true break } } if !success { t.Fatal("testers did not have the same block height after one minute") } // disconnect tpt, create an unconfirmed transaction on tpt, mine maxTxnAge // blocks on tpt2 and reconnect. The unconfirmed transactions should be // removed from tpt's pool. err = tpt.gateway.Disconnect(tpt2.gateway.Address()) if err != nil { t.Fatal(err) } tpt2.gateway.Disconnect(tpt.gateway.Address()) txns, err := tpt.wallet.SendSiacoins(types.SiacoinPrecision.Mul64(1000), types.UnlockHash{}) if err != nil { t.Fatal(err) } for i := types.BlockHeight(0); i < maxTxnAge+1; i++ { _, err = tpt2.miner.AddBlock() if err != nil { t.Fatal(err) } } // reconnect the testers err = tpt.gateway.Connect(tpt2.gateway.Address()) if err != nil { t.Fatal(err) } success = false for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(time.Millisecond * 100) { if tpt.cs.CurrentBlock().ID() == tpt2.cs.CurrentBlock().ID() { success = true break } } if !success { t.Fatal("testers did not have the same block height after one minute") } for _, txn := range txns { _, _, exists := tpt.tpool.Transaction(txn.ID()) if exists { t.Fatal("transaction pool had a transaction that should have been pruned") } } if len(tpt.tpool.TransactionList()) != 0 { t.Fatal("should have no unconfirmed transactions") } if len(tpt.tpool.knownObjects) != 0 { t.Fatal("should have no known objects") } if len(tpt.tpool.transactionSetDiffs) != 0 { t.Fatal("should have no transaction set diffs") } if tpt.tpool.transactionListSize != 0 { t.Fatal("transactionListSize should be zero") } } // TestUpdateBlockHeight verifies that the transactionpool updates its internal // block height correctly. func TestUpdateBlockHeight(t *testing.T) { if testing.Short() { t.SkipNow() } tpt, err := blankTpoolTester(t.Name()) if err != nil { t.Fatal(err) } defer tpt.Close() targetHeight := 20 for i := 0; i < targetHeight; i++ { _, err = tpt.miner.AddBlock() if err != nil { t.Fatal(err) } } if tpt.tpool.blockHeight != types.BlockHeight(targetHeight) { t.Fatalf("transaction pool had the wrong block height, got %v wanted %v\n", tpt.tpool.blockHeight, targetHeight) } } Sia-1.3.0/modules/transactionpool_test.go000066400000000000000000000063221313565667000205230ustar00rootroot00000000000000package modules import ( "testing" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/types" ) // TestConsensusConflict checks that the consensus conflict type is correctly // assembling consensus conflict errors. 
func TestConsensusConflict(t *testing.T) { t.Parallel() ncc := NewConsensusConflict("problem") if ncc.Error() != "consensus conflict: problem" { t.Error("wrong error message being reported in a consensus conflict") } err := func() error { return ncc }() if err.Error() != "consensus conflict: problem" { t.Error("wrong error message being reported in a consensus conflict") } if _, ok := err.(ConsensusConflict); !ok { t.Error("error is not maintaining consensus conflict type") } } // TestCalculateFee checks that the CalculateFee function is correctly // computing the average fee per byte of a transaction set. func TestCalculateFee(t *testing.T) { t.Parallel() // Try calculating the fees on a nil transaction set. if CalculateFee(nil).Cmp(types.ZeroCurrency) != 0 { t.Error("CalculateFee is incorrectly handling nil input") } // Try a single transaction with no fees. txnSet := []types.Transaction{{}} if CalculateFee(txnSet).Cmp(types.ZeroCurrency) != 0 { t.Error("CalculateFee is not correctly calculating the fees on a transaction set with no fees") } // Try a non-empty transaction. txnSet = []types.Transaction{{ SiacoinOutputs: []types.SiacoinOutput{{ Value: types.NewCurrency64(253e9), }}, }} if CalculateFee(txnSet).Cmp(types.ZeroCurrency) != 0 { t.Error("CalculateFee is not correctly calculating the fees on a non-empty transaction set") } // Try a transaction set with a single miner fee. baseFee := types.NewCurrency64(12e3) txnSet = []types.Transaction{{ MinerFees: []types.Currency{ baseFee, }, }} setLen := uint64(len(encoding.Marshal(txnSet))) expectedFee := baseFee.Div64(setLen) if CalculateFee(txnSet).Cmp(expectedFee) != 0 { t.Error("CalculateFee doesn't seem to be calculating the correct transaction fee") } // Try the transaction set when there is more data. txnSet[0].FileContracts = append(txnSet[0].FileContracts, types.FileContract{ FileSize: 10e3, }) newSetLen := uint64(len(encoding.Marshal(txnSet))) if newSetLen <= setLen { t.Fatal("transaction set did not grow after adding a file contract") } newExpectedFee := baseFee.Div64(newSetLen) if newExpectedFee.Cmp(expectedFee) >= 0 { t.Error("the new expected fee should go down as the txn size increases") } if CalculateFee(txnSet).Cmp(newExpectedFee) != 0 { t.Error("the new expected fee does not match the new actual fee") } // Try a transaction set with multiple transactions and multiple fees per // transaction. fee1 := types.NewCurrency64(1e6) fee2 := types.NewCurrency64(2e6) fee3 := types.NewCurrency64(3e6) fee4 := types.NewCurrency64(4e6) txnSet = []types.Transaction{ { MinerFees: []types.Currency{ fee1, fee2, }, }, { MinerFees: []types.Currency{ fee3, fee4, }, }, } currencyLen := types.NewCurrency64(uint64(len(encoding.Marshal(txnSet)))) multiExpectedFee := fee1.Add(fee2).Add(fee3).Add(fee4).Div(currencyLen) if CalculateFee(txnSet).Cmp(multiExpectedFee) != 0 { t.Error("got the wrong fee for a multi transaction set") } } Sia-1.3.0/modules/wallet.go000066400000000000000000000451421313565667000155400ustar00rootroot00000000000000package modules import ( "bytes" "errors" "github.com/NebulousLabs/entropy-mnemonics" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/types" ) const ( // WalletDir is the directory that contains the wallet persistence. WalletDir = "wallet" // SeedChecksumSize is the number of bytes that are used to checksum // addresses to prevent accidental spending.
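// The checksum is the first SeedChecksumSize bytes of crypto.HashObject(seed); // SeedToString (below) appends it to the seed entropy, and StringToSeed // verifies it before accepting a phrase.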
SeedChecksumSize = 6 // PublicKeysPerSeed defines the number of public keys that get pregenerated // for a seed at startup when searching for balances in the blockchain. PublicKeysPerSeed = 2500 ) var ( // ErrBadEncryptionKey is returned if the incorrect encryption key to a // file is provided. ErrBadEncryptionKey = errors.New("provided encryption key is incorrect") // ErrLowBalance is returned if the wallet does not have enough funds to // complete the desired action. ErrLowBalance = errors.New("insufficient balance") // ErrIncompleteTransactions is returned if the wallet has incomplete // transactions being built that are using all of the current outputs, and // therefore the wallet is unable to spend money despite it not technically // being 'unconfirmed' yet. ErrIncompleteTransactions = errors.New("wallet has coins spent in incomplete transactions - not enough remaining coins") // ErrLockedWallet is returned when an action cannot be performed due to // the wallet being locked. ErrLockedWallet = errors.New("wallet must be unlocked before it can be used") ) type ( // Seed is cryptographic entropy that is used to derive spendable wallet // addresses. Seed [crypto.EntropySize]byte // WalletTransactionID is a unique identifier for a wallet transaction. WalletTransactionID crypto.Hash // A ProcessedInput represents funding to a transaction. The input is // coming from an address and going to the outputs. The fund types are // 'SiacoinInput', 'SiafundInput'. ProcessedInput struct { ParentID types.OutputID `json:"parentid"` FundType types.Specifier `json:"fundtype"` WalletAddress bool `json:"walletaddress"` RelatedAddress types.UnlockHash `json:"relatedaddress"` Value types.Currency `json:"value"` } // A ProcessedOutput is a siacoin output that appears in a transaction. // Some outputs mature immediately, some are delayed, and some may never // mature at all (in the event of storage proofs). // // Fund type can either be 'SiacoinOutput', 'SiafundOutput', 'ClaimOutput', // 'MinerPayout', or 'MinerFee'. All outputs except the miner fee create // outputs accessible to an address. Miner fees are not spendable, and // instead contribute to the block subsidy. // // MaturityHeight indicates at what block height the output becomes // available. SiacoinOutputs and SiafundOutputs become available // immediately. ClaimOutputs and MinerPayouts become available after 144 // confirmations. ProcessedOutput struct { ID types.OutputID `json:"id"` FundType types.Specifier `json:"fundtype"` MaturityHeight types.BlockHeight `json:"maturityheight"` WalletAddress bool `json:"walletaddress"` RelatedAddress types.UnlockHash `json:"relatedaddress"` Value types.Currency `json:"value"` } // A ProcessedTransaction is a transaction that has been processed into // explicit inputs and outputs and tagged with some header data such as // confirmation height + timestamp. // // Because of the block subsidy, a block is considered a transaction. // Since there is technically no transaction id for the block subsidy, the // block id is used instead. ProcessedTransaction struct { Transaction types.Transaction `json:"transaction"` TransactionID types.TransactionID `json:"transactionid"` ConfirmationHeight types.BlockHeight `json:"confirmationheight"` ConfirmationTimestamp types.Timestamp `json:"confirmationtimestamp"` Inputs []ProcessedInput `json:"inputs"` Outputs []ProcessedOutput `json:"outputs"` } // TransactionBuilder is used to construct custom transactions.
A transaction // builder is initialized via 'RegisterTransaction' and then can be modified by // adding funds or other fields. The transaction is completed by calling // 'Sign', which will sign all inputs added via the 'FundSiacoins' or // 'FundSiafunds' call. All modifications are additive. // // Parents of the transaction are kept in the transaction builder. A parent is // any unconfirmed transaction that is required for the child to be valid. // // Transaction builders are not thread safe. TransactionBuilder interface { // FundSiacoins will add a siacoin input of exactly 'amount' to the // transaction. A parent transaction may be needed to achieve an input // with the correct value. The siacoin input will not be signed until // 'Sign' is called on the transaction builder. The expectation is that // the transaction will be completed and broadcast within a few hours. // Longer risks double-spends, as the wallet will assume that the // transaction failed. FundSiacoins(amount types.Currency) error // FundSiafunds will add a siafund input of exactly 'amount' to the // transaction. A parent transaction may be needed to achieve an input // with the correct value. The siafund input will not be signed until // 'Sign' is called on the transaction builder. Any siacoins that are // released by spending the siafund outputs will be sent to another // address owned by the wallet. The expectation is that the transaction // will be completed and broadcast within a few hours. Longer risks // double-spends, because the wallet will assume the transaction // failed. FundSiafunds(amount types.Currency) error // AddParents adds a set of parents to the transaction. AddParents([]types.Transaction) // AddMinerFee adds a miner fee to the transaction, returning the index // of the miner fee within the transaction. AddMinerFee(fee types.Currency) uint64 // AddSiacoinInput adds a siacoin input to the transaction, returning // the index of the siacoin input within the transaction. When 'Sign' // gets called, this input will be left unsigned. AddSiacoinInput(types.SiacoinInput) uint64 // AddSiacoinOutput adds a siacoin output to the transaction, returning // the index of the siacoin output within the transaction. AddSiacoinOutput(types.SiacoinOutput) uint64 // AddFileContract adds a file contract to the transaction, returning // the index of the file contract within the transaction. AddFileContract(types.FileContract) uint64 // AddFileContractRevision adds a file contract revision to the // transaction, returning the index of the file contract revision // within the transaction. When 'Sign' gets called, this revision will // be left unsigned. AddFileContractRevision(types.FileContractRevision) uint64 // AddStorageProof adds a storage proof to the transaction, returning // the index of the storage proof within the transaction. AddStorageProof(types.StorageProof) uint64 // AddSiafundInput adds a siafund input to the transaction, returning // the index of the siafund input within the transaction. When 'Sign' // is called, this input will be left unsigned. AddSiafundInput(types.SiafundInput) uint64 // AddSiafundOutput adds a siafund output to the transaction, returning // the index of the siafund output within the transaction. AddSiafundOutput(types.SiafundOutput) uint64 // AddArbitraryData adds arbitrary data to the transaction, returning // the index of the data within the transaction. 
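// // For example, the transaction pool tests tag non-consensus data by // prefixing it with modules.PrefixNonSia: // // data := append(modules.PrefixNonSia[:], []byte("arb-data")...) // builder.AddArbitraryData(data)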
AddArbitraryData(arb []byte) uint64 // AddTransactionSignature adds a transaction signature to the // transaction, returning the index of the signature within the // transaction. The signature should already be valid, and shouldn't // sign any of the inputs that were added by calling 'FundSiacoins' or // 'FundSiafunds'. AddTransactionSignature(types.TransactionSignature) uint64 // Sign will sign any inputs added by 'FundSiacoins' or 'FundSiafunds' // and return a transaction set that contains all parents prepended to // the transaction. If more fields need to be added, a new transaction // builder will need to be created. // // If the whole transaction flag is set to true, then the whole // transaction flag will be set in the covered fields object. If the // whole transaction flag is set to false, then the covered fields // object will cover all fields that have already been added to the // transaction, but will also leave room for more fields to be added. // // An error will be returned if there are multiple calls to 'Sign', // sometimes even if the first call to Sign has failed. Sign should // only ever be called once, and if the first signing fails, the // transaction should be dropped. Sign(wholeTransaction bool) ([]types.Transaction, error) // View returns the incomplete transaction along with all of its // parents. View() (txn types.Transaction, parents []types.Transaction) // ViewAdded returns all of the siacoin inputs, siafund inputs, and // parent transactions that have been automatically added by the // builder. Items are returned by index. ViewAdded() (newParents, siacoinInputs, siafundInputs, transactionSignatures []int) // Drop indicates that a transaction is no longer useful and will not be // broadcast, and that all of the outputs can be reclaimed. 'Drop' // should only be used before signatures are added. Drop() } // EncryptionManager can encrypt, lock, unlock, and indicate the current // status of the EncryptionManager. EncryptionManager interface { // Encrypt will encrypt the wallet using the input key. Upon // encryption, a primary seed will be created for the wallet (no seed // exists prior to this point). If the key is blank, then the hash of // the seed that is generated will be used as the key. // // Encrypt can only be called once throughout the life of the wallet // and will return an error on subsequent calls (even after restarting // the wallet). To reset the wallet, the wallet files must be moved to // a different directory or deleted. Encrypt(masterKey crypto.TwofishKey) (Seed, error) // Reset will reset the wallet, clearing the database and returning it to // the unencrypted state. Reset can only be called on a wallet that has // already been encrypted. Reset() error // Encrypted returns whether or not the wallet has been encrypted yet. // After being encrypted for the first time, the wallet can only be // unlocked using the encryption password. Encrypted() bool // InitFromSeed functions like Encrypt, but using a specified seed. // Unlike Encrypt, the blockchain will be scanned to determine the // seed's progress. For this reason, InitFromSeed should not be called // until the blockchain is fully synced. InitFromSeed(masterKey crypto.TwofishKey, seed Seed) error // Lock deletes all keys in memory and prevents the wallet from being // used to spend coins or extract keys until 'Unlock' is called. Lock() error // Unlock must be called before the wallet is usable. 
All wallets and // wallet seeds are encrypted by default, and the wallet will not know // which addresses to watch for on the blockchain until unlock has been // called. // // All items in the wallet are encrypted using different keys which are // derived from the master key. Unlock(masterKey crypto.TwofishKey) error // ChangeKey changes the wallet's masterKey from masterKey to newKey, // re-encrypting the wallet with the provided key. ChangeKey(masterKey crypto.TwofishKey, newKey crypto.TwofishKey) error // Unlocked returns true if the wallet is currently unlocked, false // otherwise. Unlocked() bool } // KeyManager manages wallet keys, including the use of seeds, creating and // loading backups, and providing a layer of compatibility for older wallet // files. KeyManager interface { // AllAddresses returns all addresses that the wallet is able to spend // from, including unseeded addresses. Addresses are returned sorted in // byte-order. AllAddresses() []types.UnlockHash // AllSeeds returns all of the seeds that are being tracked by the // wallet, including the primary seed. Only the primary seed is used to // generate new addresses, but the wallet can spend funds sent to // public keys generated by any of the seeds returned. AllSeeds() ([]Seed, error) // CreateBackup will create a backup of the wallet at the provided // filepath. The backup will have all seeds and keys. CreateBackup(string) error // LoadBackup will load a backup of the wallet from the provided // filepath. The backup wallet will be added as an auxiliary seed, not // as a primary seed. // LoadBackup(masterKey, backupMasterKey crypto.TwofishKey, string) error // Load033xWallet will load a version 0.3.3.x wallet from disk and add all of // the keys in the wallet as unseeded keys. Load033xWallet(crypto.TwofishKey, string) error // LoadSeed will recreate a wallet file using the recovery phrase. // LoadSeed only needs to be called if the original seed file or // encryption password was lost. The master key is used to encrypt the // recovery seed before saving it to disk. LoadSeed(crypto.TwofishKey, Seed) error // LoadSiagKeys will take a set of filepaths that point to a siag key // and will have the siag keys loaded into the wallet so that they will // become spendable. LoadSiagKeys(crypto.TwofishKey, []string) error // NextAddress returns a new coin address generated from the // primary seed. NextAddress() (types.UnlockConditions, error) // PrimarySeed returns the unencrypted primary seed of the wallet, // along with a uint64 indicating how many addresses may be safely // generated from the seed. PrimarySeed() (Seed, uint64, error) // SweepSeed scans the blockchain for outputs generated from seed and // creates a transaction that transfers them to the wallet. Note that // this incurs a transaction fee. It returns the total value of the // outputs, minus the fee. If only siafunds were found, the fee is // deducted from the wallet. SweepSeed(seed Seed) (coins, funds types.Currency, err error) } // Wallet stores and manages siacoins and siafunds. The wallet file is // encrypted using a user-specified password. Common addresses are all // derived from a single address seed. Wallet interface { EncryptionManager KeyManager // Close permits clean shutdown during testing and serving. Close() error // ConfirmedBalance returns the confirmed balance of the wallet, minus // any outgoing transactions. ConfirmedBalance will include unconfirmed // refund transactions.
ConfirmedBalance() (siacoinBalance types.Currency, siafundBalance types.Currency, siacoinClaimBalance types.Currency) // UnconfirmedBalance returns the unconfirmed balance of the wallet. // Outgoing funds and incoming funds are reported separately. Refund // outputs are included, meaning that sending a single coin to // someone could result in 'outgoing: 12, incoming: 11'. Siafunds are // not considered in the unconfirmed balance. UnconfirmedBalance() (outgoingSiacoins types.Currency, incomingSiacoins types.Currency) // AddressTransactions returns all of the transactions that are related // to a given address. AddressTransactions(types.UnlockHash) []ProcessedTransaction // AddressUnconfirmedTransactions returns all of the unconfirmed // transactions related to a given address. AddressUnconfirmedTransactions(types.UnlockHash) []ProcessedTransaction // Transaction returns the transaction with the given id. The bool // indicates whether the transaction is in the wallet database. The // wallet only stores transactions that are related to the wallet. Transaction(types.TransactionID) (ProcessedTransaction, bool) // Transactions returns all of the transactions that were confirmed at // heights [startHeight, endHeight]. Unconfirmed transactions are not // included. Transactions(startHeight types.BlockHeight, endHeight types.BlockHeight) ([]ProcessedTransaction, error) // UnconfirmedTransactions returns all unconfirmed transactions // relative to the wallet. UnconfirmedTransactions() []ProcessedTransaction // RegisterTransaction takes a transaction and its parents and returns // a TransactionBuilder which can be used to expand the transaction. RegisterTransaction(t types.Transaction, parents []types.Transaction) TransactionBuilder // Rescanning reports whether the wallet is currently rescanning the // blockchain. Rescanning() bool // StartTransaction is a convenience method that calls // RegisterTransaction(types.Transaction{}, nil) StartTransaction() TransactionBuilder // SendSiacoins is a tool for sending siacoins from the wallet to an // address. Sending money usually results in multiple transactions. The // transactions are automatically given to the transaction pool, and // are also returned to the caller. SendSiacoins(amount types.Currency, dest types.UnlockHash) ([]types.Transaction, error) // SendSiacoinsMulti sends coins to multiple addresses. SendSiacoinsMulti(outputs []types.SiacoinOutput) ([]types.Transaction, error) // SendSiafunds is a tool for sending siafunds from the wallet to an // address. Sending money usually results in multiple transactions. The // transactions are automatically given to the transaction pool, and // are also returned to the caller. SendSiafunds(amount types.Currency, dest types.UnlockHash) ([]types.Transaction, error) } ) // CalculateWalletTransactionID is a helper function for determining the id of // a wallet transaction. func CalculateWalletTransactionID(tid types.TransactionID, oid types.OutputID) WalletTransactionID { return WalletTransactionID(crypto.HashAll(tid, oid)) } // SeedToString converts a wallet seed to a human friendly string. func SeedToString(seed Seed, did mnemonics.DictionaryID) (string, error) { fullChecksum := crypto.HashObject(seed) checksumSeed := append(seed[:], fullChecksum[:SeedChecksumSize]...) phrase, err := mnemonics.ToPhrase(checksumSeed, did) if err != nil { return "", err } return phrase.String(), nil } // StringToSeed converts a string to a wallet seed.
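// It is the inverse of SeedToString: the phrase is decoded back into the // checksummed byte slice, and the trailing SeedChecksumSize checksum bytes // are verified before the seed is returned.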
func StringToSeed(str string, did mnemonics.DictionaryID) (Seed, error) { // Decode the string into the checksummed byte slice. checksumSeedBytes, err := mnemonics.FromString(str, did) if err != nil { return Seed{}, err } // Copy the seed from the checksummed slice. var seed Seed copy(seed[:], checksumSeedBytes) fullChecksum := crypto.HashObject(seed) if len(checksumSeedBytes) != crypto.EntropySize+SeedChecksumSize || !bytes.Equal(fullChecksum[:SeedChecksumSize], checksumSeedBytes[crypto.EntropySize:]) { return Seed{}, errors.New("seed failed checksum verification") } return seed, nil } Sia-1.3.0/modules/wallet/000077500000000000000000000000001313565667000152035ustar00rootroot00000000000000Sia-1.3.0/modules/wallet/consts.go000066400000000000000000000042531313565667000170470ustar00rootroot00000000000000package wallet import ( "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/types" ) const ( // defragThreshold is the number of outputs a wallet is allowed before it is // defragmented. defragThreshold = 50 // defragBatchSize defines how many outputs are combined during one defrag. defragBatchSize = 35 // defragStartIndex is the number of outputs to skip over when performing a // defrag. defragStartIndex = 10 ) var ( // lookaheadRescanThreshold is the number of keys in the lookahead that will be // generated before a complete wallet rescan is initialized. lookaheadRescanThreshold = build.Select(build.Var{ Dev: uint64(100), Standard: uint64(1000), Testing: uint64(10), }).(uint64) // lookaheadBuffer together with lookaheadRescanThreshold defines the constant part // of the maxLookahead lookaheadBuffer = build.Select(build.Var{ Dev: uint64(400), Standard: uint64(4000), Testing: uint64(40), }).(uint64) ) // dustValue is the quantity below which a Currency is considered to be Dust. // // TODO: These need to be functions of the wallet that interact with the // transaction pool. func dustValue() types.Currency { return types.SiacoinPrecision } // defragFee is the miner fee paid to miners when performing a defrag // transaction. // // TODO: These need to be functions of the wallet that interact with the // transaction pool. func defragFee() types.Currency { // 35 outputs at an estimated 250 bytes needed per output means about a 10kb // total transaction, much larger than your average transaction. So you need // a lot of fees. return types.SiacoinPrecision.Mul64(10) } func init() { // Sanity check - the defrag threshold needs to be higher than the batch // size plus the start index. if build.DEBUG && defragThreshold <= defragBatchSize+defragStartIndex { panic("constants are incorrect, defragThreshold needs to be larger than the sum of defragBatchSize and defragStartIndex") } } // maxLookahead returns the size of the lookahead for a given seed progress // which usually is the current primarySeedProgress func maxLookahead(start uint64) uint64 { return start + lookaheadRescanThreshold + lookaheadBuffer + start/10 } Sia-1.3.0/modules/wallet/database.go000066400000000000000000000321331313565667000173000ustar00rootroot00000000000000package wallet import ( "encoding/binary" "errors" "reflect" "time" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" "github.com/NebulousLabs/bolt" ) var ( // bucketProcessedTransactions stores ProcessedTransactions in // chronological order. Only transactions relevant to the wallet are // stored. The key of this bucket is an autoincrementing integer. 
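// Keys are written big-endian (see dbAppendProcessedTransaction below) so // that bolt's bytewise key ordering matches chronological order.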
bucketProcessedTransactions = []byte("bucketProcessedTransactions") // bucketSiacoinOutputs maps a SiacoinOutputID to its SiacoinOutput. Only // outputs that the wallet controls are stored. The wallet uses these // outputs to fund transactions. bucketSiacoinOutputs = []byte("bucketSiacoinOutputs") // bucketSiafundOutputs maps a SiafundOutputID to its SiafundOutput. Only // outputs that the wallet controls are stored. The wallet uses these // outputs to fund transactions. bucketSiafundOutputs = []byte("bucketSiafundOutputs") // bucketSpentOutputs maps an OutputID to the height at which it was // spent. Only outputs spent by the wallet are stored. The wallet tracks // these outputs so that it can reuse them if they are not confirmed on // the blockchain. bucketSpentOutputs = []byte("bucketSpentOutputs") // bucketWallet contains various fields needed by the wallet, such as its // UID, EncryptionVerification, and PrimarySeedFile. bucketWallet = []byte("bucketWallet") dbBuckets = [][]byte{ bucketProcessedTransactions, bucketSiacoinOutputs, bucketSiafundOutputs, bucketSpentOutputs, bucketWallet, } // these keys are used in bucketWallet keyUID = []byte("keyUID") keyEncryptionVerification = []byte("keyEncryptionVerification") keyPrimarySeedFile = []byte("keyPrimarySeedFile") keyPrimarySeedProgress = []byte("keyPrimarySeedProgress") keyConsensusChange = []byte("keyConsensusChange") keyConsensusHeight = []byte("keyConsensusHeight") keySpendableKeyFiles = []byte("keySpendableKeyFiles") keyAuxiliarySeedFiles = []byte("keyAuxiliarySeedFiles") keySiafundPool = []byte("keySiafundPool") errNoKey = errors.New("key does not exist") ) // threadedDBUpdate commits the active database transaction and starts a new // transaction. func (w *Wallet) threadedDBUpdate() { if err := w.tg.Add(); err != nil { return } defer w.tg.Done() for { select { case <-time.After(2 * time.Minute): case <-w.tg.StopChan(): return } w.mu.Lock() w.syncDB() w.mu.Unlock() } } // syncDB commits the current global transaction and immediately begins a // new one. It must be called with a write-lock. func (w *Wallet) syncDB() { // commit the current tx err := w.dbTx.Commit() if err != nil { w.log.Severe("ERROR: failed to apply database update:", err) w.dbTx.Rollback() } // begin a new tx w.dbTx, err = w.db.Begin(true) if err != nil { w.log.Severe("ERROR: failed to start database update:", err) } } // dbReset wipes and reinitializes a wallet database. func dbReset(tx *bolt.Tx) error { for _, bucket := range dbBuckets { err := tx.DeleteBucket(bucket) if err != nil { return err } _, err = tx.CreateBucket(bucket) if err != nil { return err } } // reinitialize the database with default values wb := tx.Bucket(bucketWallet) wb.Put(keyUID, fastrand.Bytes(len(uniqueID{}))) wb.Put(keyConsensusHeight, encoding.Marshal(uint64(0))) wb.Put(keyAuxiliarySeedFiles, encoding.Marshal([]seedFile{})) wb.Put(keySpendableKeyFiles, encoding.Marshal([]spendableKeyFile{})) dbPutConsensusHeight(tx, 0) dbPutConsensusChangeID(tx, modules.ConsensusChangeBeginning) dbPutSiafundPool(tx, types.ZeroCurrency) return nil } // dbPut is a helper function for storing a marshalled key/value pair. func dbPut(b *bolt.Bucket, key, val interface{}) error { return b.Put(encoding.Marshal(key), encoding.Marshal(val)) } // dbGet is a helper function for retrieving a marshalled key/value pair. val // must be a pointer.
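// // A typical pairing with dbPut, mirroring the typed wrappers below: // // var output types.SiacoinOutput // err := dbGet(tx.Bucket(bucketSiacoinOutputs), id, &output)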
func dbGet(b *bolt.Bucket, key, val interface{}) error { valBytes := b.Get(encoding.Marshal(key)) if valBytes == nil { return errNoKey } return encoding.Unmarshal(valBytes, val) } // dbDelete is a helper function for deleting a marshalled key/value pair. func dbDelete(b *bolt.Bucket, key interface{}) error { return b.Delete(encoding.Marshal(key)) } // dbForEach is a helper function for iterating over a bucket and calling fn // on each entry. fn must be a function with two parameters. The key/value // bytes of each bucket entry will be unmarshalled into the types of fn's // parameters. func dbForEach(b *bolt.Bucket, fn interface{}) error { // check function type fnVal, fnTyp := reflect.ValueOf(fn), reflect.TypeOf(fn) if fnTyp.Kind() != reflect.Func || fnTyp.NumIn() != 2 { panic("bad fn type: needed func(key, val), got " + fnTyp.String()) } return b.ForEach(func(keyBytes, valBytes []byte) error { key, val := reflect.New(fnTyp.In(0)), reflect.New(fnTyp.In(1)) if err := encoding.Unmarshal(keyBytes, key.Interface()); err != nil { return err } else if err := encoding.Unmarshal(valBytes, val.Interface()); err != nil { return err } fnVal.Call([]reflect.Value{key.Elem(), val.Elem()}) return nil }) } // Type-safe wrappers around the db helpers func dbPutSiacoinOutput(tx *bolt.Tx, id types.SiacoinOutputID, output types.SiacoinOutput) error { return dbPut(tx.Bucket(bucketSiacoinOutputs), id, output) } func dbGetSiacoinOutput(tx *bolt.Tx, id types.SiacoinOutputID) (output types.SiacoinOutput, err error) { err = dbGet(tx.Bucket(bucketSiacoinOutputs), id, &output) return } func dbDeleteSiacoinOutput(tx *bolt.Tx, id types.SiacoinOutputID) error { return dbDelete(tx.Bucket(bucketSiacoinOutputs), id) } func dbForEachSiacoinOutput(tx *bolt.Tx, fn func(types.SiacoinOutputID, types.SiacoinOutput)) error { return dbForEach(tx.Bucket(bucketSiacoinOutputs), fn) } func dbPutSiafundOutput(tx *bolt.Tx, id types.SiafundOutputID, output types.SiafundOutput) error { return dbPut(tx.Bucket(bucketSiafundOutputs), id, output) } func dbGetSiafundOutput(tx *bolt.Tx, id types.SiafundOutputID) (output types.SiafundOutput, err error) { err = dbGet(tx.Bucket(bucketSiafundOutputs), id, &output) return } func dbDeleteSiafundOutput(tx *bolt.Tx, id types.SiafundOutputID) error { return dbDelete(tx.Bucket(bucketSiafundOutputs), id) } func dbForEachSiafundOutput(tx *bolt.Tx, fn func(types.SiafundOutputID, types.SiafundOutput)) error { return dbForEach(tx.Bucket(bucketSiafundOutputs), fn) } func dbPutSpentOutput(tx *bolt.Tx, id types.OutputID, height types.BlockHeight) error { return dbPut(tx.Bucket(bucketSpentOutputs), id, height) } func dbGetSpentOutput(tx *bolt.Tx, id types.OutputID) (height types.BlockHeight, err error) { err = dbGet(tx.Bucket(bucketSpentOutputs), id, &height) return } func dbDeleteSpentOutput(tx *bolt.Tx, id types.OutputID) error { return dbDelete(tx.Bucket(bucketSpentOutputs), id) } // bucketProcessedTransactions works a little differently: the key is // meaningless, only used to order the transactions chronologically. 
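// Each append encodes the bucket's NextSequence counter big-endian; raw // little-endian keys would sort key 256 before key 1 under bolt's bytewise // key comparison.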
func dbAppendProcessedTransaction(tx *bolt.Tx, pt modules.ProcessedTransaction) error { b := tx.Bucket(bucketProcessedTransactions) key, err := b.NextSequence() if err != nil { return err } // big-endian is used so that the keys are properly sorted keyBytes := make([]byte, 8) binary.BigEndian.PutUint64(keyBytes, key) return b.Put(keyBytes, encoding.Marshal(pt)) } func dbGetLastProcessedTransaction(tx *bolt.Tx) (pt modules.ProcessedTransaction, err error) { _, val := tx.Bucket(bucketProcessedTransactions).Cursor().Last() err = encoding.Unmarshal(val, &pt) if err != nil { // COMPATv1.2.1: try decoding into old transaction type var oldpt v121ProcessedTransaction err = encoding.Unmarshal(val, &oldpt) pt = convertProcessedTransaction(oldpt) } return } func dbDeleteLastProcessedTransaction(tx *bolt.Tx) error { // delete the last entry in the bucket. Note that we don't need to // decrement the sequence integer; we only care that the next integer is // larger than the previous one. b := tx.Bucket(bucketProcessedTransactions) key, _ := b.Cursor().Last() return b.Delete(key) } func dbForEachProcessedTransaction(tx *bolt.Tx, fn func(modules.ProcessedTransaction)) error { return dbForEach(tx.Bucket(bucketProcessedTransactions), func(_ uint64, pt modules.ProcessedTransaction) { fn(pt) }) } // A processedTransactionsIter iterates through the ProcessedTransactions bucket. type processedTransactionsIter struct { c *bolt.Cursor pt modules.ProcessedTransaction } // next decodes the next ProcessedTransaction, returning false if the end of // the bucket has been reached. func (it *processedTransactionsIter) next() bool { var ptBytes []byte if it.pt.TransactionID == (types.TransactionID{}) { // this is the first time next has been called, so cursor is not // initialized yet _, ptBytes = it.c.First() } else { _, ptBytes = it.c.Next() } err := encoding.Unmarshal(ptBytes, &it.pt) if err != nil { // COMPATv1.2.1: try decoding into old transaction type var oldpt v121ProcessedTransaction err = encoding.Unmarshal(ptBytes, &oldpt) it.pt = convertProcessedTransaction(oldpt) } return err == nil } // value returns the most recently decoded ProcessedTransaction. func (it *processedTransactionsIter) value() modules.ProcessedTransaction { return it.pt } // dbProcessedTransactionsIterator creates a new processedTransactionsIter. func dbProcessedTransactionsIterator(tx *bolt.Tx) *processedTransactionsIter { return &processedTransactionsIter{ c: tx.Bucket(bucketProcessedTransactions).Cursor(), } } // dbGetWalletUID returns the UID assigned to the wallet's primary seed. func dbGetWalletUID(tx *bolt.Tx) (uid uniqueID) { copy(uid[:], tx.Bucket(bucketWallet).Get(keyUID)) return } // dbGetPrimarySeedProgress returns the number of keys generated from the // primary seed. func dbGetPrimarySeedProgress(tx *bolt.Tx) (progress uint64, err error) { err = encoding.Unmarshal(tx.Bucket(bucketWallet).Get(keyPrimarySeedProgress), &progress) return } // dbPutPrimarySeedProgress sets the primary seed progress counter. func dbPutPrimarySeedProgress(tx *bolt.Tx, progress uint64) error { return tx.Bucket(bucketWallet).Put(keyPrimarySeedProgress, encoding.Marshal(progress)) } // dbGetConsensusChangeID returns the ID of the last ConsensusChange processed by the wallet. func dbGetConsensusChangeID(tx *bolt.Tx) (cc modules.ConsensusChangeID) { copy(cc[:], tx.Bucket(bucketWallet).Get(keyConsensusChange)) return } // dbPutConsensusChangeID stores the ID of the last ConsensusChange processed by the wallet. 
func dbPutConsensusChangeID(tx *bolt.Tx, cc modules.ConsensusChangeID) error { return tx.Bucket(bucketWallet).Put(keyConsensusChange, cc[:]) } // dbGetConsensusHeight returns the height that the wallet has scanned to. func dbGetConsensusHeight(tx *bolt.Tx) (height types.BlockHeight, err error) { err = encoding.Unmarshal(tx.Bucket(bucketWallet).Get(keyConsensusHeight), &height) return } // dbPutConsensusHeight stores the height that the wallet has scanned to. func dbPutConsensusHeight(tx *bolt.Tx, height types.BlockHeight) error { return tx.Bucket(bucketWallet).Put(keyConsensusHeight, encoding.Marshal(height)) } // dbGetSiafundPool returns the value of the siafund pool. func dbGetSiafundPool(tx *bolt.Tx) (pool types.Currency, err error) { err = encoding.Unmarshal(tx.Bucket(bucketWallet).Get(keySiafundPool), &pool) return } // dbPutSiafundPool stores the value of the siafund pool. func dbPutSiafundPool(tx *bolt.Tx, pool types.Currency) error { return tx.Bucket(bucketWallet).Put(keySiafundPool, encoding.Marshal(pool)) } // COMPATv121: these types were stored in the db in v1.2.2 and earlier. type ( v121ProcessedInput struct { FundType types.Specifier WalletAddress bool RelatedAddress types.UnlockHash Value types.Currency } v121ProcessedOutput struct { FundType types.Specifier MaturityHeight types.BlockHeight WalletAddress bool RelatedAddress types.UnlockHash Value types.Currency } v121ProcessedTransaction struct { Transaction types.Transaction TransactionID types.TransactionID ConfirmationHeight types.BlockHeight ConfirmationTimestamp types.Timestamp Inputs []v121ProcessedInput Outputs []v121ProcessedOutput } ) func convertProcessedTransaction(oldpt v121ProcessedTransaction) (pt modules.ProcessedTransaction) { pt.Transaction = oldpt.Transaction pt.TransactionID = oldpt.TransactionID pt.ConfirmationHeight = oldpt.ConfirmationHeight pt.ConfirmationTimestamp = oldpt.ConfirmationTimestamp pt.Inputs = make([]modules.ProcessedInput, len(oldpt.Inputs)) for i, in := range oldpt.Inputs { pt.Inputs[i] = modules.ProcessedInput{ FundType: in.FundType, WalletAddress: in.WalletAddress, RelatedAddress: in.RelatedAddress, Value: in.Value, } } pt.Outputs = make([]modules.ProcessedOutput, len(oldpt.Outputs)) for i, out := range oldpt.Outputs { pt.Outputs[i] = modules.ProcessedOutput{ FundType: out.FundType, MaturityHeight: out.MaturityHeight, WalletAddress: out.WalletAddress, RelatedAddress: out.RelatedAddress, Value: out.Value, } } return } Sia-1.3.0/modules/wallet/database_test.go000066400000000000000000000013031313565667000203320ustar00rootroot00000000000000package wallet import ( "os" "path/filepath" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/bolt" ) // TestDBOpen tests the wallet.openDB method. 
func TestDBOpen(t *testing.T) { w := new(Wallet) err := w.openDB("") if err == nil { t.Fatal("expected error, got nil") } testdir := build.TempDir(modules.WalletDir, "TestDBOpen") os.MkdirAll(testdir, 0700) err = w.openDB(filepath.Join(testdir, dbFile)) if err != nil { t.Fatal(err) } w.db.View(func(tx *bolt.Tx) error { for _, b := range dbBuckets { if tx.Bucket(b) == nil { t.Error("bucket", string(b), "does not exist") } } return nil }) w.db.Close() } Sia-1.3.0/modules/wallet/defrag.go000066400000000000000000000111671313565667000167700ustar00rootroot00000000000000package wallet import ( "errors" "sort" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/types" ) var ( errDefragNotNeeded = errors.New("defragging not needed, wallet is already sufficiently defragged") ) // createDefragTransaction creates a transaction that spends multiple existing // wallet outputs into a single new address. func (w *Wallet) createDefragTransaction() ([]types.Transaction, error) { consensusHeight, err := dbGetConsensusHeight(w.dbTx) if err != nil { return nil, err } // Collect a value-sorted set of siacoin outputs. var so sortedOutputs err = dbForEachSiacoinOutput(w.dbTx, func(scoid types.SiacoinOutputID, sco types.SiacoinOutput) { if w.checkOutput(w.dbTx, consensusHeight, scoid, sco) == nil { so.ids = append(so.ids, scoid) so.outputs = append(so.outputs, sco) } }) if err != nil { return nil, err } sort.Sort(sort.Reverse(so)) // Only defrag if there are enough outputs to merit defragging. if len(so.ids) <= defragThreshold { return nil, errDefragNotNeeded } // Skip over the 'defragStartIndex' largest outputs, so that the user can // still reasonably use their wallet while the defrag is happening. var amount types.Currency var parentTxn types.Transaction var spentScoids []types.SiacoinOutputID for i := defragStartIndex; i < defragStartIndex+defragBatchSize; i++ { scoid := so.ids[i] sco := so.outputs[i] // Add a siacoin input for this output. outputUnlockConditions := w.keys[sco.UnlockHash].UnlockConditions sci := types.SiacoinInput{ ParentID: scoid, UnlockConditions: outputUnlockConditions, } parentTxn.SiacoinInputs = append(parentTxn.SiacoinInputs, sci) spentScoids = append(spentScoids, scoid) // Add the output to the total fund amount = amount.Add(sco.Value) } // Create and add the output that will be used to fund the defrag // transaction. parentUnlockConditions, err := w.nextPrimarySeedAddress(w.dbTx) if err != nil { return nil, err } exactOutput := types.SiacoinOutput{ Value: amount, UnlockHash: parentUnlockConditions.UnlockHash(), } parentTxn.SiacoinOutputs = append(parentTxn.SiacoinOutputs, exactOutput) // Sign all of the inputs to the parent transaction. for _, sci := range parentTxn.SiacoinInputs { addSignatures(&parentTxn, types.FullCoveredFields, sci.UnlockConditions, crypto.Hash(sci.ParentID), w.keys[sci.UnlockConditions.UnlockHash()]) } // Create the defrag transaction. fee := defragFee() refundAddr, err := w.nextPrimarySeedAddress(w.dbTx) if err != nil { return nil, err } txn := types.Transaction{ SiacoinInputs: []types.SiacoinInput{{ ParentID: parentTxn.SiacoinOutputID(0), UnlockConditions: parentUnlockConditions, }}, SiacoinOutputs: []types.SiacoinOutput{{ Value: amount.Sub(fee), UnlockHash: refundAddr.UnlockHash(), }}, MinerFees: []types.Currency{fee}, } addSignatures(&txn, types.FullCoveredFields, parentUnlockConditions, crypto.Hash(parentTxn.SiacoinOutputID(0)), w.keys[parentUnlockConditions.UnlockHash()]) // Mark all outputs that were spent as spent. 
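// Recording them in bucketSpentOutputs prevents the wallet from trying to // fund other transactions with these same outputs while the defrag // transaction remains unconfirmed.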
for _, scoid := range spentScoids { if err = dbPutSpentOutput(w.dbTx, types.OutputID(scoid), consensusHeight); err != nil { return nil, err } } // Mark the parent output as spent. Must be done after the transaction is // finished because otherwise the txid and output id will change. if err = dbPutSpentOutput(w.dbTx, types.OutputID(parentTxn.SiacoinOutputID(0)), consensusHeight); err != nil { return nil, err } // Construct the final transaction set return []types.Transaction{parentTxn, txn}, nil } // threadedDefragWallet consolidates a batch of the wallet's largest outputs // (defragBatchSize outputs, skipping the defragStartIndex largest) into a // single output sent back to the wallet, effectively defragmenting it. This // defrag operation is only performed if the wallet has greater than // defragThreshold outputs. func (w *Wallet) threadedDefragWallet() { err := w.tg.Add() if err != nil { return } defer w.tg.Done() // Check that a defrag makes sense. w.mu.Lock() if !w.unlocked { // Can't defrag if the wallet is locked. w.mu.Unlock() return } // Create the defrag transaction. txnSet, err := w.createDefragTransaction() w.mu.Unlock() if err == errDefragNotNeeded { // benign return } else if err != nil { w.log.Println("WARN: couldn't create defrag transaction:", err) return } // Submit the defrag to the transaction pool. err = w.tpool.AcceptTransactionSet(txnSet) if err != nil { w.log.Println("WARN: defrag transaction was rejected:", err) return } w.log.Println("Submitting a transaction set to defragment the wallet's outputs, IDs:") for _, txn := range txnSet { w.log.Println("\t", txn.ID()) } } Sia-1.3.0/modules/wallet/defrag_test.go000066400000000000000000000110661313565667000200250ustar00rootroot00000000000000package wallet import ( "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/types" ) // TestDefragWallet mines many blocks and checks that the wallet's outputs are // consolidated once more than defragThreshold blocks are mined. func TestDefragWallet(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // mine defragThreshold blocks, resulting in defragThreshold outputs for i := 0; i < defragThreshold; i++ { _, err := wt.miner.AddBlock() if err != nil { t.Fatal(err) } } // add another block to push the number of outputs over the threshold _, err = wt.miner.AddBlock() if err != nil { t.Fatal(err) } // allow some time for the defrag transaction to occur, then mine another block time.Sleep(time.Second * 5) _, err = wt.miner.AddBlock() if err != nil { t.Fatal(err) } // defrag should keep the outputs below the threshold wt.wallet.mu.Lock() // force a sync because bucket stats may not be reliable until commit wt.wallet.syncDB() siacoinOutputs := wt.wallet.dbTx.Bucket(bucketSiacoinOutputs).Stats().KeyN wt.wallet.mu.Unlock() if siacoinOutputs > defragThreshold { t.Fatalf("defrag should result in fewer than defragThreshold outputs, got %v wanted %v\n", siacoinOutputs, defragThreshold) } } // TestDefragWalletDust verifies that dust outputs do not trigger the defrag // operation.
func TestDefragWalletDust(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() dustOutputValue := types.NewCurrency64(10000) noutputs := defragThreshold + 1 tbuilder := wt.wallet.StartTransaction() err = tbuilder.FundSiacoins(dustOutputValue.Mul64(uint64(noutputs))) if err != nil { t.Fatal(err) } wt.wallet.mu.Lock() var dest types.UnlockHash for k := range wt.wallet.keys { dest = k break } wt.wallet.mu.Unlock() for i := 0; i < noutputs; i++ { tbuilder.AddSiacoinOutput(types.SiacoinOutput{ Value: dustOutputValue, UnlockHash: dest, }) } txns, err := tbuilder.Sign(true) if err != nil { t.Fatal(err) } err = wt.tpool.AcceptTransactionSet(txns) if err != nil { t.Fatal(err) } _, err = wt.miner.AddBlock() if err != nil { t.Fatal(err) } time.Sleep(time.Second) wt.wallet.mu.Lock() // force a sync because bucket stats may not be reliable until commit wt.wallet.syncDB() siacoinOutputs := wt.wallet.dbTx.Bucket(bucketSiacoinOutputs).Stats().KeyN wt.wallet.mu.Unlock() if siacoinOutputs < defragThreshold { t.Fatal("defrag consolidated dust outputs") } } // TestDefragOutputExhaustion verifies that sending transactions still succeeds // even when the defragger is under heavy stress. func TestDefragOutputExhaustion(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } t.Parallel() wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() wt.wallet.mu.Lock() var dest types.UnlockHash for k := range wt.wallet.keys { dest = k break } wt.wallet.mu.Unlock() wt.miner.AddBlock() // concurrently make a bunch of transactions with lots of outputs to keep the // defragger running closechan := make(chan struct{}) donechan := make(chan struct{}) go func() { defer close(donechan) for { select { case <-closechan: return case <-time.After(time.Millisecond * 100): wt.miner.AddBlock() txnValue := types.SiacoinPrecision.Mul64(3000) fee := types.SiacoinPrecision.Mul64(10) numOutputs := defragThreshold + 1 tbuilder := wt.wallet.StartTransaction() tbuilder.FundSiacoins(txnValue.Mul64(uint64(numOutputs)).Add(fee)) for i := 0; i < numOutputs; i++ { tbuilder.AddSiacoinOutput(types.SiacoinOutput{ Value: txnValue, UnlockHash: dest, }) } tbuilder.AddMinerFee(fee) txns, err := tbuilder.Sign(true) if err != nil { t.Error("Error signing fragmenting transaction:", err) } err = wt.tpool.AcceptTransactionSet(txns) if err != nil { t.Error("Error accepting fragmenting transaction:", err) } wt.miner.AddBlock() } } }() time.Sleep(time.Second * 1) // ensure we can still send transactions while receiving aggressively // fragmented outputs for i := 0; i < 30; i++ { sendAmount := types.SiacoinPrecision.Mul64(2000) _, err = wt.wallet.SendSiacoins(sendAmount, types.UnlockHash{}) if err != nil { t.Errorf("%v: %v", i, err) } time.Sleep(time.Millisecond * 50) } close(closechan) <-donechan } Sia-1.3.0/modules/wallet/encrypt.go000066400000000000000000000375311313565667000172270ustar00rootroot00000000000000package wallet import ( "bytes" "errors" "fmt" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" "github.com/NebulousLabs/fastrand" ) var ( errAlreadyUnlocked = errors.New("wallet has already been unlocked") errReencrypt = errors.New("wallet is already encrypted, cannot encrypt again") errUnencryptedWallet = errors.New("wallet has not been 
encrypted yet") errScanInProgress = errors.New("another wallet rescan is already underway") // verificationPlaintext is the plaintext used to verify encryption keys. // By storing the corresponding ciphertext for a given key, we can later // verify that a key is correct by using it to decrypt the ciphertext and // comparing the result to verificationPlaintext. verificationPlaintext = make([]byte, 32) ) // uidEncryptionKey creates an encryption key that is used to decrypt a // specific key file. func uidEncryptionKey(masterKey crypto.TwofishKey, uid uniqueID) crypto.TwofishKey { return crypto.TwofishKey(crypto.HashAll(masterKey, uid)) } // verifyEncryption verifies that key properly decrypts the ciphertext to a // preset plaintext. func verifyEncryption(key crypto.TwofishKey, encrypted crypto.Ciphertext) error { verification, err := key.DecryptBytes(encrypted) if err != nil { return modules.ErrBadEncryptionKey } if !bytes.Equal(verificationPlaintext, verification) { return modules.ErrBadEncryptionKey } return nil } // checkMasterKey verifies that the masterKey is the key used to encrypt the wallet. func checkMasterKey(tx *bolt.Tx, masterKey crypto.TwofishKey) error { uk := uidEncryptionKey(masterKey, dbGetWalletUID(tx)) encryptedVerification := tx.Bucket(bucketWallet).Get(keyEncryptionVerification) return verifyEncryption(uk, encryptedVerification) } // initEncryption initializes and encrypts the primary SeedFile. func (w *Wallet) initEncryption(masterKey crypto.TwofishKey, seed modules.Seed, progress uint64) (modules.Seed, error) { wb := w.dbTx.Bucket(bucketWallet) // Check if the wallet encryption key has already been set. if wb.Get(keyEncryptionVerification) != nil { return modules.Seed{}, errReencrypt } // create a seedFile for the seed sf := createSeedFile(masterKey, seed) // set this as the primary seedFile err := wb.Put(keyPrimarySeedFile, encoding.Marshal(sf)) if err != nil { return modules.Seed{}, err } err = wb.Put(keyPrimarySeedProgress, encoding.Marshal(progress)) if err != nil { return modules.Seed{}, err } // Establish the encryption verification using the masterKey. After this // point, the wallet is encrypted. uk := uidEncryptionKey(masterKey, dbGetWalletUID(w.dbTx)) err = wb.Put(keyEncryptionVerification, uk.EncryptBytes(verificationPlaintext)) if err != nil { return modules.Seed{}, err } // on future startups, this field will be set by w.initPersist w.encrypted = true return seed, nil } // managedUnlock loads all of the encrypted file structures into wallet memory. Even // after loading, the structures are kept encrypted, but some data such as // addresses are decrypted so that the wallet knows what to track. func (w *Wallet) managedUnlock(masterKey crypto.TwofishKey) error { w.mu.RLock() unlocked := w.unlocked encrypted := w.encrypted w.mu.RUnlock() if unlocked { return errAlreadyUnlocked } else if !encrypted { return errUnencryptedWallet } // Load db objects into memory. 
var lastChange modules.ConsensusChangeID var primarySeedFile seedFile var primarySeedProgress uint64 var auxiliarySeedFiles []seedFile var unseededKeyFiles []spendableKeyFile err := func() error { w.mu.Lock() defer w.mu.Unlock() // verify masterKey err := checkMasterKey(w.dbTx, masterKey) if err != nil { return err } // lastChange lastChange = dbGetConsensusChangeID(w.dbTx) // primarySeedFile + primarySeedProgress wb := w.dbTx.Bucket(bucketWallet) err = encoding.Unmarshal(wb.Get(keyPrimarySeedFile), &primarySeedFile) if err != nil { return err } err = encoding.Unmarshal(wb.Get(keyPrimarySeedProgress), &primarySeedProgress) if err != nil { return err } // auxiliarySeedFiles err = encoding.Unmarshal(wb.Get(keyAuxiliarySeedFiles), &auxiliarySeedFiles) if err != nil { return err } // unseededKeyFiles err = encoding.Unmarshal(wb.Get(keySpendableKeyFiles), &unseededKeyFiles) if err != nil { return err } return nil }() if err != nil { return err } // Decrypt + load keys. err = func() error { w.mu.Lock() defer w.mu.Unlock() // primarySeedFile primarySeed, err := decryptSeedFile(masterKey, primarySeedFile) if err != nil { return err } w.integrateSeed(primarySeed, primarySeedProgress) w.primarySeed = primarySeed w.regenerateLookahead(primarySeedProgress) // auxiliarySeedFiles for _, sf := range auxiliarySeedFiles { auxSeed, err := decryptSeedFile(masterKey, sf) if err != nil { return err } w.integrateSeed(auxSeed, modules.PublicKeysPerSeed) w.seeds = append(w.seeds, auxSeed) } // unseededKeyFiles for _, uk := range unseededKeyFiles { sk, err := decryptSpendableKeyFile(masterKey, uk) if err != nil { return err } w.integrateSpendableKey(masterKey, sk) } return nil }() if err != nil { return err } // Subscribe to the consensus set if this is the first unlock for the // wallet object. w.mu.RLock() subscribed := w.subscribed w.mu.RUnlock() if !subscribed { // Subscription can take a while, so spawn a goroutine to print the // wallet height every few seconds. (If subscription completes // quickly, nothing will be printed.) done := make(chan struct{}) go w.rescanMessage(done) defer close(done) err = w.cs.ConsensusSetSubscribe(w, lastChange) if err == modules.ErrInvalidConsensusChangeID { // something went wrong; resubscribe from the beginning err = dbPutConsensusChangeID(w.dbTx, modules.ConsensusChangeBeginning) if err != nil { return fmt.Errorf("failed to reset db during rescan: %v", err) } err = dbPutConsensusHeight(w.dbTx, 0) if err != nil { return fmt.Errorf("failed to reset db during rescan: %v", err) } err = w.cs.ConsensusSetSubscribe(w, modules.ConsensusChangeBeginning) } if err != nil { return fmt.Errorf("wallet subscription failed: %v", err) } w.tpool.TransactionPoolSubscribe(w) } w.mu.Lock() w.unlocked = true w.subscribed = true w.mu.Unlock() return nil } // rescanMessage prints the blockheight every 3 seconds until done is closed. func (w *Wallet) rescanMessage(done chan struct{}) { if build.Release == "testing" { return } // sleep first because we may not need to print a message at all if // done is closed quickly. select { case <-done: return case <-time.After(3 * time.Second): } for { w.mu.Lock() height, _ := dbGetConsensusHeight(w.dbTx) w.mu.Unlock() print("\rWallet: scanned to height ", height, "...") select { case <-done: println("\nDone!") return case <-time.After(3 * time.Second): } } } // wipeSecrets erases all of the seeds and secret keys in the wallet. func (w *Wallet) wipeSecrets() { // 'for i := range' must be used to prevent copies of secret data from // being made. 
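// (A 'for _, seed := range w.seeds' loop, for example, would copy each seed // array into a temporary, and SecureWipe would zero the temporary rather // than the seed stored in the slice.)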
for i := range w.keys { for j := range w.keys[i].SecretKeys { crypto.SecureWipe(w.keys[i].SecretKeys[j][:]) } } for i := range w.seeds { crypto.SecureWipe(w.seeds[i][:]) } crypto.SecureWipe(w.primarySeed[:]) w.seeds = w.seeds[:0] } // Encrypted returns whether or not the wallet has been encrypted. func (w *Wallet) Encrypted() bool { w.mu.Lock() defer w.mu.Unlock() if build.DEBUG && w.unlocked && !w.encrypted { panic("wallet is both unlocked and unencrypted") } return w.encrypted } // Encrypt will create a primary seed for the wallet and encrypt it using // masterKey. If masterKey is blank, then the hash of the primary seed will be // used instead. The wallet will still be locked after Encrypt is called. // // Encrypt can only be called once throughout the life of the wallet, and will // return an error on subsequent calls (even after restarting the wallet). To // reset the wallet, the wallet files must be moved to a different directory // or deleted. func (w *Wallet) Encrypt(masterKey crypto.TwofishKey) (modules.Seed, error) { if err := w.tg.Add(); err != nil { return modules.Seed{}, err } defer w.tg.Done() w.mu.Lock() defer w.mu.Unlock() // Create a random seed. var seed modules.Seed fastrand.Read(seed[:]) // If masterKey is blank, use the hash of the seed. if masterKey == (crypto.TwofishKey{}) { masterKey = crypto.TwofishKey(crypto.HashObject(seed)) } // Initial seed progress is 0. return w.initEncryption(masterKey, seed, 0) } // Reset will reset the wallet, clearing the database and returning it to // the unencrypted state. Reset can only be called on a wallet that has // already been encrypted. func (w *Wallet) Reset() error { if err := w.tg.Add(); err != nil { return err } defer w.tg.Done() w.mu.Lock() defer w.mu.Unlock() wb := w.dbTx.Bucket(bucketWallet) if wb.Get(keyEncryptionVerification) == nil { return errUnencryptedWallet } w.cs.Unsubscribe(w) w.tpool.Unsubscribe(w) err := dbReset(w.dbTx) if err != nil { return err } w.wipeSecrets() w.keys = make(map[types.UnlockHash]spendableKey) w.lookahead = make(map[types.UnlockHash]uint64) w.seeds = []modules.Seed{} w.unconfirmedProcessedTransactions = []modules.ProcessedTransaction{} w.unlocked = false w.encrypted = false w.subscribed = false return nil } // InitFromSeed functions like Init, but using a specified seed. Unlike Init, // the blockchain will be scanned to determine the seed's progress. For this // reason, InitFromSeed should not be called until the blockchain is fully // synced. func (w *Wallet) InitFromSeed(masterKey crypto.TwofishKey, seed modules.Seed) error { if err := w.tg.Add(); err != nil { return err } defer w.tg.Done() if !w.cs.Synced() { return errors.New("cannot init from seed until blockchain is synced") } // If masterKey is blank, use the hash of the seed. if masterKey == (crypto.TwofishKey{}) { masterKey = crypto.TwofishKey(crypto.HashObject(seed)) } if !w.scanLock.TryLock() { return errScanInProgress } defer w.scanLock.Unlock() // estimate the primarySeedProgress by scanning the blockchain s := newSeedScanner(seed, w.log) if err := s.scan(w.cs); err != nil { return err } // NOTE: each time the wallet generates a key for index n, it sets its // progress to n+1, so the progress should be the largest index seen + 1. // We also add 10% as a buffer because the seed may have addresses in the // wild that have not appeared in the blockchain yet. progress := s.largestIndexSeen + 1 progress += progress / 10 w.log.Printf("INFO: found key index %v in blockchain. 
Setting primary seed progress to %v", s.largestIndexSeen, progress) // initialize the wallet with the appropriate seed progress w.mu.Lock() defer w.mu.Unlock() _, err := w.initEncryption(masterKey, seed, progress) return err } // Unlocked indicates whether the wallet is locked or unlocked. func (w *Wallet) Unlocked() bool { w.mu.RLock() defer w.mu.RUnlock() return w.unlocked } // Lock will erase all keys from memory and prevent the wallet from spending // coins until it is unlocked. func (w *Wallet) Lock() error { w.mu.Lock() defer w.mu.Unlock() if !w.unlocked { return modules.ErrLockedWallet } w.log.Println("INFO: Locking wallet.") // Wipe all of the seeds and secret keys. They will be replaced upon // calling 'Unlock' again. Note that since the public keys are not wiped, // we can continue processing blocks. w.wipeSecrets() w.unlocked = false return nil } // managedChangeKey safely performs the database operations required to change // the wallet's encryption key. func (w *Wallet) managedChangeKey(masterKey crypto.TwofishKey, newKey crypto.TwofishKey) error { w.mu.Lock() encrypted := w.encrypted w.mu.Unlock() if !encrypted { return errUnencryptedWallet } // grab the current seed files var primarySeedFile seedFile var auxiliarySeedFiles []seedFile var unseededKeyFiles []spendableKeyFile err := func() error { w.mu.Lock() defer w.mu.Unlock() // verify masterKey err := checkMasterKey(w.dbTx, masterKey) if err != nil { return err } wb := w.dbTx.Bucket(bucketWallet) // primarySeedFile err = encoding.Unmarshal(wb.Get(keyPrimarySeedFile), &primarySeedFile) if err != nil { return err } // auxiliarySeedFiles err = encoding.Unmarshal(wb.Get(keyAuxiliarySeedFiles), &auxiliarySeedFiles) if err != nil { return err } // unseededKeyFiles err = encoding.Unmarshal(wb.Get(keySpendableKeyFiles), &unseededKeyFiles) if err != nil { return err } return nil }() if err != nil { return err } // decrypt key files var primarySeed modules.Seed var auxiliarySeeds []modules.Seed var spendableKeys []spendableKey primarySeed, err = decryptSeedFile(masterKey, primarySeedFile) if err != nil { return err } for _, sf := range auxiliarySeedFiles { auxSeed, err := decryptSeedFile(masterKey, sf) if err != nil { return err } auxiliarySeeds = append(auxiliarySeeds, auxSeed) } for _, uk := range unseededKeyFiles { sk, err := decryptSpendableKeyFile(masterKey, uk) if err != nil { return err } spendableKeys = append(spendableKeys, sk) } // encrypt new keyfiles using newKey var newPrimarySeedFile seedFile var newAuxiliarySeedFiles []seedFile var newUnseededKeyFiles []spendableKeyFile newPrimarySeedFile = createSeedFile(newKey, primarySeed) for _, seed := range auxiliarySeeds { newAuxiliarySeedFiles = append(newAuxiliarySeedFiles, createSeedFile(newKey, seed)) } for _, sk := range spendableKeys { var skf spendableKeyFile fastrand.Read(skf.UID[:]) encryptionKey := uidEncryptionKey(newKey, skf.UID) skf.EncryptionVerification = encryptionKey.EncryptBytes(verificationPlaintext) // Encrypt and save the key. 
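// Each key file re-encrypted here receives a fresh random UID, so its
// actual encryption key is uidEncryptionKey(newKey, UID) rather than
// newKey itself; the EncryptionVerification field written above lets a
// future unlock confirm the key by decrypting it back to
// verificationPlaintext.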
skf.SpendableKey = encryptionKey.EncryptBytes(encoding.Marshal(sk)) newUnseededKeyFiles = append(newUnseededKeyFiles, skf) } // put the newly encrypted keys in the database err = func() error { w.mu.Lock() defer w.mu.Unlock() wb := w.dbTx.Bucket(bucketWallet) err = wb.Put(keyPrimarySeedFile, encoding.Marshal(newPrimarySeedFile)) if err != nil { return err } err = wb.Put(keyAuxiliarySeedFiles, encoding.Marshal(newAuxiliarySeedFiles)) if err != nil { return err } err = wb.Put(keySpendableKeyFiles, encoding.Marshal(newUnseededKeyFiles)) if err != nil { return err } uk := uidEncryptionKey(newKey, dbGetWalletUID(w.dbTx)) err = wb.Put(keyEncryptionVerification, uk.EncryptBytes(verificationPlaintext)) if err != nil { return err } return nil }() if err != nil { return err } return nil } // ChangeKey changes the wallet's encryption key from masterKey to newKey. func (w *Wallet) ChangeKey(masterKey crypto.TwofishKey, newKey crypto.TwofishKey) error { if err := w.tg.Add(); err != nil { return err } defer w.tg.Done() return w.managedChangeKey(masterKey, newKey) } // Unlock will decrypt the wallet seed and load all of the addresses into // memory. func (w *Wallet) Unlock(masterKey crypto.TwofishKey) error { // By having the wallet's ThreadGroup track the Unlock method, we ensure // that Unlock will never unlock the wallet once the ThreadGroup has been // stopped. Without this precaution, the wallet's Close method would be // unsafe because it would theoretically be possible for another function // to Unlock the wallet in the short interval after Close calls w.Lock // and before Close calls w.mu.Lock. if err := w.tg.Add(); err != nil { return err } defer w.tg.Done() if !w.scanLock.TryLock() { return errScanInProgress } defer w.scanLock.Unlock() w.log.Println("INFO: Unlocking wallet.") // Initialize all of the keys in the wallet under a lock. While holding the // lock, also grab the subscriber status. return w.managedUnlock(masterKey) } Sia-1.3.0/modules/wallet/encrypt_test.go000066400000000000000000000302551313565667000202620ustar00rootroot00000000000000package wallet import ( "bytes" "os" "path/filepath" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/miner" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) // postEncryptionTesting runs a series of checks on the wallet after it has // been encrypted, to make sure that locking, unlocking, and spending after // unlocking are all happening in the correct order and returning the correct // errors. func postEncryptionTesting(m modules.TestMiner, w *Wallet, masterKey crypto.TwofishKey) { if !w.Encrypted() { panic("wallet is not encrypted when starting postEncryptionTesting") } if w.Unlocked() { panic("wallet is unlocked when starting postEncryptionTesting") } if len(w.seeds) != 0 { panic("wallet has seeds in it when starting postEncryptionTesting") } // Try unlocking and using the wallet. err := w.Unlock(masterKey) if err != nil { panic(err) } err = w.Unlock(masterKey) if err != errAlreadyUnlocked { panic(err) } // Mine enough coins so that a balance appears (and some buffer for the // send later).
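// Miner payouts take types.MaturityDelay blocks to mature, so the loop
// below mines MaturityDelay+2 blocks, leaving a matured payout or two for
// the balance check and the send that follows.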
for i := types.BlockHeight(0); i <= types.MaturityDelay+1; i++ { _, err := m.AddBlock() if err != nil { panic(err) } } siacoinBal, _, _ := w.ConfirmedBalance() if siacoinBal.IsZero() { panic("wallet balance reported as 0 after maturing some mined blocks") } err = w.Unlock(masterKey) if err != errAlreadyUnlocked { panic(err) } // Lock, unlock, and try using the wallet some more. err = w.Lock() if err != nil { panic(err) } err = w.Lock() if err != modules.ErrLockedWallet { panic(err) } err = w.Unlock(crypto.TwofishKey{}) if err != modules.ErrBadEncryptionKey { panic(err) } err = w.Unlock(masterKey) if err != nil { panic(err) } // Verify that the secret keys have been restored by sending coins to the // void. Send more coins than are received by mining a block. _, err = w.SendSiacoins(types.CalculateCoinbase(0), types.UnlockHash{}) if err != nil { panic(err) } _, err = m.AddBlock() if err != nil { panic(err) } siacoinBal2, _, _ := w.ConfirmedBalance() if siacoinBal2.Cmp(siacoinBal) >= 0 { panic("balance did not decrease") } } // TestIntegrationPreEncryption checks that the wallet operates as expected // prior to encryption. func TestIntegrationPreEncryption(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createBlankWalletTester(t.Name()) if err != nil { t.Fatal(err) } // Check that the wallet knows it's not encrypted. if wt.wallet.Encrypted() { t.Error("wallet is reporting that it has been encrypted") } err = wt.wallet.Lock() if err != modules.ErrLockedWallet { t.Fatal(err) } err = wt.wallet.Unlock(crypto.TwofishKey{}) if err != errUnencryptedWallet { t.Fatal(err) } wt.closeWt() // Create a second wallet using the same directory - make sure that if any // files have been created, the wallet is still being treated as new. w1, err := New(wt.cs, wt.tpool, filepath.Join(wt.persistDir, modules.WalletDir)) if err != nil { t.Fatal(err) } if w1.Encrypted() { t.Error("wallet is reporting that it has been encrypted when no such action has occurred") } if w1.Unlocked() { t.Error("new wallet is not being treated as locked") } w1.Close() } // TestIntegrationUserSuppliedEncryption probes the encryption process when the // user manually supplies an encryption key. func TestIntegrationUserSuppliedEncryption(t *testing.T) { if testing.Short() { t.SkipNow() } // Create a wallet and a user-specified key, then encrypt the wallet and // run post-encryption tests on it. wt, err := createBlankWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() var masterKey crypto.TwofishKey fastrand.Read(masterKey[:]) _, err = wt.wallet.Encrypt(masterKey) if err != nil { t.Error(err) } postEncryptionTesting(wt.miner, wt.wallet, masterKey) } // TestIntegrationBlankEncryption probes the encryption process when the user // supplies a blank encryption key during the encryption process. func TestIntegrationBlankEncryption(t *testing.T) { if testing.Short() { t.SkipNow() } // Create the wallet. wt, err := createBlankWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // Encrypt the wallet using a blank key. seed, err := wt.wallet.Encrypt(crypto.TwofishKey{}) if err != nil { t.Error(err) } // Try unlocking the wallet using a blank key. err = wt.wallet.Unlock(crypto.TwofishKey{}) if err != modules.ErrBadEncryptionKey { t.Fatal(err) } // Try unlocking the wallet using the correct key.
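// Encrypt was called with a blank TwofishKey, so per Encrypt's fallback
// the effective master key is the hash of the returned seed.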
err = wt.wallet.Unlock(crypto.TwofishKey(crypto.HashObject(seed))) if err != nil { t.Fatal(err) } err = wt.wallet.Lock() if err != nil { t.Fatal(err) } postEncryptionTesting(wt.miner, wt.wallet, crypto.TwofishKey(crypto.HashObject(seed))) } // TestLock checks that lock correctly wipes keys when locking the wallet, // while still being able to track the balance of the wallet. func TestLock(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // Grab a block for work - miner will not supply blocks after the wallet // has been locked, and the test needs to mine a block after locking the // wallet to verify that the balance reporting of a locked wallet is // correct. block, target, err := wt.miner.BlockForWork() if err != nil { t.Fatal(err) } // Lock the wallet. siacoinBalance, _, _ := wt.wallet.ConfirmedBalance() err = wt.wallet.Lock() if err != nil { t.Error(err) } // Compare to the original balance. siacoinBalance2, _, _ := wt.wallet.ConfirmedBalance() if !siacoinBalance2.Equals(siacoinBalance) { t.Error("siacoin balance reporting changed upon closing the wallet") } // Check that the keys and seeds were wiped. wipedKey := make([]byte, crypto.SecretKeySize) for _, key := range wt.wallet.keys { for i := range key.SecretKeys { if !bytes.Equal(wipedKey, key.SecretKeys[i][:]) { t.Error("Key was not wiped after closing the wallet") } } } if len(wt.wallet.seeds) != 0 { t.Error("seeds not wiped from wallet") } if !bytes.Equal(wipedKey[:crypto.EntropySize], wt.wallet.primarySeed[:]) { t.Error("primary seed not wiped from memory") } // Solve the block generated earlier and add it to the consensus set, this // should boost the balance of the wallet. solvedBlock, _ := wt.miner.SolveBlock(block, target) err = wt.cs.AcceptBlock(solvedBlock) if err != nil { t.Fatal(err) } siacoinBalance3, _, _ := wt.wallet.ConfirmedBalance() if siacoinBalance3.Cmp(siacoinBalance2) <= 0 { t.Error("balance should increase after a block was mined") } } // TestInitFromSeedConcurrentUnlock verifies that calling InitFromSeed and // then Unlock() concurrently results in the correct balance. 
func TestInitFromSeedConcurrentUnlock(t *testing.T) { t.Skip("Test has poor concurrency design") if testing.Short() { t.SkipNow() } // create a wallet with some money wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() seed, _, err := wt.wallet.PrimarySeed() if err != nil { t.Fatal(err) } origBal, _, _ := wt.wallet.ConfirmedBalance() // create a blank wallet dir := filepath.Join(build.TempDir(modules.WalletDir, t.Name()+"-new"), modules.WalletDir) w, err := New(wt.cs, wt.tpool, dir) if err != nil { t.Fatal(err) } // spawn an initfromseed goroutine go w.InitFromSeed(crypto.TwofishKey{}, seed) // pause for 10ms to allow the seed scanner to start time.Sleep(time.Millisecond * 10) // unlock should now return an error err = w.Unlock(crypto.TwofishKey(crypto.HashObject(seed))) if err != errScanInProgress { t.Fatal("expected errScanInProgress, got", err) } // wait for init to finish for i := 0; i < 100; i++ { time.Sleep(time.Millisecond * 10) err = w.Unlock(crypto.TwofishKey(crypto.HashObject(seed))) if err == nil { break } } // starting balance should match the original wallet newBal, _, _ := w.ConfirmedBalance() if newBal.Cmp(origBal) != 0 { t.Log(w.UnconfirmedBalance()) t.Fatalf("wallet should have correct balance after loading seed: wanted %v, got %v", origBal, newBal) } } // TestUnlockConcurrent verifies that calling unlock multiple times // concurrently results in only one unlock operation. func TestUnlockConcurrent(t *testing.T) { if testing.Short() { t.SkipNow() } // create a wallet with some money wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // lock the wallet wt.wallet.Lock() // spawn an unlock goroutine errChan := make(chan error) go func() { // acquire the write lock so that Unlock acquires the trymutex, but // cannot proceed further wt.wallet.mu.Lock() errChan <- wt.wallet.Unlock(wt.walletMasterKey) }() // wait for goroutine to start time.Sleep(time.Millisecond * 10) // unlock should now return an error err = wt.wallet.Unlock(wt.walletMasterKey) if err != errScanInProgress { t.Fatal("expected errScanInProgress, got", err) } wt.wallet.mu.Unlock() if err := <-errChan; err != nil { t.Fatal("first unlock failed:", err) } } // TestInitFromSeed tests creating a wallet from a preexisting seed. func TestInitFromSeed(t *testing.T) { if testing.Short() { t.SkipNow() } // create a wallet with some money wt, err := createWalletTester("TestInitFromSeed0") if err != nil { t.Fatal(err) } defer wt.closeWt() seed, _, err := wt.wallet.PrimarySeed() if err != nil { t.Fatal(err) } origBal, _, _ := wt.wallet.ConfirmedBalance() // create a blank wallet dir := filepath.Join(build.TempDir(modules.WalletDir, "TestInitFromSeed1"), modules.WalletDir) w, err := New(wt.cs, wt.tpool, dir) if err != nil { t.Fatal(err) } err = w.InitFromSeed(crypto.TwofishKey{}, seed) if err != nil { t.Fatal(err) } err = w.Unlock(crypto.TwofishKey(crypto.HashObject(seed))) if err != nil { t.Fatal(err) } // starting balance should match the original wallet newBal, _, _ := w.ConfirmedBalance() if newBal.Cmp(origBal) != 0 { t.Log(w.UnconfirmedBalance()) t.Fatalf("wallet should have correct balance after loading seed: wanted %v, got %v", origBal, newBal) } } // TestReset tests that Reset resets a wallet correctly.
func TestReset(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createBlankWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() var originalKey crypto.TwofishKey fastrand.Read(originalKey[:]) _, err = wt.wallet.Encrypt(originalKey) if err != nil { t.Fatal(err) } postEncryptionTesting(wt.miner, wt.wallet, originalKey) err = wt.wallet.Reset() if err != nil { t.Fatal(err) } // reinitialize the miner so it mines into the new seed err = wt.miner.Close() if err != nil { t.Fatal(err) } minerData := filepath.Join(wt.persistDir, modules.MinerDir) err = os.RemoveAll(minerData) if err != nil { t.Fatal(err) } newminer, err := miner.New(wt.cs, wt.tpool, wt.wallet, filepath.Join(wt.persistDir, modules.MinerDir)) if err != nil { t.Fatal(err) } wt.miner = newminer var newKey crypto.TwofishKey fastrand.Read(newKey[:]) _, err = wt.wallet.Encrypt(newKey) if err != nil { t.Fatal(err) } postEncryptionTesting(wt.miner, wt.wallet, newKey) } // TestChangeKey tests that a wallet can only be unlocked with the new key // after changing it and that it shows the same balance as before func TestChangeKey(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() var newKey crypto.TwofishKey fastrand.Read(newKey[:]) origBal, _, _ := wt.wallet.ConfirmedBalance() err = wt.wallet.ChangeKey(wt.walletMasterKey, newKey) if err != nil { t.Fatal(err) } err = wt.wallet.Lock() if err != nil { t.Fatal(err) } err = wt.wallet.Unlock(wt.walletMasterKey) if err == nil { t.Fatal("expected unlock to fail with the original key") } err = wt.wallet.Unlock(newKey) if err != nil { t.Fatal(err) } newBal, _, _ := wt.wallet.ConfirmedBalance() if newBal.Cmp(origBal) != 0 { t.Fatal("wallet with changed key did not have the same balance") } err = wt.wallet.Lock() if err != nil { t.Fatal(err) } postEncryptionTesting(wt.miner, wt.wallet, newKey) } Sia-1.3.0/modules/wallet/money.go000066400000000000000000000171131313565667000166640ustar00rootroot00000000000000package wallet import ( "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // sortedOutputs is a struct containing a slice of siacoin outputs and their // corresponding ids. sortedOutputs can be sorted using the sort package. type sortedOutputs struct { ids []types.SiacoinOutputID outputs []types.SiacoinOutput } // ConfirmedBalance returns the balance of the wallet according to all of the // confirmed transactions. func (w *Wallet) ConfirmedBalance() (siacoinBalance types.Currency, siafundBalance types.Currency, siafundClaimBalance types.Currency) { w.mu.Lock() defer w.mu.Unlock() // ensure durability of reported balance w.syncDB() dbForEachSiacoinOutput(w.dbTx, func(_ types.SiacoinOutputID, sco types.SiacoinOutput) { if sco.Value.Cmp(dustValue()) > 0 { siacoinBalance = siacoinBalance.Add(sco.Value) } }) siafundPool, err := dbGetSiafundPool(w.dbTx) if err != nil { return } dbForEachSiafundOutput(w.dbTx, func(_ types.SiafundOutputID, sfo types.SiafundOutput) { siafundBalance = siafundBalance.Add(sfo.Value) if sfo.ClaimStart.Cmp(siafundPool) > 0 { // Skip claims larger than the siafund pool. This should only // occur if the siafund pool has not been initialized yet. 
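// (Such outputs are skipped below.) For the rest, the claim accrued is
// (siafundPool - ClaimStart) * Value / types.SiafundCount. With
// hypothetical numbers: a pool of 1000 SC, a ClaimStart of 400 SC, and an
// output holding 20 of the 10000 siafunds yields a claim of
// (1000 - 400) * 20 / 10000 = 1.2 SC.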
w.log.Debugf("skipping claim with start value %v because siafund pool is only %v", sfo.ClaimStart, siafundPool) return } siafundClaimBalance = siafundClaimBalance.Add(siafundPool.Sub(sfo.ClaimStart).Mul(sfo.Value).Div(types.SiafundCount)) }) return } // UnconfirmedBalance returns the number of outgoing and incoming siacoins in // the unconfirmed transaction set. Refund outputs are included in this // reporting. func (w *Wallet) UnconfirmedBalance() (outgoingSiacoins types.Currency, incomingSiacoins types.Currency) { w.mu.Lock() defer w.mu.Unlock() for _, upt := range w.unconfirmedProcessedTransactions { for _, input := range upt.Inputs { if input.FundType == types.SpecifierSiacoinInput && input.WalletAddress { outgoingSiacoins = outgoingSiacoins.Add(input.Value) } } for _, output := range upt.Outputs { if output.FundType == types.SpecifierSiacoinOutput && output.WalletAddress && output.Value.Cmp(dustValue()) > 0 { incomingSiacoins = incomingSiacoins.Add(output.Value) } } } return } // SendSiacoins creates a transaction sending 'amount' to 'dest'. The transaction // is submitted to the transaction pool and is also returned. func (w *Wallet) SendSiacoins(amount types.Currency, dest types.UnlockHash) ([]types.Transaction, error) { if err := w.tg.Add(); err != nil { return nil, err } defer w.tg.Done() if !w.unlocked { w.log.Println("Attempt to send coins has failed - wallet is locked") return nil, modules.ErrLockedWallet } _, tpoolFee := w.tpool.FeeEstimation() tpoolFee = tpoolFee.Mul64(750) // Estimated transaction size in bytes output := types.SiacoinOutput{ Value: amount, UnlockHash: dest, } txnBuilder := w.StartTransaction() err := txnBuilder.FundSiacoins(amount.Add(tpoolFee)) if err != nil { w.log.Println("Attempt to send coins has failed - failed to fund transaction:", err) return nil, build.ExtendErr("unable to fund transaction", err) } txnBuilder.AddMinerFee(tpoolFee) txnBuilder.AddSiacoinOutput(output) txnSet, err := txnBuilder.Sign(true) if err != nil { w.log.Println("Attempt to send coins has failed - failed to sign transaction:", err) return nil, build.ExtendErr("unable to sign transaction", err) } err = w.tpool.AcceptTransactionSet(txnSet) if err != nil { w.log.Println("Attempt to send coins has failed - transaction pool rejected transaction:", err) return nil, build.ExtendErr("unable to get transaction accepted", err) } w.log.Println("Submitted a siacoin transfer transaction set for value", amount.HumanString(), "with fees", tpoolFee.HumanString(), "IDs:") for _, txn := range txnSet { w.log.Println("\t", txn.ID()) } return txnSet, nil } // SendSiacoinsMulti creates a transaction that includes the specified // outputs. The transaction is submitted to the transaction pool and is also // returned. func (w *Wallet) SendSiacoinsMulti(outputs []types.SiacoinOutput) ([]types.Transaction, error) { if err := w.tg.Add(); err != nil { return nil, err } defer w.tg.Done() if !w.unlocked { w.log.Println("Attempt to send coins has failed - wallet is locked") return nil, modules.ErrLockedWallet } txnBuilder := w.StartTransaction() // Add estimated transaction fee. _, tpoolFee := w.tpool.FeeEstimation() tpoolFee = tpoolFee.Mul64(2) // We don't want send-to-many transactions to fail. tpoolFee = tpoolFee.Mul64(1000 + 60*uint64(len(outputs))) // Estimated transaction size in bytes txnBuilder.AddMinerFee(tpoolFee) // Calculate total cost to wallet. 
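// Worked example with hypothetical numbers: for 5 outputs the fee set
// above is feePerByte * 2 * (1000 + 60*5) = 2600 * feePerByte, and
// totalCost below is that fee plus the sum of the five output values.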
// NOTE: we only want to call FundSiacoins once; that way, it will // (ideally) fund the entire transaction with a single input, instead of // many smaller ones. totalCost := tpoolFee for _, sco := range outputs { totalCost = totalCost.Add(sco.Value) } err := txnBuilder.FundSiacoins(totalCost) if err != nil { return nil, build.ExtendErr("unable to fund transaction", err) } for _, sco := range outputs { txnBuilder.AddSiacoinOutput(sco) } txnSet, err := txnBuilder.Sign(true) if err != nil { w.log.Println("Attempt to send coins has failed - failed to sign transaction:", err) return nil, build.ExtendErr("unable to sign transaction", err) } err = w.tpool.AcceptTransactionSet(txnSet) if err != nil { w.log.Println("Attempt to send coins has failed - transaction pool rejected transaction:", err) return nil, build.ExtendErr("unable to get transaction accepted", err) } return txnSet, nil } // SendSiafunds creates a transaction sending 'amount' to 'dest'. The transaction // is submitted to the transaction pool and is also returned. func (w *Wallet) SendSiafunds(amount types.Currency, dest types.UnlockHash) ([]types.Transaction, error) { if err := w.tg.Add(); err != nil { return nil, err } defer w.tg.Done() if !w.unlocked { return nil, modules.ErrLockedWallet } _, tpoolFee := w.tpool.FeeEstimation() tpoolFee = tpoolFee.Mul64(750) // Estimated transaction size in bytes tpoolFee = tpoolFee.Mul64(5) // use large fee to ensure siafund transactions are selected by miners output := types.SiafundOutput{ Value: amount, UnlockHash: dest, } txnBuilder := w.StartTransaction() err := txnBuilder.FundSiacoins(tpoolFee) if err != nil { return nil, err } err = txnBuilder.FundSiafunds(amount) if err != nil { return nil, err } txnBuilder.AddMinerFee(tpoolFee) txnBuilder.AddSiafundOutput(output) txnSet, err := txnBuilder.Sign(true) if err != nil { return nil, err } err = w.tpool.AcceptTransactionSet(txnSet) if err != nil { return nil, err } w.log.Println("Submitted a siafund transfer transaction set for value", amount.HumanString(), "with fees", tpoolFee.HumanString(), "IDs:") for _, txn := range txnSet { w.log.Println("\t", txn.ID()) } return txnSet, nil } // Len returns the number of elements in the sortedOutputs struct. func (so sortedOutputs) Len() int { if build.DEBUG && len(so.ids) != len(so.outputs) { panic("sortedOutputs object is corrupt") } return len(so.ids) } // Less returns whether element 'i' is less than element 'j'. The currency // value of each output is used for comparison. func (so sortedOutputs) Less(i, j int) bool { return so.outputs[i].Value.Cmp(so.outputs[j].Value) < 0 } // Swap swaps two elements in the sortedOutputs set. func (so sortedOutputs) Swap(i, j int) { so.ids[i], so.ids[j] = so.ids[j], so.ids[i] so.outputs[i], so.outputs[j] = so.outputs[j], so.outputs[i] } Sia-1.3.0/modules/wallet/money_test.go000066400000000000000000000132161313565667000177230ustar00rootroot00000000000000package wallet import ( "sort" "testing" "github.com/NebulousLabs/Sia/types" ) // TestSendSiacoins probes the SendSiacoins method of the wallet. func TestSendSiacoins(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // Get the initial balance - should be 1 block. The unconfirmed balances // should be 0. 
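// Here "1 block" means exactly one matured miner payout, i.e. the
// types.CalculateCoinbase(1) asserted below.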
confirmedBal, _, _ := wt.wallet.ConfirmedBalance() unconfirmedOut, unconfirmedIn := wt.wallet.UnconfirmedBalance() if !confirmedBal.Equals(types.CalculateCoinbase(1)) { t.Error("unexpected confirmed balance") } if !unconfirmedOut.Equals(types.ZeroCurrency) { t.Error("unconfirmed balance should be 0") } if !unconfirmedIn.Equals(types.ZeroCurrency) { t.Error("unconfirmed balance should be 0") } // Send 3 siacoins. The wallet will automatically add a fee. Outgoing // unconfirmed siacoins - incoming unconfirmed siacoins should equal the // send value plus the fee. sendValue := types.SiacoinPrecision.Mul64(3) _, tpoolFee := wt.wallet.tpool.FeeEstimation() tpoolFee = tpoolFee.Mul64(750) _, err = wt.wallet.SendSiacoins(sendValue, types.UnlockHash{}) if err != nil { t.Fatal(err) } confirmedBal2, _, _ := wt.wallet.ConfirmedBalance() unconfirmedOut2, unconfirmedIn2 := wt.wallet.UnconfirmedBalance() if !confirmedBal2.Equals(confirmedBal) { t.Error("confirmed balance changed without introduction of blocks") } if !unconfirmedOut2.Equals(unconfirmedIn2.Add(sendValue).Add(tpoolFee)) { t.Error("sending siacoins appears to be ineffective") } // Move the balance into the confirmed set. b, _ := wt.miner.FindBlock() err = wt.cs.AcceptBlock(b) if err != nil { t.Fatal(err) } confirmedBal3, _, _ := wt.wallet.ConfirmedBalance() unconfirmedOut3, unconfirmedIn3 := wt.wallet.UnconfirmedBalance() if !confirmedBal3.Equals(confirmedBal2.Add(types.CalculateCoinbase(2)).Sub(sendValue).Sub(tpoolFee)) { t.Error("confirmed balance did not adjust to the expected value") } if !unconfirmedOut3.Equals(types.ZeroCurrency) { t.Error("unconfirmed balance should be 0") } if !unconfirmedIn3.Equals(types.ZeroCurrency) { t.Error("unconfirmed balance should be 0") } } // TestIntegrationSendOverUnder sends too many siacoins, resulting in an error, // followed by sending few enough siacoins that the send should complete. // // This test is here because of a bug found in production where the wallet // would mark outputs as spent before it knew that there was enough money to // complete the transaction. This meant that, after trying to send too many // coins, all outputs got marked 'sent'. This test reproduces those conditions // to ensure it does not happen again. func TestIntegrationSendOverUnder(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // Spend too many siacoins. tooManyCoins := types.SiacoinPrecision.Mul64(1e12) _, err = wt.wallet.SendSiacoins(tooManyCoins, types.UnlockHash{}) if err == nil { t.Error("low balance err not returned after attempting to send too many coins:", err) } // Spend a reasonable amount of siacoins. reasonableCoins := types.SiacoinPrecision.Mul64(100e3) _, err = wt.wallet.SendSiacoins(reasonableCoins, types.UnlockHash{}) if err != nil { t.Error("unexpected error: ", err) } } // TestIntegrationSpendHalfHalf spends more than half of the coins, and then // more than half of the coins again, to make sure that the wallet is not // reusing outputs that it has already spent. func TestIntegrationSpendHalfHalf(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // Spend more than half of the coins twice.
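// 200e3 SC is more than half of the tester's starting balance, so once
// the first send's outputs are marked spent, the second identical send
// must fail for lack of funds rather than reuse those outputs.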
halfPlus := types.SiacoinPrecision.Mul64(200e3) _, err = wt.wallet.SendSiacoins(halfPlus, types.UnlockHash{}) if err != nil { t.Error("unexpected error: ", err) } _, err = wt.wallet.SendSiacoins(halfPlus, types.UnlockHash{1}) if err == nil { t.Error("wallet appears to be reusing outputs when building transactions: ", err) } } // TestIntegrationSpendUnconfirmed spends an unconfirmed siacoin output. func TestIntegrationSpendUnconfirmed(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // Spend the only output. halfPlus := types.SiacoinPrecision.Mul64(200e3) _, err = wt.wallet.SendSiacoins(halfPlus, types.UnlockHash{}) if err != nil { t.Error("unexpected error: ", err) } someMore := types.SiacoinPrecision.Mul64(75e3) _, err = wt.wallet.SendSiacoins(someMore, types.UnlockHash{1}) if err != nil { t.Error("wallet appears to be struggling to spend unconfirmed outputs") } } // TestIntegrationSortedOutputsSorting checks that the outputs are being correctly sorted // by the currency value. func TestIntegrationSortedOutputsSorting(t *testing.T) { if testing.Short() { t.SkipNow() } so := sortedOutputs{ ids: []types.SiacoinOutputID{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}}, outputs: []types.SiacoinOutput{ {Value: types.NewCurrency64(2)}, {Value: types.NewCurrency64(3)}, {Value: types.NewCurrency64(4)}, {Value: types.NewCurrency64(7)}, {Value: types.NewCurrency64(6)}, {Value: types.NewCurrency64(0)}, {Value: types.NewCurrency64(1)}, {Value: types.NewCurrency64(5)}, }, } sort.Sort(so) expectedIDSorting := []types.SiacoinOutputID{{5}, {6}, {0}, {1}, {2}, {7}, {4}, {3}} for i := uint64(0); i < 8; i++ { if so.ids[i] != expectedIDSorting[i] { t.Error("an id is out of place: ", i) } if !so.outputs[i].Value.Equals64(i) { t.Error("a value is out of place: ", i) } } } Sia-1.3.0/modules/wallet/persist.go000066400000000000000000000143341313565667000172300ustar00rootroot00000000000000package wallet import ( "fmt" "io" "os" "path/filepath" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" "github.com/NebulousLabs/bolt" ) const ( logFile = modules.WalletDir + ".log" dbFile = modules.WalletDir + ".db" compatFile = modules.WalletDir + ".json" ) var ( dbMetadata = persist.Metadata{ Header: "Wallet Database", Version: "1.1.0", } ) // spendableKeyFile stores an encrypted spendable key on disk. type spendableKeyFile struct { UID uniqueID EncryptionVerification crypto.Ciphertext SpendableKey crypto.Ciphertext } // openDB loads the wallet database and populates it with the necessary buckets.
func (w *Wallet) openDB(filename string) (err error) { w.db, err = persist.OpenDatabase(dbMetadata, filename) if err != nil { return err } // initialize the database err = w.db.Update(func(tx *bolt.Tx) error { for _, b := range dbBuckets { _, err := tx.CreateBucketIfNotExists(b) if err != nil { return fmt.Errorf("could not create bucket %v: %v", string(b), err) } } // if the wallet does not have a UID, create one if tx.Bucket(bucketWallet).Get(keyUID) == nil { uid := make([]byte, len(uniqueID{})) fastrand.Read(uid[:]) tx.Bucket(bucketWallet).Put(keyUID, uid) } // if fields in bucketWallet are nil, set them to zero to prevent unmarshal errors wb := tx.Bucket(bucketWallet) if wb.Get(keyConsensusHeight) == nil { wb.Put(keyConsensusHeight, encoding.Marshal(uint64(0))) } if wb.Get(keyAuxiliarySeedFiles) == nil { wb.Put(keyAuxiliarySeedFiles, encoding.Marshal([]seedFile{})) } if wb.Get(keySpendableKeyFiles) == nil { wb.Put(keySpendableKeyFiles, encoding.Marshal([]spendableKeyFile{})) } if wb.Get(keySiafundPool) == nil { wb.Put(keySiafundPool, encoding.Marshal(types.ZeroCurrency)) } // check whether wallet is encrypted w.encrypted = tx.Bucket(bucketWallet).Get(keyEncryptionVerification) != nil return nil }) return err } // initPersist loads all of the wallet's persistence files into memory, // creating them if they do not exist. func (w *Wallet) initPersist() error { // Create a directory for the wallet without overwriting an existing // directory. err := os.MkdirAll(w.persistDir, 0700) if err != nil { return err } // Start logging. w.log, err = persist.NewFileLogger(filepath.Join(w.persistDir, logFile)) if err != nil { return err } // Open the database. dbFilename := filepath.Join(w.persistDir, dbFile) compatFilename := filepath.Join(w.persistDir, compatFile) _, dbErr := os.Stat(dbFilename) _, compatErr := os.Stat(compatFilename) if dbErr != nil && compatErr == nil { // database does not exist, but old persist does; convert it err = w.convertPersistFrom112To120(dbFilename, compatFilename) } else { // either database exists or neither exists; open/create the database err = w.openDB(filepath.Join(w.persistDir, dbFile)) } if err != nil { return err } w.tg.AfterStop(func() { w.db.Close() }) return nil } // createBackup copies the wallet database to dst. func (w *Wallet) createBackup(dst io.Writer) error { _, err := w.dbTx.WriteTo(dst) return err } // CreateBackup creates a backup file at the desired filepath. func (w *Wallet) CreateBackup(backupFilepath string) error { if err := w.tg.Add(); err != nil { return err } defer w.tg.Done() w.mu.Lock() defer w.mu.Unlock() f, err := os.Create(backupFilepath) if err != nil { return err } defer f.Close() return w.createBackup(f) } // compat112Persist is the structure of the wallet.json file used in v1.1.2 type compat112Persist struct { UID uniqueID EncryptionVerification crypto.Ciphertext PrimarySeedFile seedFile PrimarySeedProgress uint64 AuxiliarySeedFiles []seedFile UnseededKeys []spendableKeyFile } // compat112Meta is the metadata of the wallet.json file used in v1.1.2 var compat112Meta = persist.Metadata{ Header: "Wallet Settings", Version: "0.4.0", } // convertPersistFrom112To120 converts an old (pre-v1.2.0) wallet.json file to // a wallet.db database. 
func (w *Wallet) convertPersistFrom112To120(dbFilename, compatFilename string) error { var data compat112Persist err := persist.LoadJSON(compat112Meta, &data, compatFilename) if err != nil { return err } w.db, err = persist.OpenDatabase(dbMetadata, dbFilename) if err != nil { return err } // initialize the database err = w.db.Update(func(tx *bolt.Tx) error { for _, b := range dbBuckets { _, err := tx.CreateBucket(b) if err != nil { return fmt.Errorf("could not create bucket %v: %v", string(b), err) } } // set UID, verification, seeds, and seed progress tx.Bucket(bucketWallet).Put(keyUID, data.UID[:]) tx.Bucket(bucketWallet).Put(keyEncryptionVerification, data.EncryptionVerification) tx.Bucket(bucketWallet).Put(keyPrimarySeedFile, encoding.Marshal(data.PrimarySeedFile)) tx.Bucket(bucketWallet).Put(keyAuxiliarySeedFiles, encoding.Marshal(data.AuxiliarySeedFiles)) tx.Bucket(bucketWallet).Put(keySpendableKeyFiles, encoding.Marshal(data.UnseededKeys)) // old wallets had a "preload depth" of 25 dbPutPrimarySeedProgress(tx, data.PrimarySeedProgress+25) // set consensus height and CCID to zero so that a full rescan is // triggered dbPutConsensusHeight(tx, 0) dbPutConsensusChangeID(tx, modules.ConsensusChangeBeginning) return nil }) w.encrypted = true return err } /* // LoadBackup loads a backup file from the provided filepath. The backup file // primary seed is loaded as an auxiliary seed. func (w *Wallet) LoadBackup(masterKey, backupMasterKey crypto.TwofishKey, backupFilepath string) error { if err := w.tg.Add(); err != nil { return err } defer w.tg.Done() lockID := w.mu.Lock() defer w.mu.Unlock(lockID) // Load all of the seed files, check for duplicates, re-encrypt them (but // keep the UID), and add them to the walletPersist object) var backupPersist walletPersist err := persist.LoadFile(settingsMetadata, &backupPersist, backupFilepath) if err != nil { return err } backupSeeds := append(backupPersist.AuxiliarySeedFiles, backupPersist.PrimarySeedFile) TODO: more } */ Sia-1.3.0/modules/wallet/scan.go000066400000000000000000000130531313565667000164600ustar00rootroot00000000000000package wallet import ( "fmt" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" ) const scanMultiplier = 4 // how many more keys to generate after each scan iteration // numInitialKeys is the number of keys generated by the seedScanner before // scanning the blockchain for the first time. var numInitialKeys = func() uint64 { switch build.Release { case "dev": return 10e3 case "standard": return 1e6 case "testing": return 1e3 default: panic("unrecognized build.Release") } }() // maxScanKeys is the number of maximum number of keys the seedScanner will // generate before giving up. var maxScanKeys = func() uint64 { switch build.Release { case "dev": return 1e6 case "standard": return 100e6 case "testing": return 100e3 default: panic("unrecognized build.Release") } }() var errMaxKeys = fmt.Errorf("refused to generate more than %v keys from seed", maxScanKeys) // A scannedOutput is an output found in the blockchain that was generated // from a given seed. type scannedOutput struct { id types.OutputID value types.Currency seedIndex uint64 } // A seedScanner scans the blockchain for addresses that belong to a given // seed. 
type seedScanner struct { dustThreshold types.Currency // minimum value of outputs to be included keys map[types.UnlockHash]uint64 // map address to seed index largestIndexSeen uint64 // largest index that has appeared in the blockchain seed modules.Seed siacoinOutputs map[types.SiacoinOutputID]scannedOutput siafundOutputs map[types.SiafundOutputID]scannedOutput log *persist.Logger } func (s *seedScanner) numKeys() uint64 { return uint64(len(s.keys)) } // generateKeys generates n additional keys from the seedScanner's seed. func (s *seedScanner) generateKeys(n uint64) { initialProgress := s.numKeys() for i, k := range generateKeys(s.seed, initialProgress, n) { s.keys[k.UnlockConditions.UnlockHash()] = initialProgress + uint64(i) } } // ProcessConsensusChange scans the blockchain for information relevant to the // seedScanner. func (s *seedScanner) ProcessConsensusChange(cc modules.ConsensusChange) { // update outputs for _, diff := range cc.SiacoinOutputDiffs { if diff.Direction == modules.DiffApply { if index, exists := s.keys[diff.SiacoinOutput.UnlockHash]; exists && diff.SiacoinOutput.Value.Cmp(s.dustThreshold) > 0 { s.siacoinOutputs[diff.ID] = scannedOutput{ id: types.OutputID(diff.ID), value: diff.SiacoinOutput.Value, seedIndex: index, } } } else if diff.Direction == modules.DiffRevert { // NOTE: DiffRevert means the output was either spent or was in a // block that was reverted. if _, exists := s.keys[diff.SiacoinOutput.UnlockHash]; exists { delete(s.siacoinOutputs, diff.ID) } } } for _, diff := range cc.SiafundOutputDiffs { if diff.Direction == modules.DiffApply { // do not compare against dustThreshold here; we always want to // sweep every siafund found if index, exists := s.keys[diff.SiafundOutput.UnlockHash]; exists { s.siafundOutputs[diff.ID] = scannedOutput{ id: types.OutputID(diff.ID), value: diff.SiafundOutput.Value, seedIndex: index, } } } else if diff.Direction == modules.DiffRevert { // NOTE: DiffRevert means the output was either spent or was in a // block that was reverted. if _, exists := s.keys[diff.SiafundOutput.UnlockHash]; exists { delete(s.siafundOutputs, diff.ID) } } } // update s.largestIndexSeen for _, diff := range cc.SiacoinOutputDiffs { index, exists := s.keys[diff.SiacoinOutput.UnlockHash] if exists { s.log.Debugln("Seed scanner found a key used at index", index) if index > s.largestIndexSeen { s.largestIndexSeen = index } } } for _, diff := range cc.SiafundOutputDiffs { index, exists := s.keys[diff.SiafundOutput.UnlockHash] if exists { s.log.Debugln("Seed scanner found a key used at index", index) if index > s.largestIndexSeen { s.largestIndexSeen = index } } } } // scan subscribes s to cs and scans the blockchain for addresses that belong // to s's seed. If scan returns errMaxKeys, additional keys may need to be // generated to find all the addresses. func (s *seedScanner) scan(cs modules.ConsensusSet) error { // generate a bunch of keys and scan the blockchain looking for them. If // none of the 'upper' half of the generated keys are found, we are done; // otherwise, generate more keys and try again (bounded by a sane // default). // // NOTE: since scanning is very slow, we aim to only scan once, which // means generating many keys. 
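// On the "standard" release this progression is: 1e6 keys on the first
// pass, then 4e6 more, then 16e6, and so on (scanMultiplier = 4), with
// the final batch clamped so the total never exceeds maxScanKeys (100e6).
// The loop exits early as soon as no used index falls in the upper half
// of the keys generated so far.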
var numKeys uint64 = numInitialKeys for s.numKeys() < maxScanKeys { s.generateKeys(numKeys) if err := cs.ConsensusSetSubscribe(s, modules.ConsensusChangeBeginning); err != nil { return err } cs.Unsubscribe(s) if s.largestIndexSeen < s.numKeys()/2 { return nil } // increase number of keys generated each iteration, capping so that // we do not exceed maxScanKeys numKeys *= scanMultiplier if numKeys > maxScanKeys-s.numKeys() { numKeys = maxScanKeys - s.numKeys() } } return errMaxKeys } // newSeedScanner returns a new seedScanner. func newSeedScanner(seed modules.Seed, log *persist.Logger) *seedScanner { return &seedScanner{ seed: seed, keys: make(map[types.UnlockHash]uint64), siacoinOutputs: make(map[types.SiacoinOutputID]scannedOutput), siafundOutputs: make(map[types.SiafundOutputID]scannedOutput), log: log, } } Sia-1.3.0/modules/wallet/scan_test.go000066400000000000000000000067151313565667000175260ustar00rootroot00000000000000package wallet import ( "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) // TestScanLargeIndex tests the limits of the seedScanner.scan function. func TestScanLargeIndex(t *testing.T) { if testing.Short() { t.SkipNow() } // create an empty wallet wt, err := createBlankWalletTester("TestScanLargeIndex") if err != nil { t.Fatal(err) } defer wt.closeWt() var masterKey crypto.TwofishKey fastrand.Read(masterKey[:]) _, err = wt.wallet.Encrypt(masterKey) if err != nil { t.Fatal(err) } err = wt.wallet.Unlock(masterKey) if err != nil { t.Fatal(err) } // set the wallet's seed progress to a high number and then mine some coins. wt.wallet.mu.Lock() dbPutPrimarySeedProgress(wt.wallet.dbTx, numInitialKeys+1) wt.wallet.mu.Unlock() if err != nil { t.Fatal(err) } for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ { wt.miner.AddBlock() } // send money to ourselves so that we sweep a real output (instead of just // a miner payout) uc, err := wt.wallet.NextAddress() if err != nil { t.Fatal(err) } _, err = wt.wallet.SendSiacoins(types.SiacoinPrecision, uc.UnlockHash()) if err != nil { t.Fatal(err) } wt.miner.AddBlock() // create seed scanner and scan the block seed, _, _ := wt.wallet.PrimarySeed() ss := newSeedScanner(seed, wt.wallet.log) err = ss.scan(wt.cs) if err != nil { t.Fatal(err) } // no outputs should have been added if len(ss.siacoinOutputs) != 0 { t.Error("expected 0 outputs, got", len(ss.siacoinOutputs)) for _, o := range ss.siacoinOutputs { t.Log(o.seedIndex, o.value) } } if ss.largestIndexSeen != 0 { t.Error("expected no index to be seen, got", ss.largestIndexSeen) } } // TestScanLoop tests that the scan loop will continue to run as long as it // finds indices in the upper half of the last set of generated keys. func TestScanLoop(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } // create a wallet wt, err := createWalletTester("TestScanLoop") if err != nil { t.Fatal(err) } defer wt.closeWt() // send money to ourselves at four specific indices. This should cause the // scanner to loop exactly three times. 
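// Worked through with the "testing" constants (numInitialKeys = 1e3,
// scanMultiplier = 4): pass 1 generates 1e3 keys and sees roughly index
// 502 (not below 500, so it loops); pass 2 grows to 5e3 keys and sees
// ~2502 (not below 2500); pass 3 grows to 21e3 keys and sees ~8002, which
// is below 10500, so scanning stops. Index 100000 lies beyond the 21e3
// generated keys and is never seen.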
indices := []uint64{500, 2500, 8000, 100000} for _, index := range indices { wt.wallet.mu.Lock() dbPutPrimarySeedProgress(wt.wallet.dbTx, index) wt.wallet.mu.Unlock() if err != nil { t.Fatal(err) } uc, err := wt.wallet.NextAddress() if err != nil { t.Fatal(err) } _, err = wt.wallet.SendSiacoins(types.SiacoinPrecision, uc.UnlockHash()) if err != nil { t.Fatal(err) } } wt.miner.AddBlock() // create seed scanner and scan the block seed, _, _ := wt.wallet.PrimarySeed() ss := newSeedScanner(seed, wt.wallet.log) err = ss.scan(wt.cs) if err != nil { t.Fatal(err) } // the scanner should have generated a specific number of keys expected := numInitialKeys + (numInitialKeys * scanMultiplier) + (numInitialKeys * scanMultiplier * scanMultiplier) if uint64(len(ss.keys)) != expected { t.Errorf("expected %v keys, got %v", expected, len(ss.keys)) } // the largest index seen should be the penultimate element (+2, since 2 // addresses are generated when sending coins). The last element should // not be seen, because it was outside the scanning range. if ss.largestIndexSeen != indices[len(indices)-2]+2 { t.Errorf("expected largest index to be %v, got %v", indices[len(indices)-2]+2, ss.largestIndexSeen) } } Sia-1.3.0/modules/wallet/seed.go000066400000000000000000000372511313565667000164620ustar00rootroot00000000000000package wallet import ( "errors" "runtime" "sync" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" "github.com/NebulousLabs/fastrand" ) var ( errKnownSeed = errors.New("seed is already known") ) type ( // uniqueID is a unique id randomly generated and put at the front of every // persistence object. It is used to make sure that a different encryption // key can be used for every persistence object. uniqueID [crypto.EntropySize]byte // seedFile stores an encrypted wallet seed on disk. seedFile struct { UID uniqueID EncryptionVerification crypto.Ciphertext Seed crypto.Ciphertext } ) // generateSpendableKey creates the keys and unlock conditions for seed at a // given index. func generateSpendableKey(seed modules.Seed, index uint64) spendableKey { sk, pk := crypto.GenerateKeyPairDeterministic(crypto.HashAll(seed, index)) return spendableKey{ UnlockConditions: types.UnlockConditions{ PublicKeys: []types.SiaPublicKey{types.Ed25519PublicKey(pk)}, SignaturesRequired: 1, }, SecretKeys: []crypto.SecretKey{sk}, } } // generateKeys generates n keys from seed, starting from index start. func generateKeys(seed modules.Seed, start, n uint64) []spendableKey { // generate in parallel, one goroutine per core. keys := make([]spendableKey, n) var wg sync.WaitGroup wg.Add(runtime.NumCPU()) for cpu := 0; cpu < runtime.NumCPU(); cpu++ { go func(offset uint64) { defer wg.Done() for i := offset; i < n; i += uint64(runtime.NumCPU()) { // NOTE: don't bother trying to optimize generateSpendableKey; // profiling shows that ed25519 key generation consumes far // more CPU time than encoding or hashing. keys[i] = generateSpendableKey(seed, start+i) } }(uint64(cpu)) } wg.Wait() return keys } // createSeedFile creates and encrypts a seedFile. func createSeedFile(masterKey crypto.TwofishKey, seed modules.Seed) seedFile { var sf seedFile fastrand.Read(sf.UID[:]) sek := uidEncryptionKey(masterKey, sf.UID) sf.EncryptionVerification = sek.EncryptBytes(verificationPlaintext) sf.Seed = sek.EncryptBytes(seed[:]) return sf } // decryptSeedFile decrypts a seed file using the encryption key. 
func decryptSeedFile(masterKey crypto.TwofishKey, sf seedFile) (seed modules.Seed, err error) { // Verify that the provided master key is the correct key. decryptionKey := uidEncryptionKey(masterKey, sf.UID) err = verifyEncryption(decryptionKey, sf.EncryptionVerification) if err != nil { return modules.Seed{}, err } // Decrypt and return the seed. plainSeed, err := decryptionKey.DecryptBytes(sf.Seed) if err != nil { return modules.Seed{}, err } copy(seed[:], plainSeed) return seed, nil } // regenerateLookahead creates future keys up to a maximum of maxKeys keys func (w *Wallet) regenerateLookahead(start uint64) { // Check how many keys need to be generated maxKeys := maxLookahead(start) existingKeys := uint64(len(w.lookahead)) for i, k := range generateKeys(w.primarySeed, start+existingKeys, maxKeys-existingKeys) { w.lookahead[k.UnlockConditions.UnlockHash()] = start + existingKeys + uint64(i) } } // integrateSeed generates n spendableKeys from the seed and loads them into // the wallet. func (w *Wallet) integrateSeed(seed modules.Seed, n uint64) { for _, sk := range generateKeys(seed, 0, n) { w.keys[sk.UnlockConditions.UnlockHash()] = sk } } // nextPrimarySeedAddress fetches the next address from the primary seed. func (w *Wallet) nextPrimarySeedAddress(tx *bolt.Tx) (types.UnlockConditions, error) { // Check that the wallet has been unlocked. if !w.unlocked { return types.UnlockConditions{}, modules.ErrLockedWallet } // Fetch and increment the seed progress. progress, err := dbGetPrimarySeedProgress(tx) if err != nil { return types.UnlockConditions{}, err } if err = dbPutPrimarySeedProgress(tx, progress+1); err != nil { return types.UnlockConditions{}, err } // Integrate the next key into the wallet, and return the unlock // conditions. spendableKey := generateSpendableKey(w.primarySeed, progress) w.keys[spendableKey.UnlockConditions.UnlockHash()] = spendableKey // Remove new key from the future keys and update them according to new progress delete(w.lookahead, spendableKey.UnlockConditions.UnlockHash()) w.regenerateLookahead(progress + 1) return spendableKey.UnlockConditions, nil } // AllSeeds returns a list of all seeds known to and used by the wallet. func (w *Wallet) AllSeeds() ([]modules.Seed, error) { w.mu.Lock() defer w.mu.Unlock() if !w.unlocked { return nil, modules.ErrLockedWallet } return append([]modules.Seed{w.primarySeed}, w.seeds...), nil } // PrimarySeed returns the decrypted primary seed of the wallet, as well as // the number of addresses that the seed can be safely used to generate. func (w *Wallet) PrimarySeed() (modules.Seed, uint64, error) { w.mu.Lock() defer w.mu.Unlock() if !w.unlocked { return modules.Seed{}, 0, modules.ErrLockedWallet } progress, err := dbGetPrimarySeedProgress(w.dbTx) if err != nil { return modules.Seed{}, 0, err } // addresses remaining is maxScanKeys-progress; generating more keys than // that risks not being able to recover them when using SweepSeed or // InitFromSeed. remaining := maxScanKeys - progress if progress > maxScanKeys { remaining = 0 } return w.primarySeed, remaining, nil } // NextAddress returns an unlock hash that is ready to receive siacoins or // siafunds. The address is generated using the primary address seed. func (w *Wallet) NextAddress() (types.UnlockConditions, error) { if err := w.tg.Add(); err != nil { return types.UnlockConditions{}, err } defer w.tg.Done() // TODO: going to the db is slow; consider creating 100 addresses at a // time. 
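// One hypothetical shape for that optimization (a sketch, not what the
// code below does): advance the seed progress by 100 in a single database
// transaction, derive the batch with generateKeys(w.primarySeed,
// progress, 100), and serve subsequent NextAddress calls from memory
// until the batch is exhausted.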
w.mu.Lock() uc, err := w.nextPrimarySeedAddress(w.dbTx) w.syncDB() // ensure durability of reported address w.mu.Unlock() if err != nil { return types.UnlockConditions{}, err } return uc, err } // LoadSeed will track all of the addresses generated by the input seed, // reclaiming any funds that were lost due to a deleted file or lost encryption // key. An error will be returned if the seed has already been integrated with // the wallet. func (w *Wallet) LoadSeed(masterKey crypto.TwofishKey, seed modules.Seed) error { if err := w.tg.Add(); err != nil { return err } defer w.tg.Done() if !w.cs.Synced() { return errors.New("cannot load seed until blockchain is synced") } if !w.scanLock.TryLock() { return errScanInProgress } defer w.scanLock.Unlock() // Because the recovery seed does not have a UID, duplication must be // prevented by comparing with the list of decrypted seeds. This can only // occur while the wallet is unlocked. w.mu.RLock() if !w.unlocked { w.mu.RUnlock() return modules.ErrLockedWallet } for _, wSeed := range append([]modules.Seed{w.primarySeed}, w.seeds...) { if seed == wSeed { w.mu.RUnlock() return errKnownSeed } } w.mu.RUnlock() // scan blockchain to determine how many keys to generate for the seed s := newSeedScanner(seed, w.log) if err := s.scan(w.cs); err != nil { return err } // Add 500 keys plus an extra 4% as a buffer because the seed may have // addresses in the wild that have not appeared in the blockchain yet. seedProgress := s.largestIndexSeen + 500 seedProgress += seedProgress / 25 w.log.Printf("INFO: found key index %v in blockchain. Setting auxiliary seed progress to %v", s.largestIndexSeen, seedProgress) err := func() error { w.mu.Lock() defer w.mu.Unlock() err := checkMasterKey(w.dbTx, masterKey) if err != nil { return err } // create a seedFile for the seed sf := createSeedFile(masterKey, seed) // add the seedFile var current []seedFile err = encoding.Unmarshal(w.dbTx.Bucket(bucketWallet).Get(keyAuxiliarySeedFiles), &current) if err != nil { return err } err = w.dbTx.Bucket(bucketWallet).Put(keyAuxiliarySeedFiles, encoding.Marshal(append(current, sf))) if err != nil { return err } // load the seed's keys w.integrateSeed(seed, seedProgress) w.seeds = append(w.seeds, seed) // delete the set of processed transactions; they will be recreated // when we rescan if err = w.dbTx.DeleteBucket(bucketProcessedTransactions); err != nil { return err } if _, err = w.dbTx.CreateBucket(bucketProcessedTransactions); err != nil { return err } w.unconfirmedProcessedTransactions = nil // reset the consensus change ID and height in preparation for rescan err = dbPutConsensusChangeID(w.dbTx, modules.ConsensusChangeBeginning) if err != nil { return err } return dbPutConsensusHeight(w.dbTx, 0) }() if err != nil { return err } // rescan the blockchain w.cs.Unsubscribe(w) w.tpool.Unsubscribe(w) done := make(chan struct{}) go w.rescanMessage(done) defer close(done) err = w.cs.ConsensusSetSubscribe(w, modules.ConsensusChangeBeginning) if err != nil { return err } w.tpool.TransactionPoolSubscribe(w) return nil } // SweepSeed scans the blockchain for outputs generated from seed and creates // a transaction that transfers them to the wallet. Note that this incurs a // transaction fee. It returns the total value of the outputs, minus the fee. // If only siafunds were found, the fee is deducted from the wallet.
func (w *Wallet) SweepSeed(seed modules.Seed) (coins, funds types.Currency, err error) { if err = w.tg.Add(); err != nil { return } defer w.tg.Done() if !w.scanLock.TryLock() { return types.Currency{}, types.Currency{}, errScanInProgress } defer w.scanLock.Unlock() w.mu.RLock() match := seed == w.primarySeed w.mu.RUnlock() if match { return types.Currency{}, types.Currency{}, errors.New("cannot sweep primary seed") } if !w.cs.Synced() { return types.Currency{}, types.Currency{}, errors.New("cannot sweep until blockchain is synced") } // get an address to spend into w.mu.Lock() uc, err := w.nextPrimarySeedAddress(w.dbTx) w.mu.Unlock() if err != nil { return } // scan blockchain for outputs, filtering out 'dust' (outputs that cost // more in fees than they are worth) s := newSeedScanner(seed, w.log) _, maxFee := w.tpool.FeeEstimation() const outputSize = 350 // approx. size in bytes of an output and accompanying signature const maxOutputs = 50 // approx. number of outputs that a transaction can handle s.dustThreshold = maxFee.Mul64(outputSize) if err = s.scan(w.cs); err != nil { return } if len(s.siacoinOutputs) == 0 && len(s.siafundOutputs) == 0 { // if we aren't sweeping any coins or funds, then just return an // error; no reason to proceed return types.Currency{}, types.Currency{}, errors.New("nothing to sweep") } // Flatten map to slice var siacoinOutputs, siafundOutputs []scannedOutput for _, sco := range s.siacoinOutputs { siacoinOutputs = append(siacoinOutputs, sco) } for _, sfo := range s.siafundOutputs { siafundOutputs = append(siafundOutputs, sfo) } for len(siacoinOutputs) > 0 || len(siafundOutputs) > 0 { // process up to maxOutputs siacoinOutputs txnSiacoinOutputs := make([]scannedOutput, maxOutputs) n := copy(txnSiacoinOutputs, siacoinOutputs) txnSiacoinOutputs = txnSiacoinOutputs[:n] siacoinOutputs = siacoinOutputs[n:] // process up to (maxOutputs-n) siafundOutputs txnSiafundOutputs := make([]scannedOutput, maxOutputs-n) n = copy(txnSiafundOutputs, siafundOutputs) txnSiafundOutputs = txnSiafundOutputs[:n] siafundOutputs = siafundOutputs[n:] var txnCoins, txnFunds types.Currency // construct a transaction that spends the outputs tb := w.StartTransaction() var sweptCoins, sweptFunds types.Currency // total values of swept outputs for _, output := range txnSiacoinOutputs { // construct a siacoin input that spends the output sk := generateSpendableKey(seed, output.seedIndex) tb.AddSiacoinInput(types.SiacoinInput{ ParentID: types.SiacoinOutputID(output.id), UnlockConditions: sk.UnlockConditions, }) // tally the swept value; signatures are added later, once the // transaction is complete sweptCoins = sweptCoins.Add(output.value) } for _, output := range txnSiafundOutputs { // construct a siafund input that spends the output sk := generateSpendableKey(seed, output.seedIndex) tb.AddSiafundInput(types.SiafundInput{ ParentID: types.SiafundOutputID(output.id), UnlockConditions: sk.UnlockConditions, }) // tally the swept value; signatures are added later, once the // transaction is complete sweptFunds = sweptFunds.Add(output.value) } // estimate the transaction size and fee.
NOTE: this equation doesn't // account for other fields in the transaction, but since we are // multiplying by maxFee, lowballing is ok estTxnSize := (len(txnSiacoinOutputs) + len(txnSiafundOutputs)) * outputSize estFee := maxFee.Mul64(uint64(estTxnSize)) tb.AddMinerFee(estFee) // calculate total siacoin payout if sweptCoins.Cmp(estFee) > 0 { txnCoins = sweptCoins.Sub(estFee) } txnFunds = sweptFunds switch { case txnCoins.IsZero() && txnFunds.IsZero(): // if we aren't sweeping any coins or funds, then just return an // error; no reason to proceed return types.Currency{}, types.Currency{}, errors.New("transaction fee exceeds value of swept outputs") case !txnCoins.IsZero() && txnFunds.IsZero(): // if we're sweeping coins but not funds, add a siacoin output for // them tb.AddSiacoinOutput(types.SiacoinOutput{ Value: txnCoins, UnlockHash: uc.UnlockHash(), }) case txnCoins.IsZero() && !txnFunds.IsZero(): // if we're sweeping funds but not coins, add a siafund output for // them. This is tricky because we still need to pay for the // transaction fee, but we can't simply subtract the fee from the // output value like we can with swept coins. Instead, we need to fund // the fee using the existing wallet balance. tb.AddSiafundOutput(types.SiafundOutput{ Value: txnFunds, UnlockHash: uc.UnlockHash(), }) err = tb.FundSiacoins(estFee) if err != nil { return types.Currency{}, types.Currency{}, errors.New("couldn't pay transaction fee on swept funds: " + err.Error()) } case !txnCoins.IsZero() && !txnFunds.IsZero(): // if we're sweeping both coins and funds, add a siacoin output and a // siafund output tb.AddSiacoinOutput(types.SiacoinOutput{ Value: txnCoins, UnlockHash: uc.UnlockHash(), }) tb.AddSiafundOutput(types.SiafundOutput{ Value: txnFunds, UnlockHash: uc.UnlockHash(), }) } // add signatures for all coins and funds (manually, since tb doesn't have // access to the signing keys) txn, parents := tb.View() for _, output := range txnSiacoinOutputs { sk := generateSpendableKey(seed, output.seedIndex) addSignatures(&txn, types.FullCoveredFields, sk.UnlockConditions, crypto.Hash(output.id), sk) } for _, sfo := range txnSiafundOutputs { sk := generateSpendableKey(seed, sfo.seedIndex) addSignatures(&txn, types.FullCoveredFields, sk.UnlockConditions, crypto.Hash(sfo.id), sk) } // Usually, all the inputs will come from swept outputs. However, there is // an edge case in which inputs will be added from the wallet. To cover // this case, we iterate through the SiacoinInputs and add a signature for // any input that belongs to the wallet. 
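// Worked example (added for clarity, not part of the original source) of
// the fee estimate constructed above: a full batch of maxOutputs = 50
// swept outputs gives estTxnSize = 50 * 350 = 17,500 bytes, so estFee =
// maxFee.Mul64(17500). The size estimate ignores other transaction fields,
// but because it is multiplied by the maximum fee rate, the resulting fee
// is still adequate.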
w.mu.RLock() for _, input := range txn.SiacoinInputs { if key, ok := w.keys[input.UnlockConditions.UnlockHash()]; ok { addSignatures(&txn, types.FullCoveredFields, input.UnlockConditions, crypto.Hash(input.ParentID), key) } } w.mu.RUnlock() // Append transaction to txnSet txnSet := append(parents, txn) // submit the transactions err = w.tpool.AcceptTransactionSet(txnSet) if err != nil { return } w.log.Println("Creating a transaction set to sweep a seed, IDs:") for _, txn := range txnSet { w.log.Println("\t", txn.ID()) } coins = coins.Add(txnCoins) funds = funds.Add(txnFunds) } return } Sia-1.3.0/modules/wallet/seed_test.go000066400000000000000000000325121313565667000175140ustar00rootroot00000000000000package wallet import ( "bytes" "path/filepath" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/miner" "github.com/NebulousLabs/Sia/types" ) // TestPrimarySeed checks that the correct seed is returned when calling // PrimarySeed. func TestPrimarySeed(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // Start with a blank wallet tester. wt, err := createBlankWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // Create a seed and unlock the wallet. seed, err := wt.wallet.Encrypt(crypto.TwofishKey{}) if err != nil { t.Fatal(err) } err = wt.wallet.Unlock(crypto.TwofishKey(crypto.HashObject(seed))) if err != nil { t.Fatal(err) } // Try getting an address, see that the seed advances correctly. primarySeed, remaining, err := wt.wallet.PrimarySeed() if err != nil { t.Fatal(err) } if !bytes.Equal(primarySeed[:], seed[:]) { t.Error("PrimarySeed is returning a value inconsistent with the seed returned by Encrypt") } if remaining != maxScanKeys { t.Error("primary seed is returning the wrong number of remaining addresses") } _, err = wt.wallet.NextAddress() if err != nil { t.Fatal(err) } _, remaining, err = wt.wallet.PrimarySeed() if err != nil { t.Fatal(err) } if remaining != maxScanKeys-1 { t.Error("primary seed is returning the wrong number of remaining addresses") } // Lock then unlock the wallet and check the responses. err = wt.wallet.Lock() if err != nil { t.Fatal(err) } _, _, err = wt.wallet.PrimarySeed() if err != modules.ErrLockedWallet { t.Error("unexpected err:", err) } err = wt.wallet.Unlock(crypto.TwofishKey(crypto.HashObject(seed))) if err != nil { t.Fatal(err) } primarySeed, remaining, err = wt.wallet.PrimarySeed() if err != nil { t.Fatal(err) } if !bytes.Equal(primarySeed[:], seed[:]) { t.Error("PrimarySeed is returning a value inconsistent with the seed returned by Encrypt") } if remaining != maxScanKeys-1 { t.Error("primary seed is returning the wrong number of remaining addresses") } } // TestLoadSeed checks that a seed can be successfully recovered from a wallet, // and then remain available on subsequent loads of the wallet.
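// Illustrative usage sketch (added for clarity, not part of the original
// source): restoring funds from a backup seed with LoadSeed. The call kicks
// off a full blockchain rescan, so the recovered balance appears only after
// the rescan completes.
//
//	err := w.LoadSeed(masterKey, backupSeed)
//	if err != nil {
//		// handle err (e.g. errKnownSeed if the seed is already tracked)
//	}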
func TestLoadSeed(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() seed, _, err := wt.wallet.PrimarySeed() if err != nil { t.Fatal(err) } allSeeds, err := wt.wallet.AllSeeds() if err != nil { t.Fatal(err) } if len(allSeeds) != 1 { t.Fatal("AllSeeds should be returning the primary seed.") } else if allSeeds[0] != seed { t.Fatal("AllSeeds returned the wrong seed") } wt.wallet.Close() dir := filepath.Join(build.TempDir(modules.WalletDir, t.Name()+"1"), modules.WalletDir) w, err := New(wt.cs, wt.tpool, dir) if err != nil { t.Fatal(err) } newSeed, err := w.Encrypt(crypto.TwofishKey{}) if err != nil { t.Fatal(err) } err = w.Unlock(crypto.TwofishKey(crypto.HashObject(newSeed))) if err != nil { t.Fatal(err) } // Balance of wallet should be 0. siacoinBal, _, _ := w.ConfirmedBalance() if !siacoinBal.Equals64(0) { t.Error("fresh wallet should not have a balance") } err = w.LoadSeed(crypto.TwofishKey(crypto.HashObject(newSeed)), seed) if err != nil { t.Fatal(err) } allSeeds, err = w.AllSeeds() if err != nil { t.Fatal(err) } if len(allSeeds) != 2 { t.Error("AllSeeds should be returning the primary seed with the recovery seed.") } if allSeeds[0] != newSeed { t.Error("AllSeeds returned the wrong seed") } if !bytes.Equal(allSeeds[1][:], seed[:]) { t.Error("AllSeeds returned the wrong seed") } siacoinBal2, _, _ := w.ConfirmedBalance() if siacoinBal2.Cmp64(0) <= 0 { t.Error("wallet failed to load a seed with money in it") } allSeeds, err = w.AllSeeds() if err != nil { t.Fatal(err) } if len(allSeeds) != 2 { t.Error("AllSeeds should be returning the primary seed with the recovery seed.") } if !bytes.Equal(allSeeds[0][:], newSeed[:]) { t.Error("AllSeeds returned the wrong seed") } if !bytes.Equal(allSeeds[1][:], seed[:]) { t.Error("AllSeeds returned the wrong seed") } } // TestSweepSeedCoins tests that sweeping a seed results in the transfer of // its siacoin outputs to the wallet. func TestSweepSeedCoins(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() // create a wallet with some money wt, err := createWalletTester("TestSweepSeedCoins0") if err != nil { t.Fatal(err) } defer wt.closeWt() seed, _, err := wt.wallet.PrimarySeed() if err != nil { t.Fatal(err) } // send money to ourselves, so that we sweep a real output (instead of // just a miner payout) uc, err := wt.wallet.NextAddress() if err != nil { t.Fatal(err) } _, err = wt.wallet.SendSiacoins(types.SiacoinPrecision, uc.UnlockHash()) if err != nil { t.Fatal(err) } wt.miner.AddBlock() // create a blank wallet dir := filepath.Join(build.TempDir(modules.WalletDir, "TestSweepSeedCoins1"), modules.WalletDir) w, err := New(wt.cs, wt.tpool, dir) if err != nil { t.Fatal(err) } newSeed, err := w.Encrypt(crypto.TwofishKey{}) if err != nil { t.Fatal(err) } err = w.Unlock(crypto.TwofishKey(crypto.HashObject(newSeed))) if err != nil { t.Fatal(err) } // starting balance should be 0. 
siacoinBal, _, _ := w.ConfirmedBalance() if !siacoinBal.IsZero() { t.Error("fresh wallet should not have a balance") } // sweep the seed of the first wallet into the second sweptCoins, _, err := w.SweepSeed(seed) if err != nil { t.Fatal(err) } // new wallet should have exactly 'sweptCoins' coins _, incoming := w.UnconfirmedBalance() if incoming.Cmp(sweptCoins) != 0 { t.Fatalf("wallet should have correct balance after sweeping seed: wanted %v, got %v", sweptCoins, incoming) } } // TestSweepSeedFunds tests that sweeping a seed results in the transfer of // its siafund outputs to the wallet. func TestSweepSeedFunds(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() wt, err := createWalletTester("TestSweepSeedFunds") if err != nil { t.Fatal(err) } defer wt.closeWt() // Load the key into the wallet. err = wt.wallet.LoadSiagKeys(wt.walletMasterKey, []string{"../../types/siag0of1of1.siakey"}) if err != nil { t.Error(err) } _, siafundBal, _ := wt.wallet.ConfirmedBalance() if siafundBal.Cmp(types.NewCurrency64(2000)) != 0 { t.Error("expecting a siafund balance of 2000 from the 1of1 key") } // need to reset the miner as well, since it depends on the wallet wt.miner, err = miner.New(wt.cs, wt.tpool, wt.wallet, wt.wallet.persistDir) if err != nil { t.Fatal(err) } // Create a seed and generate an address to send money to. seed := modules.Seed{1, 2, 3} sk := generateSpendableKey(seed, 1) // Send some siafunds to the address. _, err = wt.wallet.SendSiafunds(types.NewCurrency64(12), sk.UnlockConditions.UnlockHash()) if err != nil { t.Fatal(err) } // Send some siacoins to the address, but not enough to cover the // transaction fee. _, err = wt.wallet.SendSiacoins(types.NewCurrency64(1), sk.UnlockConditions.UnlockHash()) if err != nil { t.Fatal(err) } // mine blocks without earning payout until our balance is stable for i := types.BlockHeight(0); i < types.MaturityDelay; i++ { wt.addBlockNoPayout() } oldCoinBalance, siafundBal, _ := wt.wallet.ConfirmedBalance() if siafundBal.Cmp(types.NewCurrency64(1988)) != 0 { t.Errorf("expecting balance of %v after sending siafunds to the seed, got %v", 1988, siafundBal) } // Sweep the seed. coins, funds, err := wt.wallet.SweepSeed(seed) if err != nil { t.Fatal(err) } if !coins.IsZero() { t.Error("expected to sweep 0 coins, got", coins) } if funds.Cmp(types.NewCurrency64(12)) != 0 { t.Errorf("expected to sweep %v funds, got %v", 12, funds) } // add a block without earning its payout wt.addBlockNoPayout() // Wallet balance should have decreased to pay for the sweep transaction. newCoinBalance, _, _ := wt.wallet.ConfirmedBalance() if newCoinBalance.Cmp(oldCoinBalance) >= 0 { t.Error("expecting balance to go down; instead, increased by", newCoinBalance.Sub(oldCoinBalance)) } } // TestSweepSeedSentFunds tests that sweeping a seed results in the transfer // of its siafund outputs to the wallet, even after the funds have been // transferred a few times. func TestSweepSeedSentFunds(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() wt, err := createWalletTester("TestSweepSeedSentFunds") if err != nil { t.Fatal(err) } defer wt.closeWt() // Load the key into the wallet. 
err = wt.wallet.LoadSiagKeys(wt.walletMasterKey, []string{"../../types/siag0of1of1.siakey"}) if err != nil { t.Error(err) } _, siafundBal, _ := wt.wallet.ConfirmedBalance() if siafundBal.Cmp(types.NewCurrency64(2000)) != 0 { t.Error("expecting a siafund balance of 2000 from the 1of1 key") } // need to reset the miner as well, since it depends on the wallet wt.miner, err = miner.New(wt.cs, wt.tpool, wt.wallet, wt.wallet.persistDir) if err != nil { t.Fatal(err) } // send funds to ourself a few times for i := 0; i < 10; i++ { uc, err := wt.wallet.NextAddress() if err != nil { t.Fatal(err) } _, err = wt.wallet.SendSiafunds(types.NewCurrency64(1), uc.UnlockHash()) if err != nil { t.Fatal(err) } wt.addBlockNoPayout() } // send some funds to the void _, err = wt.wallet.SendSiafunds(types.NewCurrency64(10), types.UnlockHash{}) if err != nil { t.Fatal(err) } wt.addBlockNoPayout() // Create a seed and generate an address to send money to. seed := modules.Seed{1, 2, 3} sk := generateSpendableKey(seed, 1) // Send some siafunds to the address. _, err = wt.wallet.SendSiafunds(types.NewCurrency64(12), sk.UnlockConditions.UnlockHash()) if err != nil { t.Fatal(err) } // mine blocks without earning payout until our balance is stable for i := types.BlockHeight(0); i < types.MaturityDelay; i++ { wt.addBlockNoPayout() } oldCoinBalance, siafundBal, _ := wt.wallet.ConfirmedBalance() if expected := 2000 - 12 - 10; siafundBal.Cmp(types.NewCurrency64(uint64(expected))) != 0 { t.Errorf("expecting balance of %v after sending siafunds to the seed, got %v", expected, siafundBal) } // Sweep the seed. coins, funds, err := wt.wallet.SweepSeed(seed) if err != nil { t.Fatal(err) } if !coins.IsZero() { t.Error("expected to sweep 0 coins, got", coins) } if funds.Cmp(types.NewCurrency64(12)) != 0 { t.Errorf("expected to sweep %v funds, got %v", 12, funds) } // add a block without earning its payout wt.addBlockNoPayout() // Wallet balance should have decreased to pay for the sweep transaction. newCoinBalance, _, _ := wt.wallet.ConfirmedBalance() if newCoinBalance.Cmp(oldCoinBalance) >= 0 { t.Error("expecting balance to go down; instead, increased by", newCoinBalance.Sub(oldCoinBalance)) } } // TestSweepSeedCoinsAndFunds tests that sweeping a seed results in the // transfer of its siacoin and siafund outputs to the wallet. func TestSweepSeedCoinsAndFunds(t *testing.T) { if testing.Short() || !build.VLONG { t.SkipNow() } t.Parallel() wt, err := createWalletTester("TestSweepSeedCoinsAndFunds") if err != nil { t.Fatal(err) } defer wt.closeWt() // Load the key into the wallet. err = wt.wallet.LoadSiagKeys(wt.walletMasterKey, []string{"../../types/siag0of1of1.siakey"}) if err != nil { t.Error(err) } _, siafundBal, _ := wt.wallet.ConfirmedBalance() if siafundBal.Cmp(types.NewCurrency64(2000)) != 0 { t.Error("expecting a siafund balance of 2000 from the 1of1 key") } // Create a seed and generate an address to send money to. seed := modules.Seed{1, 2, 3} sk := generateSpendableKey(seed, 1) // Send some siafunds to the address. for i := 0; i < 12; i++ { _, err = wt.wallet.SendSiafunds(types.NewCurrency64(1), sk.UnlockConditions.UnlockHash()) if err != nil { t.Fatal(err) } wt.addBlockNoPayout() } // Send some siacoins to the address -- must be more than the transaction // fee. 
for i := 0; i < 100; i++ { _, err = wt.wallet.SendSiacoins(types.SiacoinPrecision.Mul64(10), sk.UnlockConditions.UnlockHash()) if err != nil { t.Fatal(err) } wt.addBlockNoPayout() } // mine blocks without earning payout until our balance is stable for i := types.BlockHeight(0); i < types.MaturityDelay; i++ { wt.addBlockNoPayout() } oldCoinBalance, siafundBal, _ := wt.wallet.ConfirmedBalance() if siafundBal.Cmp(types.NewCurrency64(1988)) != 0 { t.Errorf("expecting balance of %v after sending siafunds to the seed, got %v", 1988, siafundBal) } // Sweep the seed. coins, funds, err := wt.wallet.SweepSeed(seed) if err != nil { t.Fatal(err) } if coins.IsZero() { t.Error("expected to sweep coins, got 0") } if funds.Cmp(types.NewCurrency64(12)) != 0 { t.Errorf("expected to sweep %v funds, got %v", 12, funds) } // add a block without earning its payout wt.addBlockNoPayout() // Wallet balance should have increased; the swept coins are worth more // than the fee paid for the sweep transaction. newCoinBalance, _, _ := wt.wallet.ConfirmedBalance() if newCoinBalance.Cmp(oldCoinBalance) <= 0 { t.Error("expecting balance to go up; instead, decreased by", oldCoinBalance.Sub(newCoinBalance)) } } // TestGenerateKeys tests that the generateKeys function correctly generates a // key for every index specified. func TestGenerateKeys(t *testing.T) { for i, k := range generateKeys(modules.Seed{}, 1000, 4000) { if len(k.UnlockConditions.PublicKeys) == 0 { t.Errorf("index %v was skipped", i) } } } Sia-1.3.0/modules/wallet/transactionbuilder.go000066400000000000000000000574131313565667000214400ustar00rootroot00000000000000package wallet import ( "bytes" "errors" "sort" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) var ( // errBuilderAlreadySigned indicates that the transaction builder has // already added at least one successful signature to the transaction, // meaning that future calls to Sign will result in an invalid transaction. errBuilderAlreadySigned = errors.New("sign has already been called on this transaction builder, multiple calls can cause issues") // errSpendHeightTooHigh indicates an output's spend height is greater than // the allowed height. errSpendHeightTooHigh = errors.New("output spend height exceeds the allowed height") // errOutputTimelock indicates an output's timelock is still active. errOutputTimelock = errors.New("wallet consensus set height is lower than the output timelock") // errDustOutput indicates an output is not spendable because it is dust. errDustOutput = errors.New("output is too small") ) // transactionBuilder allows transactions to be manually constructed, including // the ability to fund transactions with siacoins and siafunds from the wallet. type transactionBuilder struct { // 'signed' indicates that at least one transaction signature has been // added to the transaction, meaning that future calls to 'Sign' will fail. parents []types.Transaction signed bool transaction types.Transaction newParents []int siacoinInputs []int siafundInputs []int transactionSignatures []int wallet *Wallet } // addSignatures will sign a transaction using a spendable key, with support // for multisig spendable keys. Because of the restricted input, the function // is compatible with both siacoin inputs and siafund inputs.
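// Illustrative sketch (added for clarity, not part of the original source)
// of the UnlockConditions shape consumed by addSignatures; a 2-of-3 multisig
// address would look like:
//
//	uc := types.UnlockConditions{
//		PublicKeys:         []types.SiaPublicKey{pk1, pk2, pk3},
//		SignaturesRequired: 2,
//	}
//
// addSignatures walks PublicKeys in order, signing with each matching secret
// key it holds, and stops once SignaturesRequired signatures have been added.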
func addSignatures(txn *types.Transaction, cf types.CoveredFields, uc types.UnlockConditions, parentID crypto.Hash, spendKey spendableKey) (newSigIndices []int) { // Try to find the matching secret key for each public key - some public // keys may not have a match. Some secret keys may be used multiple times, // which is why public keys are used as the outer loop. totalSignatures := uint64(0) for i, siaPubKey := range uc.PublicKeys { // Search for the matching secret key to the public key. for j := range spendKey.SecretKeys { pubKey := spendKey.SecretKeys[j].PublicKey() if !bytes.Equal(siaPubKey.Key, pubKey[:]) { continue } // Found the right secret key, add a signature. sig := types.TransactionSignature{ ParentID: parentID, CoveredFields: cf, PublicKeyIndex: uint64(i), } newSigIndices = append(newSigIndices, len(txn.TransactionSignatures)) txn.TransactionSignatures = append(txn.TransactionSignatures, sig) sigIndex := len(txn.TransactionSignatures) - 1 sigHash := txn.SigHash(sigIndex) encodedSig := crypto.SignHash(sigHash, spendKey.SecretKeys[j]) txn.TransactionSignatures[sigIndex].Signature = encodedSig[:] // Count that the signature has been added, and break out of the // secret key loop. totalSignatures++ break } // If there are enough signatures to satisfy the unlock conditions, // break out of the outer loop. if totalSignatures == uc.SignaturesRequired { break } } return newSigIndices } // checkOutput is a helper function used to determine if an output is usable. func (w *Wallet) checkOutput(tx *bolt.Tx, currentHeight types.BlockHeight, id types.SiacoinOutputID, output types.SiacoinOutput) error { // Check that an output is not dust if output.Value.Cmp(dustValue()) < 0 { return errDustOutput } // Check that this output has not recently been spent by the wallet. spendHeight, err := dbGetSpentOutput(tx, types.OutputID(id)) if err == nil { if spendHeight+RespendTimeout > currentHeight { return errSpendHeightTooHigh } } outputUnlockConditions := w.keys[output.UnlockHash].UnlockConditions if currentHeight < outputUnlockConditions.Timelock { return errOutputTimelock } return nil } // FundSiacoins will add a siacoin input of exactly 'amount' to the // transaction. A parent transaction may be needed to achieve an input with the // correct value. The siacoin input will not be signed until 'Sign' is called // on the transaction builder. func (tb *transactionBuilder) FundSiacoins(amount types.Currency) error { tb.wallet.mu.Lock() defer tb.wallet.mu.Unlock() consensusHeight, err := dbGetConsensusHeight(tb.wallet.dbTx) if err != nil { return err } // Collect a value-sorted set of siacoin outputs. var so sortedOutputs err = dbForEachSiacoinOutput(tb.wallet.dbTx, func(scoid types.SiacoinOutputID, sco types.SiacoinOutput) { so.ids = append(so.ids, scoid) so.outputs = append(so.outputs, sco) }) if err != nil { return err } // Add all of the unconfirmed outputs as well. for _, upt := range tb.wallet.unconfirmedProcessedTransactions { for i, sco := range upt.Transaction.SiacoinOutputs { // Determine if the output belongs to the wallet. _, exists := tb.wallet.keys[sco.UnlockHash] if !exists { continue } so.ids = append(so.ids, upt.Transaction.SiacoinOutputID(uint64(i))) so.outputs = append(so.outputs, sco) } } sort.Sort(sort.Reverse(so)) // Create and fund a parent transaction that will add the correct amount of // siacoins to the transaction. var fund types.Currency // potentialFund tracks the balance of the wallet including outputs that // have been spent in other unconfirmed transactions recently. 
This is to // provide the user with a more useful error message in the event that they // are overspending. var potentialFund types.Currency parentTxn := types.Transaction{} var spentScoids []types.SiacoinOutputID for i := range so.ids { scoid := so.ids[i] sco := so.outputs[i] // Check that the output can be spent. if err := tb.wallet.checkOutput(tb.wallet.dbTx, consensusHeight, scoid, sco); err != nil { if err == errSpendHeightTooHigh { potentialFund = potentialFund.Add(sco.Value) } continue } // Add a siacoin input for this output. sci := types.SiacoinInput{ ParentID: scoid, UnlockConditions: tb.wallet.keys[sco.UnlockHash].UnlockConditions, } parentTxn.SiacoinInputs = append(parentTxn.SiacoinInputs, sci) spentScoids = append(spentScoids, scoid) // Add the output to the total fund fund = fund.Add(sco.Value) potentialFund = potentialFund.Add(sco.Value) if fund.Cmp(amount) >= 0 { break } } if potentialFund.Cmp(amount) >= 0 && fund.Cmp(amount) < 0 { return modules.ErrIncompleteTransactions } if fund.Cmp(amount) < 0 { return modules.ErrLowBalance } // Create and add the output that will be used to fund the standard // transaction. parentUnlockConditions, err := tb.wallet.nextPrimarySeedAddress(tb.wallet.dbTx) if err != nil { return err } exactOutput := types.SiacoinOutput{ Value: amount, UnlockHash: parentUnlockConditions.UnlockHash(), } parentTxn.SiacoinOutputs = append(parentTxn.SiacoinOutputs, exactOutput) // Create a refund output if needed. if !amount.Equals(fund) { refundUnlockConditions, err := tb.wallet.nextPrimarySeedAddress(tb.wallet.dbTx) if err != nil { return err } refundOutput := types.SiacoinOutput{ Value: fund.Sub(amount), UnlockHash: refundUnlockConditions.UnlockHash(), } parentTxn.SiacoinOutputs = append(parentTxn.SiacoinOutputs, refundOutput) } // Sign all of the inputs to the parent transaction. for _, sci := range parentTxn.SiacoinInputs { addSignatures(&parentTxn, types.FullCoveredFields, sci.UnlockConditions, crypto.Hash(sci.ParentID), tb.wallet.keys[sci.UnlockConditions.UnlockHash()]) } // Mark the parent output as spent. Must be done after the transaction is // finished because otherwise the txid and output id will change. err = dbPutSpentOutput(tb.wallet.dbTx, types.OutputID(parentTxn.SiacoinOutputID(0)), consensusHeight) if err != nil { return err } // Add the exact output. newInput := types.SiacoinInput{ ParentID: parentTxn.SiacoinOutputID(0), UnlockConditions: parentUnlockConditions, } tb.newParents = append(tb.newParents, len(tb.parents)) tb.parents = append(tb.parents, parentTxn) tb.siacoinInputs = append(tb.siacoinInputs, len(tb.transaction.SiacoinInputs)) tb.transaction.SiacoinInputs = append(tb.transaction.SiacoinInputs, newInput) // Mark all outputs that were spent as spent. for _, scoid := range spentScoids { err = dbPutSpentOutput(tb.wallet.dbTx, types.OutputID(scoid), consensusHeight) if err != nil { return err } } return nil } // FundSiafunds will add a siafund input of exactly 'amount' to the // transaction. A parent transaction may be needed to achieve an input with the // correct value. The siafund input will not be signed until 'Sign' is called // on the transaction builder. func (tb *transactionBuilder) FundSiafunds(amount types.Currency) error { tb.wallet.mu.Lock() defer tb.wallet.mu.Unlock() consensusHeight, err := dbGetConsensusHeight(tb.wallet.dbTx) if err != nil { return err } // Create and fund a parent transaction that will add the correct amount of // siafunds to the transaction.
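// Note (added for clarity, not part of the original source): as with
// FundSiacoins above, funding works by building a parent transaction that
// gathers wallet outputs and pays them to a single output worth exactly
// 'amount' (plus a refund output for any excess). The transaction under
// construction then spends that exact output, so no change ever leaks into
// the caller's transaction.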
var fund types.Currency var potentialFund types.Currency parentTxn := types.Transaction{} var spentSfoids []types.SiafundOutputID c := tb.wallet.dbTx.Bucket(bucketSiafundOutputs).Cursor() for idBytes, sfoBytes := c.First(); idBytes != nil; idBytes, sfoBytes = c.Next() { var sfoid types.SiafundOutputID var sfo types.SiafundOutput if err := encoding.Unmarshal(idBytes, &sfoid); err != nil { return err } else if err := encoding.Unmarshal(sfoBytes, &sfo); err != nil { return err } // Check that this output has not recently been spent by the wallet. spendHeight, err := dbGetSpentOutput(tb.wallet.dbTx, types.OutputID(sfoid)) if err != nil { // mimic map behavior: no entry means zero value spendHeight = 0 } // Prevent an underflow error. allowedHeight := consensusHeight - RespendTimeout if consensusHeight < RespendTimeout { allowedHeight = 0 } if spendHeight > allowedHeight { potentialFund = potentialFund.Add(sfo.Value) continue } outputUnlockConditions := tb.wallet.keys[sfo.UnlockHash].UnlockConditions if consensusHeight < outputUnlockConditions.Timelock { continue } // Add a siafund input for this output. parentClaimUnlockConditions, err := tb.wallet.nextPrimarySeedAddress(tb.wallet.dbTx) if err != nil { return err } sfi := types.SiafundInput{ ParentID: sfoid, UnlockConditions: outputUnlockConditions, ClaimUnlockHash: parentClaimUnlockConditions.UnlockHash(), } parentTxn.SiafundInputs = append(parentTxn.SiafundInputs, sfi) spentSfoids = append(spentSfoids, sfoid) // Add the output to the total fund fund = fund.Add(sfo.Value) potentialFund = potentialFund.Add(sfo.Value) if fund.Cmp(amount) >= 0 { break } } if potentialFund.Cmp(amount) >= 0 && fund.Cmp(amount) < 0 { return modules.ErrIncompleteTransactions } if fund.Cmp(amount) < 0 { return modules.ErrLowBalance } // Create and add the output that will be used to fund the standard // transaction. parentUnlockConditions, err := tb.wallet.nextPrimarySeedAddress(tb.wallet.dbTx) if err != nil { return err } exactOutput := types.SiafundOutput{ Value: amount, UnlockHash: parentUnlockConditions.UnlockHash(), } parentTxn.SiafundOutputs = append(parentTxn.SiafundOutputs, exactOutput) // Create a refund output if needed. if !amount.Equals(fund) { refundUnlockConditions, err := tb.wallet.nextPrimarySeedAddress(tb.wallet.dbTx) if err != nil { return err } refundOutput := types.SiafundOutput{ Value: fund.Sub(amount), UnlockHash: refundUnlockConditions.UnlockHash(), } parentTxn.SiafundOutputs = append(parentTxn.SiafundOutputs, refundOutput) } // Sign all of the inputs to the parent transaction. for _, sfi := range parentTxn.SiafundInputs { addSignatures(&parentTxn, types.FullCoveredFields, sfi.UnlockConditions, crypto.Hash(sfi.ParentID), tb.wallet.keys[sfi.UnlockConditions.UnlockHash()]) } // Add the exact output. claimUnlockConditions, err := tb.wallet.nextPrimarySeedAddress(tb.wallet.dbTx) if err != nil { return err } newInput := types.SiafundInput{ ParentID: parentTxn.SiafundOutputID(0), UnlockConditions: parentUnlockConditions, ClaimUnlockHash: claimUnlockConditions.UnlockHash(), } tb.newParents = append(tb.newParents, len(tb.parents)) tb.parents = append(tb.parents, parentTxn) tb.siafundInputs = append(tb.siafundInputs, len(tb.transaction.SiafundInputs)) tb.transaction.SiafundInputs = append(tb.transaction.SiafundInputs, newInput) // Mark all outputs that were spent as spent.
for _, sfoid := range spentSfoids { err = dbPutSpentOutput(tb.wallet.dbTx, types.OutputID(sfoid), consensusHeight) if err != nil { return err } } return nil } // AddParents adds a set of parents to the transaction. func (tb *transactionBuilder) AddParents(newParents []types.Transaction) { tb.parents = append(tb.parents, newParents...) } // AddMinerFee adds a miner fee to the transaction, returning the index of the // miner fee within the transaction. func (tb *transactionBuilder) AddMinerFee(fee types.Currency) uint64 { tb.transaction.MinerFees = append(tb.transaction.MinerFees, fee) return uint64(len(tb.transaction.MinerFees) - 1) } // AddSiacoinInput adds a siacoin input to the transaction, returning the index // of the siacoin input within the transaction. When 'Sign' gets called, this // input will be left unsigned. func (tb *transactionBuilder) AddSiacoinInput(input types.SiacoinInput) uint64 { tb.transaction.SiacoinInputs = append(tb.transaction.SiacoinInputs, input) return uint64(len(tb.transaction.SiacoinInputs) - 1) } // AddSiacoinOutput adds a siacoin output to the transaction, returning the // index of the siacoin output within the transaction. func (tb *transactionBuilder) AddSiacoinOutput(output types.SiacoinOutput) uint64 { tb.transaction.SiacoinOutputs = append(tb.transaction.SiacoinOutputs, output) return uint64(len(tb.transaction.SiacoinOutputs) - 1) } // AddFileContract adds a file contract to the transaction, returning the index // of the file contract within the transaction. func (tb *transactionBuilder) AddFileContract(fc types.FileContract) uint64 { tb.transaction.FileContracts = append(tb.transaction.FileContracts, fc) return uint64(len(tb.transaction.FileContracts) - 1) } // AddFileContractRevision adds a file contract revision to the transaction, // returning the index of the file contract revision within the transaction. // When 'Sign' gets called, this revision will be left unsigned. func (tb *transactionBuilder) AddFileContractRevision(fcr types.FileContractRevision) uint64 { tb.transaction.FileContractRevisions = append(tb.transaction.FileContractRevisions, fcr) return uint64(len(tb.transaction.FileContractRevisions) - 1) } // AddStorageProof adds a storage proof to the transaction, returning the index // of the storage proof within the transaction. func (tb *transactionBuilder) AddStorageProof(sp types.StorageProof) uint64 { tb.transaction.StorageProofs = append(tb.transaction.StorageProofs, sp) return uint64(len(tb.transaction.StorageProofs) - 1) } // AddSiafundInput adds a siafund input to the transaction, returning the index // of the siafund input within the transaction. When 'Sign' is called, this // input will be left unsigned. func (tb *transactionBuilder) AddSiafundInput(input types.SiafundInput) uint64 { tb.transaction.SiafundInputs = append(tb.transaction.SiafundInputs, input) return uint64(len(tb.transaction.SiafundInputs) - 1) } // AddSiafundOutput adds a siafund output to the transaction, returning the // index of the siafund output within the transaction. func (tb *transactionBuilder) AddSiafundOutput(output types.SiafundOutput) uint64 { tb.transaction.SiafundOutputs = append(tb.transaction.SiafundOutputs, output) return uint64(len(tb.transaction.SiafundOutputs) - 1) } // AddArbitraryData adds arbitrary data to the transaction, returning the index // of the data within the transaction. 
func (tb *transactionBuilder) AddArbitraryData(arb []byte) uint64 { tb.transaction.ArbitraryData = append(tb.transaction.ArbitraryData, arb) return uint64(len(tb.transaction.ArbitraryData) - 1) } // AddTransactionSignature adds a transaction signature to the transaction, // returning the index of the signature within the transaction. The signature // should already be valid, and shouldn't sign any of the inputs that were // added by calling 'FundSiacoins' or 'FundSiafunds'. func (tb *transactionBuilder) AddTransactionSignature(sig types.TransactionSignature) uint64 { tb.transaction.TransactionSignatures = append(tb.transaction.TransactionSignatures, sig) return uint64(len(tb.transaction.TransactionSignatures) - 1) } // Drop discards the transaction in progress, returning the outputs it spent // to the pool so that other transactions may use them. 'Drop' should only be // called if a transaction is both unsigned and will not be used any further. func (tb *transactionBuilder) Drop() { tb.wallet.mu.Lock() defer tb.wallet.mu.Unlock() // Iterate through all parents and the transaction itself and restore all // outputs to the list of available outputs. txns := append(tb.parents, tb.transaction) for _, txn := range txns { for _, sci := range txn.SiacoinInputs { dbDeleteSpentOutput(tb.wallet.dbTx, types.OutputID(sci.ParentID)) } } tb.parents = nil tb.signed = false tb.transaction = types.Transaction{} tb.newParents = nil tb.siacoinInputs = nil tb.siafundInputs = nil tb.transactionSignatures = nil } // Sign will sign any inputs added by 'FundSiacoins' or 'FundSiafunds' and // return a transaction set that contains all parents prepended to the // transaction. If more fields need to be added, a new transaction builder will // need to be created. // // If the whole transaction flag is set to true, then the whole transaction // flag will be set in the covered fields object. If the whole transaction flag // is set to false, then the covered fields object will cover all fields that // have already been added to the transaction, but will also leave room for // more fields to be added. // // Sign should not be called more than once. If, for some reason, there is an // error while calling Sign, the builder should be dropped. func (tb *transactionBuilder) Sign(wholeTransaction bool) ([]types.Transaction, error) { if tb.signed { return nil, errBuilderAlreadySigned } // Create the CoveredFields struct.
var coveredFields types.CoveredFields if wholeTransaction { coveredFields = types.CoveredFields{WholeTransaction: true} } else { for i := range tb.transaction.MinerFees { coveredFields.MinerFees = append(coveredFields.MinerFees, uint64(i)) } for i := range tb.transaction.SiacoinInputs { coveredFields.SiacoinInputs = append(coveredFields.SiacoinInputs, uint64(i)) } for i := range tb.transaction.SiacoinOutputs { coveredFields.SiacoinOutputs = append(coveredFields.SiacoinOutputs, uint64(i)) } for i := range tb.transaction.FileContracts { coveredFields.FileContracts = append(coveredFields.FileContracts, uint64(i)) } for i := range tb.transaction.FileContractRevisions { coveredFields.FileContractRevisions = append(coveredFields.FileContractRevisions, uint64(i)) } for i := range tb.transaction.StorageProofs { coveredFields.StorageProofs = append(coveredFields.StorageProofs, uint64(i)) } for i := range tb.transaction.SiafundInputs { coveredFields.SiafundInputs = append(coveredFields.SiafundInputs, uint64(i)) } for i := range tb.transaction.SiafundOutputs { coveredFields.SiafundOutputs = append(coveredFields.SiafundOutputs, uint64(i)) } for i := range tb.transaction.ArbitraryData { coveredFields.ArbitraryData = append(coveredFields.ArbitraryData, uint64(i)) } } // TransactionSignatures don't get covered by the 'WholeTransaction' flag, // and must be covered manually. for i := range tb.transaction.TransactionSignatures { coveredFields.TransactionSignatures = append(coveredFields.TransactionSignatures, uint64(i)) } // For each siacoin input in the transaction that we added, provide a // signature. tb.wallet.mu.RLock() defer tb.wallet.mu.RUnlock() for _, inputIndex := range tb.siacoinInputs { input := tb.transaction.SiacoinInputs[inputIndex] key, ok := tb.wallet.keys[input.UnlockConditions.UnlockHash()] if !ok { return nil, errors.New("transaction builder added an input that it cannot sign") } newSigIndices := addSignatures(&tb.transaction, coveredFields, input.UnlockConditions, crypto.Hash(input.ParentID), key) tb.transactionSignatures = append(tb.transactionSignatures, newSigIndices...) tb.signed = true // Signed is set to true after one successful signature to indicate that future signings can cause issues. } for _, inputIndex := range tb.siafundInputs { input := tb.transaction.SiafundInputs[inputIndex] key, ok := tb.wallet.keys[input.UnlockConditions.UnlockHash()] if !ok { return nil, errors.New("transaction builder added an input that it cannot sign") } newSigIndices := addSignatures(&tb.transaction, coveredFields, input.UnlockConditions, crypto.Hash(input.ParentID), key) tb.transactionSignatures = append(tb.transactionSignatures, newSigIndices...) tb.signed = true // Signed is set to true after one successful signature to indicate that future signings can cause issues. } // Return the transaction set, with all parents prepended to the // transaction. txnSet := append(tb.parents, tb.transaction) return txnSet, nil } // View returns the transaction-in-progress along with all of its parents. func (tb *transactionBuilder) View() (types.Transaction, []types.Transaction) { return tb.transaction, tb.parents } // ViewAdded returns all of the siacoin inputs, siafund inputs, and parent // transactions that have been automatically added by the builder.
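// Illustrative sketch (added for clarity, not part of the original source):
// two parties extending one transaction via View/ViewAdded, mirroring the
// renter-host flow exercised in the tests below.
//
//	b1 := w1.StartTransaction()
//	_ = b1.FundSiacoins(amount)           // party 1 adds inputs
//	txn, parents := b1.View()             // share the unsigned transaction
//	b2 := w2.RegisterTransaction(txn, parents)
//	_ = b2.FundSiacoins(amount)           // party 2 adds inputs
//	_, newInputs, _, _ := b2.ViewAdded()  // indices of party 2's additions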
func (tb *transactionBuilder) ViewAdded() (newParents, siacoinInputs, siafundInputs, transactionSignatures []int) { return tb.newParents, tb.siacoinInputs, tb.siafundInputs, tb.transactionSignatures } // registerTransaction takes a transaction and its parents and returns a // wallet.TransactionBuilder which can be used to expand the transaction. The // most typical call is 'RegisterTransaction(types.Transaction{}, nil)', which // registers a new transaction without parents. func (w *Wallet) registerTransaction(t types.Transaction, parents []types.Transaction) *transactionBuilder { // Create a deep copy of the transaction and parents by encoding them. A // deep copy ensures that there are no pointer or slice related errors - // the builder will be working directly on the transaction, and the // transaction may be in use elsewhere (in this case, the host is using the // transaction). pBytes := encoding.Marshal(parents) var pCopy []types.Transaction err := encoding.Unmarshal(pBytes, &pCopy) if err != nil { panic(err) } tBytes := encoding.Marshal(t) var tCopy types.Transaction err = encoding.Unmarshal(tBytes, &tCopy) if err != nil { panic(err) } return &transactionBuilder{ parents: pCopy, transaction: tCopy, wallet: w, } } // RegisterTransaction takes a transaction and its parents and returns a // modules.TransactionBuilder which can be used to expand the transaction. The // most typical call is 'RegisterTransaction(types.Transaction{}, nil)', which // registers a new transaction without parents. func (w *Wallet) RegisterTransaction(t types.Transaction, parents []types.Transaction) modules.TransactionBuilder { w.mu.Lock() defer w.mu.Unlock() return w.registerTransaction(t, parents) } // StartTransaction is a convenience function that calls // RegisterTransaction(types.Transaction{}, nil). func (w *Wallet) StartTransaction() modules.TransactionBuilder { return w.RegisterTransaction(types.Transaction{}, nil) } Sia-1.3.0/modules/wallet/transactionbuilder_test.go000066400000000000000000000320651313565667000224730ustar00rootroot00000000000000package wallet import ( "sync" "testing" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // addBlockNoPayout adds a block to the wallet tester that does not have any // payouts. func (wt *walletTester) addBlockNoPayout() error { block, target, err := wt.miner.BlockForWork() if err != nil { return err } // Clear the miner payout so that the wallet is not getting additional // outputs from these blocks. for i := range block.MinerPayouts { block.MinerPayouts[i].UnlockHash = types.UnlockHash{} } // Solve and submit the block. solvedBlock, _ := wt.miner.SolveBlock(block, target) err = wt.cs.AcceptBlock(solvedBlock) if err != nil { return err } return nil } // TestViewAdded checks that 'ViewAdded' returns sane-seeming values when // indicating which elements have been added automatically to a transaction // set. func TestViewAdded(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // Mine an extra block to get more outputs - the wallet is going to be // loading two transactions at the same time. _, err = wt.miner.AddBlock() if err != nil { t.Fatal(err) } // Create a transaction, add money to it, spend the money in a miner fee // but do not sign the transaction. The format of this test mimics the way // that the host-renter protocol behaves when building a file contract // transaction.
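// Summary of the builder lifecycle exercised below (added for clarity, not
// part of the original source): StartTransaction -> FundSiacoins ->
// AddMinerFee/AddSiacoinOutput -> View to share the unsigned transaction ->
// Sign (at most once per builder) -> tpool.AcceptTransactionSet.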
b := wt.wallet.StartTransaction() txnFund := types.NewCurrency64(100e9) err = b.FundSiacoins(txnFund) if err != nil { t.Fatal(err) } _ = b.AddMinerFee(txnFund) _ = b.AddSiacoinOutput(types.SiacoinOutput{Value: txnFund}) unfinishedTxn, unfinishedParents := b.View() // Create a second builder that extends the first, unsigned transaction. Do // not sign the transaction, but do give the extensions to the original // builder. b2 := wt.wallet.RegisterTransaction(unfinishedTxn, unfinishedParents) err = b2.FundSiacoins(txnFund) if err != nil { t.Fatal(err) } unfinishedTxn2, unfinishedParents2 := b2.View() newParentIndices, newInputIndices, _, _ := b2.ViewAdded() // Add the new elements from b2 to b and sign the transaction, fetching the // signature for b. for _, parentIndex := range newParentIndices { b.AddParents([]types.Transaction{unfinishedParents2[parentIndex]}) } for _, inputIndex := range newInputIndices { b.AddSiacoinInput(unfinishedTxn2.SiacoinInputs[inputIndex]) } // Signing with WholeTransaction=true makes the transaction more brittle to // construction mistakes, meaning that an error is more likely to turn up. set1, err := b.Sign(true) if err != nil { t.Fatal(err) } if set1[len(set1)-1].ID() == unfinishedTxn.ID() { t.Error("seems like there's memory sharing happening between txn calls") } // Set1 should be missing some signatures, so the transaction pool should // reject it. err = wt.tpool.AcceptTransactionSet(set1) if err == nil { t.Fatal("expected AcceptTransactionSet to reject set1, which is missing signatures") } unfinishedTxn3, _ := b.View() // Only the new signatures are needed because the previous call to 'View' // included everything else. _, _, _, newTxnSignaturesIndices := b.ViewAdded() // Add the new signatures to b2, and then sign b2's inputs. The resulting // set from b2 should be valid. for _, sigIndex := range newTxnSignaturesIndices { b2.AddTransactionSignature(unfinishedTxn3.TransactionSignatures[sigIndex]) } set2, err := b2.Sign(true) if err != nil { t.Fatal(err) } err = wt.tpool.AcceptTransactionSet(set2) if err != nil { t.Fatal(err) } finishedTxn, _ := b2.View() _, _, _, newTxnSignaturesIndices3 := b2.ViewAdded() // Add the new signatures from b2 to the b1 transaction, which should // complete the transaction and create a transaction set in 'b' that is // identical to the transaction set that is in b2. for _, sigIndex := range newTxnSignaturesIndices3 { b.AddTransactionSignature(finishedTxn.TransactionSignatures[sigIndex]) } set3Txn, set3Parents := b.View() err = wt.tpool.AcceptTransactionSet(append(set3Parents, set3Txn)) if err != modules.ErrDuplicateTransactionSet { t.Fatal(err) } } // TestDoubleSignError checks that an error is returned if there is a problem // when trying to call 'Sign' on a transaction twice. func TestDoubleSignError(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // Create a transaction, add money to it, and then call sign twice.
b := wt.wallet.StartTransaction() txnFund := types.NewCurrency64(100e9) err = b.FundSiacoins(txnFund) if err != nil { t.Fatal(err) } _ = b.AddMinerFee(txnFund) txnSet, err := b.Sign(true) if err != nil { t.Fatal(err) } txnSet2, err := b.Sign(true) if err != errBuilderAlreadySigned { t.Error("the wrong error is being returned after a double call to sign") } if err != nil && txnSet2 != nil { t.Error("errored call to sign did not return a nil txn set") } err = wt.tpool.AcceptTransactionSet(txnSet) if err != nil { t.Fatal(err) } } // TestConcurrentBuilders checks that multiple transaction builders can safely // be opened at the same time, and that they will make valid transactions when // building concurrently. func TestConcurrentBuilders(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // Mine a few more blocks so that the wallet has lots of outputs to pick // from. for i := 0; i < 5; i++ { _, err := wt.miner.AddBlock() if err != nil { t.Fatal(err) } } // Get a baseline balance for the wallet. startingSCConfirmed, _, _ := wt.wallet.ConfirmedBalance() startingOutgoing, startingIncoming := wt.wallet.UnconfirmedBalance() if !startingOutgoing.IsZero() { t.Fatal(startingOutgoing) } if !startingIncoming.IsZero() { t.Fatal(startingIncoming) } // Create two builders at the same time, then add money to each. builder1 := wt.wallet.StartTransaction() builder2 := wt.wallet.StartTransaction() // Fund each builder with a siacoin output that is smaller than all of the // outputs that the wallet should currently have. funding := types.NewCurrency64(10e3).Mul(types.SiacoinPrecision) err = builder1.FundSiacoins(funding) if err != nil { t.Fatal(err) } err = builder2.FundSiacoins(funding) if err != nil { t.Fatal(err) } // Get a second reading on the wallet's balance. fundedSCConfirmed, _, _ := wt.wallet.ConfirmedBalance() if !startingSCConfirmed.Equals(fundedSCConfirmed) { t.Fatal("confirmed siacoin balance changed when no blocks have been mined", startingSCConfirmed, fundedSCConfirmed) } // Spend the transaction funds on miner fees and the void output. builder1.AddMinerFee(types.NewCurrency64(25).Mul(types.SiacoinPrecision)) builder2.AddMinerFee(types.NewCurrency64(25).Mul(types.SiacoinPrecision)) // Send the money to the void. output := types.SiacoinOutput{Value: types.NewCurrency64(9975).Mul(types.SiacoinPrecision)} builder1.AddSiacoinOutput(output) builder2.AddSiacoinOutput(output) // Sign the transactions and verify that both are valid. tset1, err := builder1.Sign(true) if err != nil { t.Fatal(err) } tset2, err := builder2.Sign(true) if err != nil { t.Fatal(err) } err = wt.tpool.AcceptTransactionSet(tset1) if err != nil { t.Fatal(err) } err = wt.tpool.AcceptTransactionSet(tset2) if err != nil { t.Fatal(err) } // Mine a block to get the transaction sets into the blockchain. _, err = wt.miner.AddBlock() if err != nil { t.Fatal(err) } } // TestConcurrentBuildersSingleOutput probes the behavior when multiple // builders are created at the same time, but there is only a single wallet // output that they end up needing to share. func TestConcurrentBuildersSingleOutput(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // Mine MaturityDelay blocks on the wallet using blocks that don't give // miner payouts to the wallet, so that all outputs can be condensed into a // single confirmed output. 
Currently the wallet will be getting a new // output per block because it has mined some blocks that haven't had their // outputs matured. for i := types.BlockHeight(0); i < types.MaturityDelay+1; i++ { err = wt.addBlockNoPayout() if err != nil { t.Fatal(err) } } // Send all coins to a single confirmed output for the wallet. unlockConditions, err := wt.wallet.NextAddress() if err != nil { t.Fatal(err) } scBal, _, _ := wt.wallet.ConfirmedBalance() // Use a custom builder so that there is no transaction fee. builder := wt.wallet.StartTransaction() err = builder.FundSiacoins(scBal) if err != nil { t.Fatal(err) } output := types.SiacoinOutput{ Value: scBal, UnlockHash: unlockConditions.UnlockHash(), } builder.AddSiacoinOutput(output) tSet, err := builder.Sign(true) if err != nil { t.Fatal(err) } err = wt.tpool.AcceptTransactionSet(tSet) if err != nil { t.Fatal(err) } // Get the transaction into the blockchain without giving a miner payout to // the wallet. err = wt.addBlockNoPayout() if err != nil { t.Fatal(err) } // Get a baseline balance for the wallet. startingSCConfirmed, _, _ := wt.wallet.ConfirmedBalance() startingOutgoing, startingIncoming := wt.wallet.UnconfirmedBalance() if !startingOutgoing.IsZero() { t.Fatal(startingOutgoing) } if !startingIncoming.IsZero() { t.Fatal(startingIncoming) } // Create two builders at the same time, then add money to each. builder1 := wt.wallet.StartTransaction() builder2 := wt.wallet.StartTransaction() // Fund each builder with a siacoin output. funding := types.NewCurrency64(10e3).Mul(types.SiacoinPrecision) err = builder1.FundSiacoins(funding) if err != nil { t.Fatal(err) } // This add should fail, blocking the builder from completion. err = builder2.FundSiacoins(funding) if err != modules.ErrIncompleteTransactions { t.Fatal(err) } // Get a second reading on the wallet's balance. fundedSCConfirmed, _, _ := wt.wallet.ConfirmedBalance() if !startingSCConfirmed.Equals(fundedSCConfirmed) { t.Fatal("confirmed siacoin balance changed when no blocks have been mined", startingSCConfirmed, fundedSCConfirmed) } // Spend the transaction funds on miner fees and the void output. builder1.AddMinerFee(types.NewCurrency64(25).Mul(types.SiacoinPrecision)) // Send the money to the void. output = types.SiacoinOutput{Value: types.NewCurrency64(9975).Mul(types.SiacoinPrecision)} builder1.AddSiacoinOutput(output) // Sign the transaction and submit it. tset1, err := builder1.Sign(true) if err != nil { t.Fatal(err) } err = wt.tpool.AcceptTransactionSet(tset1) if err != nil { t.Fatal(err) } // Mine a block to get the transaction sets into the blockchain. _, err = wt.miner.AddBlock() if err != nil { t.Fatal(err) } } // TestParallelBuilders checks that multiple transaction builders can safely be // opened at the same time, and that they will make valid transactions when // building concurrently, using multiple goroutines to manage the builders. func TestParallelBuilders(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // Mine a few more blocks so that the wallet has lots of outputs to pick // from. outputsDesired := 10 for i := 0; i < outputsDesired; i++ { _, err := wt.miner.AddBlock() if err != nil { t.Fatal(err) } } // Add MaturityDelay blocks with no payout to make tracking the balance // easier. for i := types.BlockHeight(0); i < types.MaturityDelay+1; i++ { err = wt.addBlockNoPayout() if err != nil { t.Fatal(err) } } // Get a baseline balance for the wallet.
startingSCConfirmed, _, _ := wt.wallet.ConfirmedBalance() startingOutgoing, startingIncoming := wt.wallet.UnconfirmedBalance() if !startingOutgoing.IsZero() { t.Fatal(startingOutgoing) } if !startingIncoming.IsZero() { t.Fatal(startingIncoming) } // Create several builders in parallel. var wg sync.WaitGroup funding := types.NewCurrency64(10e3).Mul(types.SiacoinPrecision) for i := 0; i < outputsDesired; i++ { wg.Add(1) go func() { defer wg.Done() // t.Fatal must not be called from a spawned goroutine, so failures // are reported with t.Error followed by a return. // Create the builder and fund the transaction. builder := wt.wallet.StartTransaction() err := builder.FundSiacoins(funding) if err != nil { t.Error(err) return } // Spend the transaction funds on miner fees and the void output. builder.AddMinerFee(types.NewCurrency64(25).Mul(types.SiacoinPrecision)) output := types.SiacoinOutput{Value: types.NewCurrency64(9975).Mul(types.SiacoinPrecision)} builder.AddSiacoinOutput(output) // Sign the transaction and verify that it is valid. tset, err := builder.Sign(true) if err != nil { t.Error(err) return } err = wt.tpool.AcceptTransactionSet(tset) if err != nil { t.Error(err) return } }() } wg.Wait() // Mine a block to get the transaction sets into the blockchain. err = wt.addBlockNoPayout() if err != nil { t.Fatal(err) } // Check the final balance. endingSCConfirmed, _, _ := wt.wallet.ConfirmedBalance() expected := startingSCConfirmed.Sub(funding.Mul(types.NewCurrency64(uint64(outputsDesired)))) if !expected.Equals(endingSCConfirmed) { t.Fatal("did not get the expected ending balance", expected, endingSCConfirmed, startingSCConfirmed) } } Sia-1.3.0/modules/wallet/transactions.go000066400000000000000000000063121313565667000202440ustar00rootroot00000000000000package wallet import ( "errors" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) var ( errOutOfBounds = errors.New("requesting transactions at unknown confirmation heights") ) // AddressTransactions returns all of the wallet transactions associated with a // single unlock hash. func (w *Wallet) AddressTransactions(uh types.UnlockHash) (pts []modules.ProcessedTransaction) { // ensure durability of reported transactions w.mu.Lock() defer w.mu.Unlock() w.syncDB() it := dbProcessedTransactionsIterator(w.dbTx) for it.next() { pt := it.value() relevant := false for _, input := range pt.Inputs { relevant = relevant || input.RelatedAddress == uh } for _, output := range pt.Outputs { relevant = relevant || output.RelatedAddress == uh } if relevant { pts = append(pts, pt) } } return pts } // AddressUnconfirmedTransactions returns all of the unconfirmed wallet // transactions related to a specific address. func (w *Wallet) AddressUnconfirmedTransactions(uh types.UnlockHash) (pts []modules.ProcessedTransaction) { // ensure durability of reported transactions w.mu.Lock() defer w.mu.Unlock() w.syncDB() // Scan the full list of unconfirmed transactions to see if there are any // related transactions. for _, pt := range w.unconfirmedProcessedTransactions { relevant := false for _, input := range pt.Inputs { if input.RelatedAddress == uh { relevant = true break } } for _, output := range pt.Outputs { if output.RelatedAddress == uh { relevant = true break } } if relevant { pts = append(pts, pt) } } return pts } // Transaction returns the transaction with the given id. 'False' is returned // if the transaction does not exist.
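// Illustrative usage sketch (added for clarity, not part of the original
// source): paging through confirmed wallet history with Transactions,
// defined below, in fixed height windows.
//
//	const window = 100
//	for start := types.BlockHeight(0); ; start += window {
//		pts, err := w.Transactions(start, start+window-1)
//		if err != nil {
//			break // errOutOfBounds once start passes the chain height
//		}
//		for _, pt := range pts {
//			fmt.Println(pt.TransactionID)
//		}
//	}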
func (w *Wallet) Transaction(txid types.TransactionID) (pt modules.ProcessedTransaction, found bool) { // ensure durability of reported transaction w.mu.Lock() defer w.mu.Unlock() w.syncDB() it := dbProcessedTransactionsIterator(w.dbTx) for it.next() { pt := it.value() if pt.TransactionID == txid { return pt, true } } return modules.ProcessedTransaction{}, false } // Transactions returns all transactions relevant to the wallet that were // confirmed in the range [startHeight, endHeight]. func (w *Wallet) Transactions(startHeight, endHeight types.BlockHeight) (pts []modules.ProcessedTransaction, err error) { // ensure durability of reported transactions w.mu.Lock() defer w.mu.Unlock() w.syncDB() height, err := dbGetConsensusHeight(w.dbTx) if err != nil { return } else if startHeight > height || startHeight > endHeight { return nil, errOutOfBounds } it := dbProcessedTransactionsIterator(w.dbTx) for it.next() { pt := it.value() if pt.ConfirmationHeight < startHeight { continue } else if pt.ConfirmationHeight > endHeight { // transactions are stored in chronological order, so we can // break as soon as we are above endHeight break } else { pts = append(pts, pt) } } return } // UnconfirmedTransactions returns the set of unconfirmed transactions that are // relevant to the wallet. func (w *Wallet) UnconfirmedTransactions() []modules.ProcessedTransaction { w.mu.RLock() defer w.mu.RUnlock() return w.unconfirmedProcessedTransactions } Sia-1.3.0/modules/wallet/transactions_test.go000066400000000000000000000162651313565667000213110ustar00rootroot00000000000000package wallet import ( "testing" "github.com/NebulousLabs/Sia/types" ) // TestIntegrationTransactions checks that the transaction history is being // correctly recorded and extended. func TestIntegrationTransactions(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // Creating the wallet tester results in blocks being mined until the miner // has money, which means types.MaturityDelay+1 blocks are created, and // each block is going to have a transaction (the miner payout) going to // the wallet. txns, err := wt.wallet.Transactions(0, 100) if err != nil { t.Fatal(err) } if len(txns) != int(types.MaturityDelay+1) { t.Error("unexpected transaction history length") } sentValue := types.NewCurrency64(5000) _, err = wt.wallet.SendSiacoins(sentValue, types.UnlockHash{}) if err != nil { t.Fatal(err) } // No more confirmed transactions have been added. txns, err = wt.wallet.Transactions(0, 100) if err != nil { t.Fatal(err) } if len(txns) != int(types.MaturityDelay+1) { t.Error("unexpected transaction history length") } // Two transactions added to unconfirmed pool - 1 to fund the exact output, // and 1 to hold the exact output. if len(wt.wallet.UnconfirmedTransactions()) != 2 { t.Error("was expecting 2 unconfirmed transactions") } b, _ := wt.miner.FindBlock() err = wt.cs.AcceptBlock(b) if err != nil { t.Fatal(err) } // A confirmed transaction was added for the miner payout, and the 2 // transactions that were previously unconfirmed. txns, err = wt.wallet.Transactions(0, 100) if err != nil { t.Fatal(err) } if len(txns) != int(types.MaturityDelay+2+2) { t.Error("unexpected transaction history length") } // Try getting a partial history for just the previous block.
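// Editorial note, not part of the original source: Transactions takes an
// inclusive [startHeight, endHeight] range and returns errOutOfBounds when
// startHeight exceeds the current consensus height. A hedged sketch of a
// single-block query, assuming an unlocked wallet w and a height h:
//
//	pts, err := w.Transactions(h, h)
//	if err != nil {
//		// handle err (possibly errOutOfBounds)
//	}
//	for _, pt := range pts {
//		fmt.Println(pt.TransactionID, pt.ConfirmationHeight)
//	}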
txns, err = wt.wallet.Transactions(types.MaturityDelay+2, types.MaturityDelay+2) if err != nil { t.Fatal(err) } // The partial should include one transaction for a block, and 2 for the // send that occurred. if len(txns) != 3 { t.Error(len(txns)) } } // TestIntegrationTransaction checks that individually queried transactions // contain the correct values. func TestIntegrationTransaction(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() _, exists := wt.wallet.Transaction(types.TransactionID{}) if exists { t.Error("able to query a nonexistent transaction") } // test sending siacoins sentValue := types.NewCurrency64(5000) sendTxns, err := wt.wallet.SendSiacoins(sentValue, types.UnlockHash{}) if err != nil { t.Fatal(err) } _, err = wt.miner.AddBlock() if err != nil { t.Fatal(err) } // sendTxns[0] is the set-up transaction, sendTxns[1] contains the sentValue output txn, exists := wt.wallet.Transaction(sendTxns[1].ID()) if !exists { t.Fatal("unable to query transaction") } if txn.TransactionID != sendTxns[1].ID() { t.Error("wrong transaction was fetched") } else if len(txn.Inputs) != 1 || len(txn.Outputs) != 2 { t.Error("expected 1 input and 2 outputs, got", len(txn.Inputs), len(txn.Outputs)) } else if !txn.Outputs[0].Value.Equals(sentValue) { t.Errorf("expected first output to equal %v, got %v", sentValue, txn.Outputs[0].Value) } else if exp := txn.Inputs[0].Value.Sub(sentValue); !txn.Outputs[1].Value.Equals(exp) { t.Errorf("expected second output to equal %v, got %v", exp, txn.Outputs[1].Value) } // test sending siafunds err = wt.wallet.LoadSiagKeys(wt.walletMasterKey, []string{"../../types/siag0of1of1.siakey"}) if err != nil { t.Error(err) } sentValue = types.NewCurrency64(12) sendTxns, err = wt.wallet.SendSiafunds(sentValue, types.UnlockHash{}) if err != nil { t.Fatal(err) } _, err = wt.miner.AddBlock() if err != nil { t.Fatal(err) } txn, exists = wt.wallet.Transaction(sendTxns[1].ID()) if !exists { t.Fatal("unable to query transaction") } if len(txn.Inputs) != 1 || len(txn.Outputs) != 3 { t.Error("expected 1 input and 3 outputs, got", len(txn.Inputs), len(txn.Outputs)) } else if !txn.Outputs[1].Value.Equals(sentValue) { t.Errorf("expected second output to equal %v, got %v", sentValue, txn.Outputs[1].Value) } else if exp := txn.Inputs[0].Value.Sub(sentValue); !txn.Outputs[2].Value.Equals(exp) { t.Errorf("expected third output to equal %v, got %v", exp, txn.Outputs[2].Value) } } // TestIntegrationAddressTransactions checks grabbing the history for a single // address. func TestIntegrationAddressTransactions(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // Grab an address and send it money. uc, err := wt.wallet.NextAddress() if err != nil { t.Fatal(err) } addr := uc.UnlockHash() _, err = wt.wallet.SendSiacoins(types.NewCurrency64(5005), addr) if err != nil { t.Fatal(err) } // Check the confirmed balance of the address.
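// Editorial note, not part of the original source: AddressTransactions scans
// the confirmed on-disk history, while AddressUnconfirmedTransactions scans
// the in-memory unconfirmed set, so a payment should move from the second
// list to the first once a block confirms it. Illustrative sketch, assuming a
// wallet w and an address addr:
//
//	confirmed := w.AddressTransactions(addr)
//	pending := w.AddressUnconfirmedTransactions(addr)
//	fmt.Println(len(confirmed), "confirmed,", len(pending), "pending")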
addrHist := wt.wallet.AddressTransactions(addr) if len(addrHist) != 0 { t.Error("address should be empty - no confirmed transactions") } if len(wt.wallet.AddressUnconfirmedTransactions(addr)) == 0 { t.Error("address's unconfirmed transactions should not be empty") } b, _ := wt.miner.FindBlock() err = wt.cs.AcceptBlock(b) if err != nil { t.Fatal(err) } addrHist = wt.wallet.AddressTransactions(addr) if len(addrHist) == 0 { t.Error("address history should have some transactions") } if len(wt.wallet.AddressUnconfirmedTransactions(addr)) != 0 { t.Error("address's unconfirmed transactions should be empty") } } // TestTransactionInputOutputIDs verifies that ProcessedTransaction's inputs // and outputs have a valid ID field. func TestTransactionInputOutputIDs(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // mine a few blocks to create miner payouts for i := 0; i < 5; i++ { _, err = wt.miner.AddBlock() if err != nil { t.Fatal(err) } } // create some siacoin outputs uc, err := wt.wallet.NextAddress() if err != nil { t.Fatal(err) } addr := uc.UnlockHash() _, err = wt.wallet.SendSiacoins(types.NewCurrency64(5005), addr) if err != nil { t.Fatal(err) } _, err = wt.miner.AddBlock() if err != nil { t.Fatal(err) } // verify the miner payouts and siacoin outputs/inputs have correct IDs txns, err := wt.wallet.Transactions(0, 1000) if err != nil { t.Fatal(err) } outputIDs := make(map[types.OutputID]struct{}) for _, txn := range txns { block, _ := wt.cs.BlockAtHeight(txn.ConfirmationHeight) for i, output := range txn.Outputs { outputIDs[output.ID] = struct{}{} if output.FundType == types.SpecifierMinerPayout { if output.ID != types.OutputID(block.MinerPayoutID(uint64(i))) { t.Fatal("miner payout had incorrect output ID") } } if output.FundType == types.SpecifierSiacoinOutput { if output.ID != types.OutputID(txn.Transaction.SiacoinOutputID(uint64(i))) { t.Fatal("siacoin output had incorrect output ID") } } } for _, input := range txn.Inputs { if _, exists := outputIDs[input.ParentID]; !exists { t.Fatal("input has ParentID that points to a nonexistent output:", input.ParentID) } } } } Sia-1.3.0/modules/wallet/unseeded.go000066400000000000000000000200131313565667000173240ustar00rootroot00000000000000package wallet import ( "errors" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) const ( // The header for all siag files. Do not change. Because siag was created // early in development, compatibility with siag requires manually handling // the headers and version instead of using the persist package. SiagFileHeader = "siag" SiagFileExtension = ".siakey" SiagFileVersion = "1.0" ) var ( ErrInconsistentKeys = errors.New("keyfiles provided that are for different addresses") ErrInsufficientKeys = errors.New("not enough keys provided to spend the siafunds") ErrNoKeyfile = errors.New("no keyfile has been presented") ErrUnknownHeader = errors.New("file contains the wrong header") ErrUnknownVersion = errors.New("file has an unknown version number") errAllDuplicates = errors.New("old wallet has no new seeds") errDuplicateSpendableKey = errors.New("key has already been loaded into the wallet") ) // A siagKeyPair is the struct representation of the bytes that get saved to // disk by siag when a new keyfile is created.
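// Editorial note, not part of the original source: siag keyfiles are plain
// Sia-encoded structs on disk, so one can be inspected directly with the same
// encoding package used below. A hedged sketch, assuming a keyfile path
// "mykey.siakey" (a hypothetical name):
//
//	var skp siagKeyPair
//	if err := encoding.ReadFile("mykey.siakey", &skp); err != nil {
//		// handle err
//	}
//	fmt.Println(skp.Header, skp.Version, skp.Index)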
type siagKeyPair struct { Header string Version string Index int // should be uint64 - too late now SecretKey crypto.SecretKey UnlockConditions types.UnlockConditions } // savedKey033x is the persist structure that was used to save and load private // keys in versions v0.3.3.x for siad. type savedKey033x struct { SecretKey crypto.SecretKey UnlockConditions types.UnlockConditions Visible bool } // decryptSpendableKeyFile decrypts a spendableKeyFile, returning a // spendableKey. func decryptSpendableKeyFile(masterKey crypto.TwofishKey, uk spendableKeyFile) (sk spendableKey, err error) { // Verify that the decryption key is correct. decryptionKey := uidEncryptionKey(masterKey, uk.UID) err = verifyEncryption(decryptionKey, uk.EncryptionVerification) if err != nil { return } // Decrypt the spendable key and add it to the wallet. encodedKey, err := decryptionKey.DecryptBytes(uk.SpendableKey) if err != nil { return } err = encoding.Unmarshal(encodedKey, &sk) return } // integrateSpendableKey loads a spendableKey into the wallet. func (w *Wallet) integrateSpendableKey(masterKey crypto.TwofishKey, sk spendableKey) { w.keys[sk.UnlockConditions.UnlockHash()] = sk } // loadSpendableKey loads a spendable key into the wallet database. func (w *Wallet) loadSpendableKey(masterKey crypto.TwofishKey, sk spendableKey) error { // Duplication is detected by looking at the set of unlock conditions. If // the wallet is locked, correct deduplication is uncertain. if !w.unlocked { return modules.ErrLockedWallet } // Check for duplicates. _, exists := w.keys[sk.UnlockConditions.UnlockHash()] if exists { return errDuplicateSpendableKey } // TODO: Check that the key is actually spendable. // Create a UID and encryption verification. var skf spendableKeyFile fastrand.Read(skf.UID[:]) encryptionKey := uidEncryptionKey(masterKey, skf.UID) skf.EncryptionVerification = encryptionKey.EncryptBytes(verificationPlaintext) // Encrypt and save the key. skf.SpendableKey = encryptionKey.EncryptBytes(encoding.Marshal(sk)) err := checkMasterKey(w.dbTx, masterKey) if err != nil { return err } var current []spendableKeyFile err = encoding.Unmarshal(w.dbTx.Bucket(bucketWallet).Get(keySpendableKeyFiles), &current) if err != nil { return err } return w.dbTx.Bucket(bucketWallet).Put(keySpendableKeyFiles, encoding.Marshal(append(current, skf))) // w.keys[sk.UnlockConditions.UnlockHash()] = sk -> aids with duplicate // detection, but causes db inconsistency. Rescanning is probably the // solution. } // loadSiagKeys loads a set of siag keyfiles into the wallet, so that the // wallet may spend the siafunds. func (w *Wallet) loadSiagKeys(masterKey crypto.TwofishKey, keyfiles []string) error { // Load the keyfiles from disk. if len(keyfiles) < 1 { return ErrNoKeyfile } skps := make([]siagKeyPair, len(keyfiles)) for i, keyfile := range keyfiles { err := encoding.ReadFile(keyfile, &skps[i]) if err != nil { return err } if skps[i].Header != SiagFileHeader { return ErrUnknownHeader } if skps[i].Version != SiagFileVersion { return ErrUnknownVersion } } // Check that all of the loaded files have the same address, and that there // are enough to create the transaction. baseUnlockHash := skps[0].UnlockConditions.UnlockHash() for _, skp := range skps { if skp.UnlockConditions.UnlockHash() != baseUnlockHash { return ErrInconsistentKeys } } if uint64(len(skps)) < skps[0].UnlockConditions.SignaturesRequired { return ErrInsufficientKeys } // Drop all unneeded keys.
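// Editorial note, not part of the original source: only SignaturesRequired of
// the supplied keys are needed to spend the siafunds, so the surplus keyfiles
// are sliced away. For example, for a 2-of-3 address (SignaturesRequired ==
// 2) loaded with all three keyfiles, the slice below keeps skps[0] and
// skps[1] and drops skps[2].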
skps = skps[0:skps[0].UnlockConditions.SignaturesRequired] // Merge the keys into a single spendableKey and save it to the wallet. var sk spendableKey sk.UnlockConditions = skps[0].UnlockConditions for _, skp := range skps { sk.SecretKeys = append(sk.SecretKeys, skp.SecretKey) } err := w.loadSpendableKey(masterKey, sk) if err != nil { return err } w.integrateSpendableKey(masterKey, sk) return nil } // LoadSiagKeys loads a set of siag-generated keys into the wallet. func (w *Wallet) LoadSiagKeys(masterKey crypto.TwofishKey, keyfiles []string) error { if err := w.tg.Add(); err != nil { return err } defer w.tg.Done() // load the keys and reset the consensus change ID and height in preparation for rescan err := func() error { w.mu.Lock() defer w.mu.Unlock() err := w.loadSiagKeys(masterKey, keyfiles) if err != nil { return err } if err = w.dbTx.DeleteBucket(bucketProcessedTransactions); err != nil { return err } if _, err = w.dbTx.CreateBucket(bucketProcessedTransactions); err != nil { return err } w.unconfirmedProcessedTransactions = nil err = dbPutConsensusChangeID(w.dbTx, modules.ConsensusChangeBeginning) if err != nil { return err } return dbPutConsensusHeight(w.dbTx, 0) }() if err != nil { return err } // rescan the blockchain w.cs.Unsubscribe(w) w.tpool.Unsubscribe(w) done := make(chan struct{}) go w.rescanMessage(done) defer close(done) err = w.cs.ConsensusSetSubscribe(w, modules.ConsensusChangeBeginning) if err != nil { return err } w.tpool.TransactionPoolSubscribe(w) return nil } // Load033xWallet loads a v0.3.3.x wallet as an unseeded key, such that the // funds become spendable to the current wallet. func (w *Wallet) Load033xWallet(masterKey crypto.TwofishKey, filepath033x string) error { if err := w.tg.Add(); err != nil { return err } defer w.tg.Done() // load the keys and reset the consensus change ID and height in preparation for rescan err := func() error { w.mu.Lock() defer w.mu.Unlock() var savedKeys []savedKey033x err := encoding.ReadFile(filepath033x, &savedKeys) if err != nil { return err } var seedsLoaded int for _, savedKey := range savedKeys { spendKey := spendableKey{ UnlockConditions: savedKey.UnlockConditions, SecretKeys: []crypto.SecretKey{savedKey.SecretKey}, } err = w.loadSpendableKey(masterKey, spendKey) if err != nil && err != errDuplicateSpendableKey { return err } if err == nil { seedsLoaded++ } w.integrateSpendableKey(masterKey, spendKey) } if seedsLoaded == 0 { return errAllDuplicates } if err = w.dbTx.DeleteBucket(bucketProcessedTransactions); err != nil { return err } if _, err = w.dbTx.CreateBucket(bucketProcessedTransactions); err != nil { return err } w.unconfirmedProcessedTransactions = nil err = dbPutConsensusChangeID(w.dbTx, modules.ConsensusChangeBeginning) if err != nil { return err } return dbPutConsensusHeight(w.dbTx, 0) }() if err != nil { return err } // rescan the blockchain w.cs.Unsubscribe(w) w.tpool.Unsubscribe(w) done := make(chan struct{}) go w.rescanMessage(done) defer close(done) err = w.cs.ConsensusSetSubscribe(w, modules.ConsensusChangeBeginning) if err != nil { return err } w.tpool.TransactionPoolSubscribe(w) return nil } Sia-1.3.0/modules/wallet/unseeded_test.go000066400000000000000000000042601313565667000203670ustar00rootroot00000000000000package wallet import ( "testing" "github.com/NebulousLabs/Sia/types" ) // TestIntegrationLoad1of1Siag loads a 1 of 1 unseeded key generated by siag // and then tries to spend the siafunds contained within. The key is taken from // the testing keys. 
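// Editorial note, not part of the original source: a hedged sketch of the call
// under test, assuming an unlocked wallet w, its master key mk, and keyfiles
// for the same 2-of-3 address (hypothetical filenames):
//
//	err := w.LoadSiagKeys(mk, []string{"key0.siakey", "key1.siakey"})
//	if err != nil {
//		// handle err (e.g. ErrInsufficientKeys or ErrInconsistentKeys)
//	}
//	// the call resets the processed-transaction history and triggers a
//	// blockchain rescan so the siafunds become visible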
func TestIntegrationLoad1of1Siag(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // Load the key into the wallet. err = wt.wallet.LoadSiagKeys(wt.walletMasterKey, []string{"../../types/siag0of1of1.siakey"}) if err != nil { t.Error(err) } _, siafundBal, _ := wt.wallet.ConfirmedBalance() if !siafundBal.Equals64(2000) { t.Error("expecting a siafund balance of 2000 from the 1of1 key") } // Send some siafunds to the void. _, err = wt.wallet.SendSiafunds(types.NewCurrency64(12), types.UnlockHash{}) if err != nil { t.Fatal(err) } _, err = wt.miner.AddBlock() if err != nil { t.Fatal(err) } _, siafundBal, _ = wt.wallet.ConfirmedBalance() if !siafundBal.Equals64(1988) { t.Error("expecting balance of 1988 after sending siafunds to the void") } } // TestIntegrationLoad2of3Siag loads a 2 of 3 unseeded key generated by siag // and then tries to spend the siafunds contained within. The key is taken from // the testing keys. func TestIntegrationLoad2of3Siag(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // Load the key into the wallet. err = wt.wallet.LoadSiagKeys(wt.walletMasterKey, []string{"../../types/siag0of2of3.siakey", "../../types/siag1of2of3.siakey"}) if err != nil { t.Error(err) } _, siafundBal, _ := wt.wallet.ConfirmedBalance() if !siafundBal.Equals64(7000) { t.Error("expecting a siafund balance of 7000 from the 2of3 key") } // Send some siafunds to the void. _, err = wt.wallet.SendSiafunds(types.NewCurrency64(12), types.UnlockHash{}) if err != nil { t.Fatal(err) } _, err = wt.miner.AddBlock() if err != nil { t.Fatal(err) } _, siafundBal, _ = wt.wallet.ConfirmedBalance() if !siafundBal.Equals64(6988) { t.Error("expecting balance of 6988 after sending siafunds to the void") } } Sia-1.3.0/modules/wallet/update.go000066400000000000000000000431341313565667000170210ustar00rootroot00000000000000package wallet import ( "fmt" "math" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/bolt" ) // threadedResetSubscriptions unsubscribes the wallet from the consensus set and transaction pool // and subscribes again. func (w *Wallet) threadedResetSubscriptions() error { if !w.scanLock.TryLock() { return errScanInProgress } defer w.scanLock.Unlock() w.cs.Unsubscribe(w) w.tpool.Unsubscribe(w) err := w.cs.ConsensusSetSubscribe(w, modules.ConsensusChangeBeginning) if err != nil { return err } w.tpool.TransactionPoolSubscribe(w) return nil } // advanceSeedLookahead generates all keys from the current primary seed progress up to index // and adds them to the set of spendable keys. 
Therefore the new primary seed progress will // be index+1 and new lookahead keys will be generated starting from index+1. // Returns true if a blockchain rescan is required. func (w *Wallet) advanceSeedLookahead(index uint64) (bool, error) { progress, err := dbGetPrimarySeedProgress(w.dbTx) if err != nil { return false, err } newProgress := index + 1 // Add spendable keys and remove them from lookahead spendableKeys := generateKeys(w.primarySeed, progress, newProgress-progress) for _, key := range spendableKeys { w.keys[key.UnlockConditions.UnlockHash()] = key delete(w.lookahead, key.UnlockConditions.UnlockHash()) } // Update the primarySeedProgress err = dbPutPrimarySeedProgress(w.dbTx, newProgress) if err != nil { return false, err } // Regenerate lookahead w.regenerateLookahead(newProgress) // If more than lookaheadRescanThreshold keys were generated // also initialize a rescan just to be safe. if uint64(len(spendableKeys)) > lookaheadRescanThreshold { return true, nil } return false, nil } // isWalletAddress is a helper function that checks if an UnlockHash is // derived from one of the wallet's spendable keys. func (w *Wallet) isWalletAddress(uh types.UnlockHash) bool { _, exists := w.keys[uh] return exists } // updateLookahead uses a consensus change to update the seed progress if one of the outputs // contains an unlock hash of the lookahead set. Returns true if a blockchain rescan is required. func (w *Wallet) updateLookahead(tx *bolt.Tx, cc modules.ConsensusChange) (bool, error) { var largestIndex uint64 for _, diff := range cc.SiacoinOutputDiffs { if index, ok := w.lookahead[diff.SiacoinOutput.UnlockHash]; ok { if index > largestIndex { largestIndex = index } } } for _, diff := range cc.SiafundOutputDiffs { if index, ok := w.lookahead[diff.SiafundOutput.UnlockHash]; ok { if index > largestIndex { largestIndex = index } } } if largestIndex > 0 { return w.advanceSeedLookahead(largestIndex) } return false, nil } // updateConfirmedSet uses a consensus change to update the confirmed set of // outputs as understood by the wallet. func (w *Wallet) updateConfirmedSet(tx *bolt.Tx, cc modules.ConsensusChange) error { for _, diff := range cc.SiacoinOutputDiffs { // Verify that the diff is relevant to the wallet. if !w.isWalletAddress(diff.SiacoinOutput.UnlockHash) { continue } var err error if diff.Direction == modules.DiffApply { w.log.Println("Wallet has gained a spendable siacoin output:", diff.ID, "::", diff.SiacoinOutput.Value.HumanString()) err = dbPutSiacoinOutput(tx, diff.ID, diff.SiacoinOutput) } else { w.log.Println("Wallet has lost a spendable siacoin output:", diff.ID, "::", diff.SiacoinOutput.Value.HumanString()) err = dbDeleteSiacoinOutput(tx, diff.ID) } if err != nil { w.log.Severe("Could not update siacoin output:", err) } } for _, diff := range cc.SiafundOutputDiffs { // Verify that the diff is relevant to the wallet.
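// Editorial note, not part of the original source: relevance here means the
// output pays an address in w.keys. Lookahead addresses do not need a
// separate check because ProcessConsensusChange runs updateLookahead before
// updateConfirmedSet, and advanceSeedLookahead promotes any hit lookahead
// keys into w.keys first.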
if !w.isWalletAddress(diff.SiafundOutput.UnlockHash) { continue } var err error if diff.Direction == modules.DiffApply { w.log.Println("Wallet has gained a spendable siafund output:", diff.ID, "::", diff.SiafundOutput.Value) err = dbPutSiafundOutput(tx, diff.ID, diff.SiafundOutput) } else { w.log.Println("Wallet has lost a spendable siafund output:", diff.ID, "::", diff.SiafundOutput.Value) err = dbDeleteSiafundOutput(tx, diff.ID) } if err != nil { w.log.Severe("Could not update siafund output:", err) } } for _, diff := range cc.SiafundPoolDiffs { var err error if diff.Direction == modules.DiffApply { err = dbPutSiafundPool(tx, diff.Adjusted) } else { err = dbPutSiafundPool(tx, diff.Previous) } if err != nil { w.log.Severe("Could not update siafund pool:", err) } } return nil } // revertHistory reverts any transaction history that was destroyed by reverted // blocks in the consensus change. func (w *Wallet) revertHistory(tx *bolt.Tx, reverted []types.Block) error { for _, block := range reverted { // Remove any transactions that have been reverted. for i := len(block.Transactions) - 1; i >= 0; i-- { // If the transaction is relevant to the wallet, it will be the // most recent transaction in bucketProcessedTransactions. txid := block.Transactions[i].ID() pt, err := dbGetLastProcessedTransaction(tx) if err != nil { break // bucket is empty } if txid == pt.TransactionID { w.log.Println("A wallet transaction has been reverted due to a reorg:", txid) if err := dbDeleteLastProcessedTransaction(tx); err != nil { w.log.Severe("Could not revert transaction:", err) } } } // Remove the miner payout transaction if applicable. for i, mp := range block.MinerPayouts { if w.isWalletAddress(mp.UnlockHash) { w.log.Println("Miner payout has been reverted due to a reorg:", block.MinerPayoutID(uint64(i)), "::", mp.Value.HumanString()) if err := dbDeleteLastProcessedTransaction(tx); err != nil { w.log.Severe("Could not revert transaction:", err) } break // there will only ever be one miner transaction } } // decrement the consensus height if block.ID() != types.GenesisID { consensusHeight, err := dbGetConsensusHeight(tx) if err != nil { return err } err = dbPutConsensusHeight(tx, consensusHeight-1) if err != nil { return err } } } return nil } // applyHistory applies any transaction history that was introduced by the // applied blocks. func (w *Wallet) applyHistory(tx *bolt.Tx, cc modules.ConsensusChange) error { // compute spent outputs spentSiacoinOutputs := make(map[types.SiacoinOutputID]types.SiacoinOutput) spentSiafundOutputs := make(map[types.SiafundOutputID]types.SiafundOutput) for _, diff := range cc.SiacoinOutputDiffs { if diff.Direction == modules.DiffRevert { // revert means spent spentSiacoinOutputs[diff.ID] = diff.SiacoinOutput } } for _, diff := range cc.SiafundOutputDiffs { if diff.Direction == modules.DiffRevert { // revert means spent spentSiafundOutputs[diff.ID] = diff.SiafundOutput } } for _, block := range cc.AppliedBlocks { consensusHeight, err := dbGetConsensusHeight(tx) if err != nil { return err } // increment the consensus height if block.ID() != types.GenesisID { consensusHeight++ err = dbPutConsensusHeight(tx, consensusHeight) if err != nil { return err } } relevant := false for _, mp := range block.MinerPayouts { relevant = relevant || w.isWalletAddress(mp.UnlockHash) } if relevant { w.log.Println("Wallet has received new miner payouts:", block.ID()) // Apply the miner payout transaction if applicable. 
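// Editorial note, not part of the original source: miner payouts are not real
// transactions, so the wallet synthesizes a ProcessedTransaction whose ID is
// the block ID reinterpreted as a transaction ID. That is why a payout can be
// looked up as, for a block b:
//
//	pt, ok := w.Transaction(types.TransactionID(b.ID()))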
minerPT := modules.ProcessedTransaction{ Transaction: types.Transaction{}, TransactionID: types.TransactionID(block.ID()), ConfirmationHeight: consensusHeight, ConfirmationTimestamp: block.Timestamp, } for i, mp := range block.MinerPayouts { w.log.Println("\tminer payout:", block.MinerPayoutID(uint64(i)), "::", mp.Value.HumanString()) minerPT.Outputs = append(minerPT.Outputs, modules.ProcessedOutput{ ID: types.OutputID(block.MinerPayoutID(uint64(i))), FundType: types.SpecifierMinerPayout, MaturityHeight: consensusHeight + types.MaturityDelay, WalletAddress: w.isWalletAddress(mp.UnlockHash), RelatedAddress: mp.UnlockHash, Value: mp.Value, }) } err := dbAppendProcessedTransaction(tx, minerPT) if err != nil { return fmt.Errorf("could not put processed miner transaction: %v", err) } } for _, txn := range block.Transactions { // determine if transaction is relevant relevant := false for _, sci := range txn.SiacoinInputs { relevant = relevant || w.isWalletAddress(sci.UnlockConditions.UnlockHash()) } for _, sco := range txn.SiacoinOutputs { relevant = relevant || w.isWalletAddress(sco.UnlockHash) } for _, sfi := range txn.SiafundInputs { relevant = relevant || w.isWalletAddress(sfi.UnlockConditions.UnlockHash()) } for _, sfo := range txn.SiafundOutputs { relevant = relevant || w.isWalletAddress(sfo.UnlockHash) } // only create a ProcessedTransaction if txn is relevant if !relevant { continue } w.log.Println("A transaction has been confirmed on the blockchain:", txn.ID()) pt := modules.ProcessedTransaction{ Transaction: txn, TransactionID: txn.ID(), ConfirmationHeight: consensusHeight, ConfirmationTimestamp: block.Timestamp, } for _, sci := range txn.SiacoinInputs { pi := modules.ProcessedInput{ ParentID: types.OutputID(sci.ParentID), FundType: types.SpecifierSiacoinInput, WalletAddress: w.isWalletAddress(sci.UnlockConditions.UnlockHash()), RelatedAddress: sci.UnlockConditions.UnlockHash(), Value: spentSiacoinOutputs[sci.ParentID].Value, } pt.Inputs = append(pt.Inputs, pi) // Log any wallet-relevant inputs. if pi.WalletAddress { w.log.Println("\tSiacoin Input:", pi.ParentID, "::", pi.Value.HumanString()) } } for i, sco := range txn.SiacoinOutputs { po := modules.ProcessedOutput{ ID: types.OutputID(txn.SiacoinOutputID(uint64(i))), FundType: types.SpecifierSiacoinOutput, MaturityHeight: consensusHeight, WalletAddress: w.isWalletAddress(sco.UnlockHash), RelatedAddress: sco.UnlockHash, Value: sco.Value, } pt.Outputs = append(pt.Outputs, po) // Log any wallet-relevant outputs. if po.WalletAddress { w.log.Println("\tSiacoin Output:", po.ID, "::", po.Value.HumanString()) } } for _, sfi := range txn.SiafundInputs { pi := modules.ProcessedInput{ ParentID: types.OutputID(sfi.ParentID), FundType: types.SpecifierSiafundInput, WalletAddress: w.isWalletAddress(sfi.UnlockConditions.UnlockHash()), RelatedAddress: sfi.UnlockConditions.UnlockHash(), Value: spentSiafundOutputs[sfi.ParentID].Value, } pt.Inputs = append(pt.Inputs, pi) // Log any wallet-relevant inputs. 
if pi.WalletAddress { w.log.Println("\tSiafund Input:", pi.ParentID, "::", pi.Value.HumanString()) } siafundPool, err := dbGetSiafundPool(w.dbTx) if err != nil { return fmt.Errorf("could not get siafund pool: %v", err) } sfo := spentSiafundOutputs[sfi.ParentID] po := modules.ProcessedOutput{ ID: types.OutputID(sfi.ParentID), FundType: types.SpecifierClaimOutput, MaturityHeight: consensusHeight + types.MaturityDelay, WalletAddress: w.isWalletAddress(sfi.UnlockConditions.UnlockHash()), RelatedAddress: sfi.ClaimUnlockHash, Value: siafundPool.Sub(sfo.ClaimStart).Mul(sfo.Value), } pt.Outputs = append(pt.Outputs, po) // Log any wallet-relevant outputs. if po.WalletAddress { w.log.Println("\tClaim Output:", po.ID, "::", po.Value.HumanString()) } } for i, sfo := range txn.SiafundOutputs { po := modules.ProcessedOutput{ ID: types.OutputID(txn.SiafundOutputID(uint64(i))), FundType: types.SpecifierSiafundOutput, MaturityHeight: consensusHeight, WalletAddress: w.isWalletAddress(sfo.UnlockHash), RelatedAddress: sfo.UnlockHash, Value: sfo.Value, } pt.Outputs = append(pt.Outputs, po) // Log any wallet-relevant outputs. if po.WalletAddress { w.log.Println("\tSiafund Output:", po.ID, "::", po.Value.HumanString()) } } for _, fee := range txn.MinerFees { pt.Outputs = append(pt.Outputs, modules.ProcessedOutput{ FundType: types.SpecifierMinerFee, Value: fee, }) } err := dbAppendProcessedTransaction(tx, pt) if err != nil { return fmt.Errorf("could not put processed transaction: %v", err) } } } return nil } // ProcessConsensusChange parses a consensus change to update the set of // confirmed outputs known to the wallet. func (w *Wallet) ProcessConsensusChange(cc modules.ConsensusChange) { w.mu.Lock() defer w.mu.Unlock() if needRescan, err := w.updateLookahead(w.dbTx, cc); err != nil { w.log.Println("ERROR: failed to update lookahead:", err) } else if needRescan { go w.threadedResetSubscriptions() } if err := w.updateConfirmedSet(w.dbTx, cc); err != nil { w.log.Println("ERROR: failed to update confirmed set:", err) } if err := w.revertHistory(w.dbTx, cc.RevertedBlocks); err != nil { w.log.Println("ERROR: failed to revert consensus change:", err) } if err := w.applyHistory(w.dbTx, cc); err != nil { w.log.Println("ERROR: failed to apply consensus change:", err) } if err := dbPutConsensusChangeID(w.dbTx, cc.ID); err != nil { w.log.Println("ERROR: failed to update consensus change ID:", err) } if cc.Synced { go w.threadedDefragWallet() } } // ReceiveUpdatedUnconfirmedTransactions updates the wallet's unconfirmed // transaction set. func (w *Wallet) ReceiveUpdatedUnconfirmedTransactions(diff *modules.TransactionPoolDiff) { w.mu.Lock() defer w.mu.Unlock() // Do the pruning first. If there are any pruned transactions, we will need // to re-allocate the whole processed transactions array. droppedTransactions := make(map[types.TransactionID]struct{}) for i := range diff.RevertedTransactions { txids := w.unconfirmedSets[diff.RevertedTransactions[i]] for i := range txids { droppedTransactions[txids[i]] = struct{}{} } delete(w.unconfirmedSets, diff.RevertedTransactions[i]) } // Skip the reallocation if we can, otherwise reallocate the // unconfirmedProcessedTransactions to no longer have the dropped // transactions. if len(droppedTransactions) != 0 { // Capacity can't be reduced, because we have no way of knowing if the // dropped transactions are relevant to the wallet or not, and some will // not be relevant to the wallet, meaning they don't have a counterpart // in w.unconfirmedProcessedTransactions. 
newUPT := make([]modules.ProcessedTransaction, 0, len(w.unconfirmedProcessedTransactions)) for _, txn := range w.unconfirmedProcessedTransactions { _, exists := droppedTransactions[txn.TransactionID] if !exists { // Transaction was not dropped, add it to the new unconfirmed // transactions. newUPT = append(newUPT, txn) } } // Set the unconfirmed processed transactions to the pruned set. w.unconfirmedProcessedTransactions = newUPT } // Scroll through all of the diffs and add any new transactions. for _, unconfirmedTxnSet := range diff.AppliedTransactions { // Mark all of the transactions that appeared in this set. // // TODO: Technically only necessary to mark the ones that are relevant // to the wallet, but overhead should be low. w.unconfirmedSets[unconfirmedTxnSet.ID] = unconfirmedTxnSet.IDs // Get the values for the spent outputs. spentSiacoinOutputs := make(map[types.SiacoinOutputID]types.SiacoinOutput) for _, scod := range unconfirmedTxnSet.Change.SiacoinOutputDiffs { // Only need to grab the reverted ones, because only reverted ones // have the possibility of having been spent. if scod.Direction == modules.DiffRevert { spentSiacoinOutputs[scod.ID] = scod.SiacoinOutput } } // Add each transaction to our set of unconfirmed transactions. for i, txn := range unconfirmedTxnSet.Transactions { // determine whether transaction is relevant to the wallet relevant := false for _, sci := range txn.SiacoinInputs { relevant = relevant || w.isWalletAddress(sci.UnlockConditions.UnlockHash()) } for _, sco := range txn.SiacoinOutputs { relevant = relevant || w.isWalletAddress(sco.UnlockHash) } // only create a ProcessedTransaction if txn is relevant if !relevant { continue } pt := modules.ProcessedTransaction{ Transaction: txn, TransactionID: unconfirmedTxnSet.IDs[i], ConfirmationHeight: types.BlockHeight(math.MaxUint64), ConfirmationTimestamp: types.Timestamp(math.MaxUint64), } for _, sci := range txn.SiacoinInputs { pt.Inputs = append(pt.Inputs, modules.ProcessedInput{ ParentID: types.OutputID(sci.ParentID), FundType: types.SpecifierSiacoinInput, WalletAddress: w.isWalletAddress(sci.UnlockConditions.UnlockHash()), RelatedAddress: sci.UnlockConditions.UnlockHash(), Value: spentSiacoinOutputs[sci.ParentID].Value, }) } for i, sco := range txn.SiacoinOutputs { pt.Outputs = append(pt.Outputs, modules.ProcessedOutput{ ID: types.OutputID(txn.SiacoinOutputID(uint64(i))), FundType: types.SpecifierSiacoinOutput, MaturityHeight: types.BlockHeight(math.MaxUint64), WalletAddress: w.isWalletAddress(sco.UnlockHash), RelatedAddress: sco.UnlockHash, Value: sco.Value, }) } for _, fee := range txn.MinerFees { pt.Outputs = append(pt.Outputs, modules.ProcessedOutput{ FundType: types.SpecifierMinerFee, Value: fee, }) } w.unconfirmedProcessedTransactions = append(w.unconfirmedProcessedTransactions, pt) } } } Sia-1.3.0/modules/wallet/update_test.go000066400000000000000000000036261313565667000200600ustar00rootroot00000000000000package wallet import ( "testing" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) // TestUpdate tests that the wallet processes consensus updates properly.
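// Editorial note, not part of the original source: a hedged sketch of how a
// reorg can be simulated directly against the subscriber interface, assuming
// a wallet w and a previously applied block b:
//
//	w.ProcessConsensusChange(modules.ConsensusChange{
//		RevertedBlocks: []types.Block{b},
//	})
//	// any ProcessedTransaction confirmed in b should now be gone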
func TestUpdate(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // mine a block and add it to the consensus set b, err := wt.miner.FindBlock() if err != nil { t.Fatal(err) } if err := wt.cs.AcceptBlock(b); err != nil { t.Fatal(err) } // since the miner is mining into a wallet address, the wallet should have // added a new transaction _, ok := wt.wallet.Transaction(types.TransactionID(b.ID())) if !ok { t.Fatal("no record of miner transaction") } // revert the block wt.wallet.ProcessConsensusChange(modules.ConsensusChange{ RevertedBlocks: []types.Block{b}, }) // transaction should no longer be present _, ok = wt.wallet.Transaction(types.TransactionID(b.ID())) if ok { t.Fatal("miner transaction was not removed after block was reverted") } // create a transaction addr, _ := wt.wallet.NextAddress() txnSet, err := wt.wallet.SendSiacoins(types.SiacoinPrecision.Mul64(10), addr.UnlockHash()) if err != nil { t.Fatal(err) } // mine blocks until transaction is confirmed, while building up a cc that will revert all the blocks we add var revertCC modules.ConsensusChange for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ { b, _ := wt.miner.FindBlock() if err := wt.cs.AcceptBlock(b); err != nil { t.Fatal(err) } revertCC.RevertedBlocks = append([]types.Block{b}, revertCC.RevertedBlocks...) } // transaction should be present _, ok = wt.wallet.Transaction(txnSet[0].ID()) if !ok { t.Fatal("no record of transaction") } // revert all the blocks wt.wallet.ProcessConsensusChange(revertCC) _, ok = wt.wallet.Transaction(txnSet[0].ID()) if ok { t.Fatal("transaction was not removed") } } Sia-1.3.0/modules/wallet/wallet.go000066400000000000000000000145411313565667000170270ustar00rootroot00000000000000package wallet // TODO: Theoretically, the transaction builder in this wallet supports // multisig, but there are no automated tests to verify that. import ( "bytes" "errors" "fmt" "sort" "sync" "github.com/NebulousLabs/bolt" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" siasync "github.com/NebulousLabs/Sia/sync" "github.com/NebulousLabs/Sia/types" ) const ( // RespendTimeout records the number of blocks that the wallet will wait // before spending an output that has been spent in the past. If the // transaction spending the output has not made it to the transaction pool // after the limit, the assumption is that it never will. RespendTimeout = 40 ) var ( errNilConsensusSet = errors.New("wallet cannot initialize with a nil consensus set") errNilTpool = errors.New("wallet cannot initialize with a nil transaction pool") ) // spendableKey is a set of secret keys plus the corresponding unlock // conditions. The public key can be derived from the secret key and then // matched to the corresponding public keys in the unlock conditions. All // addresses that are to be used in 'FundSiacoins' or 'FundSiafunds' in the // transaction builder must conform to this form of spendable key. type spendableKey struct { UnlockConditions types.UnlockConditions SecretKeys []crypto.SecretKey } // Wallet is an object that tracks balances, creates keys and addresses, and // manages building and sending transactions. type Wallet struct { // encrypted indicates whether the wallet has been encrypted (i.e. // initialized). unlocked indicates whether the wallet is currently // storing secret keys in memory.
subscribed indicates whether the wallet // has subscribed to the consensus set yet - the wallet is unable to // subscribe to the consensus set until it has been unlocked for the first // time. The primary seed is used to generate new addresses for the // wallet. encrypted bool unlocked bool subscribed bool primarySeed modules.Seed // The wallet's dependencies. cs modules.ConsensusSet tpool modules.TransactionPool // The following set of fields are responsible for tracking the confirmed // outputs, and for being able to spend them. The seeds are used to derive // the keys that are tracked on the blockchain. All keys are pregenerated // from the seeds, when checking new outputs or spending outputs, the seeds // are not referenced at all. The seeds are only stored so that the user // may access them. seeds []modules.Seed keys map[types.UnlockHash]spendableKey lookahead map[types.UnlockHash]uint64 // unconfirmedProcessedTransactions tracks unconfirmed transactions. // // TODO: Replace this field with a linked list. Currently when a new // transaction set diff is provided, the entire array needs to be // reallocated. Since this can happen tens of times per second, and the // array can have tens of thousands of elements, it's a performance issue. unconfirmedSets map[modules.TransactionSetID][]types.TransactionID unconfirmedProcessedTransactions []modules.ProcessedTransaction // The wallet's database tracks its seeds, keys, outputs, and // transactions. A global db transaction is maintained in memory to avoid // excessive disk writes. Any operations involving dbTx must hold an // exclusive lock. db *persist.BoltDatabase dbTx *bolt.Tx persistDir string log *persist.Logger mu sync.RWMutex // A separate TryMutex is used to protect against concurrent unlocking or // initialization. scanLock siasync.TryMutex // The wallet's ThreadGroup tells tracked functions to shut down and // blocks until they have all exited before returning from Close. tg siasync.ThreadGroup } // New creates a new wallet, loading any known addresses from the input file // name and then using the file to save in the future. Keys and addresses are // not loaded into the wallet during the call to 'new', but rather during the // call to 'Unlock'. func New(cs modules.ConsensusSet, tpool modules.TransactionPool, persistDir string) (*Wallet, error) { // Check for nil dependencies. if cs == nil { return nil, errNilConsensusSet } if tpool == nil { return nil, errNilTpool } // Initialize the data structure. w := &Wallet{ cs: cs, tpool: tpool, keys: make(map[types.UnlockHash]spendableKey), lookahead: make(map[types.UnlockHash]uint64), unconfirmedSets: make(map[modules.TransactionSetID][]types.TransactionID), persistDir: persistDir, } err := w.initPersist() if err != nil { return nil, err } // begin the initial transaction w.dbTx, err = w.db.Begin(true) if err != nil { w.log.Critical("ERROR: failed to start database update:", err) } // make sure we commit on shutdown w.tg.AfterStop(func() { err := w.dbTx.Commit() if err != nil { w.log.Println("ERROR: failed to apply database update:", err) w.dbTx.Rollback() } }) go w.threadedDBUpdate() return w, nil } // Close terminates all ongoing processes involving the wallet, enabling // garbage collection. func (w *Wallet) Close() error { if err := w.tg.Stop(); err != nil { return err } var errs []error // Lock the wallet outside of mu.Lock because Lock uses its own mu.Lock. 
// Once the wallet is locked it cannot be unlocked except using the // unexported unlock method (w.Unlock returns an error if the wallet's // ThreadGroup is stopped). if w.Unlocked() { if err := w.Lock(); err != nil { errs = append(errs, err) } } w.cs.Unsubscribe(w) w.tpool.Unsubscribe(w) if err := w.log.Close(); err != nil { errs = append(errs, fmt.Errorf("log.Close failed: %v", err)) } return build.JoinErrors(errs, "; ") } // AllAddresses returns all addresses that the wallet is able to spend from, // including unseeded addresses. Addresses are returned sorted in byte-order. func (w *Wallet) AllAddresses() []types.UnlockHash { w.mu.RLock() defer w.mu.RUnlock() addrs := make([]types.UnlockHash, 0, len(w.keys)) for addr := range w.keys { addrs = append(addrs, addr) } sort.Slice(addrs, func(i, j int) bool { return bytes.Compare(addrs[i][:], addrs[j][:]) < 0 }) return addrs } // Rescanning reports whether the wallet is currently rescanning the // blockchain. func (w *Wallet) Rescanning() bool { rescanning := !w.scanLock.TryLock() if !rescanning { w.scanLock.Unlock() } return rescanning } Sia-1.3.0/modules/wallet/wallet_test.go000066400000000000000000000357551313565667000200760ustar00rootroot00000000000000package wallet import ( "path/filepath" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/consensus" "github.com/NebulousLabs/Sia/modules/gateway" "github.com/NebulousLabs/Sia/modules/miner" "github.com/NebulousLabs/Sia/modules/transactionpool" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" ) // A Wallet tester contains a ConsensusTester and has a bunch of helpful // functions for facilitating wallet integration testing. type walletTester struct { cs modules.ConsensusSet gateway modules.Gateway tpool modules.TransactionPool miner modules.TestMiner wallet *Wallet walletMasterKey crypto.TwofishKey persistDir string } // createWalletTester takes a name and creates a WalletTester. func createWalletTester(name string) (*walletTester, error) { // Create the modules testdir := build.TempDir(modules.WalletDir, name) g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { return nil, err } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { return nil, err } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { return nil, err } w, err := New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { return nil, err } var masterKey crypto.TwofishKey fastrand.Read(masterKey[:]) _, err = w.Encrypt(masterKey) if err != nil { return nil, err } err = w.Unlock(masterKey) if err != nil { return nil, err } m, err := miner.New(cs, tp, w, filepath.Join(testdir, modules.MinerDir)) if err != nil { return nil, err } // Assemble all components into a wallet tester. wt := &walletTester{ cs: cs, gateway: g, tpool: tp, miner: m, wallet: w, walletMasterKey: masterKey, persistDir: testdir, } // Mine blocks until there is money in the wallet. for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ { b, _ := wt.miner.FindBlock() err := wt.cs.AcceptBlock(b) if err != nil { return nil, err } } return wt, nil } // createBlankWalletTester creates a wallet tester that has not mined any // blocks or encrypted the wallet.
func createBlankWalletTester(name string) (*walletTester, error) { // Create the modules testdir := build.TempDir(modules.WalletDir, name) g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { return nil, err } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { return nil, err } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { return nil, err } w, err := New(cs, tp, filepath.Join(testdir, modules.WalletDir)) if err != nil { return nil, err } m, err := miner.New(cs, tp, w, filepath.Join(testdir, modules.MinerDir)) if err != nil { return nil, err } // Assemble all components into a wallet tester. wt := &walletTester{ gateway: g, cs: cs, tpool: tp, miner: m, wallet: w, persistDir: testdir, } return wt, nil } // closeWt closes all of the modules in the wallet tester. func (wt *walletTester) closeWt() error { errs := []error{ wt.gateway.Close(), wt.cs.Close(), wt.tpool.Close(), wt.miner.Close(), wt.wallet.Close(), } return build.JoinErrors(errs, "; ") } // TestNilInputs tries starting the wallet using nil inputs. func TestNilInputs(t *testing.T) { testdir := build.TempDir(modules.WalletDir, t.Name()) g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { t.Fatal(err) } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { t.Fatal(err) } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { t.Fatal(err) } wdir := filepath.Join(testdir, modules.WalletDir) _, err = New(cs, nil, wdir) if err != errNilTpool { t.Error(err) } _, err = New(nil, tp, wdir) if err != errNilConsensusSet { t.Error(err) } _, err = New(nil, nil, wdir) if err != errNilConsensusSet { t.Error(err) } } // TestAllAddresses checks that AllAddresses returns all of the wallet's // addresses in sorted order. func TestAllAddresses(t *testing.T) { wt, err := createBlankWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() wt.wallet.keys[types.UnlockHash{1}] = spendableKey{} wt.wallet.keys[types.UnlockHash{5}] = spendableKey{} wt.wallet.keys[types.UnlockHash{0}] = spendableKey{} wt.wallet.keys[types.UnlockHash{2}] = spendableKey{} wt.wallet.keys[types.UnlockHash{4}] = spendableKey{} wt.wallet.keys[types.UnlockHash{3}] = spendableKey{} addrs := wt.wallet.AllAddresses() for i := range addrs { if addrs[i][0] != byte(i) { t.Error("address sorting failed:", i, addrs[i][0]) } } } // TestCloseWallet tries to close the wallet. func TestCloseWallet(t *testing.T) { if testing.Short() { t.Skip() } testdir := build.TempDir(modules.WalletDir, t.Name()) g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir)) if err != nil { t.Fatal(err) } cs, err := consensus.New(g, false, filepath.Join(testdir, modules.ConsensusDir)) if err != nil { t.Fatal(err) } tp, err := transactionpool.New(cs, g, filepath.Join(testdir, modules.TransactionPoolDir)) if err != nil { t.Fatal(err) } wdir := filepath.Join(testdir, modules.WalletDir) w, err := New(cs, tp, wdir) if err != nil { t.Fatal(err) } if err := w.Close(); err != nil { t.Fatal(err) } } // TestRescanning verifies that calling Rescanning during a scan operation // returns true, and false otherwise. 
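// Editorial note, not part of the original source: Rescanning is a
// non-blocking probe of the scan TryMutex, so a caller can poll it, e.g.:
//
//	for w.Rescanning() {
//		time.Sleep(100 * time.Millisecond)
//	}
//	// no scan was underway at the moment of the last probe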
func TestRescanning(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // A fresh wallet should not be rescanning. if wt.wallet.Rescanning() { t.Fatal("fresh wallet should not report that a scan is underway") } // lock the wallet wt.wallet.Lock() // spawn an unlock goroutine errChan := make(chan error) go func() { // acquire the write lock so that Unlock acquires the trymutex, but // cannot proceed further wt.wallet.mu.Lock() errChan <- wt.wallet.Unlock(wt.walletMasterKey) }() // wait for goroutine to start, after which Rescanning should return true time.Sleep(time.Millisecond * 10) if !wt.wallet.Rescanning() { t.Fatal("wallet should report that a scan is underway") } // release the mutex and allow the call to complete wt.wallet.mu.Unlock() if err := <-errChan; err != nil { t.Fatal("unlock failed:", err) } // Rescanning should now return false again if wt.wallet.Rescanning() { t.Fatal("wallet should not report that a scan is underway") } } // TestLookaheadGeneration checks if the right number of lookahead addresses // is generated after calling NextAddress() or locking + unlocking the wallet. func TestLookaheadGeneration(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // Check if number of future keys is correct wt.wallet.mu.RLock() progress, err := dbGetPrimarySeedProgress(wt.wallet.dbTx) wt.wallet.mu.RUnlock() if err != nil { t.Fatal("Couldn't fetch primary seed from db") } actualKeys := uint64(len(wt.wallet.lookahead)) expectedKeys := maxLookahead(progress) if actualKeys != expectedKeys { t.Errorf("expected len(lookahead) == %d but was %d", expectedKeys, actualKeys) } // Generate some more keys for i := 0; i < 100; i++ { wt.wallet.NextAddress() } // Lock and unlock wt.wallet.Lock() wt.wallet.Unlock(wt.walletMasterKey) wt.wallet.mu.RLock() progress, err = dbGetPrimarySeedProgress(wt.wallet.dbTx) wt.wallet.mu.RUnlock() if err != nil { t.Fatal("Couldn't fetch primary seed from db") } actualKeys = uint64(len(wt.wallet.lookahead)) expectedKeys = maxLookahead(progress) if actualKeys != expectedKeys { t.Errorf("expected len(lookahead) == %d but was %d", expectedKeys, actualKeys) } wt.wallet.mu.RLock() defer wt.wallet.mu.RUnlock() for i := range wt.wallet.keys { _, exists := wt.wallet.lookahead[i] if exists { t.Fatal("wallet keys contained a key which is also present in lookahead") } } } // TestAdvanceLookaheadNoRescan tests if a transaction to multiple lookahead addresses // is handled correctly without forcing a wallet rescan.
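// Editorial note, not part of the original source: keys at indexes
// [progress, progress+len(lookahead)) are watched but not yet spendable. With
// hypothetical numbers, if progress is 100 and a payment arrives at lookahead
// index 105, advanceSeedLookahead sets progress to 106, promotes keys 100
// through 105 into w.keys, and regenerates the lookahead window; no rescan is
// needed because fewer than lookaheadRescanThreshold keys were promoted.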
func TestAdvanceLookaheadNoRescan(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() builder := wt.wallet.StartTransaction() payout := types.ZeroCurrency // Get the current progress wt.wallet.mu.RLock() progress, err := dbGetPrimarySeedProgress(wt.wallet.dbTx) wt.wallet.mu.RUnlock() if err != nil { t.Fatal("Couldn't fetch primary seed from db") } // choose 10 keys in the lookahead and remember them var receivingAddresses []types.UnlockHash for _, sk := range generateKeys(wt.wallet.primarySeed, progress, 10) { sco := types.SiacoinOutput{ UnlockHash: sk.UnlockConditions.UnlockHash(), Value: types.NewCurrency64(1e3), } builder.AddSiacoinOutput(sco) payout = payout.Add(sco.Value) receivingAddresses = append(receivingAddresses, sk.UnlockConditions.UnlockHash()) } err = builder.FundSiacoins(payout) if err != nil { t.Fatal(err) } tSet, err := builder.Sign(true) if err != nil { t.Fatal(err) } err = wt.tpool.AcceptTransactionSet(tSet) if err != nil { t.Fatal(err) } _, err = wt.miner.AddBlock() if err != nil { t.Fatal(err) } // Check if the receiving addresses were moved from future keys to keys wt.wallet.mu.RLock() defer wt.wallet.mu.RUnlock() for _, uh := range receivingAddresses { _, exists := wt.wallet.lookahead[uh] if exists { t.Fatal("UnlockHash still exists in wallet lookahead") } _, exists = wt.wallet.keys[uh] if !exists { t.Fatal("UnlockHash not in map of spendable keys") } } } // TestAdvanceLookaheadForceRescan tests if a transaction to multiple lookahead addresses // is handled correctly when forcing a wallet rescan. func TestAdvanceLookaheadForceRescan(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // Mine blocks without payouts so that the balance stabilizes for i := types.BlockHeight(0); i < types.MaturityDelay; i++ { wt.addBlockNoPayout() } // Get the current progress and balance wt.wallet.mu.RLock() progress, err := dbGetPrimarySeedProgress(wt.wallet.dbTx) wt.wallet.mu.RUnlock() if err != nil { t.Fatal("Couldn't fetch primary seed from db") } startBal, _, _ := wt.wallet.ConfirmedBalance() // Send coins to an address with a high seed index, just outside the // lookahead range. It will not be initially detected, but later the // rescan should find it.
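// Editorial note, not part of the original source: the target index below is
// progress + len(lookahead) + 5, i.e. five keys beyond the last watched
// lookahead address, which is exactly what makes the payment invisible until
// the rescan that this test later forces.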
highIndex := progress + uint64(len(wt.wallet.lookahead)) + 5 farAddr := generateSpendableKey(wt.wallet.primarySeed, highIndex).UnlockConditions.UnlockHash() farPayout := types.SiacoinPrecision.Mul64(8888) builder := wt.wallet.StartTransaction() builder.AddSiacoinOutput(types.SiacoinOutput{ UnlockHash: farAddr, Value: farPayout, }) err = builder.FundSiacoins(farPayout) if err != nil { t.Fatal(err) } txnSet, err := builder.Sign(true) if err != nil { t.Fatal(err) } err = wt.tpool.AcceptTransactionSet(txnSet) if err != nil { t.Fatal(err) } wt.addBlockNoPayout() newBal, _, _ := wt.wallet.ConfirmedBalance() if !startBal.Sub(newBal).Equals(farPayout) { t.Fatal("wallet should not recognize coins sent to very high seed index") } builder = wt.wallet.StartTransaction() var payout types.Currency // choose 10 keys in the lookahead and remember them var receivingAddresses []types.UnlockHash for uh, index := range wt.wallet.lookahead { // Only choose keys that force a rescan if index < progress+lookaheadRescanThreshold { continue } sco := types.SiacoinOutput{ UnlockHash: uh, Value: types.SiacoinPrecision.Mul64(1000), } builder.AddSiacoinOutput(sco) payout = payout.Add(sco.Value) receivingAddresses = append(receivingAddresses, uh) if len(receivingAddresses) >= 10 { break } } err = builder.FundSiacoins(payout) if err != nil { t.Fatal(err) } txnSet, err = builder.Sign(true) if err != nil { t.Fatal(err) } err = wt.tpool.AcceptTransactionSet(txnSet) if err != nil { t.Fatal(err) } wt.addBlockNoPayout() // Allow the wallet rescan to finish time.Sleep(time.Second * 2) // Check that high seed index txn was discovered in the rescan rescanBal, _, _ := wt.wallet.ConfirmedBalance() if !rescanBal.Equals(startBal) { t.Fatal("wallet did not discover txn after rescan") } // Check if the receiving addresses were moved from future keys to keys wt.wallet.mu.RLock() defer wt.wallet.mu.RUnlock() for _, uh := range receivingAddresses { _, exists := wt.wallet.lookahead[uh] if exists { t.Fatal("UnlockHash still exists in wallet lookahead") } _, exists = wt.wallet.keys[uh] if !exists { t.Fatal("UnlockHash not in map of spendable keys") } } } // TestDistantWallets tests if two wallets that use the same seed stay // synchronized. func TestDistantWallets(t *testing.T) { if testing.Short() { t.SkipNow() } wt, err := createWalletTester(t.Name()) if err != nil { t.Fatal(err) } defer wt.closeWt() // Create another wallet with the same seed. w2, err := New(wt.cs, wt.tpool, build.TempDir(modules.WalletDir, t.Name()+"2", modules.WalletDir)) if err != nil { t.Fatal(err) } err = w2.InitFromSeed(crypto.TwofishKey{}, wt.wallet.primarySeed) if err != nil { t.Fatal(err) } err = w2.Unlock(crypto.TwofishKey(crypto.HashObject(wt.wallet.primarySeed))) if err != nil { t.Fatal(err) } // Use the first wallet. for i := uint64(0); i < lookaheadBuffer/2; i++ { _, err = wt.wallet.SendSiacoins(types.SiacoinPrecision, types.UnlockHash{}) if err != nil { t.Fatal(err) } wt.addBlockNoPayout() } // The second wallet's balance should update accordingly. w1bal, _, _ := wt.wallet.ConfirmedBalance() w2bal, _, _ := w2.ConfirmedBalance() if !w1bal.Equals(w2bal) { t.Fatal("balances do not match:", w1bal, w2bal) } // Send coins to an address with a very high seed index, outside the // lookahead range. w2 should not detect it. 
tbuilder := wt.wallet.StartTransaction() farAddr := generateSpendableKey(wt.wallet.primarySeed, lookaheadBuffer*10).UnlockConditions.UnlockHash() value := types.SiacoinPrecision.Mul64(1e3) tbuilder.AddSiacoinOutput(types.SiacoinOutput{ UnlockHash: farAddr, Value: value, }) err = tbuilder.FundSiacoins(value) if err != nil { t.Fatal(err) } txnSet, err := tbuilder.Sign(true) if err != nil { t.Fatal(err) } err = wt.tpool.AcceptTransactionSet(txnSet) if err != nil { t.Fatal(err) } wt.addBlockNoPayout() if newBal, _, _ := w2.ConfirmedBalance(); !newBal.Equals(w2bal.Sub(value)) { t.Fatal("wallet should not recognize coins sent to very high seed index") } } Sia-1.3.0/persist/000077500000000000000000000000001313565667000137345ustar00rootroot00000000000000Sia-1.3.0/persist/boltdb.go000066400000000000000000000041111313565667000155300ustar00rootroot00000000000000package persist import ( "time" "github.com/NebulousLabs/bolt" ) // BoltDatabase is a persist-level wrapper for the bolt database, providing // extra information such as a version number. type BoltDatabase struct { Metadata *bolt.DB } // checkMetadata confirms that the metadata in the database is // correct. If there is no metadata, correct metadata is inserted. func (db *BoltDatabase) checkMetadata(md Metadata) error { err := db.Update(func(tx *bolt.Tx) error { // Check if the database has metadata. If not, create metadata for the // database. bucket := tx.Bucket([]byte("Metadata")) if bucket == nil { err := db.updateMetadata(tx) if err != nil { return err } return nil } // Verify that the metadata matches the expected metadata. header := bucket.Get([]byte("Header")) if string(header) != md.Header { return ErrBadHeader } version := bucket.Get([]byte("Version")) if string(version) != md.Version { return ErrBadVersion } return nil }) return err } // updateMetadata will set the contents of the metadata bucket to the values // in db.Metadata. func (db *BoltDatabase) updateMetadata(tx *bolt.Tx) error { bucket, err := tx.CreateBucketIfNotExists([]byte("Metadata")) if err != nil { return err } err = bucket.Put([]byte("Header"), []byte(db.Header)) if err != nil { return err } err = bucket.Put([]byte("Version"), []byte(db.Version)) if err != nil { return err } return nil } // Close closes the database. func (db *BoltDatabase) Close() error { return db.DB.Close() } // OpenDatabase opens a database and validates its metadata. func OpenDatabase(md Metadata, filename string) (*BoltDatabase, error) { // Open the database using a 3 second timeout (without the timeout, the // database will potentially hang indefinitely). db, err := bolt.Open(filename, 0600, &bolt.Options{Timeout: 3 * time.Second}) if err != nil { return nil, err } // Check the metadata. boltDB := &BoltDatabase{ Metadata: md, DB: db, } err = boltDB.checkMetadata(md) if err != nil { db.Close() return nil, err } return boltDB, nil } Sia-1.3.0/persist/boltdb_test.go000066400000000000000000000325361313565667000166010ustar00rootroot00000000000000package persist import ( "os" "path/filepath" "runtime" "testing" "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/bolt" "github.com/NebulousLabs/fastrand" ) // testInputs and testFilenames are global variables because most tests require // a variety of metadata and filename inputs (although only TestCheckMetadata // and TestIntegratedCheckMetadata use testInput.newMd and testInput.err). 
// Weird strings are from https://github.com/minimaxir/big-ltist-of-naughty-strings var ( testInputs = []struct { md Metadata newMd Metadata err error }{ {Metadata{"1sadf23", "12253"}, Metadata{"1sa-df23", "12253"}, ErrBadHeader}, {Metadata{"$@#$%^&", "$@#$%^&"}, Metadata{"$@#$%^&", "$@#$%!^&"}, ErrBadVersion}, {Metadata{"//", "//"}, Metadata{"////", "//"}, ErrBadHeader}, {Metadata{":]", ":)"}, Metadata{":]", ":("}, ErrBadVersion}, {Metadata{"¯|_(ツ)_|¯", "_|¯(ツ)¯|_"}, Metadata{"¯|_(ツ)_|¯", "_|¯(ツ)_|¯"}, ErrBadVersion}, {Metadata{"世界", "怎么办呢"}, Metadata{"世界", "怎么好呢"}, ErrBadVersion}, {Metadata{" ", " "}, Metadata{"\t", " "}, ErrBadHeader}, {Metadata{"", ""}, Metadata{"asdf", ""}, ErrBadHeader}, {Metadata{"", "_"}, Metadata{"", ""}, ErrBadVersion}, {Metadata{"%&*", "#@$"}, Metadata{"", "#@$"}, ErrBadHeader}, {Metadata{"a.sdf", "0.30.2"}, Metadata{"a.sdf", "0.3.02"}, ErrBadVersion}, {Metadata{"/", "/"}, Metadata{"//", "/"}, ErrBadHeader}, {Metadata{"%*.*s", "%d"}, Metadata{"%*.*s", "% d"}, ErrBadVersion}, {Metadata{" ", ""}, Metadata{" ", ""}, ErrBadHeader}, {Metadata{"⒯⒣⒠ ⒬⒰⒤⒞⒦ ⒝⒭⒪⒲⒩ ⒡⒪⒳ ⒥⒰⒨⒫⒮ ⒪⒱⒠⒭ ⒯⒣⒠ ⒧⒜⒵⒴ ⒟⒪⒢", "undefined"}, Metadata{"⒯⒣⒠ ⒬⒰⒤⒞⒦ ⒝⒭⒪⒲⒩ ⒡⒪⒳ ⒥⒰⒨⒫⒮ ⒪⒱⒠⒭ ⒯⒣⒠ ⒧⒜⒵⒴ ⒟⒪⒢", "␢undefined"}, ErrBadVersion}, {Metadata{" ", "  "}, Metadata{"  ", "  "}, ErrBadHeader}, {Metadata{"\xF0\x9F\x98\x8F", "\xF0\x9F\x98\xBE"}, Metadata{"\xF0\x9F\x98\x8F", " \xF0\x9F\x98\xBE"}, ErrBadVersion}, {Metadata{"'", ""}, Metadata{"`", ""}, ErrBadHeader}, {Metadata{"", "-"}, Metadata{"", "-␡"}, ErrBadVersion}, {Metadata{"", "(ノಥ益ಥ ┻━┻"}, Metadata{"", "(ノ\nಥ益ಥ ┻━┻"}, ErrBadVersion}, {Metadata{"\n\n", "Ṱ̺̺o͞ ̷i̲̬n̝̗v̟̜o̶̙kè͚̮ ̖t̝͕h̼͓e͇̣ ̢̼h͚͎i̦̲v̻͍e̺̭-m̢iͅn̖̺d̵̼ ̞̥r̛̗e͙p͠r̼̞e̺̠s̘͇e͉̥ǹ̬͎t͍̬i̪̱n͠g̴͉ ͏͉c̬̟h͡a̫̻o̫̟s̗̦.̨̹"}, Metadata{"\n\n", "Ṱ̺̺o͞ ̷i̲̬n̝̗v̟̜o̶̙kè͚̮ t̝͕h̼͓e͇̣ ̢̼h͚͎i̦̲v̻͍e̺̭-m̢iͅn̖̺d̵̼ ̞̥r̛̗e͙p͠r̼̞e̺̠s̘͇e͉̥ǹ̬͎t͍̬i̪̱n͠g̴͉ ͏͉c̬̟h͡a̫̻o̫̟s̗̦.̨̹"}, ErrBadVersion}, } testFilenames = []string{ "_", "-", "1234sg", "@#$%@#", "你好好q wgc好", "\xF0\x9F\x99\x8A", "␣", " ", "$HOME", ",.;'[]-=", "%s", } ) // TestOpenDatabase tests calling OpenDatabase on the following types of // database: // - a database that has not yet been created // - an existing empty database // - an existing nonempty database // Along the way, it also tests calling Close on: // - a newly-created database // - a newly-filled database // - a newly-emptied database func TestOpenDatabase(t *testing.T) { if testing.Short() { t.SkipNow() } testBuckets := [][]byte{ []byte("Fake Bucket123!@#$"), []byte("你好好好"), []byte("¯|_(ツ)_|¯"), []byte("Powerلُلُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ冗"), []byte("﷽"), []byte("(ノಥ益ಥ ┻━┻"), []byte("Ṱ̺̺o͞ ̷i̲̬n̝̗v̟̜o̶̙kè͚̮ ̖t̝͕h̼͓e͇̣ ̢̼h͚͎i̦̲v̻͍e̺̭-m̢iͅn̖̺d̵̼ ̞̥r̛̗e͙p͠r̼̞e̺̠s̘͇e͉̥ǹ̬͎t͍̬i̪̱n͠g̴͉ ͏͉c̬̟h͡a̫̻o̫̟s̗̦.̨̹"), []byte("0xbadidea"), []byte("␣"), []byte("你好好好"), } // Create a folder for the database file. If a folder by that name exists // already, it will be replaced by an empty folder. testDir := build.TempDir(persistDir, t.Name()) err := os.MkdirAll(testDir, 0700) if err != nil { t.Fatal(err) } for i, in := range testInputs { dbFilename := testFilenames[i%len(testFilenames)] dbFilepath := filepath.Join(testDir, dbFilename) // Create a new database. db, err := OpenDatabase(in.md, dbFilepath) if err != nil { t.Errorf("calling OpenDatabase on a new database failed for metadata %v, filename %v; error was %v", in.md, dbFilename, err) continue } // Close the newly-created, empty database. 
err = db.Close() if err != nil { t.Errorf("closing a newly created database failed for metadata %v, filename %v; error was %v", in.md, dbFilename, err) continue } // Call OpenDatabase again, this time on the existing empty database. db, err = OpenDatabase(in.md, dbFilepath) if err != nil { t.Errorf("calling OpenDatabase on an existing empty database failed for metadata %v, filename %v; error was %v", in.md, dbFilename, err) continue } // Create buckets in the database. err = db.Update(func(tx *bolt.Tx) error { for _, testBucket := range testBuckets { _, err := tx.CreateBucketIfNotExists(testBucket) if err != nil { t.Errorf("db.Update failed on bucket name %v for metadata %v, filename %v; error was %v", testBucket, in.md, dbFilename, err) return err } } return nil }) if err != nil { t.Error(err) continue } // Make sure CreateBucketIfNotExists method handles invalid (nil) // bucket name. err = db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucketIfNotExists(nil) return err }) if err != bolt.ErrBucketNameRequired { t.Errorf("expected bolt.ErrBucketNameRequired, got %v", err) } // Fill each bucket with a random number (0-9, inclusive) of key/value // pairs, where each key is a length-10 random byteslice and each value // is a length-1000 random byteslice. err = db.Update(func(tx *bolt.Tx) error { for _, testBucket := range testBuckets { b := tx.Bucket(testBucket) x := fastrand.Intn(10) for i := 0; i <= x; i++ { err := b.Put(fastrand.Bytes(10), fastrand.Bytes(1e3)) if err != nil { t.Errorf("db.Update failed to fill bucket %v for metadata %v, filename %v; error was %v", testBucket, in.md, dbFilename, err) return err } } } return nil }) if err != nil { t.Error(err) continue } // Close the newly-filled database. err = db.Close() if err != nil { t.Errorf("closing a newly-filled database failed for metadata %v, filename %v; error was %v", in.md, dbFilename, err) continue } // Call OpenDatabase on the database now that it's been filled. db, err = OpenDatabase(in.md, dbFilepath) if err != nil { t.Error(err) continue } // Empty every bucket in the database. err = db.Update(func(tx *bolt.Tx) error { for _, testBucket := range testBuckets { b := tx.Bucket(testBucket) err := b.ForEach(func(k, v []byte) error { return b.Delete(k) }) if err != nil { return err } } return nil }) if err != nil { t.Error(err) continue } // Close and delete the newly emptied database. err = db.Close() if err != nil { t.Errorf("closing a newly-emptied database failed for metadata %v, filename %v; error was %v", in.md, dbFilename, err) continue } err = os.Remove(dbFilepath) if err != nil { t.Errorf("removing database file failed for metadata %v, filename %v; error was %v", in.md, dbFilename, err) continue } } } // TestErrPermissionOpenDatabase tests calling OpenDatabase on a database file // with the wrong filemode (< 0600), which should result in an os.ErrPermission // error. func TestErrPermissionOpenDatabase(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("can't reproduce on Windows") } const ( dbHeader = "Fake Header" dbVersion = "0.0.0" dbFilename = "Fake Filename" ) testDir := build.TempDir(persistDir, t.Name()) err := os.MkdirAll(testDir, 0700) if err != nil { t.Fatal(err) } dbFilepath := filepath.Join(testDir, dbFilename) badFileModes := []os.FileMode{0000, 0001, 0002, 0003, 0004, 0005, 0010, 0040, 0060, 0105, 0110, 0126, 0130, 0143, 0150, 0166, 0170, 0200, 0313, 0470, 0504, 0560, 0566, 0577} // Make sure OpenDatabase returns a permissions error for each of the modes // in badFileModes. 
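// Note (added for clarity): bolt.Open passes the mode to os.OpenFile with
// O_RDWR; because the file already exists with mode < 0600, the open itself
// fails with a permission error, which os.IsPermission recognizes.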
for _, mode := range badFileModes { // Create a file named dbFilename in directory testDir with the wrong // permissions (mode < 0600). _, err := os.OpenFile(dbFilepath, os.O_RDWR|os.O_CREATE, mode) if err != nil { t.Fatal(err) } // OpenDatabase should return a permissions error because the database // mode is less than 0600. _, err = OpenDatabase(Metadata{dbHeader, dbVersion}, dbFilepath) if !os.IsPermission(err) { t.Errorf("OpenDatabase failed to return expected error when called on a database with the wrong permissions (%o instead of >= 0600);\n wanted:\topen %v: permission denied\n got:\t\t%v", mode, dbFilepath, err) } err = os.Remove(dbFilepath) if err != nil { t.Error(err) } } } // TestErrTxNotWritable checks that updateMetadata returns an error when called // from a read-only transaction. func TestErrTxNotWritable(t *testing.T) { if testing.Short() { t.SkipNow() } testDir := build.TempDir(persistDir, t.Name()) err := os.MkdirAll(testDir, 0700) if err != nil { t.Fatal(err) } for i, in := range testInputs { dbFilename := testFilenames[i%len(testFilenames)] dbFilepath := filepath.Join(testDir, dbFilename) db, err := bolt.Open(dbFilepath, 0600, &bolt.Options{Timeout: 3 * time.Second}) if err != nil { t.Fatal(err) } boltDB := &BoltDatabase{ Metadata: in.md, DB: db, } // Should return an error because updateMetadata is being called from // a read-only transaction. err = db.View(boltDB.updateMetadata) if err != bolt.ErrTxNotWritable { t.Errorf("updateMetadata returned wrong error for input %v, filename %v; expected tx not writable, got %v", in.md, dbFilename, err) } err = boltDB.Close() if err != nil { t.Fatal(err) } err = os.Remove(dbFilepath) if err != nil { t.Fatal(err) } } } // TestErrDatabaseNotOpen tests that checkMetadata returns an error when called // on a BoltDatabase that is closed. func TestErrDatabaseNotOpen(t *testing.T) { if testing.Short() { t.SkipNow() } testDir := build.TempDir(persistDir, t.Name()) err := os.MkdirAll(testDir, 0700) if err != nil { t.Fatal(err) } dbFilepath := filepath.Join(testDir, "fake_filename") md := Metadata{"Fake Header", "Fake Version"} db, err := bolt.Open(dbFilepath, 0600, &bolt.Options{Timeout: 3 * time.Second}) if err != nil { t.Fatal(err) } boltDB := &BoltDatabase{ Metadata: md, DB: db, } err = boltDB.Close() if err != nil { t.Fatal(err) } // Should return an error since boltDB is closed. err = boltDB.checkMetadata(md) if err != bolt.ErrDatabaseNotOpen { t.Errorf("expected database not open, got %v", err) } err = os.Remove(dbFilepath) if err != nil { t.Error(err) } } // TestErrCheckMetadata tests that checkMetadata returns an error when called // on a BoltDatabase whose metadata has been changed. 
func TestErrCheckMetadata(t *testing.T) { if testing.Short() { t.SkipNow() } testDir := build.TempDir(persistDir, t.Name()) err := os.MkdirAll(testDir, 0700) if err != nil { t.Fatal(err) } for i, in := range testInputs { dbFilename := testFilenames[i%len(testFilenames)] dbFilepath := filepath.Join(testDir, dbFilename) db, err := bolt.Open(dbFilepath, 0600, &bolt.Options{Timeout: 3 * time.Second}) if err != nil { t.Fatal(err) } boltDB := &BoltDatabase{ Metadata: in.md, DB: db, } err = db.Update(func(tx *bolt.Tx) error { bucket, err := tx.CreateBucketIfNotExists([]byte("Metadata")) if err != nil { return err } err = bucket.Put([]byte("Header"), []byte(in.newMd.Header)) if err != nil { return err } err = bucket.Put([]byte("Version"), []byte(in.newMd.Version)) if err != nil { return err } return nil }) if err != nil { t.Errorf("Put method failed for input %v, filename %v with error %v", in, dbFilename, err) continue } // Should return an error because boltDB's metadata now differs from // its original metadata. err = (*boltDB).checkMetadata(in.md) if err != in.err { t.Errorf("expected %v, got %v for input %v -> %v", in.err, err, in.md, in.newMd) } err = boltDB.Close() if err != nil { t.Fatal(err) } err = os.Remove(dbFilepath) if err != nil { t.Fatal(err) } } } // TestErrIntegratedCheckMetadata checks that checkMetadata returns an error // within OpenDatabase when OpenDatabase is called on a BoltDatabase that has // already been set up with different metadata. func TestErrIntegratedCheckMetadata(t *testing.T) { if testing.Short() { t.SkipNow() } testDir := build.TempDir(persistDir, t.Name()) err := os.MkdirAll(testDir, 0700) if err != nil { t.Fatal(err) } for i, in := range testInputs { dbFilename := testFilenames[i%len(testFilenames)] dbFilepath := filepath.Join(testDir, dbFilename) boltDB, err := OpenDatabase(in.md, dbFilepath) if err != nil { t.Errorf("OpenDatabase failed on input %v, filename %v; error was %v", in, dbFilename, err) continue } err = boltDB.Close() if err != nil { t.Fatal(err) } // Should return an error because boltDB was set up with metadata in.md, not in.newMd boltDB, err = OpenDatabase(in.newMd, dbFilepath) if err != in.err { t.Errorf("expected error %v for input %v and filename %v; got %v instead", in.err, in, dbFilename, err) } err = os.Remove(dbFilepath) if err != nil { t.Fatal(err) } } } Sia-1.3.0/persist/disk_test.go000066400000000000000000000156401313565667000162600ustar00rootroot00000000000000package persist // disk_test.go probes some of the disk operations that are very commonly used // within Sia. Namely, Read, Write, Truncate, WriteAt(rand), ReadAt(rand). import ( "os" "path/filepath" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/fastrand" ) // BenchmarkWrite512MiB checks how long it takes to write 512MiB sequentially. func BenchmarkWrite512MiB(b *testing.B) { testDir := build.TempDir("persist", b.Name()) err := os.MkdirAll(testDir, 0700) if err != nil { b.Fatal(err) } b.SetBytes(1 << 29) filename := filepath.Join(testDir, "512MiB.file") b.ResetTimer() for i := 0; i < b.N; i++ { // Make the file. f, err := os.Create(filename) if err != nil { b.Fatal(err) } // 2^7 writes of 4MiB. for i := 0; i < 1<<7; i++ { // Get the entropy separate from the timer. b.StopTimer() data := fastrand.Bytes(1 << 22) b.StartTimer() _, err = f.Write(data) if err != nil { b.Fatal(err) } // Sync after every write. err = f.Sync() if err != nil { b.Fatal(err) } } // Close the file before iterating. 
err = f.Close() if err != nil { b.Fatal(err) } } err = os.Remove(filename) if err != nil { b.Fatal(err) } } // BenchmarkWrite512MiBTrunc checks how long it takes to write 512MiB using // stepwise truncate. func BenchmarkWrite512MiBTrunc(b *testing.B) { testDir := build.TempDir("persist", b.Name()) err := os.MkdirAll(testDir, 0700) if err != nil { b.Fatal(err) } b.SetBytes(1 << 29) filename := filepath.Join(testDir, "512MiB.file") b.ResetTimer() for i := 0; i < b.N; i++ { // Make the file. f, err := os.Create(filename) if err != nil { b.Fatal(err) } // 2^7 writes of 4MiB. for i := 0; i < 1<<7; i++ { // Extend the file through truncation. err = f.Truncate(int64((i + 1) * 1 << 22)) if err != nil { b.Fatal(err) } // Sync after every write. err = f.Sync() if err != nil { b.Fatal(err) } } // Close the file before iterating. err = f.Close() if err != nil { b.Fatal(err) } } err = os.Remove(filename) if err != nil { b.Fatal(err) } } // BenchmarkWrite512MiBRand checks how long it takes to write 512MiB randomly. func BenchmarkWrite512MiBRand(b *testing.B) { testDir := build.TempDir("persist", b.Name()) err := os.MkdirAll(testDir, 0700) if err != nil { b.Fatal(err) } b.SetBytes(1 << 29) filename := filepath.Join(testDir, "512MiB.file") b.ResetTimer() for i := 0; i < b.N; i++ { // Make the file. f, err := os.Create(filename) if err != nil { b.Fatal(err) } // 2^7 writes of 4MiB. for i := 0; i < 1<<7; i++ { // Get the entropy separate from the timer. b.StopTimer() data := fastrand.Bytes(1 << 22) offset := int64(fastrand.Intn(1 << 6)) offset *= 1 << 22 b.StartTimer() _, err = f.WriteAt(data, offset) if err != nil { b.Fatal(err) } // Sync after every write. err = f.Sync() if err != nil { b.Fatal(err) } } // Close the file before iterating. err = f.Close() if err != nil { b.Fatal(err) } } err = os.Remove(filename) if err != nil { b.Fatal(err) } } // BenchmarkRead512MiB checks how long it takes to read 512MiB sequentially. func BenchmarkRead512MiB(b *testing.B) { testDir := build.TempDir("persist", b.Name()) err := os.MkdirAll(testDir, 0700) if err != nil { b.Fatal(err) } b.SetBytes(1 << 29) // Make the file. filename := filepath.Join(testDir, "512MiB.file") f, err := os.Create(filename) if err != nil { b.Fatal(err) } // 2^7 writes of 4MiB. for i := 0; i < 1<<7; i++ { // Get the entropy separate from the timer. b.StopTimer() data := fastrand.Bytes(1 << 22) b.StartTimer() _, err = f.Write(data) if err != nil { b.Fatal(err) } // Sync after every write. err = f.Sync() if err != nil { b.Fatal(err) } } // Close the file. err = f.Close() if err != nil { b.Fatal(err) } // Check the sequential read speed. b.ResetTimer() for i := 0; i < b.N; i++ { // Open the file. f, err := os.Open(filename) if err != nil { b.Fatal(err) } // Read the file 4 MiB at a time. for i := 0; i < 1<<6; i++ { data := make([]byte, 1<<22) _, err = f.Read(data) if err != nil { b.Fatal(err) } } err = f.Close() if err != nil { b.Fatal(err) } } err = os.Remove(filename) if err != nil { b.Fatal(err) } } // BenchmarkRead512MiBRand checks how long it takes to read 512MiB randomly. func BenchmarkRead512MiBRand(b *testing.B) { testDir := build.TempDir("persist", b.Name()) err := os.MkdirAll(testDir, 0700) if err != nil { b.Fatal(err) } b.SetBytes(1 << 29) // Make the file. filename := filepath.Join(testDir, "512MiB.file") f, err := os.Create(filename) if err != nil { b.Fatal(err) } // 2^7 writes of 4MiB. for i := 0; i < 1<<7; i++ { // Get the entropy separate from the timer. 
b.StopTimer() data := fastrand.Bytes(1 << 22) b.StartTimer() _, err = f.Write(data) if err != nil { b.Fatal(err) } // Sync after every write. err = f.Sync() if err != nil { b.Fatal(err) } } // Close the file. err = f.Close() if err != nil { b.Fatal(err) } // Check the sequential read speed. b.ResetTimer() for i := 0; i < b.N; i++ { // Open the file. f, err := os.Open(filename) if err != nil { b.Fatal(err) } // Read the file 4 MiB at a time. for i := 0; i < 1<<6; i++ { offset := int64(fastrand.Intn(1 << 6)) offset *= 1 << 22 data := make([]byte, 1<<22) _, err = f.ReadAt(data, offset) if err != nil { b.Fatal(err) } } err = f.Close() if err != nil { b.Fatal(err) } } err = os.Remove(filename) if err != nil { b.Fatal(err) } } // BenchmarkTruncate512MiB checks how long it takes to truncate a 512 MiB file. func BenchmarkTruncate512MiB(b *testing.B) { testDir := build.TempDir("persist", b.Name()) err := os.MkdirAll(testDir, 0700) if err != nil { b.Fatal(err) } b.SetBytes(1 << 29) filename := filepath.Join(testDir, "512MiB.file") // Check the truncate speed. b.ResetTimer() for i := 0; i < b.N; i++ { // Make the file separate from the timer. b.StopTimer() f, err := os.Create(filename) if err != nil { b.Fatal(err) } // 2^7 writes of 4MiB. for i := 0; i < 1<<7; i++ { // Get the entropy separate from the timer. b.StopTimer() data := fastrand.Bytes(1 << 22) b.StartTimer() _, err = f.Write(data) if err != nil { b.Fatal(err) } } // Sync after writing. err = f.Sync() if err != nil { b.Fatal(err) } // Close the file. err = f.Close() if err != nil { b.Fatal(err) } b.StartTimer() // Open the file. f, err = os.OpenFile(filename, os.O_RDWR, 0600) if err != nil { b.Fatal(err) } // Truncate the file. err = f.Truncate(0) if err != nil { b.Fatal(err) } // Sync. err = f.Sync() if err != nil { b.Fatal(err) } // Close. err = f.Close() if err != nil { b.Fatal(err) } } err = os.Remove(filename) if err != nil { b.Fatal(err) } } Sia-1.3.0/persist/json.go000066400000000000000000000153761313565667000152500ustar00rootroot00000000000000package persist import ( "bytes" "encoding/json" "io/ioutil" "os" "strings" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" ) // readJSON will try to read a persisted json object from a file. func readJSON(meta Metadata, object interface{}, filename string) error { // Open the file. file, err := os.Open(filename) if os.IsNotExist(err) { return err } if err != nil { return build.ExtendErr("unable to open persisted json object file", err) } defer file.Close() // Read the metadata from the file. var header, version string dec := json.NewDecoder(file) if err := dec.Decode(&header); err != nil { return build.ExtendErr("unable to read header from persisted json object file", err) } if header != meta.Header { return ErrBadHeader } if err := dec.Decode(&version); err != nil { return build.ExtendErr("unable to read version from persisted json object file", err) } if version != meta.Version { return ErrBadVersion } // Read everything else. remainingBytes, err := ioutil.ReadAll(dec.Buffered()) if err != nil { return build.ExtendErr("unable to read persisted json object data", err) } // The buffer may or may not have read the rest of the file, read the rest // of the file to be certain. remainingBytesExtra, err := ioutil.ReadAll(file) if err != nil { return build.ExtendErr("unable to read persisted json object data", err) } remainingBytes = append(remainingBytes, remainingBytesExtra...) // Determine whether the leading bytes contain a checksum. 
A proper checksum // will be 67 bytes (quote, 64 byte checksum, quote, newline). A manual // checksum will be the characters "manual\n" (9 characters). If neither // decode correctly, it is assumed that there is no checksum at all. var checksum crypto.Hash err = json.Unmarshal(remainingBytes[:67], &checksum) if err == nil && checksum == crypto.HashBytes(remainingBytes[68:]) { // Checksum is proper, and matches the data. Update the data portion to // exclude the checksum. remainingBytes = remainingBytes[68:] } else { // Cryptographic checksum failed, try interpreting a manual checksum. var manualChecksum string err := json.Unmarshal(remainingBytes[:8], &manualChecksum) if err == nil && manualChecksum == "manual" { // Manual checksum is proper. Update the remaining data to exclude // the manual checksum. remainingBytes = remainingBytes[9:] } } // Any valid checksum has been stripped off. There is also the case that no // checksum was written at all, which is ignored as a case - it's needed to // preserve compatibility with previous persist files. // Parse the json object. return json.Unmarshal(remainingBytes, &object) } // LoadJSON will load a persisted json object from disk. func LoadJSON(meta Metadata, object interface{}, filename string) error { // Verify that the filename does not have the persist temp suffix. if strings.HasSuffix(filename, tempSuffix) { return ErrBadFilenameSuffix } // Verify that no other thread is using this filename. err := func() error { activeFilesMu.Lock() defer activeFilesMu.Unlock() _, exists := activeFiles[filename] if exists { build.Critical(ErrFileInUse, filename) return ErrFileInUse } activeFiles[filename] = struct{}{} return nil }() if err != nil { return err } // Release the lock at the end of the function. defer func() { activeFilesMu.Lock() delete(activeFiles, filename) activeFilesMu.Unlock() }() // Try opening the primary file. err = readJSON(meta, object, filename) if err == ErrBadHeader || err == ErrBadVersion || os.IsNotExist(err) { return err } if err != nil { // Try opening the temp file. err := readJSON(meta, object, filename+tempSuffix) if err != nil { return build.ExtendErr("unable to read persisted json object from disk", err) } } // Success. return nil } // SaveJSON will save a json object to disk in a durable, atomic way. The // resulting file will have a checksum of the data as the third line. If // manually editing files, the checksum line can be replaced with the 8 // characters "manual". This will cause the reader to accept the checksum even // though the file has been changed. func SaveJSON(meta Metadata, object interface{}, filename string) error { // Verify that the filename does not have the persist temp suffix. if strings.HasSuffix(filename, tempSuffix) { return ErrBadFilenameSuffix } // Verify that no other thread is using this filename. err := func() error { activeFilesMu.Lock() defer activeFilesMu.Unlock() _, exists := activeFiles[filename] if exists { build.Critical(ErrFileInUse, filename) return ErrFileInUse } activeFiles[filename] = struct{}{} return nil }() if err != nil { return err } // Release the lock at the end of the function. defer func() { activeFilesMu.Lock() delete(activeFiles, filename) activeFilesMu.Unlock() }() // Write the metadata to the buffer. 
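// Note (added for clarity): once assembled, the buffer (and thus the file on
// disk) has the layout:
//   "Header"\n"Version"\n"<64-char hex checksum>"\n<tab-indented JSON object>
// See persist/testdata/std.json for a concrete example of this layout.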
buf := new(bytes.Buffer) enc := json.NewEncoder(buf) if err := enc.Encode(meta.Header); err != nil { return build.ExtendErr("unable to encode metadata header", err) } if err := enc.Encode(meta.Version); err != nil { return build.ExtendErr("unable to encode metadata version", err) } // Marshal the object into json and write the checksum + result to the // buffer. objBytes, err := json.MarshalIndent(object, "", "\t") if err != nil { return build.ExtendErr("unable to marshal the provided object", err) } checksum := crypto.HashBytes(objBytes) if err := enc.Encode(checksum); err != nil { return build.ExtendErr("unable to encode checksum", err) } buf.Write(objBytes) // Write out the data to the temp file, with a sync. data := buf.Bytes() err = func() (err error) { file, err := os.OpenFile(filename+tempSuffix, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0600) if err != nil { return build.ExtendErr("unable to open temp file", err) } defer func() { err = build.ComposeErrors(err, file.Close()) }() // Write and sync. _, err = file.Write(data) if err != nil { return build.ExtendErr("unable to write temp file", err) } err = file.Sync() if err != nil { return build.ExtendErr("unable to sync temp file", err) } return nil }() if err != nil { return err } // Write out the data to the real file, with a sync. err = func() (err error) { file, err := os.OpenFile(filename, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0600) if err != nil { return build.ExtendErr("unable to open file", err) } defer func() { err = build.ComposeErrors(err, file.Close()) }() // Write and sync. _, err = file.Write(data) if err != nil { return build.ExtendErr("unable to write file", err) } err = file.Sync() if err != nil { return build.ExtendErr("unable to sync temp file", err) } return nil }() if err != nil { return err } // Success return nil } Sia-1.3.0/persist/json_test.go000066400000000000000000000140011313565667000162670ustar00rootroot00000000000000package persist import ( "bytes" "os" "path/filepath" "sync" "testing" "github.com/NebulousLabs/Sia/build" ) // TestSaveLoadJSON creates a simple object and then tries saving and loading // it. func TestSaveLoadJSON(t *testing.T) { if testing.Short() { t.SkipNow() } // Create the directory used for testing. dir := filepath.Join(build.TempDir(persistDir), t.Name()) err := os.MkdirAll(dir, 0700) if err != nil { t.Fatal(err) } // Create and save the test object. testMeta := Metadata{"Test Struct", "v1.2.1"} type testStruct struct { One string Two uint64 Three []byte } obj1 := testStruct{"dog", 25, []byte("more dog")} obj1Filename := filepath.Join(dir, "obj1.json") err = SaveJSON(testMeta, obj1, obj1Filename) if err != nil { t.Fatal(err) } var obj2 testStruct // Try loading the object err = LoadJSON(testMeta, &obj2, obj1Filename) if err != nil { t.Fatal(err) } // Verify equivalence. if obj2.One != obj1.One { t.Error("persist mismatch") } if obj2.Two != obj1.Two { t.Error("persist mismatch") } if !bytes.Equal(obj2.Three, obj1.Three) { t.Error("persist mismatch") } if obj2.One != "dog" { t.Error("persist mismatch") } if obj2.Two != 25 { t.Error("persist mismatch") } if !bytes.Equal(obj2.Three, []byte("more dog")) { t.Error("persist mismatch") } // Try loading the object using the temp file. err = LoadJSON(testMeta, &obj2, obj1Filename+tempSuffix) if err != ErrBadFilenameSuffix { t.Error("did not get bad filename suffix") } // Try saving the object multiple times concurrently. 
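// Note (added for clarity): concurrent SaveJSON calls on one filename trip
// the activeFiles guard, and build.Critical panics in testing builds, which
// is why each goroutine below recovers.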
var wg sync.WaitGroup for i := 0; i < 250; i++ { wg.Add(1) go func(i int) { defer wg.Done() defer func() { recover() // Error is irrelevant. }() SaveJSON(testMeta, obj1, obj1Filename) }(i) } wg.Wait() // Despite possible errors from saving the object many times concurrently, // the object should still be readable. err = LoadJSON(testMeta, &obj2, obj1Filename) if err != nil { t.Fatal(err) } // Verify equivalence. if obj2.One != obj1.One { t.Error("persist mismatch") } if obj2.Two != obj1.Two { t.Error("persist mismatch") } if !bytes.Equal(obj2.Three, obj1.Three) { t.Error("persist mismatch") } if obj2.One != "dog" { t.Error("persist mismatch") } if obj2.Two != 25 { t.Error("persist mismatch") } if !bytes.Equal(obj2.Three, []byte("more dog")) { t.Error("persist mismatch") } } // TestLoadJSONCorruptedFiles checks that LoadJSON correctly handles various // types of corruption that can occur during the saving process. func TestLoadJSONCorruptedFiles(t *testing.T) { if testing.Short() { t.SkipNow() } // Define the test object that will be getting loaded. testMeta := Metadata{"Test Struct", "v1.2.1"} type testStruct struct { One string Two uint64 Three []byte } obj1 := testStruct{"dog", 25, []byte("more dog")} var obj2 testStruct // Try loading a file with a bad checksum. err := LoadJSON(testMeta, &obj2, filepath.Join("testdata", "badchecksum.json")) if err == nil { t.Error("bad checksum should have failed") } // Try loading a file where only the main has a bad checksum. err = LoadJSON(testMeta, &obj2, filepath.Join("testdata", "badchecksummain.json")) if err != nil { t.Error("bad checksum main failed:", err) } // Verify equivalence. if obj2.One != obj1.One { t.Error("persist mismatch") } if obj2.Two != obj1.Two { t.Error("persist mismatch") } if !bytes.Equal(obj2.Three, obj1.Three) { t.Error("persist mismatch") } if obj2.One != "dog" { t.Error("persist mismatch") } if obj2.Two != 25 { t.Error("persist mismatch") } if !bytes.Equal(obj2.Three, []byte("more dog")) { t.Error("persist mismatch") } // Try loading a file with a manual checksum. err = LoadJSON(testMeta, &obj2, filepath.Join("testdata", "manual.json")) if err != nil { t.Error("bad checksum should have failed") } // Verify equivalence. if obj2.One != obj1.One { t.Error("persist mismatch") } if obj2.Two != obj1.Two { t.Error("persist mismatch") } if !bytes.Equal(obj2.Three, obj1.Three) { t.Error("persist mismatch") } if obj2.One != "dog" { t.Error("persist mismatch") } if obj2.Two != 25 { t.Error("persist mismatch") } if !bytes.Equal(obj2.Three, []byte("more dog")) { t.Error("persist mismatch") } // Try loading a corrupted main file. err = LoadJSON(testMeta, &obj2, filepath.Join("testdata", "corruptmain.json")) if err != nil { t.Error("couldn't load corrupted main:", err) } // Verify equivalence. if obj2.One != obj1.One { t.Error("persist mismatch") } if obj2.Two != obj1.Two { t.Error("persist mismatch") } if !bytes.Equal(obj2.Three, obj1.Three) { t.Error("persist mismatch") } if obj2.One != "dog" { t.Error("persist mismatch") } if obj2.Two != 25 { t.Error("persist mismatch") } if !bytes.Equal(obj2.Three, []byte("more dog")) { t.Error("persist mismatch") } // Try loading a corrupted temp file. err = LoadJSON(testMeta, &obj2, filepath.Join("testdata", "corrupttemp.json")) if err != nil { t.Error("couldn't load corrupted main:", err) } // Verify equivalence. 
if obj2.One != obj1.One { t.Error("persist mismatch") } if obj2.Two != obj1.Two { t.Error("persist mismatch") } if !bytes.Equal(obj2.Three, obj1.Three) { t.Error("persist mismatch") } if obj2.One != "dog" { t.Error("persist mismatch") } if obj2.Two != 25 { t.Error("persist mismatch") } if !bytes.Equal(obj2.Three, []byte("more dog")) { t.Error("persist mismatch") } // Try loading a file with no temp, and no checksum. err = LoadJSON(testMeta, &obj2, filepath.Join("testdata", "nochecksum.json")) if err != nil { t.Error("couldn't load no checksum:", err) } // Verify equivalence. if obj2.One != obj1.One { t.Error("persist mismatch") } if obj2.Two != obj1.Two { t.Error("persist mismatch") } if !bytes.Equal(obj2.Three, obj1.Three) { t.Error("persist mismatch") } if obj2.One != "dog" { t.Error("persist mismatch") } if obj2.Two != 25 { t.Error("persist mismatch") } if !bytes.Equal(obj2.Three, []byte("more dog")) { t.Error("persist mismatch") } } Sia-1.3.0/persist/log.go000066400000000000000000000074441313565667000150550ustar00rootroot00000000000000package persist import ( "fmt" "io" "log" "os" "sync" "github.com/NebulousLabs/Sia/build" ) // Logger is a wrapper for the standard library logger that enforces logging // with the Sia-standard settings. It also supports a Close method, which // attempts to close the underlying io.Writer. type Logger struct { *log.Logger w io.Writer } // Close logs a shutdown message and closes the Logger's underlying io.Writer, // if it is also an io.Closer. func (l *Logger) Close() error { l.Output(2, "SHUTDOWN: Logging has terminated.") if c, ok := l.w.(io.Closer); ok { return c.Close() } return nil } // Critical logs a message with a CRITICAL prefix that guides the user to the // Sia github tracker. If debug mode is enabled, it will also write the message // to os.Stderr and panic. Critical should only be called if there has been a // developer error, otherwise Severe should be called. func (l *Logger) Critical(v ...interface{}) { l.Output(2, "CRITICAL: "+fmt.Sprintln(v...)) build.Critical(v...) } // Debug is equivalent to Logger.Print when build.DEBUG is true. Otherwise it // is a no-op. func (l *Logger) Debug(v ...interface{}) { if build.DEBUG { l.Output(2, fmt.Sprint(v...)) } } // Debugf is equivalent to Logger.Printf when build.DEBUG is true. Otherwise it // is a no-op. func (l *Logger) Debugf(format string, v ...interface{}) { if build.DEBUG { l.Output(2, fmt.Sprintf(format, v...)) } } // Debugln is equivalent to Logger.Println when build.DEBUG is true. Otherwise // it is a no-op. func (l *Logger) Debugln(v ...interface{}) { if build.DEBUG { l.Output(2, "[DEBUG] "+fmt.Sprintln(v...)) } } // Severe logs a message with a SEVERE prefix. If debug mode is enabled, it // will also write the message to os.Stderr and panic. Severe should be called // if there is a severe problem with the user's machine or setup that should be // addressed ASAP but does not necessarily require that the machine crash or // exit. func (l *Logger) Severe(v ...interface{}) { l.Output(2, "SEVERE: "+fmt.Sprintln(v...)) build.Severe(v...) } // NewLogger returns a logger that can be closed. Calls should not be made to // the logger after 'Close' has been called. 
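// A minimal usage sketch (illustrative, not part of the original docs):
//   var buf bytes.Buffer
//   l := persist.NewLogger(&buf)
//   l.Println("message") // written with the Sia-standard log flags
//   _ = l.Close()        // appends the shutdown line; *bytes.Buffer is not
//                        // an io.Closer, so nothing is actually closed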
func NewLogger(w io.Writer) *Logger { l := log.New(w, "", log.Ldate|log.Ltime|log.Lmicroseconds|log.Lshortfile|log.LUTC) l.Output(3, "STARTUP: Logging has started.") // Call depth is 3 because NewLogger is usually called by NewFileLogger return &Logger{l, w} } // closeableFile wraps an os.File to perform sanity checks on its Write and // Close methods. When the checks are enabled, calls to Write or Close will // panic if they are called after the file has already been closed. type closeableFile struct { *os.File closed bool mu sync.RWMutex } // Close closes the file and sets the closed flag. func (cf *closeableFile) Close() error { cf.mu.Lock() defer cf.mu.Unlock() // Sanity check - close should not have been called yet. if cf.closed { build.Critical("cannot close the file; already closed") } // Ensure that all data has actually hit the disk. if err := cf.Sync(); err != nil { return err } cf.closed = true return cf.File.Close() } // Write takes the input data and writes it to the file. func (cf *closeableFile) Write(b []byte) (int, error) { cf.mu.RLock() defer cf.mu.RUnlock() // Sanity check - close should not have been called yet. if cf.closed { build.Critical("cannot write to the file after it has been closed") } return cf.File.Write(b) } // NewFileLogger returns a logger that logs to logFilename. The file is opened // in append mode, and created if it does not exist. func NewFileLogger(logFilename string) (*Logger, error) { logFile, err := os.OpenFile(logFilename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0660) if err != nil { return nil, err } cf := &closeableFile{File: logFile} return NewLogger(cf), nil } Sia-1.3.0/persist/log_test.go000066400000000000000000000041541313565667000161070ustar00rootroot00000000000000package persist import ( "io/ioutil" "os" "path/filepath" "strings" "testing" "github.com/NebulousLabs/Sia/build" ) // TestLogger checks that the basic functions of the file logger work as // designed. func TestLogger(t *testing.T) { // Create a folder for the log file. testdir := build.TempDir(persistDir, t.Name()) err := os.MkdirAll(testdir, 0700) if err != nil { t.Fatal(err) } // Create the logger. logFilename := filepath.Join(testdir, "test.log") fl, err := NewFileLogger(logFilename) if err != nil { t.Fatal(err) } // Write an example statement, and then close the logger. fl.Println("TEST: this should get written to the logfile") err = fl.Close() if err != nil { t.Fatal(err) } // Check that data was written to the log file. There should be three // lines, one for startup, the example line, and one to close the logger. expectedSubstring := []string{"STARTUP", "TEST", "SHUTDOWN", ""} // file ends with a newline fileData, err := ioutil.ReadFile(logFilename) if err != nil { t.Fatal(err) } fileLines := strings.Split(string(fileData), "\n") for i, line := range fileLines { if !strings.Contains(string(line), expectedSubstring[i]) { t.Error("did not find the expected message in the logger") } } if len(fileLines) != 4 { // file ends with a newline t.Error("logger did not create the correct number of lines:", len(fileLines)) } } // TestLoggerCritical prints a critical message from the logger. func TestLoggerCritical(t *testing.T) { // Create a folder for the log file. testdir := build.TempDir(persistDir, t.Name()) err := os.MkdirAll(testdir, 0700) if err != nil { t.Fatal(err) } // Create the logger. 
logFilename := filepath.Join(testdir, "test.log") fl, err := NewFileLogger(logFilename) if err != nil { t.Fatal(err) } // Write a catch for a panic that should trigger when logger.Critical is // called. defer func() { r := recover() if r == nil { t.Error("critical message was not thrown in a panic") } // Close the file logger to clean up the test. err = fl.Close() if err != nil { t.Fatal(err) } }() fl.Critical("a critical message") } Sia-1.3.0/persist/persist.go000066400000000000000000000064271313565667000157640ustar00rootroot00000000000000package persist import ( "encoding/base32" "errors" "os" "path/filepath" "sync" "github.com/NebulousLabs/fastrand" ) const ( // persistDir defines the folder that is used for testing the persist // package. persistDir = "persist" // tempSuffix is the suffix that is applied to the temporary/backup versions // of the files being persisted. tempSuffix = "_temp" ) var ( // ErrBadFilenameSuffix indicates that SaveJSON or LoadJSON was called using // a filename that has a bad suffix. This prevents users from trying to use // this package to manage the temp files - this package will manage them // automatically. ErrBadFilenameSuffix = errors.New("filename suffix not allowed") // ErrBadHeader indicates that the file opened is not the file that was // expected. ErrBadHeader = errors.New("wrong header") // ErrBadVersion indicates that the version number of the file is not // compatible with the current codebase. ErrBadVersion = errors.New("incompatible version") // ErrFileInUse is returned if SaveJSON or LoadJSON is called on a file // that's already being manipulated in another thread by the persist // package. ErrFileInUse = errors.New("another thread is saving or loading this file") ) var ( // activeFiles is a map tracking which filenames are currently being used // for saving and loading. There should never be a situation where the same // file is being called twice from different threads, as the persist package // has no way to tell what order they were intended to be called. activeFiles = make(map[string]struct{}) activeFilesMu sync.Mutex ) // Metadata contains the header and version of the data being stored. type Metadata struct { Header, Version string } // RandomSuffix returns a 20 character base32 suffix for a filename. There are // 100 bits of entropy, and a very low probability of colliding with existing // files unintentionally. func RandomSuffix() string { str := base32.StdEncoding.EncodeToString(fastrand.Bytes(20)) return str[:20] } // A safeFile is a file that is stored under a temporary filename. When Commit // is called, the file is renamed to its "final" filename. This allows for // atomic updating of files; otherwise, an unexpected shutdown could leave a // valuable file in a corrupted state. Callers must still Close the file handle // as usual. type safeFile struct { *os.File finalName string } // CommitSync syncs the file, closes it, and then renames it to the intended // final filename. CommitSync should not be called from a defer if the // function it is being called from can return an error. func (sf *safeFile) CommitSync() error { if err := sf.Sync(); err != nil { return err } if err := sf.Close(); err != nil { return err } return os.Rename(sf.finalName+"_temp", sf.finalName) } // NewSafeFile returns a file that can atomically be written to disk, // minimizing the risk of corruption. 
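// An illustrative usage sketch (error handling elided; not part of the
// original docs):
//   sf, _ := persist.NewSafeFile("settings.dat") // writes go to settings.dat_temp
//   sf.Write(data)
//   sf.CommitSync() // syncs, closes, and renames settings.dat_temp to settings.dat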
func NewSafeFile(filename string) (*safeFile, error) { file, err := os.Create(filename + "_temp") if err != nil { return nil, err } // Get the absolute path of the filename so that calling os.Chdir in // between calling NewSafeFile and calling safeFile.Commit does not change // the final file path. absFilename, err := filepath.Abs(filename) if err != nil { return nil, err } return &safeFile{file, absFilename}, nil } Sia-1.3.0/persist/persist_test.go000066400000000000000000000065711313565667000170220ustar00rootroot00000000000000package persist import ( "bytes" "io/ioutil" "os" "path/filepath" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/fastrand" ) // TestIntegrationRandomSuffix checks that the random suffix creator creates // valid files. func TestIntegrationRandomSuffix(t *testing.T) { tmpDir := build.TempDir(persistDir, t.Name()) err := os.MkdirAll(tmpDir, 0700) if err != nil { t.Fatal(err) } for i := 0; i < 100; i++ { suffix := RandomSuffix() filename := filepath.Join(tmpDir, "test file - "+suffix+".nil") file, err := os.Create(filename) if err != nil { t.Fatal(err) } file.Close() } } // TestAbsolutePathSafeFile tests creating and committing safe files with // absolute paths. func TestAbsolutePathSafeFile(t *testing.T) { tmpDir := build.TempDir(persistDir, t.Name()) err := os.MkdirAll(tmpDir, 0700) if err != nil { t.Fatal(err) } absPath := filepath.Join(tmpDir, "test") // Create safe file. sf, err := NewSafeFile(absPath) if err != nil { t.Fatal(err) } defer sf.Close() // Check that the name of the file is not equal to the final name of the // file. if sf.Name() == absPath { t.Errorf("safeFile created with filename: %s has temporary filename that is equivalent to finalName: %s\n", absPath, sf.Name()) } // Write random data to the file and commit. data := fastrand.Bytes(10) _, err = sf.Write(data) if err != nil { t.Fatal(err) } err = sf.CommitSync() if err != nil { t.Fatal(err) } // Check that the file exists and has same data that was written to it. dataRead, err := ioutil.ReadFile(absPath) if err != nil { t.Fatal(err) } if !bytes.Equal(data, dataRead) { t.Fatalf("Committed file has different data than was written to it: expected %v, got %v\n", data, dataRead) } } // TestRelativePathSafeFile tests creating and committing safe files with // relative paths. Specifically, we test that calling os.Chdir between creating // and committing a safe file doesn't affect the safe file's final path. The // relative path tested is relative to the working directory. func TestRelativePathSafeFile(t *testing.T) { tmpDir := build.TempDir(persistDir, t.Name()) err := os.MkdirAll(tmpDir, 0700) if err != nil { t.Fatal(err) } absPath := filepath.Join(tmpDir, "test") wd, err := os.Getwd() if err != nil { t.Fatal(err) } relPath, err := filepath.Rel(wd, absPath) if err != nil { t.Fatal(err) } // Create safe file. sf, err := NewSafeFile(relPath) if err != nil { t.Fatal(err) } defer sf.Close() // Check that the path of the file is not equal to the final path of the // file. if sf.Name() == absPath { t.Errorf("safeFile created with filename: %s has temporary filename that is equivalent to finalName: %s\n", absPath, sf.Name()) } // Write random data to the file. data := fastrand.Bytes(10) _, err = sf.Write(data) if err != nil { t.Fatal(err) } // Change directories and commit. 
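// Note (added for clarity): NewSafeFile recorded the absolute final path, so
// the rename performed by CommitSync still targets the original location even
// after os.Chdir below.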
tmpChdir := build.TempDir(persistDir, t.Name()+"2") err = os.MkdirAll(tmpChdir, 0700) if err != nil { t.Fatal(err) } os.Chdir(tmpChdir) defer os.Chdir(wd) err = sf.CommitSync() if err != nil { t.Fatal(err) } // Check that the file exists and has same data that was written to it. dataRead, err := ioutil.ReadFile(absPath) if err != nil { t.Fatal(err) } if !bytes.Equal(data, dataRead) { t.Fatalf("Committed file has different data than was written to it: expected %v, got %v\n", data, dataRead) } } Sia-1.3.0/persist/testdata/000077500000000000000000000000001313565667000155455ustar00rootroot00000000000000Sia-1.3.0/persist/testdata/badchecksum.json000066400000000000000000000002221313565667000207050ustar00rootroot00000000000000"Test Struct" "v1.2.1" "1eb184b72ef88349b156f6b46fa7db280b19e1def01493a3ba510ab58527893d" { "One": "dog", "Two": 25, "Three": "bW9yZSBkb2c=" } Sia-1.3.0/persist/testdata/badchecksum.json_temp000066400000000000000000000002221313565667000217320ustar00rootroot00000000000000"Test Struct" "v1.2.1" "1eb184b72ef88349b156f6b46fa7db280b19e1def01493a3ba510ab58527893d" { "One": "dog", "Two": 25, "Three": "bW9yZSBkb2c=" } Sia-1.3.0/persist/testdata/badchecksummain.json000066400000000000000000000002221313565667000215520ustar00rootroot00000000000000"Test Struct" "v1.2.1" "1eb184b72ef88349b156f6b46fa7db280b19e1def01493a3ba510ab58527893d" { "One": "dog", "Two": 25, "Three": "bW9yZSBkb2c=" } Sia-1.3.0/persist/testdata/badchecksummain.json_temp000066400000000000000000000002211313565667000225760ustar00rootroot00000000000000"Test Struct" "v1.2.1" "0eb184b72ef88349b156f6b46fa7db280b19e1def01493a3ba510ab58527893d" { "One": "dog", "Two": 25, "Three": "bW9yZSBkb2c=" }Sia-1.3.0/persist/testdata/corruptmain.json000066400000000000000000000002221313565667000207770ustar00rootroot00000000000000"Test Struct" "v1.2.1" "0eb184b72ef88349b156f6b46fa7db280b19e1def01493a3ba510ab58527893d" { "One": "cog", "Two": 25, "Three": "bW9yZSBkb2c=" } Sia-1.3.0/persist/testdata/corruptmain.json_temp000066400000000000000000000002211313565667000220230ustar00rootroot00000000000000"Test Struct" "v1.2.1" "0eb184b72ef88349b156f6b46fa7db280b19e1def01493a3ba510ab58527893d" { "One": "dog", "Two": 25, "Three": "bW9yZSBkb2c=" }Sia-1.3.0/persist/testdata/corrupttemp.json000066400000000000000000000002211313565667000210170ustar00rootroot00000000000000"Test Struct" "v1.2.1" "0eb184b72ef88349b156f6b46fa7db280b19e1def01493a3ba510ab58527893d" { "One": "dog", "Two": 25, "Three": "bW9yZSBkb2c=" }Sia-1.3.0/persist/testdata/corrupttemp.json_temp000066400000000000000000000002221313565667000220450ustar00rootroot00000000000000"Test Struct" "v1.2.1" "0eb184b72ef88349b156f6b46fa7db280b19e1def01493a3ba510ab58527893d" { "One": "dog", "Rwo": 25, "Three": "bW9yZSBkb2c=" } Sia-1.3.0/persist/testdata/manual.json000066400000000000000000000001301313565667000177070ustar00rootroot00000000000000"Test Struct" "v1.2.1" "manual" { "One": "dog", "Two": 25, "Three": "bW9yZSBkb2c=" } Sia-1.3.0/persist/testdata/manual.json_temp000066400000000000000000000002211313565667000207350ustar00rootroot00000000000000"Test Struct" "v1.2.1" "0eb184b72ef88349b156f6b46fa7db280b19e1def01493a3ba510ab58527893d" { "One": "dog", "Two": 25, "Three": "bW9yZSBkb2c=" }Sia-1.3.0/persist/testdata/nochecksum.json000066400000000000000000000001171313565667000205760ustar00rootroot00000000000000"Test Struct" "v1.2.1" { "One": "dog", "Two": 25, "Three": "bW9yZSBkb2c=" } Sia-1.3.0/persist/testdata/std.json000066400000000000000000000002211313565667000172250ustar00rootroot00000000000000"Test Struct" 
"v1.2.1" "0eb184b72ef88349b156f6b46fa7db280b19e1def01493a3ba510ab58527893d" { "One": "dog", "Two": 25, "Three": "bW9yZSBkb2c=" }Sia-1.3.0/persist/testdata/std.json_temp000066400000000000000000000002211313565667000202520ustar00rootroot00000000000000"Test Struct" "v1.2.1" "0eb184b72ef88349b156f6b46fa7db280b19e1def01493a3ba510ab58527893d" { "One": "dog", "Two": 25, "Three": "bW9yZSBkb2c=" }Sia-1.3.0/profile/000077500000000000000000000000001313565667000137035ustar00rootroot00000000000000Sia-1.3.0/profile/profile.go000066400000000000000000000117441313565667000157010ustar00rootroot00000000000000package profile import ( "errors" "fmt" "os" "path/filepath" "runtime" "runtime/pprof" "runtime/trace" "sync" "time" "github.com/NebulousLabs/Sia/persist" ) // There's a global lock on cpu and memory profiling, because I'm not sure what // happens if multiple threads call each at the same time. This lock might be // unnecessary. var ( cpuActive bool cpuLock sync.Mutex memActive bool memLock sync.Mutex traceActive bool traceLock sync.Mutex ) // StartCPUProfile starts cpu profiling. An error will be returned if a cpu // profiler is already running. func StartCPUProfile(profileDir, identifier string) error { // Lock the cpu profile lock so that only one profiler is running at a // time. cpuLock.Lock() if cpuActive { cpuLock.Unlock() return errors.New("cannot start cpu profiler, a profiler is already running") } cpuActive = true cpuLock.Unlock() // Start profiling into the profile dir, using the identifer. The timestamp // of the start time of the profiling will be included in the filename. cpuProfileFile, err := os.Create(filepath.Join(profileDir, "cpu-profile-"+identifier+"-"+time.Now().Format(time.RFC3339Nano)+".prof")) if err != nil { return err } pprof.StartCPUProfile(cpuProfileFile) return nil } // StopCPUProfile stops cpu profiling. func StopCPUProfile() { cpuLock.Lock() if cpuActive { pprof.StopCPUProfile() cpuActive = false } cpuLock.Unlock() } // SaveMemProfile saves the current memory structure of the program. An error // will be returned if memory profiling is already in progress. Unlike for cpu // profiling, there is no 'stopMemProfile' call - everything happens at once. func SaveMemProfile(profileDir, identifier string) error { memLock.Lock() if memActive { memLock.Unlock() return errors.New("cannot start memory profiler, a memory profiler is already running") } memActive = true memLock.Unlock() // Save the memory profile. memFile, err := os.Create(filepath.Join(profileDir, "mem-profile-"+identifier+"-"+time.Now().Format(time.RFC3339Nano)+".prof")) if err != nil { return err } pprof.WriteHeapProfile(memFile) memLock.Lock() memActive = false memLock.Unlock() return nil } // StartTrace starts trace. An error will be returned if a trace // is already running. func StartTrace(traceDir, identifier string) error { // Lock the trace lock so that only one profiler is running at a // time. traceLock.Lock() if traceActive { traceLock.Unlock() return errors.New("cannot start trace, it is already running") } traceActive = true traceLock.Unlock() // Start trace into the trace dir, using the identifer. The timestamp // of the start time of the trace will be included in the filename. traceFile, err := os.Create(filepath.Join(traceDir, "trace-"+identifier+"-"+time.Now().Format(time.RFC3339Nano)+".trace")) if err != nil { return err } return trace.Start(traceFile) } // StopTrace stops trace. 
func StopTrace() { traceLock.Lock() if traceActive { trace.Stop() traceActive = false } traceLock.Unlock() } // startContinuousLog creates dir and saves inexpensive logs periodically. // It also runs the restart function periodically. func startContinuousLog(dir string, sleepCap time.Duration, restart func()) { // Create the folder for all of the profiling results. err := os.MkdirAll(dir, 0700) if err != nil { fmt.Println(err) return } // Continuously log statistics about the running Sia application. go func() { // Create the logger. log, err := persist.NewFileLogger(filepath.Join(dir, "continuousStats.log")) if err != nil { fmt.Println("Stats logging failed:", err) return } // Collect statistics in an infinite loop. sleepTime := time.Second * 20 for { // Sleep for an exponential amount of time each iteration, this // keeps the size of the log small while still providing lots of // information. restart() time.Sleep(sleepTime) sleepTime = time.Duration(1.5 * float64(sleepTime)) if sleepCap != 0*time.Second && sleepTime > sleepCap { sleepTime = sleepCap } var m runtime.MemStats runtime.ReadMemStats(&m) log.Printf("\n\tGoroutines: %v\n\tAlloc: %v\n\tTotalAlloc: %v\n\tHeapAlloc: %v\n\tHeapSys: %v\n", runtime.NumGoroutine(), m.Alloc, m.TotalAlloc, m.HeapAlloc, m.HeapSys) } }() } // StartContinuousProfile will continuously print statistics about the cpu // usage, memory usage, and runtime stats of the program, and run an execution // logger. Select one (recommended) or more functionalities by passing the // corresponding flag(s). func StartContinuousProfile(profileDir string, profileCPU bool, profileMem bool, profileTrace bool) { sleepCap := 0 * time.Second // Unlimited. if profileTrace { sleepCap = 10 * time.Minute } startContinuousLog(profileDir, sleepCap, func() { if profileCPU { StopCPUProfile() StartCPUProfile(profileDir, "continuousProfileCPU") } if profileMem { SaveMemProfile(profileDir, "continuousProfileMem") } if profileTrace { StopTrace() StartTrace(profileDir, "continuousProfileTrace") } }) } Sia-1.3.0/profile/server.go000066400000000000000000000002201313565667000155360ustar00rootroot00000000000000// +build profile package profile import ( "net/http" _ "net/http/pprof" ) func init() { go http.ListenAndServe("localhost:10501", nil) } Sia-1.3.0/profile/timing.go000066400000000000000000000016431313565667000155250ustar00rootroot00000000000000package profile import ( "fmt" "time" ) var ( uptime int64 times = make(map[string]int64) activeTimers = make(map[string]int64) ) // Uptime returns the number of milliseconds that have passed since the first // call to Uptime. func Uptime() int64 { if uptime == 0 { uptime = time.Now().UnixNano() return 0 } return (time.Now().UnixNano() - uptime) / 1e6 } // PrintTimes prints how much time has passed at each timer. func PrintTimes() string { s := "Printing Timers:\n" for name, time := range times { s += fmt.Sprintf("\t%v: %v\n", name, time/1e6) } return s } // ToggleTimer activates a timer known by a given string. If the timer does not // yet exist, it is created. 
func ToggleTimer(s string) { toggleTime, exists := activeTimers[s] if exists { times[s] = times[s] + (time.Now().UnixNano() - toggleTime) delete(activeTimers, s) } else { activeTimers[s] = time.Now().UnixNano() } } Sia-1.3.0/release.sh000077500000000000000000000025351313565667000142270ustar00rootroot00000000000000#!/bin/bash set -e # version and keys are supplied as arguments version="$1" keyfile="$2" pubkeyfile="$3" # optional if [[ -z $version || -z $keyfile ]]; then echo "Usage: $0 VERSION KEYFILE" exit 1 fi if [[ -z $pubkeyfile ]]; then echo "Warning: no public keyfile supplied. Binaries will not be verified." fi # check for keyfile before proceeding if [ ! -f $keyfile ]; then echo "Key file not found: $keyfile" exit 1 fi keysum=$(shasum -a 256 $keyfile | cut -c -64) if [ $keysum != "735320b4698010500d230c487e970e12776e88f33ad777ab380a493691dadb1b" ]; then echo "Wrong key file: checksum does not match developer key file." exit 1 fi for os in darwin linux windows; do echo Packaging ${os}... # create workspace folder=release/Sia-$version-$os-amd64 rm -rf $folder mkdir -p $folder # compile and sign binaries for pkg in siac siad; do bin=$pkg if [ "$os" == "windows" ]; then bin=${pkg}.exe fi GOOS=${os} go build -ldflags="-s -w" -o $folder/$bin ./$pkg openssl dgst -sha256 -sign $keyfile -out $folder/${bin}.sig $folder/$bin # verify signature if [[ -n $pubkeyfile ]]; then openssl dgst -sha256 -verify $pubkeyfile -signature $folder/${bin}.sig $folder/$bin fi done # add other artifacts cp -r doc LICENSE README.md $folder # zip ( cd release zip -rq Sia-$version-$os-amd64.zip Sia-$version-$os-amd64 ) done Sia-1.3.0/siac/000077500000000000000000000000001313565667000131625ustar00rootroot00000000000000Sia-1.3.0/siac/README.md000066400000000000000000000170671313565667000144520ustar00rootroot00000000000000Siac Usage ========== `siac` is the command line interface to Sia, for use by power users and those on headless servers. It comes as a part of the command line package, and can be run as `./siac` from the same folder, or just by calling `siac` if you move the binary into your path. Most of the following commands have online help. For example, executing `siac wallet send help` will list the arguments for that command, while `siac host help` will list the commands that can be called pertaining to hosting. `siac help` will list all of the top level command groups that can be used. You can change the address that siac points to using the `-a` flag. For example, `siac -a :9000 status` will display the status of the siad instance launched on the local machine with `siad -a :9000`. Common tasks ------------ * `siac consensus` view block height Wallet: * `siac wallet init [-p]` initialize a wallet * `siac wallet unlock` unlock a wallet * `siac wallet balance` retrieve wallet balance * `siac wallet address` get a wallet address * `siac wallet send [amount] [dest]` sends siacoin to an address Renter: * `siac renter list` list all renter files * `siac renter upload [filepath] [nickname]` upload a file * `siac renter download [nickname] [filepath]` download a file Full Descriptions ----------------- #### Wallet tasks * `siac wallet init [-p]` encrypts and initializes the wallet. If the `-p` flag is provided, an encryption password is requested from the user. Otherwise the initial seed is used as the encryption password. The wallet must be initialized and unlocked before any actions can be performed on the wallet. 
Examples:
```bash
user@hostname:~$ siac -a :9920 wallet init
Seed is:
 cider sailor incur sober feast unhappy mundane sadness hinder aglow imitate amaze duties arrow gigantic uttered inflamed girth myriad jittery hexagon nail lush reef sushi pastry southern inkling acquire

Wallet encrypted with password: cider sailor incur sober feast unhappy mundane sadness hinder aglow imitate amaze duties arrow gigantic uttered inflamed girth myriad jittery hexagon nail lush reef sushi pastry southern inkling acquire
```

```bash
user@hostname:~$ siac -a :9920 wallet init -p
Wallet password:
Seed is:
 potato haunted fuming lordship library vane fever powder zippers fabrics dexterity hoisting emails pebbles each vampire rockets irony summon sailor lemon vipers foxes oneself glide cylinder vehicle mews acoustic

Wallet encrypted with given password
```

* `siac wallet unlock` prompts the user for the encryption password to the
wallet, supplied by the `init` command. The wallet must be initialized and
unlocked before any actions can take place.

* `siac wallet balance` prints information about your wallet. Example:
```bash
user@hostname:~$ siac wallet balance
Wallet status:
Encrypted, Unlocked
Confirmed Balance:   61516458.00 SC
Unconfirmed Balance: 64516461.00 SC
Exact:               61516457999999999999999999999999 H
```

* `siac wallet address` returns a new, never-before-seen address for sending
siacoins to.

* `siac wallet send [amount] [dest]` sends `amount` siacoins to `dest`.
`amount` is in the form XXXXUU where an X is a number and U is a unit, for
example MS, SC, mS, pS, etc. If no unit is given, hastings are assumed.
`dest` must be a valid siacoin address.

* `siac wallet lock` locks a wallet. After calling, the wallet must be
unlocked using the encryption password in order to use it further.

* `siac wallet seeds` returns the list of secret seeds in use by the wallet.
These can be used to regenerate the wallet.

* `siac wallet addseed` prompts the user for their encryption password, as
well as a new secret seed. The wallet will then incorporate this seed into
itself. This can be used for wallet recovery and merging.

#### Host tasks
* `siac host config [setting] [value]` is used to configure hosting.

In version `1.2.2`, Sia hosting is configured as follows:

| Setting                   | Value                                            |
| --------------------------|--------------------------------------------------|
| acceptingcontracts        | Yes or No                                        |
| maxduration               | in weeks, at least 12                            |
| collateral                | in SC / TB / Month, 10-1000                      |
| collateralbudget          | in SC                                            |
| maxcollateral             | in SC, max per contract                          |
| mincontractprice          | minimum price in SC per contract                 |
| mindownloadbandwidthprice | in SC / TB                                       |
| minstorageprice           | in SC / TB                                       |
| minuploadbandwidthprice   | in SC / TB                                       |

You can call this many times to configure your host before announcing.
Alternatively, you can manually adjust these parameters inside the
`host/config.json` file.

* `siac host announce` makes a host announcement. You may optionally supply
a specific address to be announced; this allows you to announce a domain
name. Announcing a second time after changing settings is not necessary, as
the announcement only contains enough information to reach your host.

* `siac host -v` outputs some of your hosting settings. Example:
```bash
user@hostname:~$ siac host -v
Host settings:
Storage:      2.0000 TB (1.524 GB used)
Price:        0.000 SC per GB per month
Collateral:   0
Max Filesize: 10000000000
Max Duration: 8640
Contracts:    32
```

* `siac hostdb -v` prints a list of all the known active hosts on the
network.
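For example, a typical sequence for bringing a new host online might look like
the following (the folder path, size, and prices are illustrative only; choose
values appropriate to your own setup):
```bash
# add storage, set prices, then announce
siac host folder add /mnt/sia-storage 2TB
siac host config minstorageprice 50SC
siac host config collateral 100SC
siac host config acceptingcontracts true
siac host announce
```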
#### Renter tasks
* `siac renter upload [filename] [nickname]` uploads a file to the sia
network. `filename` is the path to the file you want to upload, and
`nickname` is what you will use to refer to that file in the network. For
example, it is common to have the nickname be the same as the filename.

* `siac renter list` displays a list of your uploaded files currently on the
sia network by nickname, and their filesizes.

* `siac renter download [nickname] [destination]` downloads a file from the
sia network onto your computer. `nickname` is the name used to refer to your
file in the sia network, and `destination` is the path where the file will
be saved. If a file already exists there, it will be overwritten.

* `siac renter rename [nickname] [newname]` changes the nickname of a file.

* `siac renter delete [nickname]` removes a file from your list of stored
files. This does not remove it from the network, but only from your saved
list.

* `siac renter queue` shows the download queue. This is only relevant if you
have multiple downloads happening simultaneously.

#### Gateway tasks
* `siac gateway` prints info about the gateway, including its address and how
many peers it's connected to.

* `siac gateway list` prints a list of all currently connected peers.

* `siac gateway connect [address:port]` manually connects to a peer and adds
it to the gateway's node list.

* `siac gateway disconnect [address:port]` manually disconnects from a peer,
but leaves it in the gateway's node list.

#### Miner tasks
* `siac miner status` returns information about the miner. It is only valid
when siad is running.

* `siac miner start` starts running the CPU miner on one thread. This is
virtually useless outside of debugging.

* `siac miner stop` halts the CPU miner.

#### General commands
* `siac consensus` prints the current block ID, current block height, and
current target.

* `siac stop` sends the stop signal to siad to safely terminate. This has
the same effect as Ctrl-C on the terminal.

* `siac version` displays the version string of siac.

* `siac update` checks the server for updates.
Sia-1.3.0/siac/bashcomplcmd.go000066400000000000000000000011631313565667000161460ustar00rootroot00000000000000package main

import "github.com/spf13/cobra"

var (
	bashcomplCmd = &cobra.Command{
		Use:   "bash-completion [path]",
		Short: "Creates bash completion file.",
		Long: "Creates a bash completion file at the specified " +
			"location.\n\n" +
			"Note: Bash completions will only work with the " +
			"prefix with which the script is created (e.g. " +
			"`./siac` or `siac`).\n\n" +
			"Once created, the file has to be moved to the bash " +
			"completion script folder - usually " +
			"`/etc/bash_completion.d/`.",
		Run: wrap(bashcomplcmd),
	}
)

func bashcomplcmd(path string) {
	rootCmd.GenBashCompletionFile(path)
}
Sia-1.3.0/siac/consensuscmd.go000066400000000000000000000032771313565667000162260ustar00rootroot00000000000000package main

import (
	"fmt"
	"time"

	"github.com/spf13/cobra"

	"github.com/NebulousLabs/Sia/api"
	"github.com/NebulousLabs/Sia/types"
)

var (
	consensusCmd = &cobra.Command{
		Use:   "consensus",
		Short: "Print the current state of consensus",
		Long:  "Print the current state of consensus such as current block, block height, and target.",
		Run:   wrap(consensuscmd),
	}
)

// consensuscmd is the handler for the command `siac consensus`.
// Prints the current state of consensus.
func consensuscmd() {
	var cg api.ConsensusGET
	err := getAPI("/consensus", &cg)
	if err != nil {
		die("Could not get current consensus state:", err)
	}
	if cg.Synced {
		fmt.Printf(`Synced: %v
Block: %v
Height: %v
Target: %v
Difficulty: %v
`, yesNo(cg.Synced), cg.CurrentBlock, cg.Height, cg.Target, cg.Difficulty)
	} else {
		estimatedHeight := estimatedHeightAt(time.Now())
		estimatedProgress := float64(cg.Height) / float64(estimatedHeight) * 100
		if estimatedProgress > 100 {
			estimatedProgress = 100
		}
		fmt.Printf(`Synced: %v
Height: %v
Progress (estimated): %.1f%%
`, yesNo(cg.Synced), cg.Height, estimatedProgress)
	}
}

// estimatedHeightAt returns the estimated block height for the given time.
// Block height is estimated by calculating the minutes since a known block in
// the past and dividing by the block time. A slightly low block-time estimate
// is used so that the height is deliberately overestimated.
func estimatedHeightAt(t time.Time) types.BlockHeight {
	block100kTimestamp := time.Date(2017, time.April, 13, 23, 29, 49, 0, time.UTC)
	blockTime := float64(9) // slightly under the 10-minute block time, to overestimate height for better UX
	diff := t.Sub(block100kTimestamp)
	estimatedHeight := 100e3 + (diff.Minutes() / blockTime)
	return types.BlockHeight(estimatedHeight + 0.5) // round to the nearest block
}
Sia-1.3.0/siac/consensuscmd_test.go000066400000000000000000000021111313565667000172500ustar00rootroot00000000000000package main

import (
	"testing"
	"time"

	"github.com/NebulousLabs/Sia/types"
)

// TestEstimatedHeightAt tests that the estimatedHeightAt function correctly
// estimates the block height (and rounds to the nearest block).
func TestEstimatedHeightAt(t *testing.T) {
	tests := []struct {
		t              time.Time
		expectedHeight types.BlockHeight
	}{
		// Test on the same block that is used to estimate the height
		{
			time.Date(2017, time.April, 13, 23, 29, 49, 0, time.UTC),
			100e3,
		},
		// 4 minutes later
		{
			time.Date(2017, time.April, 13, 23, 33, 49, 0, time.UTC),
			100e3,
		},
		// 5 minutes later
		{
			time.Date(2017, time.April, 13, 23, 34, 49, 0, time.UTC),
			100e3 + 1,
		},
		// 15 minutes later
		{
			time.Date(2017, time.April, 13, 23, 44, 49, 0, time.UTC),
			100e3 + 2,
		},
		// 1 day later
		{
			time.Date(2017, time.April, 14, 23, 29, 49, 0, time.UTC),
			100e3 + 160,
		},
	}
	for _, tt := range tests {
		h := estimatedHeightAt(tt.t)
		if h != tt.expectedHeight {
			t.Errorf("expected an estimated height of %v, but got %v", tt.expectedHeight, h)
		}
	}
}
Sia-1.3.0/siac/daemoncmd.go000066400000000000000000000042121313565667000154370ustar00rootroot00000000000000package main

import (
	"fmt"

	"github.com/NebulousLabs/Sia/build"
	"github.com/spf13/cobra"
)

var (
	stopCmd = &cobra.Command{
		Use:   "stop",
		Short: "Stop the Sia daemon",
		Long:  "Stop the Sia daemon.",
		Run:   wrap(stopcmd),
	}

	updateCmd = &cobra.Command{
		Use:   "update",
		Short: "Update Sia",
		Long:  "Check for (and/or download) available updates for Sia.",
		Run:   wrap(updatecmd),
	}

	updateCheckCmd = &cobra.Command{
		Use:   "check",
		Short: "Check for available updates",
		Long:  "Check for available updates.",
		Run:   wrap(updatecheckcmd),
	}

	versionCmd = &cobra.Command{
		Use:   "version",
		Short: "Print version information",
		Long:  "Print version information.",
		Run:   wrap(versioncmd),
	}
)

type updateInfo struct {
	Available bool   `json:"available"`
	Version   string `json:"version"`
}

type daemonVersion struct {
	Version string
}

// versioncmd prints the version of siac and siad.
func versioncmd() { fmt.Println("Sia Client v" + build.Version) var versioninfo daemonVersion err := getAPI("/daemon/version", &versioninfo) if err != nil { fmt.Println("Could not get daemon version:", err) return } fmt.Println("Sia Daemon v" + versioninfo.Version) } // stopcmd is the handler for the command `siac stop`. // Stops the daemon. func stopcmd() { err := get("/daemon/stop") if err != nil { die("Could not stop daemon:", err) } fmt.Println("Sia daemon stopped.") } func updatecmd() { var update updateInfo err := getAPI("/daemon/update", &update) if err != nil { fmt.Println("Could not check for update:", err) return } if !update.Available { fmt.Println("Already up to date.") return } err = post("/daemon/update", "") if err != nil { fmt.Println("Could not apply update:", err) return } fmt.Printf("Updated to version %s! Restart siad now.\n", update.Version) } func updatecheckcmd() { var update updateInfo err := getAPI("/daemon/update", &update) if err != nil { fmt.Println("Could not check for update:", err) return } if update.Available { fmt.Printf("A new release (v%s) is available! Run 'siac update' to install it.\n", update.Version) } else { fmt.Println("Up to date.") } } Sia-1.3.0/siac/export.go000066400000000000000000000027461313565667000150430ustar00rootroot00000000000000package main import ( "encoding/json" "fmt" "os" "github.com/NebulousLabs/Sia/api" "github.com/NebulousLabs/Sia/types" "github.com/spf13/cobra" ) var ( renterExportCmd = &cobra.Command{ Use: "export", Short: "export renter data to various formats", Long: "Export renter data in various formats.", // Run field not provided; export requires a subcommand. } renterExportContractTxnsCmd = &cobra.Command{ Use: "contract-txns [destination]", Short: "export the renter's contracts for import to `https://rankings.sia.tech/`", Long: "Export the renter's current contract set in JSON format to the specified " + "file. Intended for upload to `https://rankings.sia.tech/`.", Run: wrap(renterexportcontracttxnscmd), } ) // renterexportcontracttxnscmd is the handler for the command `siac renter export contract-txns`. // Exports the current contract set to JSON. func renterexportcontracttxnscmd(destination string) { var cs api.RenterContracts err := getAPI("/renter/contracts", &cs) if err != nil { die("Could not retrieve contracts:", err) } var contractTxns []types.Transaction for _, c := range cs.Contracts { contractTxns = append(contractTxns, c.LastTransaction) } destination = abs(destination) file, err := os.Create(destination) if err != nil { die("Could not export to file:", err) } err = json.NewEncoder(file).Encode(contractTxns) if err != nil { die("Could not export to file:", err) } fmt.Println("Exported contract data to", destination) } Sia-1.3.0/siac/gatewaycmd.go000066400000000000000000000056331313565667000156450ustar00rootroot00000000000000package main import ( "fmt" "os" "text/tabwriter" "github.com/spf13/cobra" "github.com/NebulousLabs/Sia/api" ) var ( gatewayCmd = &cobra.Command{ Use: "gateway", Short: "Perform gateway actions", Long: "View and manage the gateway's connected peers.", Run: wrap(gatewaycmd), } gatewayConnectCmd = &cobra.Command{ Use: "connect [address]", Short: "Connect to a peer", Long: "Connect to a peer and add it to the node list.", Run: wrap(gatewayconnectcmd), } gatewayDisconnectCmd = &cobra.Command{ Use: "disconnect [address]", Short: "Disconnect from a peer", Long: "Disconnect from a peer. 
Does not remove the peer from the node list.",
		Run: wrap(gatewaydisconnectcmd),
	}

	gatewayAddressCmd = &cobra.Command{
		Use:   "address",
		Short: "Print the gateway address",
		Long:  "Print the network address of the gateway.",
		Run:   wrap(gatewayaddresscmd),
	}

	gatewayListCmd = &cobra.Command{
		Use:   "list",
		Short: "View a list of peers",
		Long:  "View the current peer list.",
		Run:   wrap(gatewaylistcmd),
	}
)

// gatewayconnectcmd is the handler for the command `siac gateway connect [address]`.
// Adds a new peer to the peer list.
func gatewayconnectcmd(addr string) {
	err := post("/gateway/connect/"+addr, "")
	if err != nil {
		die("Could not add peer:", err)
	}
	fmt.Println("Added", addr, "to peer list.")
}

// gatewaydisconnectcmd is the handler for the command `siac gateway disconnect [address]`.
// Removes a peer from the peer list.
func gatewaydisconnectcmd(addr string) {
	err := post("/gateway/disconnect/"+addr, "")
	if err != nil {
		die("Could not remove peer:", err)
	}
	fmt.Println("Removed", addr, "from peer list.")
}

// gatewayaddresscmd is the handler for the command `siac gateway address`.
// Prints the gateway's network address.
func gatewayaddresscmd() {
	var info api.GatewayGET
	err := getAPI("/gateway", &info)
	if err != nil {
		die("Could not get gateway address:", err)
	}
	fmt.Println("Address:", info.NetAddress)
}

// gatewaycmd is the handler for the command `siac gateway`.
// Prints the gateway's network address and number of peers.
func gatewaycmd() {
	var info api.GatewayGET
	err := getAPI("/gateway", &info)
	if err != nil {
		die("Could not get gateway address:", err)
	}
	fmt.Println("Address:", info.NetAddress)
	fmt.Println("Active peers:", len(info.Peers))
}

// gatewaylistcmd is the handler for the command `siac gateway list`.
// Prints a list of all peers.
func gatewaylistcmd() {
	var info api.GatewayGET
	err := getAPI("/gateway", &info)
	if err != nil {
		die("Could not get peer list:", err)
	}
	if len(info.Peers) == 0 {
		fmt.Println("No peers to show.")
		return
	}
	fmt.Println(len(info.Peers), "active peers:")
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
	fmt.Fprintln(w, "Version\tOutbound\tAddress")
	for _, peer := range info.Peers {
		fmt.Fprintf(w, "%v\t%v\t%v\n", peer.Version, yesNo(!peer.Inbound), peer.NetAddress)
	}
	w.Flush()
}
Sia-1.3.0/siac/hostcmd.go000066400000000000000000000326111313565667000151550ustar00rootroot00000000000000package main

import (
	"fmt"
	"math/big"
	"os"
	"sort"
	"strings"
	"text/tabwriter"

	"github.com/NebulousLabs/Sia/api"
	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/types"

	"github.com/spf13/cobra"
)

var (
	hostCmd = &cobra.Command{
		Use:   "host",
		Short: "Perform host actions",
		Long:  "View or modify host settings.",
		Run:   wrap(hostcmd),
	}

	hostConfigCmd = &cobra.Command{
		Use:   "config [setting] [value]",
		Short: "Modify host settings",
		Long: `Modify host settings.

Available settings:
	acceptingcontracts:        boolean
	maxduration:               blocks
	maxdownloadbatchsize:      bytes
	maxrevisebatchsize:        bytes
	netaddress:                string
	windowsize:                blocks

	collateral:                currency
	collateralbudget:          currency
	maxcollateral:             currency

	mincontractprice:          currency
	mindownloadbandwidthprice: currency / TB
	minstorageprice:           currency / TB / Month
	minuploadbandwidthprice:   currency / TB

Currency units can be specified, e.g. 10SC; run 'siac help wallet' for details.

Durations (maxduration and windowsize) must be specified in either blocks (b),
hours (h), days (d), or weeks (w). A block is approximately 10 minutes, so one
hour is six blocks, a day is 144 blocks, and a week is 1008 blocks.
For a description of each parameter, see doc/API.md. To configure the host to accept new contracts, set acceptingcontracts to true: siac host config acceptingcontracts true `, Run: wrap(hostconfigcmd), } hostAnnounceCmd = &cobra.Command{ Use: "announce", Short: "Announce yourself as a host", Long: `Announce yourself as a host on the network. Announcing will also configure the host to start accepting contracts. You can revert this by running: siac host config acceptingcontracts false You may also supply a specific address to be announced, e.g.: siac host announce my-host-domain.com:9001 Doing so will override the standard connectivity checks.`, Run: hostannouncecmd, } hostFolderCmd = &cobra.Command{ Use: "folder", Short: "Add, remove, or resize a storage folder", Long: "Add, remove, or resize a storage folder.", } hostFolderAddCmd = &cobra.Command{ Use: "add [path] [size]", Short: "Add a storage folder to the host", Long: "Add a storage folder to the host, specifying how much data it should store", Run: wrap(hostfolderaddcmd), } hostFolderRemoveCmd = &cobra.Command{ Use: "remove [path]", Short: "Remove a storage folder from the host", Long: `Remove a storage folder from the host. Note that this does not delete any data; it will instead be distributed across the remaining storage folders.`, Run: wrap(hostfolderremovecmd), } hostFolderResizeCmd = &cobra.Command{ Use: "resize [path] [size]", Short: "Resize a storage folder", Long: `Change how much data a storage folder should store. If the new size is less than what the folder is currently storing, data will be distributed across the other storage folders.`, Run: wrap(hostfolderresizecmd), } hostSectorCmd = &cobra.Command{ Use: "sector", Short: "Add or delete a sector (add not supported)", Long: `Add or delete a sector. Adding is not currently supported. Note that deleting a sector may impact host revenue.`, } hostSectorDeleteCmd = &cobra.Command{ Use: "delete [root]", Short: "Delete a sector", Long: `Delete a sector, identified by its Merkle root. Note that deleting a sector may impact host revenue.`, Run: wrap(hostsectordeletecmd), } ) // hostcmd is the handler for the command `siac host`. // Prints info about the host and its storage folders. func hostcmd() { hg := new(api.HostGET) err := getAPI("/host", hg) if err != nil { die("Could not fetch host settings:", err) } sg := new(api.StorageGET) err = getAPI("/host/storage", sg) if err != nil { die("Could not fetch storage info:", err) } es := hg.ExternalSettings fm := hg.FinancialMetrics is := hg.InternalSettings nm := hg.NetworkMetrics // calculate total storage available and remaining var totalstorage, storageremaining uint64 for _, folder := range sg.Folders { totalstorage += folder.Capacity storageremaining += folder.CapacityRemaining } // convert price from bytes/block to TB/Month price := currencyUnits(is.MinStoragePrice.Mul(modules.BlockBytesPerMonthTerabyte)) // calculate total revenue totalRevenue := fm.ContractCompensation. Add(fm.StorageRevenue). Add(fm.DownloadBandwidthRevenue). Add(fm.UploadBandwidthRevenue) totalPotentialRevenue := fm.PotentialContractCompensation. Add(fm.PotentialStorageRevenue). Add(fm.PotentialDownloadBandwidthRevenue). Add(fm.PotentialUploadBandwidthRevenue) // determine the display method for the net address. 
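// es.NetAddress is the address the host is actually using; is.NetAddress is
// non-empty only when the operator manually specified an address.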
netaddr := es.NetAddress
	if is.NetAddress == "" {
		netaddr += " (automatically determined)"
	} else {
		netaddr += " (manually specified)"
	}

	var connectabilityString string
	if hg.WorkingStatus == "working" {
		connectabilityString = "Host appears to be working."
	} else if hg.WorkingStatus == "not working" && hg.ConnectabilityStatus == "connectable" {
		connectabilityString = "Nobody is connecting to host. Try re-announcing."
	} else if hg.WorkingStatus == "checking" || hg.ConnectabilityStatus == "checking" {
		connectabilityString = "Host is checking status (takes a few minutes)."
	} else {
		connectabilityString = "Host is not connectable (re-checks every few minutes)."
	}

	if hostVerbose {
		fmt.Printf(`General Info:
	Connectability Status: %v

Host Internal Settings:
	acceptingcontracts:        %v
	maxduration:               %v Weeks
	maxdownloadbatchsize:      %v
	maxrevisebatchsize:        %v
	netaddress:                %v
	windowsize:                %v Hours
	collateral:                %v / TB / Month
	collateralbudget:          %v
	maxcollateral:             %v Per Contract
	mincontractprice:          %v
	mindownloadbandwidthprice: %v / TB
	minstorageprice:           %v / TB / Month
	minuploadbandwidthprice:   %v / TB

Host Financials:
	Contract Count:               %v
	Transaction Fee Compensation: %v
	Potential Fee Compensation:   %v
	Transaction Fee Expenses:     %v

	Storage Revenue:           %v
	Potential Storage Revenue: %v

	Locked Collateral: %v
	Risked Collateral: %v
	Lost Collateral:   %v

	Download Revenue:           %v
	Potential Download Revenue: %v
	Upload Revenue:             %v
	Potential Upload Revenue:   %v

RPC Stats:
	Error Calls:        %v
	Unrecognized Calls: %v
	Download Calls:     %v
	Renew Calls:        %v
	Revise Calls:       %v
	Settings Calls:     %v
	FormContract Calls: %v
`, connectabilityString, yesNo(is.AcceptingContracts), periodUnits(is.MaxDuration),
			filesizeUnits(int64(is.MaxDownloadBatchSize)),
			filesizeUnits(int64(is.MaxReviseBatchSize)), netaddr, is.WindowSize/6,
			currencyUnits(is.Collateral.Mul(modules.BlockBytesPerMonthTerabyte)),
			currencyUnits(is.CollateralBudget),
			currencyUnits(is.MaxCollateral),
			currencyUnits(is.MinContractPrice),
			currencyUnits(is.MinDownloadBandwidthPrice.Mul(modules.BytesPerTerabyte)),
			currencyUnits(is.MinStoragePrice.Mul(modules.BlockBytesPerMonthTerabyte)),
			currencyUnits(is.MinUploadBandwidthPrice.Mul(modules.BytesPerTerabyte)),
			fm.ContractCount, currencyUnits(fm.ContractCompensation),
			currencyUnits(fm.PotentialContractCompensation),
			currencyUnits(fm.TransactionFeeExpenses),
			currencyUnits(fm.StorageRevenue),
			currencyUnits(fm.PotentialStorageRevenue),
			currencyUnits(fm.LockedStorageCollateral),
			currencyUnits(fm.RiskedStorageCollateral),
			currencyUnits(fm.LostStorageCollateral),
			currencyUnits(fm.DownloadBandwidthRevenue),
			currencyUnits(fm.PotentialDownloadBandwidthRevenue),
			currencyUnits(fm.UploadBandwidthRevenue),
			currencyUnits(fm.PotentialUploadBandwidthRevenue),
			nm.ErrorCalls, nm.UnrecognizedCalls, nm.DownloadCalls,
			nm.RenewCalls, nm.ReviseCalls, nm.SettingsCalls,
			nm.FormContractCalls)
	} else {
		fmt.Printf(`Host info:
	Connectability Status: %v

	Storage:      %v (%v used)
	Price:        %v / TB / Month
	Max Duration: %v Weeks

	Accepting Contracts:  %v
	Anticipated Revenue:  %v
	Locked Collateral:    %v
	Revenue:              %v
`, connectabilityString, filesizeUnits(int64(totalstorage)),
			filesizeUnits(int64(totalstorage-storageremaining)), price,
			periodUnits(is.MaxDuration), yesNo(is.AcceptingContracts),
			currencyUnits(totalPotentialRevenue),
			currencyUnits(fm.LockedStorageCollateral),
			currencyUnits(totalRevenue))
	}

	fmt.Println("\nStorage Folders:")

	// display storage folder info
	sort.Slice(sg.Folders, func(i, j int) bool {
		return sg.Folders[i].Path < sg.Folders[j].Path
	})
	if len(sg.Folders) == 0
{ fmt.Println("No storage folders configured") return } w := tabwriter.NewWriter(os.Stdout, 0, 0, 4, ' ', 0) fmt.Fprintf(w, "\tUsed\tCapacity\t%% Used\tPath\n") for _, folder := range sg.Folders { curSize := int64(folder.Capacity - folder.CapacityRemaining) pctUsed := 100 * (float64(curSize) / float64(folder.Capacity)) fmt.Fprintf(w, "\t%s\t%s\t%.2f\t%s\n", filesizeUnits(curSize), filesizeUnits(int64(folder.Capacity)), pctUsed, folder.Path) } w.Flush() } // hostconfigcmd is the handler for the command `siac host config [setting] [value]`. // Modifies host settings. func hostconfigcmd(param, value string) { var err error switch param { // currency (convert to hastings) case "collateralbudget", "maxcollateral", "mincontractprice": value, err = parseCurrency(value) if err != nil { die("Could not parse "+param+":", err) } // currency/TB (convert to hastings/byte) case "mindownloadbandwidthprice", "minuploadbandwidthprice": hastings, err := parseCurrency(value) if err != nil { die("Could not parse "+param+":", err) } i, _ := new(big.Int).SetString(hastings, 10) c := types.NewCurrency(i).Div(modules.BytesPerTerabyte) value = c.String() // currency/TB/month (convert to hastings/byte/block) case "collateral", "minstorageprice": hastings, err := parseCurrency(value) if err != nil { die("Could not parse "+param+":", err) } i, _ := new(big.Int).SetString(hastings, 10) c := types.NewCurrency(i).Div(modules.BlockBytesPerMonthTerabyte) value = c.String() // bool (allow "yes" and "no") case "acceptingcontracts": switch strings.ToLower(value) { case "yes": value = "true" case "no": value = "false" } // duration (convert to blocks) case "maxduration", "windowsize": value, err = parsePeriod(value) if err != nil { die("Could not parse "+param+":", err) } // other valid settings case "maxdownloadbatchsize", "maxrevisebatchsize", "netaddress": // invalid settings default: die("\"" + param + "\" is not a host setting") } err = post("/host", param+"="+value) if err != nil { die("Could not update host settings:", err) } var eg api.HostEstimateScoreGET err = getAPI(fmt.Sprintf("/host/estimatescore?%v=%v", param, value), &eg) if err != nil { die("could not get host score estimate:", err) } fmt.Printf("Estimated conversion rate: %v%%\n", eg.ConversionRate) fmt.Println("Host settings updated.") } // hostannouncecmd is the handler for the command `siac host announce`. // Announces yourself as a host to the network. Optionally takes an address to // announce as. func hostannouncecmd(cmd *cobra.Command, args []string) { var err error switch len(args) { case 0: err = post("/host/announce", "") case 1: err = post("/host/announce", "netaddress="+args[0]) default: cmd.UsageFunc()(cmd) os.Exit(exitCodeUsage) } if err != nil { die("Could not announce host:", err) } fmt.Println("Host announcement submitted to network.") // start accepting contracts err = post("/host", "acceptingcontracts=true") if err != nil { die("Could not configure host to accept contracts:", err) } fmt.Println(` The host has also been configured to accept contracts. To revert this, run: siac host config acceptingcontracts false `) } // hostfolderaddcmd adds a folder to the host. 
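// The requested size is rounded down to a multiple of 64 sectors (256 MiB,
// per the rounding comment below). For example (illustrative arithmetic
// only), a request of 1TB (1e12 bytes) is stored as
// 3725 * 268435456 = 999922073600 bytes.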
func hostfolderaddcmd(path, size string) { size, err := parseFilesize(size) if err != nil { die("Could not parse size:", err) } // round size down to nearest multiple of 256MiB var sizeUint64 uint64 fmt.Sscan(size, &sizeUint64) sizeUint64 /= 64 * modules.SectorSize sizeUint64 *= 64 * modules.SectorSize size = fmt.Sprint(sizeUint64) err = post("/host/storage/folders/add", fmt.Sprintf("path=%s&size=%s", abs(path), size)) if err != nil { die("Could not add folder:", err) } fmt.Println("Added folder", path) } // hostfolderremovecmd removes a folder from the host. func hostfolderremovecmd(path string) { err := post("/host/storage/folders/remove", "path="+abs(path)) if err != nil { die("Could not remove folder:", err) } fmt.Println("Removed folder", path) } // hostfolderresizecmd resizes a folder in the host. func hostfolderresizecmd(path, newsize string) { newsize, err := parseFilesize(newsize) if err != nil { die("Could not parse size:", err) } // round size down to nearest multiple of 256MiB var sizeUint64 uint64 fmt.Sscan(newsize, &sizeUint64) sizeUint64 /= 64 * modules.SectorSize sizeUint64 *= 64 * modules.SectorSize newsize = fmt.Sprint(sizeUint64) err = post("/host/storage/folders/resize", fmt.Sprintf("path=%s&newsize=%s", abs(path), newsize)) if err != nil { die("Could not resize folder:", err) } fmt.Printf("Resized folder %v to %v\n", path, newsize) } // hostsectordeletecmd deletes a sector from the host. func hostsectordeletecmd(root string) { err := post("/host/storage/sectors/delete/"+root, "") if err != nil { die("Could not delete sector:", err) } fmt.Println("Deleted sector", root) } Sia-1.3.0/siac/hostdbcmd.go000066400000000000000000000275541313565667000154750ustar00rootroot00000000000000package main import ( "fmt" "math/big" "os" "text/tabwriter" "github.com/spf13/cobra" "github.com/NebulousLabs/Sia/api" "github.com/NebulousLabs/Sia/modules" ) const scanHistoryLen = 30 var ( hostdbNumHosts int hostdbVerbose bool ) var ( hostdbCmd = &cobra.Command{ Use: "hostdb", Short: "Interact with the renter's host database.", Long: "View the list of active hosts, the list of all hosts, or query specific hosts.\nIf the '-v' flag is set, a list of recent scans will be provided, with the most\nrecent scan on the right. a '0' indicates that the host was offline, and a '1'\nindicates that the host was online.", Run: wrap(hostdbcmd), } hostdbViewCmd = &cobra.Command{ Use: "view [pubkey]", Short: "View the full information for a host.", Long: "View detailed information about a host, including things like a score breakdown.", Run: wrap(hostdbviewcmd), } ) // printScoreBreakdown prints the score breakdown of a host, provided the info. 
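// Note that the price adjustment is scaled by 1e6 before printing, so the
// displayed Price figure is not directly comparable to the other adjustments.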
func printScoreBreakdown(info *api.HostdbHostsGET) {
	fmt.Println("\n  Score Breakdown:")
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
	fmt.Fprintf(w, "\t\tAge:\t %.3f\n", info.ScoreBreakdown.AgeAdjustment)
	fmt.Fprintf(w, "\t\tBurn:\t %.3f\n", info.ScoreBreakdown.BurnAdjustment)
	fmt.Fprintf(w, "\t\tCollateral:\t %.3f\n", info.ScoreBreakdown.CollateralAdjustment)
	fmt.Fprintf(w, "\t\tInteraction:\t %.3f\n", info.ScoreBreakdown.InteractionAdjustment)
	fmt.Fprintf(w, "\t\tPrice:\t %.3f\n", info.ScoreBreakdown.PriceAdjustment*1e6)
	fmt.Fprintf(w, "\t\tStorage:\t %.3f\n", info.ScoreBreakdown.StorageRemainingAdjustment)
	fmt.Fprintf(w, "\t\tUptime:\t %.3f\n", info.ScoreBreakdown.UptimeAdjustment)
	fmt.Fprintf(w, "\t\tVersion:\t %.3f\n", info.ScoreBreakdown.VersionAdjustment)
	w.Flush()
}

func hostdbcmd() {
	if !hostdbVerbose {
		info := new(api.HostdbActiveGET)
		err := getAPI("/hostdb/active", info)
		if err != nil {
			die("Could not fetch host list:", err)
		}
		if len(info.Hosts) == 0 {
			fmt.Println("No known active hosts")
			return
		}

		// Strip down to the number of requested hosts.
		if hostdbNumHosts != 0 && hostdbNumHosts < len(info.Hosts) {
			info.Hosts = info.Hosts[len(info.Hosts)-hostdbNumHosts:]
		}

		fmt.Println(len(info.Hosts), "Active Hosts:")
		w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
		fmt.Fprintln(w, "\t\tAddress\tPrice (per TB per Mo)")
		for i, host := range info.Hosts {
			price := host.StoragePrice.Mul(modules.BlockBytesPerMonthTerabyte)
			fmt.Fprintf(w, "\t%v:\t%v\t%v\n", len(info.Hosts)-i, host.NetAddress, currencyUnits(price))
		}
		w.Flush()
	} else {
		info := new(api.HostdbAllGET)
		err := getAPI("/hostdb/all", info)
		if err != nil {
			die("Could not fetch host list:", err)
		}
		if len(info.Hosts) == 0 {
			fmt.Println("No known hosts")
			return
		}

		// Iterate through the hosts and divide by category.
		var activeHosts, inactiveHosts, offlineHosts []api.ExtendedHostDBEntry
		for _, host := range info.Hosts {
			if host.AcceptingContracts && len(host.ScanHistory) > 0 && host.ScanHistory[len(host.ScanHistory)-1].Success {
				activeHosts = append(activeHosts, host)
				continue
			}
			if len(host.ScanHistory) > 0 && host.ScanHistory[len(host.ScanHistory)-1].Success {
				inactiveHosts = append(inactiveHosts, host)
				continue
			}
			offlineHosts = append(offlineHosts, host)
		}

		if hostdbNumHosts > 0 && len(offlineHosts) > hostdbNumHosts {
			offlineHosts = offlineHosts[len(offlineHosts)-hostdbNumHosts:]
		}
		if hostdbNumHosts > 0 && len(inactiveHosts) > hostdbNumHosts {
			inactiveHosts = inactiveHosts[len(inactiveHosts)-hostdbNumHosts:]
		}
		if hostdbNumHosts > 0 && len(activeHosts) > hostdbNumHosts {
			activeHosts = activeHosts[len(activeHosts)-hostdbNumHosts:]
		}

		fmt.Println()
		fmt.Println(len(offlineHosts), "Offline Hosts:")
		w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
		fmt.Fprintln(w, "\t\tPubkey\tAddress\tPrice (/ TB / Month)\tDownload Price (/ TB)\tUptime\tRecent Scans")
		for i, host := range offlineHosts {
			// Compute the total measured uptime and total measured downtime for this
			// host.
			uptimeRatio := float64(0)
			if len(host.ScanHistory) > 1 {
				downtime := host.HistoricDowntime
				uptime := host.HistoricUptime
				recentTime := host.ScanHistory[0].Timestamp
				recentSuccess := host.ScanHistory[0].Success
				for _, scan := range host.ScanHistory[1:] {
					if recentSuccess {
						uptime += scan.Timestamp.Sub(recentTime)
					} else {
						downtime += scan.Timestamp.Sub(recentTime)
					}
					recentTime = scan.Timestamp
					recentSuccess = scan.Success
				}
				uptimeRatio = float64(uptime) / float64(uptime+downtime)
			}

			// Get the scan history string.
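			// Each character is one scan, oldest first: '1' means the host
			// was online, '0' means it was offline (see the hostdb command
			// help above).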
			scanHistStr := ""
			displayScans := host.ScanHistory
			if len(host.ScanHistory) > scanHistoryLen {
				displayScans = host.ScanHistory[len(host.ScanHistory)-scanHistoryLen:]
			}
			for _, scan := range displayScans {
				if scan.Success {
					scanHistStr += "1"
				} else {
					scanHistStr += "0"
				}
			}

			price := host.StoragePrice.Mul(modules.BlockBytesPerMonthTerabyte)
			downloadBWPrice := host.DownloadBandwidthPrice.Mul(modules.BytesPerTerabyte)
			fmt.Fprintf(w, "\t%v:\t%v\t%v\t%v\t%v\t%.3f\t%s\n", len(offlineHosts)-i, host.PublicKeyString, host.NetAddress, currencyUnits(price), currencyUnits(downloadBWPrice), uptimeRatio, scanHistStr)
		}
		w.Flush()

		fmt.Println()
		fmt.Println(len(inactiveHosts), "Inactive Hosts:")
		w = tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
		fmt.Fprintln(w, "\t\tPubkey\tAddress\tPrice (/ TB / Month)\tDownload Price (/ TB)\tUptime\tRecent Scans")
		for i, host := range inactiveHosts {
			// Compute the total measured uptime and total measured downtime for this
			// host.
			uptimeRatio := float64(0)
			if len(host.ScanHistory) > 1 {
				downtime := host.HistoricDowntime
				uptime := host.HistoricUptime
				recentTime := host.ScanHistory[0].Timestamp
				recentSuccess := host.ScanHistory[0].Success
				for _, scan := range host.ScanHistory[1:] {
					if recentSuccess {
						uptime += scan.Timestamp.Sub(recentTime)
					} else {
						downtime += scan.Timestamp.Sub(recentTime)
					}
					recentTime = scan.Timestamp
					recentSuccess = scan.Success
				}
				uptimeRatio = float64(uptime) / float64(uptime+downtime)
			}

			// Get a string representation of the historic outcomes of the most
			// recent scans.
			scanHistStr := ""
			displayScans := host.ScanHistory
			if len(host.ScanHistory) > scanHistoryLen {
				displayScans = host.ScanHistory[len(host.ScanHistory)-scanHistoryLen:]
			}
			for _, scan := range displayScans {
				if scan.Success {
					scanHistStr += "1"
				} else {
					scanHistStr += "0"
				}
			}

			price := host.StoragePrice.Mul(modules.BlockBytesPerMonthTerabyte)
			downloadBWPrice := host.DownloadBandwidthPrice.Mul(modules.BytesPerTerabyte)
			fmt.Fprintf(w, "\t%v:\t%v\t%v\t%v\t%v\t%.3f\t%s\n", len(inactiveHosts)-i, host.PublicKeyString, host.NetAddress, currencyUnits(price), currencyUnits(downloadBWPrice), uptimeRatio, scanHistStr)
		}
		w.Flush()

		// Grab the host at the 1/5th point and use it as the reference. (It's
		// like using the median, except at the 1/5th point instead of the 1/2
		// point.)
		referenceScore := big.NewRat(1, 1)
		if len(activeHosts) > 0 {
			referenceIndex := len(activeHosts) / 5
			hostInfo := new(api.HostdbHostsGET)
			err := getAPI("/hostdb/hosts/"+activeHosts[referenceIndex].PublicKeyString, hostInfo)
			if err != nil {
				die("Could not fetch provided host:", err)
			}
			if !hostInfo.ScoreBreakdown.Score.IsZero() {
				referenceScore = new(big.Rat).Inv(new(big.Rat).SetInt(hostInfo.ScoreBreakdown.Score.Big()))
			}
		}

		fmt.Println()
		fmt.Println(len(activeHosts), "Active Hosts:")
		w = tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
		fmt.Fprintln(w, "\t\tPubkey\tAddress\tScore\tPrice (/ TB / Month)\tDownload Price (/ TB)\tUptime\tRecent Scans")
		for i, host := range activeHosts {
			// Compute the total measured uptime and total measured downtime for this
			// host.
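			// Each interval between consecutive scans is attributed to the
			// outcome of the earlier scan, giving
			// uptimeRatio = uptime / (uptime + downtime).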
uptimeRatio := float64(0) if len(host.ScanHistory) > 1 { downtime := host.HistoricDowntime uptime := host.HistoricUptime recentTime := host.ScanHistory[0].Timestamp recentSuccess := host.ScanHistory[0].Success for _, scan := range host.ScanHistory[1:] { if recentSuccess { uptime += scan.Timestamp.Sub(recentTime) } else { downtime += scan.Timestamp.Sub(recentTime) } recentTime = scan.Timestamp recentSuccess = scan.Success } uptimeRatio = float64(uptime) / float64(uptime+downtime) } // Get a string representation of the historic outcomes of the most // recent scans. scanHistStr := "" displayScans := host.ScanHistory if len(host.ScanHistory) > scanHistoryLen { displayScans = host.ScanHistory[len(host.ScanHistory)-scanHistoryLen:] } for _, scan := range displayScans { if scan.Success { scanHistStr += "1" } else { scanHistStr += "0" } } // Grab the score information for the active hosts. hostInfo := new(api.HostdbHostsGET) err := getAPI("/hostdb/hosts/"+host.PublicKeyString, hostInfo) if err != nil { die("Could not fetch provided host:", err) } score, _ := new(big.Rat).Mul(referenceScore, new(big.Rat).SetInt(hostInfo.ScoreBreakdown.Score.Big())).Float64() price := host.StoragePrice.Mul(modules.BlockBytesPerMonthTerabyte) downloadBWPrice := host.DownloadBandwidthPrice.Mul(modules.BytesPerTerabyte) fmt.Fprintf(w, "\t%v:\t%v\t%v\t%12.6g\t%v\t%v\t%.3f\t%s\n", len(activeHosts)-i, host.PublicKeyString, host.NetAddress, score, currencyUnits(price), currencyUnits(downloadBWPrice), uptimeRatio, scanHistStr) } w.Flush() } } func hostdbviewcmd(pubkey string) { info := new(api.HostdbHostsGET) err := getAPI("/hostdb/hosts/"+pubkey, info) if err != nil { die("Could not fetch provided host:", err) } fmt.Println("Host information:") fmt.Println(" Public Key:", info.Entry.PublicKeyString) fmt.Println(" Block First Seen:", info.Entry.FirstSeen) fmt.Println("\n Host Settings:") w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) fmt.Fprintln(w, "\t\tAccepting Contracts:\t", info.Entry.AcceptingContracts) fmt.Fprintln(w, "\t\tTotal Storage:\t", info.Entry.TotalStorage/1e9, "GB") fmt.Fprintln(w, "\t\tRemaining Storage:\t", info.Entry.RemainingStorage/1e9, "GB") fmt.Fprintln(w, "\t\tOffered Collateral (TB / Mo):\t", currencyUnits(info.Entry.Collateral.Mul(modules.BlockBytesPerMonthTerabyte))) fmt.Fprintln(w, "\n\t\tContract Price:\t", currencyUnits(info.Entry.ContractPrice)) fmt.Fprintln(w, "\t\tStorage Price (TB / Mo):\t", currencyUnits(info.Entry.StoragePrice.Mul(modules.BlockBytesPerMonthTerabyte))) fmt.Fprintln(w, "\t\tDownload Price (1 TB):\t", currencyUnits(info.Entry.DownloadBandwidthPrice.Mul(modules.BytesPerTerabyte))) fmt.Fprintln(w, "\t\tUpload Price (1 TB):\t", currencyUnits(info.Entry.UploadBandwidthPrice.Mul(modules.BytesPerTerabyte))) fmt.Fprintln(w, "\t\tVersion:\t", info.Entry.Version) w.Flush() printScoreBreakdown(info) // Compute the total measured uptime and total measured downtime for this // host. 
	uptimeRatio := float64(0)
	if len(info.Entry.ScanHistory) > 1 {
		downtime := info.Entry.HistoricDowntime
		uptime := info.Entry.HistoricUptime
		recentTime := info.Entry.ScanHistory[0].Timestamp
		recentSuccess := info.Entry.ScanHistory[0].Success
		for _, scan := range info.Entry.ScanHistory[1:] {
			if recentSuccess {
				uptime += scan.Timestamp.Sub(recentTime)
			} else {
				downtime += scan.Timestamp.Sub(recentTime)
			}
			recentTime = scan.Timestamp
			recentSuccess = scan.Success
		}
		uptimeRatio = float64(uptime) / float64(uptime+downtime)
	}

	fmt.Println("\n  Scan History Length:", len(info.Entry.ScanHistory))
	fmt.Printf("  Overall Uptime:      %.3f\n", uptimeRatio)
	fmt.Println()
}
Sia-1.3.0/siac/main.go000066400000000000000000000231601313565667000144370ustar00rootroot00000000000000package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"net"
	"net/http"
	"os"
	"reflect"

	"github.com/bgentry/speakeasy"
	"github.com/spf13/cobra"

	"github.com/NebulousLabs/Sia/api"
	"github.com/NebulousLabs/Sia/build"
)

var (
	// Flags.
	addr              string // override default API address
	initPassword      bool   // supply a custom password when creating a wallet
	initForce         bool   // destroy and reencrypt the wallet on init if it already exists
	hostVerbose       bool   // display additional host info
	renterShowHistory bool   // Show download history in addition to download queue.
	renterListVerbose bool   // Show additional info about uploaded files.

	// Globals.
	rootCmd *cobra.Command // Root command cobra object, used by bash completion cmd.

	// User-supplied password, cached so that we don't need to prompt multiple
	// times.
	apiPassword string
)

// Exit codes.
// inspired by sysexits.h
const (
	exitCodeGeneral = 1  // Not in sysexits.h, but is standard practice.
	exitCodeUsage   = 64 // EX_USAGE in sysexits.h
)

// non2xx returns true for non-success HTTP status codes.
func non2xx(code int) bool {
	return code < 200 || code > 299
}

// decodeError returns the api.Error from an API response. This method should
// only be called if the response's status code is non-2xx. The error returned
// may not be of type api.Error in the event of an error unmarshalling the
// JSON.
func decodeError(resp *http.Response) error {
	var apiErr api.Error
	err := json.NewDecoder(resp.Body).Decode(&apiErr)
	if err != nil {
		return err
	}
	return apiErr
}

// apiGet wraps a GET request with a status code check, such that if the GET does
// not return 2xx, the error will be read and returned. The response body is
// not closed.
func apiGet(call string) (*http.Response, error) {
	if host, port, _ := net.SplitHostPort(addr); host == "" {
		addr = net.JoinHostPort("localhost", port)
	}
	resp, err := api.HttpGET("http://" + addr + call)
	if err != nil {
		return nil, errors.New("no response from daemon")
	}
	// check error code
	if resp.StatusCode == http.StatusUnauthorized {
		// retry request with authentication.
resp.Body.Close() if apiPassword == "" { // prompt for password and store it in a global var for subsequent // calls apiPassword, err = speakeasy.Ask("API password: ") if err != nil { return nil, err } } resp, err = api.HttpGETAuthenticated("http://"+addr+call, apiPassword) if err != nil { return nil, errors.New("no response from daemon - authentication failed") } } if resp.StatusCode == http.StatusNotFound { resp.Body.Close() return nil, errors.New("API call not recognized: " + call) } if non2xx(resp.StatusCode) { err := decodeError(resp) resp.Body.Close() return nil, err } return resp, nil } // getAPI makes a GET API call and decodes the response. An error is returned // if the response status is not 2xx. func getAPI(call string, obj interface{}) error { resp, err := apiGet(call) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode == http.StatusNoContent { return errors.New("expecting a response, but API returned status code 204 No Content") } err = json.NewDecoder(resp.Body).Decode(obj) if err != nil { return err } return nil } // get makes an API call and discards the response. An error is returned if the // response status is not 2xx. func get(call string) error { resp, err := apiGet(call) if err != nil { return err } resp.Body.Close() return nil } // apiPost wraps a POST request with a status code check, such that if the POST // does not return 2xx, the error will be read and returned. The response body // is not closed. func apiPost(call, vals string) (*http.Response, error) { if host, port, _ := net.SplitHostPort(addr); host == "" { addr = net.JoinHostPort("localhost", port) } resp, err := api.HttpPOST("http://"+addr+call, vals) if err != nil { return nil, errors.New("no response from daemon") } // check error code if resp.StatusCode == http.StatusUnauthorized { resp.Body.Close() // Prompt for password and retry request with authentication. password, err := speakeasy.Ask("API password: ") if err != nil { return nil, err } resp, err = api.HttpPOSTAuthenticated("http://"+addr+call, vals, password) if err != nil { return nil, errors.New("no response from daemon - authentication failed") } } if resp.StatusCode == http.StatusNotFound { resp.Body.Close() return nil, errors.New("API call not recognized: " + call) } if non2xx(resp.StatusCode) { err := decodeError(resp) resp.Body.Close() return nil, err } return resp, nil } // postResp makes a POST API call and decodes the response. An error is // returned if the response status is not 2xx. func postResp(call, vals string, obj interface{}) error { resp, err := apiPost(call, vals) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode == http.StatusNoContent { return errors.New("expecting a response, but API returned status code 204 No Content") } err = json.NewDecoder(resp.Body).Decode(obj) if err != nil { return err } return nil } // post makes an API call and discards the response. An error is returned if // the response status is not 2xx. func post(call, vals string) error { resp, err := apiPost(call, vals) if err != nil { return err } resp.Body.Close() return nil } // wrap wraps a generic command with a check that the command has been // passed the correct number of arguments. The command must take only strings // as arguments. 
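//
// A usage sketch (the command name and handler are hypothetical, following
// the pattern used throughout this package):
//
//	var fooCmd = &cobra.Command{
//		Use: "foo [bar]",
//		Run: wrap(func(bar string) { fmt.Println("bar is", bar) }),
//	}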
func wrap(fn interface{}) func(*cobra.Command, []string) { fnVal, fnType := reflect.ValueOf(fn), reflect.TypeOf(fn) if fnType.Kind() != reflect.Func { panic("wrapped function has wrong type signature") } for i := 0; i < fnType.NumIn(); i++ { if fnType.In(i).Kind() != reflect.String { panic("wrapped function has wrong type signature") } } return func(cmd *cobra.Command, args []string) { if len(args) != fnType.NumIn() { cmd.UsageFunc()(cmd) os.Exit(exitCodeUsage) } argVals := make([]reflect.Value, fnType.NumIn()) for i := range args { argVals[i] = reflect.ValueOf(args[i]) } fnVal.Call(argVals) } } // die prints its arguments to stderr, then exits the program with the default // error code. func die(args ...interface{}) { fmt.Fprintln(os.Stderr, args...) os.Exit(exitCodeGeneral) } func main() { root := &cobra.Command{ Use: os.Args[0], Short: "Sia Client v" + build.Version, Long: "Sia Client v" + build.Version, Run: wrap(consensuscmd), } rootCmd = root // create command tree root.AddCommand(versionCmd) root.AddCommand(stopCmd) root.AddCommand(updateCmd) updateCmd.AddCommand(updateCheckCmd) root.AddCommand(hostCmd) hostCmd.AddCommand(hostConfigCmd, hostAnnounceCmd, hostFolderCmd, hostSectorCmd) hostFolderCmd.AddCommand(hostFolderAddCmd, hostFolderRemoveCmd, hostFolderResizeCmd) hostSectorCmd.AddCommand(hostSectorDeleteCmd) hostCmd.Flags().BoolVarP(&hostVerbose, "verbose", "v", false, "Display detailed host info") root.AddCommand(hostdbCmd) hostdbCmd.AddCommand(hostdbViewCmd) hostdbCmd.Flags().IntVarP(&hostdbNumHosts, "numhosts", "n", 0, "Number of hosts to display from the hostdb") hostdbCmd.Flags().BoolVarP(&hostdbVerbose, "verbose", "v", false, "Display full hostdb information") root.AddCommand(minerCmd) minerCmd.AddCommand(minerStartCmd, minerStopCmd) root.AddCommand(walletCmd) walletCmd.AddCommand(walletAddressCmd, walletAddressesCmd, walletChangepasswordCmd, walletInitCmd, walletInitSeedCmd, walletLoadCmd, walletLockCmd, walletSeedsCmd, walletSendCmd, walletSweepCmd, walletBalanceCmd, walletTransactionsCmd, walletUnlockCmd) walletInitCmd.Flags().BoolVarP(&initPassword, "password", "p", false, "Prompt for a custom password") walletInitCmd.Flags().BoolVarP(&initForce, "force", "", false, "destroy the existing wallet and re-encrypt") walletInitSeedCmd.Flags().BoolVarP(&initForce, "force", "", false, "destroy the existing wallet") walletLoadCmd.AddCommand(walletLoad033xCmd, walletLoadSeedCmd, walletLoadSiagCmd) walletSendCmd.AddCommand(walletSendSiacoinsCmd, walletSendSiafundsCmd) root.AddCommand(renterCmd) renterCmd.AddCommand(renterFilesDeleteCmd, renterFilesDownloadCmd, renterDownloadsCmd, renterAllowanceCmd, renterSetAllowanceCmd, renterContractsCmd, renterFilesListCmd, renterFilesRenameCmd, renterFilesUploadCmd, renterUploadsCmd, renterExportCmd, renterPricesCmd) renterContractsCmd.AddCommand(renterContractsViewCmd) renterAllowanceCmd.AddCommand(renterAllowanceCancelCmd) renterCmd.Flags().BoolVarP(&renterListVerbose, "verbose", "v", false, "Show additional file info such as redundancy") renterDownloadsCmd.Flags().BoolVarP(&renterShowHistory, "history", "H", false, "Show download history in addition to the download queue") renterFilesListCmd.Flags().BoolVarP(&renterListVerbose, "verbose", "v", false, "Show additional file info such as redundancy") renterExportCmd.AddCommand(renterExportContractTxnsCmd) root.AddCommand(gatewayCmd) gatewayCmd.AddCommand(gatewayConnectCmd, gatewayDisconnectCmd, gatewayAddressCmd, gatewayListCmd) root.AddCommand(consensusCmd) root.AddCommand(bashcomplCmd) 
	root.AddCommand(mangenCmd)

	// parse flags
	root.PersistentFlags().StringVarP(&addr, "addr", "a", "localhost:9980", "which host/port to communicate with (i.e. the host/port siad is listening on)")

	// run
	if err := root.Execute(); err != nil {
		// Since no commands return errors (all commands set Command.Run instead of
		// Command.RunE), Command.Execute() should only return an error on an
		// invalid command or flag. Therefore Command.Usage() was called (assuming
		// Command.SilenceUsage is false) and we should exit with exitCodeUsage.
		os.Exit(exitCodeUsage)
	}
}
Sia-1.3.0/siac/mangencmd.go000066400000000000000000000007071313565667000154460ustar00rootroot00000000000000package main

import (
	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

var (
	mangenCmd = &cobra.Command{
		Use:   "man-generation [path]",
		Short: "Creates unix style manpages.",
		Long:  "Creates unix style man pages at the specified directory.",
		Run:   wrap(mangencmd),
	}
)

func mangencmd(path string) {
	header := &doc.GenManHeader{
		Section: "1",
		Manual:  "siac Manual",
		Source:  "",
	}
	doc.GenManTree(rootCmd, header, path)
}
Sia-1.3.0/siac/minercmd.go000066400000000000000000000030661313565667000153140ustar00rootroot00000000000000package main

import (
	"fmt"

	"github.com/NebulousLabs/Sia/api"
	"github.com/spf13/cobra"
)

var (
	minerCmd = &cobra.Command{
		Use:   "miner",
		Short: "Perform miner actions",
		Long:  "Perform miner actions and view miner status.",
		Run:   wrap(minercmd),
	}

	minerStartCmd = &cobra.Command{
		Use:   "start",
		Short: "Start CPU mining",
		Long:  "Start CPU mining. If the miner is already running, this command does nothing.",
		Run:   wrap(minerstartcmd),
	}

	minerStopCmd = &cobra.Command{
		Use:   "stop",
		Short: "Stop mining",
		Long:  "Stop mining (this may take a few moments).",
		Run:   wrap(minerstopcmd),
	}
)

// minerstartcmd is the handler for the command `siac miner start`.
// Starts the CPU miner.
func minerstartcmd() {
	err := get("/miner/start")
	if err != nil {
		die("Could not start miner:", err)
	}
	fmt.Println("CPU Miner is now running.")
}

// minercmd is the handler for the command `siac miner`.
// Prints the status of the miner.
func minercmd() {
	status := new(api.MinerGET)
	err := getAPI("/miner", status)
	if err != nil {
		die("Could not get miner status:", err)
	}
	miningStr := "off"
	if status.CPUMining {
		miningStr = "on"
	}
	fmt.Printf(`Miner status:
CPU Mining:   %s
CPU Hashrate: %v KH/s
Blocks Mined: %d (%d stale)
`, miningStr, status.CPUHashrate/1000, status.BlocksMined, status.StaleBlocksMined)
}

// minerstopcmd is the handler for the command `siac miner stop`.
// Stops the CPU miner.
func minerstopcmd() {
	err := get("/miner/stop")
	if err != nil {
		die("Could not stop miner:", err)
	}
	fmt.Println("Stopped mining.")
}
Sia-1.3.0/siac/parse.go000066400000000000000000000106661313565667000146310ustar00rootroot00000000000000package main

import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"strings"

	"github.com/NebulousLabs/Sia/types"
)

var errUnableToParseSize = errors.New("unable to parse size")

// filesizeUnits returns a string that displays a filesize in human-readable
// units.
func filesizeUnits(size int64) string {
	if size == 0 {
		return "0 B"
	}
	sizes := []string{"B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
	i := int(math.Log10(float64(size)) / 3)
	return fmt.Sprintf("%.*f %s", i, float64(size)/math.Pow10(3*i), sizes[i])
}

// parseFilesize converts strings of form 10GB to a size in bytes. Fractional
// sizes are truncated to a whole number of bytes.
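//
// For example (cases taken from the tests later in this package):
//
//	parseFilesize("1.23KB") // "1230", nil
//	parseFilesize("1GiB")   // "1073741824", nil
//	parseFilesize("123")    // "", errUnableToParseSize (a unit is required)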
func parseFilesize(strSize string) (string, error) { units := []struct { suffix string multiplier int64 }{ {"kb", 1e3}, {"mb", 1e6}, {"gb", 1e9}, {"tb", 1e12}, {"kib", 1 << 10}, {"mib", 1 << 20}, {"gib", 1 << 30}, {"tib", 1 << 40}, {"b", 1}, // must be after others else it'll match on them all } strSize = strings.ToLower(strSize) for _, unit := range units { if strings.HasSuffix(strSize, unit.suffix) { r, ok := new(big.Rat).SetString(strings.TrimSuffix(strSize, unit.suffix)) if !ok { return "", errUnableToParseSize } r.Mul(r, new(big.Rat).SetInt(big.NewInt(unit.multiplier))) if !r.IsInt() { f, _ := r.Float64() return fmt.Sprintf("%d", int64(f)), nil } return r.RatString(), nil } } return "", errUnableToParseSize } // periodUnits turns a period in terms of blocks to a number of weeks. func periodUnits(blocks types.BlockHeight) string { return fmt.Sprint(blocks / 1008) // 1008 blocks per week } // parsePeriod converts a duration specified in blocks, hours, or weeks to a // number of blocks. func parsePeriod(period string) (string, error) { units := []struct { suffix string multiplier float64 }{ {"b", 1}, // blocks {"block", 1}, // blocks {"blocks", 1}, // blocks {"h", 6}, // hours {"hour", 6}, // hours {"hours", 6}, // hours {"d", 144}, // days {"day", 144}, // days {"days", 144}, // days {"w", 1008}, // weeks {"week", 1008}, // weeks {"weeks", 1008}, // weeks } period = strings.ToLower(period) for _, unit := range units { if strings.HasSuffix(period, unit.suffix) { var base float64 _, err := fmt.Sscan(strings.TrimSuffix(period, unit.suffix), &base) if err != nil { return "", errUnableToParseSize } blocks := int(base * unit.multiplier) return fmt.Sprint(blocks), nil } } return "", errUnableToParseSize } // currencyUnits converts a types.Currency to a string with human-readable // units. The unit used will be the largest unit that results in a value // greater than 1. The value is rounded to 4 significant digits. func currencyUnits(c types.Currency) string { pico := types.SiacoinPrecision.Div64(1e12) if c.Cmp(pico) < 0 { return c.String() + " H" } // iterate until we find a unit greater than c mag := pico unit := "" for _, unit = range []string{"pS", "nS", "uS", "mS", "SC", "KS", "MS", "GS", "TS"} { if c.Cmp(mag.Mul64(1e3)) < 0 { break } else if unit != "TS" { // don't want to perform this multiply on the last iter; that // would give us 1.235 TS instead of 1235 TS mag = mag.Mul64(1e3) } } num := new(big.Rat).SetInt(c.Big()) denom := new(big.Rat).SetInt(mag.Big()) res, _ := new(big.Rat).Mul(num, denom.Inv(denom)).Float64() return fmt.Sprintf("%.4g %s", res, unit) } // parseCurrency converts a siacoin amount to base units. func parseCurrency(amount string) (string, error) { units := []string{"pS", "nS", "uS", "mS", "SC", "KS", "MS", "GS", "TS"} for i, unit := range units { if strings.HasSuffix(amount, unit) { // scan into big.Rat r, ok := new(big.Rat).SetString(strings.TrimSuffix(amount, unit)) if !ok { return "", errors.New("malformed amount") } // convert units exp := 24 + 3*(int64(i)-4) mag := new(big.Int).Exp(big.NewInt(10), big.NewInt(exp), nil) r.Mul(r, new(big.Rat).SetInt(mag)) // r must be an integer at this point if !r.IsInt() { return "", errors.New("non-integer number of hastings") } return r.RatString(), nil } } // check for hastings separately if strings.HasSuffix(amount, "H") { return strings.TrimSuffix(amount, "H"), nil } return "", errors.New("amount is missing units; run 'wallet --help' for a list of units") } // yesNo returns "Yes" if b is true, and "No" if b is false. 
func yesNo(b bool) string { if b { return "Yes" } return "No" } Sia-1.3.0/siac/parse_test.go000066400000000000000000000063771313565667000156770ustar00rootroot00000000000000package main import ( "math/big" "testing" "github.com/NebulousLabs/Sia/types" ) func TestParseFilesize(t *testing.T) { tests := []struct { in, out string err error }{ {"1b", "1", nil}, {"1KB", "1000", nil}, {"1MB", "1000000", nil}, {"1GB", "1000000000", nil}, {"1TB", "1000000000000", nil}, {"1KiB", "1024", nil}, {"1MiB", "1048576", nil}, {"1GiB", "1073741824", nil}, {"1TiB", "1099511627776", nil}, {"", "", errUnableToParseSize}, {"123", "", errUnableToParseSize}, {"123b", "123", nil}, {"123TB", "123000000000000", nil}, {"123GiB", "132070244352", nil}, {"123BiB", "", errUnableToParseSize}, {"GB", "", errUnableToParseSize}, {"123G", "", errUnableToParseSize}, {"123B99", "", errUnableToParseSize}, {"12A3456", "", errUnableToParseSize}, {"1.23KB", "1230", nil}, {"1.234KB", "1234", nil}, {"1.2345KB", "1234", nil}, } for _, test := range tests { res, err := parseFilesize(test.in) if res != test.out || err != test.err { t.Errorf("parseFilesize(%v): expected %v %v, got %v %v", test.in, test.out, test.err, res, err) } } } func TestParsePeriod(t *testing.T) { tests := []struct { in, out string err error }{ {"x", "", errUnableToParseSize}, {"1", "", errUnableToParseSize}, {"b", "", errUnableToParseSize}, {"1b", "1", nil}, {"1 b", "1", nil}, {"1block", "1", nil}, {"1 block", "1", nil}, {"1blocks", "1", nil}, {"1 blocks", "1", nil}, {"2b", "2", nil}, {"2 b", "2", nil}, {"2block", "2", nil}, {"2 block", "2", nil}, {"2blocks", "2", nil}, {"2 blocks", "2", nil}, {"2h", "12", nil}, {"2 h", "12", nil}, {"2hour", "12", nil}, {"2 hour", "12", nil}, {"2hours", "12", nil}, {"2 hours", "12", nil}, {"0.5d", "72", nil}, {"0.5 d", "72", nil}, {"0.5day", "72", nil}, {"0.5 day", "72", nil}, {"0.5days", "72", nil}, {"0.5 days", "72", nil}, {"10w", "10080", nil}, {"10 w", "10080", nil}, {"10week", "10080", nil}, {"10 week", "10080", nil}, {"10weeks", "10080", nil}, {"10 weeks", "10080", nil}, {"1 fortnight", "", errUnableToParseSize}, {"three h", "", errUnableToParseSize}, } for _, test := range tests { res, err := parsePeriod(test.in) if res != test.out || err != test.err { t.Errorf("parsePeriod(%v): expected %v %v, got %v %v", test.in, test.out, test.err, res, err) } } } func TestCurrencyUnits(t *testing.T) { tests := []struct { in, out string }{ {"1", "1 H"}, {"1000", "1000 H"}, {"100000000000", "100000000000 H"}, {"1000000000000", "1 pS"}, {"1234560000000", "1.235 pS"}, {"12345600000000", "12.35 pS"}, {"123456000000000", "123.5 pS"}, {"1000000000000000", "1 nS"}, {"1000000000000000000", "1 uS"}, {"1000000000000000000000", "1 mS"}, {"1000000000000000000000000", "1 SC"}, {"1000000000000000000000000000", "1 KS"}, {"1000000000000000000000000000000", "1 MS"}, {"1000000000000000000000000000000000", "1 GS"}, {"1000000000000000000000000000000000000", "1 TS"}, {"1234560000000000000000000000000000000", "1.235 TS"}, {"1234560000000000000000000000000000000000", "1235 TS"}, } for _, test := range tests { i, _ := new(big.Int).SetString(test.in, 10) out := currencyUnits(types.NewCurrency(i)) if out != test.out { t.Errorf("currencyUnits(%v): expected %v, got %v", test.in, test.out, out) } } } Sia-1.3.0/siac/rentercmd.go000066400000000000000000000407701313565667000155040ustar00rootroot00000000000000package main import ( "fmt" "os" "path/filepath" "sort" "text/tabwriter" "time" "github.com/spf13/cobra" "github.com/NebulousLabs/Sia/api" 
"github.com/NebulousLabs/Sia/modules" ) var ( renterCmd = &cobra.Command{ Use: "renter", Short: "Perform renter actions", Long: "Upload, download, rename, delete, load, or share files.", Run: wrap(rentercmd), } renterUploadsCmd = &cobra.Command{ Use: "uploads", Short: "View the upload queue", Long: "View the list of files currently uploading.", Run: wrap(renteruploadscmd), } renterDownloadsCmd = &cobra.Command{ Use: "downloads", Short: "View the download queue", Long: "View the list of files currently downloading.", Run: wrap(renterdownloadscmd), } renterAllowanceCmd = &cobra.Command{ Use: "allowance", Short: "View the current allowance", Long: "View the current allowance, which controls how much money is spent on file contracts.", Run: wrap(renterallowancecmd), } renterAllowanceCancelCmd = &cobra.Command{ Use: "cancel", Short: "Cancel the current allowance", Long: "Cancel the current allowance, which controls how much money is spent on file contracts.", Run: wrap(renterallowancecancelcmd), } renterSetAllowanceCmd = &cobra.Command{ Use: "setallowance [amount] [period]", Short: "Set the allowance", Long: `Set the amount of money that can be spent over a given period. amount is given in currency units (SC, KS, etc.) period is given in either blocks (b), hours (h), days (d), or weeks (w). A block is approximately 10 minutes, so one hour is six blocks, a day is 144 blocks, and a week is 1008 blocks. Note that setting the allowance will cause siad to immediately begin forming contracts! You should only set the allowance once you are fully synced and you have a reasonable number (>30) of hosts in your hostdb.`, Run: wrap(rentersetallowancecmd), } renterContractsCmd = &cobra.Command{ Use: "contracts", Short: "View the Renter's contracts", Long: "View the contracts that the Renter has formed with hosts.", Run: wrap(rentercontractscmd), } renterContractsViewCmd = &cobra.Command{ Use: "view [contract-id]", Short: "View details of the specified contract", Long: "View all details available of the specified contract.", Run: wrap(rentercontractsviewcmd), } renterFilesDeleteCmd = &cobra.Command{ Use: "delete [path]", Aliases: []string{"rm"}, Short: "Delete a file", Long: "Delete a file. Does not delete the file on disk.", Run: wrap(renterfilesdeletecmd), } renterFilesDownloadCmd = &cobra.Command{ Use: "download [path] [destination]", Short: "Download a file", Long: "Download a previously-uploaded file to a specified destination.", Run: wrap(renterfilesdownloadcmd), } renterFilesListCmd = &cobra.Command{ Use: "list", Aliases: []string{"ls"}, Short: "List the status of all files", Long: "List the status of all files known to the renter on the Sia network.", Run: wrap(renterfileslistcmd), } renterFilesRenameCmd = &cobra.Command{ Use: "rename [path] [newpath]", Aliases: []string{"mv"}, Short: "Rename a file", Long: "Rename a file.", Run: wrap(renterfilesrenamecmd), } renterFilesUploadCmd = &cobra.Command{ Use: "upload [source] [path]", Short: "Upload a file", Long: "Upload a file to [path] on the Sia network.", Run: wrap(renterfilesuploadcmd), } renterPricesCmd = &cobra.Command{ Use: "prices", Short: "Display the price of storage and bandwidth", Long: "Display the estimated prices of storing files, retrieving files, and creating a set of contracts", Run: wrap(renterpricescmd), } ) // abs returns the absolute representation of a path. // TODO: bad things can happen if you run siac from a non-existent directory. // Implement some checks to catch this problem. 
func abs(path string) string { abspath, err := filepath.Abs(path) if err != nil { return path } return abspath } // rentercmd displays the renter's financial metrics and lists the files it is // tracking. func rentercmd() { var rg api.RenterGET err := getAPI("/renter", &rg) if err != nil { die("Could not get renter info:", err) } fm := rg.FinancialMetrics unspent := fm.ContractSpending.Sub(fm.DownloadSpending).Sub(fm.StorageSpending).Sub(fm.UploadSpending) fmt.Printf(`Renter info: Storage Spending: %v Upload Spending: %v Download Spending: %v Unspent Funds: %v Total Allocated: %v `, currencyUnits(fm.StorageSpending), currencyUnits(fm.UploadSpending), currencyUnits(fm.DownloadSpending), currencyUnits(unspent), currencyUnits(fm.ContractSpending)) // also list files renterfileslistcmd() } // renteruploadscmd is the handler for the command `siac renter uploads`. // Lists files currently uploading. func renteruploadscmd() { var rf api.RenterFiles err := getAPI("/renter/files", &rf) if err != nil { die("Could not get upload queue:", err) } // TODO: add a --history flag to the uploads command to mirror the --history // flag in the downloads command. This hasn't been done yet because the // call to /renter/files includes files that have been shared with you, // not just files you've uploaded. // Filter out files that have been uploaded. var filteredFiles []modules.FileInfo for _, fi := range rf.Files { if !fi.Available { filteredFiles = append(filteredFiles, fi) } } if len(filteredFiles) == 0 { fmt.Println("No files are uploading.") return } fmt.Println("Uploading", len(filteredFiles), "files:") for _, file := range filteredFiles { fmt.Printf("%13s %s (uploading, %0.2f%%)\n", filesizeUnits(int64(file.Filesize)), file.SiaPath, file.UploadProgress) } } // renterdownloadscmd is the handler for the command `siac renter downloads`. // Lists files currently downloading, and optionally previously downloaded // files if the -H or --history flag is specified. func renterdownloadscmd() { var queue api.RenterDownloadQueue err := getAPI("/renter/downloads", &queue) if err != nil { die("Could not get download queue:", err) } // Filter out files that have been downloaded. var downloading []api.DownloadInfo for _, file := range queue.Downloads { if file.Received != file.Filesize { downloading = append(downloading, file) } } if len(downloading) == 0 { fmt.Println("No files are downloading.") } else { fmt.Println("Downloading", len(downloading), "files:") for _, file := range downloading { fmt.Printf("%s: %5.1f%% %s -> %s\n", file.StartTime.Format("Jan 02 03:04 PM"), 100*float64(file.Received)/float64(file.Filesize), file.SiaPath, file.Destination) } } if !renterShowHistory { return } fmt.Println() // Filter out files that are downloading. var downloaded []api.DownloadInfo for _, file := range queue.Downloads { if file.Received == file.Filesize { downloaded = append(downloaded, file) } } if len(downloaded) == 0 { fmt.Println("No files downloaded.") } else { fmt.Println("Downloaded", len(downloaded), "files:") for _, file := range downloaded { fmt.Printf("%s: %s -> %s\n", file.StartTime.Format("Jan 02 03:04 PM"), file.SiaPath, file.Destination) } } } // renterallowancecmd displays the current allowance. 
func renterallowancecmd() {
	var rg api.RenterGET
	err := getAPI("/renter", &rg)
	if err != nil {
		die("Could not get allowance:", err)
	}
	allowance := rg.Settings.Allowance

	// convert to SC
	fmt.Printf(`Allowance:
	Amount: %v
	Period: %v blocks
`, currencyUnits(allowance.Funds), allowance.Period)
}

// renterallowancecancelcmd cancels the current allowance.
func renterallowancecancelcmd() {
	err := post("/renter", "hosts=0&funds=0&period=0&renewwindow=0")
	if err != nil {
		die("error cancelling allowance:", err)
	}
	fmt.Println("Allowance cancelled.")
}

// rentersetallowancecmd allows the user to set the allowance.
func rentersetallowancecmd(amount, period string) {
	hastings, err := parseCurrency(amount)
	if err != nil {
		die("Could not parse amount:", err)
	}
	blocks, err := parsePeriod(period)
	if err != nil {
		die("Could not parse period:", err)
	}
	err = post("/renter", fmt.Sprintf("funds=%s&period=%s", hastings, blocks))
	if err != nil {
		die("Could not set allowance:", err)
	}
	fmt.Println("Allowance updated.")
}

// byValue sorts contracts by their value in siacoins, high to low. If two
// contracts have the same value, they are sorted by their host's address.
type byValue []api.RenterContract

func (s byValue) Len() int      { return len(s) }
func (s byValue) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byValue) Less(i, j int) bool {
	cmp := s[i].RenterFunds.Cmp(s[j].RenterFunds)
	if cmp == 0 {
		return s[i].NetAddress < s[j].NetAddress
	}
	return cmp > 0
}

// rentercontractscmd is the handler for the command `siac renter contracts`.
// It lists the Renter's contracts.
func rentercontractscmd() {
	var rc api.RenterContracts
	err := getAPI("/renter/contracts", &rc)
	if err != nil {
		die("Could not get contracts:", err)
	}
	if len(rc.Contracts) == 0 {
		fmt.Println("No contracts have been formed.")
		return
	}
	sort.Sort(byValue(rc.Contracts))
	fmt.Println("Contracts:")
	w := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)
	fmt.Fprintln(w, "Host\tRemaining Funds\tSpent Funds\tSpent Fees\tData\tEnd Height\tID")
	for _, c := range rc.Contracts {
		fmt.Fprintf(w, "%v\t%8s\t%8s\t%8s\t%v\t%v\t%v\n",
			c.NetAddress,
			currencyUnits(c.RenterFunds),
			currencyUnits(c.TotalCost.Sub(c.RenterFunds).Sub(c.Fees)),
			currencyUnits(c.Fees),
			filesizeUnits(int64(c.Size)),
			c.EndHeight,
			c.ID)
	}
	w.Flush()
}

// rentercontractsviewcmd is the handler for the command `siac renter contracts view [contract-id]`.
// It lists details of a specific contract.
func rentercontractsviewcmd(cid string) {
	var rc api.RenterContracts
	err := getAPI("/renter/contracts", &rc)
	if err != nil {
		die("Could not get contract details: ", err)
	}
	for _, rc := range rc.Contracts {
		if rc.ID.String() == cid {
			var hostInfo api.HostdbHostsGET
			err = getAPI("/hostdb/hosts/"+rc.HostPublicKey.String(), &hostInfo)
			if err != nil {
				die("Could not fetch details of host: ", err)
			}
			fmt.Printf(`
Contract %v
	Host: %v (Public Key: %v)
	Start Height: %v
	End Height: %v
	Total cost: %v (Fees: %v)
	Funds Allocated: %v
	Upload Spending: %v
	Storage Spending: %v
	Download Spending: %v
	Remaining Funds: %v
	File Size: %v
`, rc.ID, rc.NetAddress, rc.HostPublicKey.String(), rc.StartHeight, rc.EndHeight,
				currencyUnits(rc.TotalCost), currencyUnits(rc.Fees),
				currencyUnits(rc.TotalCost.Sub(rc.Fees)),
				currencyUnits(rc.UploadSpending),
				currencyUnits(rc.StorageSpending),
				currencyUnits(rc.DownloadSpending),
				currencyUnits(rc.RenterFunds),
				filesizeUnits(int64(rc.Size)))
			printScoreBreakdown(&hostInfo)
			return
		}
	}
	fmt.Println("Contract not found")
}

// renterfilesdeletecmd is the handler for the command `siac renter delete [path]`.
// Removes the specified path from the Sia network.
func renterfilesdeletecmd(path string) {
	err := post("/renter/delete/"+path, "")
	if err != nil {
		die("Could not delete file:", err)
	}
	fmt.Println("Deleted", path)
}

// renterfilesdownloadcmd is the handler for the command `siac renter download [path] [destination]`.
// Downloads a path from the Sia network to the local specified destination.
func renterfilesdownloadcmd(path, destination string) {
	destination = abs(destination)
	done := make(chan struct{})
	go downloadprogress(done, path)
	err := get("/renter/download/" + path + "?destination=" + destination)
	close(done)
	if err != nil {
		die("Could not download file:", err)
	}
	fmt.Printf("\nDownloaded '%s' to %s.\n", path, abs(destination))
}

func downloadprogress(done chan struct{}, siapath string) {
	time.Sleep(time.Second) // give download time to initialize
	// Poll the download queue once per second. A single ticker is created up
	// front so that a new one is not leaked on every loop iteration.
	tick := time.NewTicker(time.Second)
	defer tick.Stop()
	for {
		select {
		case <-done:
			return
		case <-tick.C:
			// get download progress of file
			var queue api.RenterDownloadQueue
			err := getAPI("/renter/downloads", &queue)
			if err != nil {
				continue // benign
			}
			var d api.DownloadInfo
			for _, d = range queue.Downloads {
				if d.SiaPath == siapath {
					break
				}
			}
			if d.Filesize == 0 {
				continue // file hasn't appeared in queue yet
			}
			pct := 100 * float64(d.Received) / float64(d.Filesize)
			elapsed := time.Since(d.StartTime)
			elapsed -= elapsed % time.Second // round to nearest second
			mbps := (float64(d.Received*8) / 1e6) / time.Since(d.StartTime).Seconds()
			fmt.Printf("\rDownloading... %5.1f%% of %v, %v elapsed, %.2f Mbps ", pct, filesizeUnits(int64(d.Filesize)), elapsed, mbps)
		}
	}
}

// bySiaPath implements sort.Interface for []modules.FileInfo based on the
// SiaPath field.
type bySiaPath []modules.FileInfo

func (s bySiaPath) Len() int           { return len(s) }
func (s bySiaPath) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s bySiaPath) Less(i, j int) bool { return s[i].SiaPath < s[j].SiaPath }

// renterfileslistcmd is the handler for the command `siac renter list`.
// Lists files known to the renter on the network.
func renterfileslistcmd() {
	var rf api.RenterFiles
	err := getAPI("/renter/files", &rf)
	if err != nil {
		die("Could not get file list:", err)
	}
	if len(rf.Files) == 0 {
		fmt.Println("No files have been uploaded.")
		return
	}
	fmt.Println("Tracking", len(rf.Files), "files:")
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
	if renterListVerbose {
		fmt.Fprintln(w, "File size\tAvailable\tProgress\tRedundancy\tRenewing\tSia path")
	}
	sort.Sort(bySiaPath(rf.Files))
	for _, file := range rf.Files {
		fmt.Fprintf(w, "%9s", filesizeUnits(int64(file.Filesize)))
		if renterListVerbose {
			availableStr := yesNo(file.Available)
			renewingStr := yesNo(file.Renewing)
			redundancyStr := fmt.Sprintf("%.2f", file.Redundancy)
			if file.Redundancy == -1 {
				redundancyStr = "-"
			}
			uploadProgressStr := fmt.Sprintf("%.2f%%", file.UploadProgress)
			if file.UploadProgress == -1 {
				uploadProgressStr = "-"
			}
			fmt.Fprintf(w, "\t%s\t%8s\t%10s\t%s", availableStr, uploadProgressStr, redundancyStr, renewingStr)
		}
		fmt.Fprintf(w, "\t%s", file.SiaPath)
		if !renterListVerbose && !file.Available {
			fmt.Fprintf(w, " (uploading, %0.2f%%)", file.UploadProgress)
		}
		fmt.Fprintln(w, "")
	}
	w.Flush()
}

// renterfilesrenamecmd is the handler for the command `siac renter rename [path] [newpath]`.
// Renames a file on the Sia network.
func renterfilesrenamecmd(path, newpath string) { err := post("/renter/rename/"+path, "newsiapath="+newpath) if err != nil { die("Could not rename file:", err) } fmt.Printf("Renamed %s to %s\n", path, newpath) } // renterfilesuploadcmd is the handler for the command `siac renter upload // [source] [path]`. Uploads the [source] file to [path] on the Sia network. // If [source] is a directory, all files inside it will be uploaded and named // relative to [path]. func renterfilesuploadcmd(source, path string) { stat, err := os.Stat(source) if err != nil { die("Could not stat file or folder:", err) } if stat.IsDir() { // folder var files []string err := filepath.Walk(source, func(path string, info os.FileInfo, err error) error { if err != nil { fmt.Println("Warning: skipping file:", err) return nil } if info.IsDir() { return nil } files = append(files, path) return nil }) if err != nil { die("Could not read folder:", err) } else if len(files) == 0 { die("Nothing to upload.") } for _, file := range files { fpath, _ := filepath.Rel(source, file) fpath = filepath.Join(path, fpath) fpath = filepath.ToSlash(fpath) err = post("/renter/upload/"+fpath, "source="+abs(file)) if err != nil { die("Could not upload file:", err) } } fmt.Printf("Uploaded %d files into '%s'.\n", len(files), path) } else { // single file err = post("/renter/upload/"+path, "source="+abs(source)) if err != nil { die("Could not upload file:", err) } fmt.Printf("Uploaded '%s' as %s.\n", abs(source), path) } } // renterpricescmd is the handler for the command `siac renter prices`, which // displays the prices of various storage operations. func renterpricescmd() { var rpg api.RenterPricesGET err := getAPI("/renter/prices", &rpg) if err != nil { die("Could not read the renter prices:", err) } fmt.Println("Renter Prices (estimated):") w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) fmt.Fprintln(w, "\tFees for Creating a Set of Contracts:\t", currencyUnits(rpg.FormContracts)) fmt.Fprintln(w, "\tDownload 1 TB:\t", currencyUnits(rpg.DownloadTerabyte)) fmt.Fprintln(w, "\tStore 1 TB for 1 Month:\t", currencyUnits(rpg.StorageTerabyteMonth)) fmt.Fprintln(w, "\tUpload 1 TB:\t", currencyUnits(rpg.UploadTerabyte)) w.Flush() } Sia-1.3.0/siac/sort_test.go000066400000000000000000000020771313565667000155450ustar00rootroot00000000000000package main import ( "sort" "testing" "github.com/NebulousLabs/Sia/api" "github.com/NebulousLabs/Sia/types" ) // TestSortByValue tests that byValue sorts contracts correctly. 
func TestSortByValue(t *testing.T) { contracts := []api.RenterContract{ {RenterFunds: types.NewCurrency64(1), NetAddress: "b"}, {RenterFunds: types.NewCurrency64(4), NetAddress: "a"}, {RenterFunds: types.NewCurrency64(2), NetAddress: "c"}, {RenterFunds: types.NewCurrency64(5), NetAddress: "z"}, {RenterFunds: types.NewCurrency64(2), NetAddress: "c"}, {RenterFunds: types.NewCurrency64(0), NetAddress: "e"}, {RenterFunds: types.NewCurrency64(2), NetAddress: "a"}, } sort.Sort(byValue(contracts)) // check ordering for i := 0; i < len(contracts)-1; i++ { a, b := contracts[i], contracts[i+1] if cmp := a.RenterFunds.Cmp(b.RenterFunds); cmp < 0 { t.Error("contracts not primarily sorted by value:", a.RenterFunds, b.RenterFunds) } else if cmp == 0 && a.NetAddress > b.NetAddress { t.Error("contracts not secondarily sorted by address:", a.NetAddress, b.NetAddress) } } } Sia-1.3.0/siac/walletcmd.go000066400000000000000000000360761313565667000155010ustar00rootroot00000000000000package main import ( "fmt" "math/big" "github.com/bgentry/speakeasy" "github.com/spf13/cobra" "github.com/NebulousLabs/Sia/api" "github.com/NebulousLabs/Sia/types" ) var ( walletCmd = &cobra.Command{ Use: "wallet", Short: "Perform wallet actions", Long: `Generate a new address, send coins to another wallet, or view info about the wallet. Units: The smallest unit of siacoins is the hasting. One siacoin is 10^24 hastings. Other supported units are: pS (pico, 10^-12 SC) nS (nano, 10^-9 SC) uS (micro, 10^-6 SC) mS (milli, 10^-3 SC) SC KS (kilo, 10^3 SC) MS (mega, 10^6 SC) GS (giga, 10^9 SC) TS (tera, 10^12 SC)`, Run: wrap(walletbalancecmd), } walletAddressCmd = &cobra.Command{ Use: "address", Short: "Get a new wallet address", Long: "Generate a new wallet address from the wallet's primary seed.", Run: wrap(walletaddresscmd), } walletAddressesCmd = &cobra.Command{ Use: "addresses", Short: "List all addresses", Long: "List all addresses that have been generated by the wallet.", Run: wrap(walletaddressescmd), } walletChangepasswordCmd = &cobra.Command{ Use: "change-password", Short: "Change the wallet password", Long: "Change the encryption password of the wallet, re-encrypting all keys + seeds kept by the wallet.", Run: wrap(walletchangepasswordcmd), } walletInitCmd = &cobra.Command{ Use: "init", Short: "Initialize and encrypt a new wallet", Long: `Generate a new wallet from a randomly generated seed, and encrypt it. By default the wallet encryption / unlock password is the same as the generated seed.`, Run: wrap(walletinitcmd), } walletInitSeedCmd = &cobra.Command{ Use: "init-seed", Short: "Initialize and encrypt a new wallet using a pre-existing seed", Long: `Initialize and encrypt a new wallet using a pre-existing seed.`, Run: wrap(walletinitseedcmd), } walletLoadCmd = &cobra.Command{ Use: "load", Short: "Load a wallet seed, v0.3.3.x wallet, or siag keyset", // Run field is not set, as the load command itself is not a valid command. // A subcommand must be provided. 
} walletLoad033xCmd = &cobra.Command{ Use: "033x [filepath]", Short: "Load a v0.3.3.x wallet", Long: "Load a v0.3.3.x wallet into the current wallet", Run: wrap(walletload033xcmd), } walletLoadSeedCmd = &cobra.Command{ Use: `seed`, Short: "Add a seed to the wallet", Long: "Loads an auxiliary seed into the wallet.", Run: wrap(walletloadseedcmd), } walletLoadSiagCmd = &cobra.Command{ Use: `siag [filepath,...]`, Short: "Load siag key(s) into the wallet", Long: "Load siag key(s) into the wallet - typically used for siafunds.", Example: "siac wallet load siag key1.siakey,key2.siakey", Run: wrap(walletloadsiagcmd), } walletLockCmd = &cobra.Command{ Use: "lock", Short: "Lock the wallet", Long: "Lock the wallet, preventing further use", Run: wrap(walletlockcmd), } walletSeedsCmd = &cobra.Command{ Use: "seeds", Short: "View information about your seeds", Long: "View your primary and auxiliary wallet seeds.", Run: wrap(walletseedscmd), } walletSendCmd = &cobra.Command{ Use: "send", Short: "Send either siacoins or siafunds to an address", Long: "Send either siacoins or siafunds to an address", // Run field is not set, as the send command itself is not a valid command. // A subcommand must be provided. } walletSendSiacoinsCmd = &cobra.Command{ Use: "siacoins [amount] [dest]", Short: "Send siacoins to an address", Long: `Send siacoins to an address. 'dest' must be a 76-byte hexadecimal address. 'amount' can be specified in units, e.g. 1.23KS. Run 'wallet --help' for a list of units. If no unit is supplied, hastings will be assumed. A miner fee of 10 SC is levied on all transactions.`, Run: wrap(walletsendsiacoinscmd), } walletSendSiafundsCmd = &cobra.Command{ Use: "siafunds [amount] [dest]", Short: "Send siafunds", Long: `Send siafunds to an address, and transfer the claim siacoins to your wallet. Run 'wallet send --help' to see a list of available units.`, Run: wrap(walletsendsiafundscmd), } walletSweepCmd = &cobra.Command{ Use: "sweep", Short: "Sweep siacoins and siafunds from a seed.", Long: `Sweep siacoins and siafunds from a seed. The outputs belonging to the seed will be sent to your wallet.`, Run: wrap(walletsweepcmd), } walletBalanceCmd = &cobra.Command{ Use: "balance", Short: "View wallet balance", Long: "View wallet balance, including confirmed and unconfirmed siacoins and siafunds.", Run: wrap(walletbalancecmd), } walletTransactionsCmd = &cobra.Command{ Use: "transactions", Short: "View transactions", Long: "View transactions related to addresses spendable by the wallet, providing a net flow of siacoins and siafunds for each transaction", Run: wrap(wallettransactionscmd), } walletUnlockCmd = &cobra.Command{ Use: `unlock`, Short: "Unlock the wallet", Long: "Decrypt and load the wallet into memory", Run: wrap(walletunlockcmd), } ) const askPasswordText = "We need to encrypt the new data using the current wallet password, please provide: " const currentPasswordText = "Current Password: " const newPasswordText = "New Password: " // walletaddresscmd fetches a new address from the wallet that will be able to // receive coins. func walletaddresscmd() { addr := new(api.WalletAddressGET) err := getAPI("/wallet/address", addr) if err != nil { die("Could not generate new address:", err) } fmt.Printf("Created new address: %s\n", addr.Address) } // walletaddressescmd fetches the list of addresses that the wallet knows. 
func walletaddressescmd() {
	addrs := new(api.WalletAddressesGET)
	err := getAPI("/wallet/addresses", addrs)
	if err != nil {
		die("Failed to fetch addresses:", err)
	}
	for _, addr := range addrs.Addresses {
		fmt.Println(addr)
	}
}

// walletchangepasswordcmd changes the password of the wallet.
func walletchangepasswordcmd() {
	currentPassword, err := speakeasy.Ask(currentPasswordText)
	if err != nil {
		die("Reading password failed:", err)
	}
	newPassword, err := speakeasy.Ask(newPasswordText)
	if err != nil {
		die("Reading password failed:", err)
	}
	qs := fmt.Sprintf("newpassword=%s&encryptionpassword=%s", newPassword, currentPassword)
	err = post("/wallet/changepassword", qs)
	if err != nil {
		die("Changing the password failed:", err)
	}
	fmt.Println("Password changed successfully.")
}

// walletinitcmd encrypts the wallet with the given password.
func walletinitcmd() {
	var er api.WalletInitPOST
	qs := fmt.Sprintf("dictionary=%s", "english")
	if initPassword {
		password, err := speakeasy.Ask("Wallet password: ")
		if err != nil {
			die("Reading password failed:", err)
		}
		qs += fmt.Sprintf("&encryptionpassword=%s", password)
	}
	if initForce {
		qs += "&force=true"
	}
	err := postResp("/wallet/init", qs, &er)
	if err != nil {
		die("Error when encrypting wallet:", err)
	}
	fmt.Printf("Recovery seed:\n%s\n\n", er.PrimarySeed)
	if initPassword {
		fmt.Printf("Wallet encrypted with given password\n")
	} else {
		fmt.Printf("Wallet encrypted with password:\n%s\n", er.PrimarySeed)
	}
}

// walletinitseedcmd initializes the wallet from a preexisting seed.
func walletinitseedcmd() {
	seed, err := speakeasy.Ask("Seed: ")
	if err != nil {
		die("Reading seed failed:", err)
	}
	qs := fmt.Sprintf("seed=%s&dictionary=%s", seed, "english")
	if initPassword {
		password, err := speakeasy.Ask("Wallet password: ")
		if err != nil {
			die("Reading password failed:", err)
		}
		qs += fmt.Sprintf("&encryptionpassword=%s", password)
	}
	if initForce {
		qs += "&force=true"
	}
	err = post("/wallet/init/seed", qs)
	if err != nil {
		die("Could not initialize wallet from seed:", err)
	}
	if initPassword {
		fmt.Println("Wallet initialized and encrypted with given password.")
	} else {
		fmt.Println("Wallet initialized and encrypted with seed.")
	}
}

// walletload033xcmd loads a v0.3.3.x wallet into the current wallet.
func walletload033xcmd(source string) {
	password, err := speakeasy.Ask(askPasswordText)
	if err != nil {
		die("Reading password failed:", err)
	}
	qs := fmt.Sprintf("source=%s&encryptionpassword=%s", abs(source), password)
	err = post("/wallet/033x", qs)
	if err != nil {
		die("Loading wallet failed:", err)
	}
	fmt.Println("Wallet loading successful.")
}

// walletloadseedcmd adds a seed to the wallet's list of seeds.
func walletloadseedcmd() {
	seed, err := speakeasy.Ask("New seed: ")
	if err != nil {
		die("Reading seed failed:", err)
	}
	password, err := speakeasy.Ask(askPasswordText)
	if err != nil {
		die("Reading password failed:", err)
	}
	qs := fmt.Sprintf("encryptionpassword=%s&seed=%s&dictionary=%s", password, seed, "english")
	err = post("/wallet/seed", qs)
	if err != nil {
		die("Could not add seed:", err)
	}
	fmt.Println("Added Key")
}

// walletloadsiagcmd loads a siag key set into the wallet.
func walletloadsiagcmd(keyfiles string) {
	password, err := speakeasy.Ask(askPasswordText)
	if err != nil {
		die("Reading password failed:", err)
	}
	qs := fmt.Sprintf("keyfiles=%s&encryptionpassword=%s", keyfiles, password)
	err = post("/wallet/siagkey", qs)
	if err != nil {
		die("Loading siag key failed:", err)
	}
	fmt.Println("Wallet loading successful.")
}

// walletlockcmd locks the wallet.
func walletlockcmd() {
	err := post("/wallet/lock", "")
	if err != nil {
		die("Could not lock wallet:", err)
	}
}

// walletseedscmd displays the wallet's primary seed and any auxiliary seeds.
func walletseedscmd() {
	var seedInfo api.WalletSeedsGET
	err := getAPI("/wallet/seeds", &seedInfo)
	if err != nil {
		die("Error retrieving the current seed:", err)
	}
	fmt.Println("Primary Seed:")
	fmt.Println(seedInfo.PrimarySeed)
	if len(seedInfo.AllSeeds) == 1 {
		// AllSeeds includes the primary seed
		return
	}
	fmt.Println()
	fmt.Println("Auxiliary Seeds:")
	for _, seed := range seedInfo.AllSeeds {
		if seed == seedInfo.PrimarySeed {
			continue
		}
		fmt.Println() // extra newline for readability
		fmt.Println(seed)
	}
}

// walletsendsiacoinscmd sends siacoins to a destination address.
func walletsendsiacoinscmd(amount, dest string) {
	hastings, err := parseCurrency(amount)
	if err != nil {
		die("Could not parse amount:", err)
	}
	err = post("/wallet/siacoins", fmt.Sprintf("amount=%s&destination=%s", hastings, dest))
	if err != nil {
		die("Could not send siacoins:", err)
	}
	fmt.Printf("Sent %s hastings to %s\n", hastings, dest)
}

// walletsendsiafundscmd sends siafunds to a destination address.
func walletsendsiafundscmd(amount, dest string) {
	err := post("/wallet/siafunds", fmt.Sprintf("amount=%s&destination=%s", amount, dest))
	if err != nil {
		die("Could not send siafunds:", err)
	}
	fmt.Printf("Sent %s siafunds to %s\n", amount, dest)
}

// walletbalancecmd retrieves and displays information about the wallet.
func walletbalancecmd() {
	status := new(api.WalletGET)
	err := getAPI("/wallet", status)
	if err != nil {
		die("Could not get wallet status:", err)
	}
	var fees api.TpoolFeeGET
	err = getAPI("/tpool/fee", &fees)
	if err != nil {
		die("Could not get fee estimation:", err)
	}
	encStatus := "Unencrypted"
	if status.Encrypted {
		encStatus = "Encrypted"
	}
	if !status.Unlocked {
		fmt.Printf(`Wallet status: %v, Locked
Unlock the wallet to view balance
`, encStatus)
		return
	}
	unconfirmedBalance := status.ConfirmedSiacoinBalance.Add(status.UnconfirmedIncomingSiacoins).Sub(status.UnconfirmedOutgoingSiacoins)
	var delta string
	if unconfirmedBalance.Cmp(status.ConfirmedSiacoinBalance) >= 0 {
		delta = "+" + currencyUnits(unconfirmedBalance.Sub(status.ConfirmedSiacoinBalance))
	} else {
		delta = "-" + currencyUnits(status.ConfirmedSiacoinBalance.Sub(unconfirmedBalance))
	}
	fmt.Printf(`Wallet status: %s, Unlocked
Confirmed Balance: %v
Unconfirmed Delta: %v
Exact: %v H
Siafunds: %v SF
Siafund Claims: %v H
Estimated Fee: %v / KB
`, encStatus, currencyUnits(status.ConfirmedSiacoinBalance), delta,
		status.ConfirmedSiacoinBalance, status.SiafundBalance, status.SiacoinClaimBalance,
		fees.Maximum.Mul64(1e3).HumanString())
}

// walletsweepcmd sweeps coins and funds from a seed.
func walletsweepcmd() { seed, err := speakeasy.Ask("Seed: ") if err != nil { die("Reading seed failed:", err) } var swept api.WalletSweepPOST err = postResp("/wallet/sweep/seed", fmt.Sprintf("seed=%s&dictionary=%s", seed, "english"), &swept) if err != nil { die("Could not sweep seed:", err) } fmt.Printf("Swept %v and %v SF from seed.\n", currencyUnits(swept.Coins), swept.Funds) } // wallettransactionscmd lists all of the transactions related to the wallet, // providing a net flow of siacoins and siafunds for each. func wallettransactionscmd() { wtg := new(api.WalletTransactionsGET) err := getAPI("/wallet/transactions?startheight=0&endheight=10000000", wtg) if err != nil { die("Could not fetch transaction history:", err) } fmt.Println(" [height] [transaction id] [net siacoins] [net siafunds]") txns := append(wtg.ConfirmedTransactions, wtg.UnconfirmedTransactions...) for _, txn := range txns { // Determine the number of outgoing siacoins and siafunds. var outgoingSiacoins types.Currency var outgoingSiafunds types.Currency for _, input := range txn.Inputs { if input.FundType == types.SpecifierSiacoinInput && input.WalletAddress { outgoingSiacoins = outgoingSiacoins.Add(input.Value) } if input.FundType == types.SpecifierSiafundInput && input.WalletAddress { outgoingSiafunds = outgoingSiafunds.Add(input.Value) } } // Determine the number of incoming siacoins and siafunds. var incomingSiacoins types.Currency var incomingSiafunds types.Currency for _, output := range txn.Outputs { if output.FundType == types.SpecifierMinerPayout { incomingSiacoins = incomingSiacoins.Add(output.Value) } if output.FundType == types.SpecifierSiacoinOutput && output.WalletAddress { incomingSiacoins = incomingSiacoins.Add(output.Value) } if output.FundType == types.SpecifierSiafundOutput && output.WalletAddress { incomingSiafunds = incomingSiafunds.Add(output.Value) } } // Convert the siacoins to a float. incomingSiacoinsFloat, _ := new(big.Rat).SetFrac(incomingSiacoins.Big(), types.SiacoinPrecision.Big()).Float64() outgoingSiacoinsFloat, _ := new(big.Rat).SetFrac(outgoingSiacoins.Big(), types.SiacoinPrecision.Big()).Float64() // Print the results. if txn.ConfirmationHeight < 1e9 { fmt.Printf("%12v", txn.ConfirmationHeight) } else { fmt.Printf(" unconfirmed") } fmt.Printf("%67v%15.2f SC", txn.TransactionID, incomingSiacoinsFloat-outgoingSiacoinsFloat) // For siafunds, need to avoid having a negative types.Currency. 
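		// types.Currency wraps an unsigned big.Int, so the smaller value must
		// always be subtracted from the larger one; the sign is printed
		// manually in the branches below.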
		if incomingSiafunds.Cmp(outgoingSiafunds) >= 0 {
			fmt.Printf("%14v SF\n", incomingSiafunds.Sub(outgoingSiafunds))
		} else {
			fmt.Printf("-%14v SF\n", outgoingSiafunds.Sub(incomingSiafunds))
		}
	}
}

// walletunlockcmd unlocks a saved wallet.
func walletunlockcmd() {
	password, err := speakeasy.Ask("Wallet password: ")
	if err != nil {
		die("Reading password failed:", err)
	}
	qs := fmt.Sprintf("encryptionpassword=%s&dictionary=%s", password, "english")
	err = post("/wallet/unlock", qs)
	if err != nil {
		die("Could not unlock wallet:", err)
	}
	fmt.Println("Wallet unlocked")
}
Sia-1.3.0/siad/000077500000000000000000000000001313565667000131635ustar00rootroot00000000000000Sia-1.3.0/siad/daemon.go000066400000000000000000000230231313565667000147550ustar00rootroot00000000000000package main

import (
	"errors"
	"fmt"
	"os"
	"os/signal"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/NebulousLabs/Sia/api"
	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/modules/consensus"
	"github.com/NebulousLabs/Sia/modules/explorer"
	"github.com/NebulousLabs/Sia/modules/gateway"
	"github.com/NebulousLabs/Sia/modules/host"
	"github.com/NebulousLabs/Sia/modules/miner"
	"github.com/NebulousLabs/Sia/modules/renter"
	"github.com/NebulousLabs/Sia/modules/transactionpool"
	"github.com/NebulousLabs/Sia/modules/wallet"
	"github.com/NebulousLabs/Sia/profile"

	"github.com/bgentry/speakeasy"
	"github.com/spf13/cobra"
)

// verifyAPISecurity checks that the security values are consistent with a
// sane, secure system.
func verifyAPISecurity(config Config) error {
	// Make sure that only the loopback address is allowed unless the
	// --disable-api-security flag has been used.
	if !config.Siad.AllowAPIBind {
		addr := modules.NetAddress(config.Siad.APIaddr)
		if !addr.IsLoopback() {
			if addr.Host() == "" {
				return fmt.Errorf("a blank host will listen on all interfaces, did you mean localhost:%v?\nyou must pass --disable-api-security to bind Siad to a non-localhost address", addr.Port())
			}
			return errors.New("you must pass --disable-api-security to bind Siad to a non-localhost address")
		}
		return nil
	}

	// If the --disable-api-security flag is used, enforce that
	// --authenticate-api must also be used.
	if config.Siad.AllowAPIBind && !config.Siad.AuthenticateAPI {
		return errors.New("cannot use --disable-api-security without setting an api password")
	}
	return nil
}

// processNetAddr adds a ':' to a bare integer, so that it is a proper port
// number.
func processNetAddr(addr string) string {
	_, err := strconv.Atoi(addr)
	if err == nil {
		return ":" + addr
	}
	return addr
}

// processModules makes the modules string lowercase to make checking whether
// a module is in the string easier, and returns an error if the string
// contains an invalid module character.
func processModules(modules string) (string, error) {
	modules = strings.ToLower(modules)
	validModules := "cghmrtwe"
	invalidModules := modules
	for _, m := range validModules {
		invalidModules = strings.Replace(invalidModules, string(m), "", 1)
	}
	if len(invalidModules) > 0 {
		return "", errors.New("Unable to parse --modules flag, unrecognized or duplicate modules: " + invalidModules)
	}
	return modules, nil
}

// processProfileFlags checks that the flags given for profiling are valid.
func processProfileFlags(profile string) (string, error) { profile = strings.ToLower(profile) validProfiles := "cmt" invalidProfiles := profile for _, p := range validProfiles { invalidProfiles = strings.Replace(invalidProfiles, string(p), "", 1) } if len(invalidProfiles) > 0 { return "", errors.New("Unable to parse --profile flags, unrecognized or duplicate flags: " + invalidProfiles) } return profile, nil } // processConfig checks the configuration values and performs cleanup on // incorrect-but-allowed values. func processConfig(config Config) (Config, error) { var err1, err2 error config.Siad.APIaddr = processNetAddr(config.Siad.APIaddr) config.Siad.RPCaddr = processNetAddr(config.Siad.RPCaddr) config.Siad.HostAddr = processNetAddr(config.Siad.HostAddr) config.Siad.Modules, err1 = processModules(config.Siad.Modules) config.Siad.Profile, err2 = processProfileFlags(config.Siad.Profile) err3 := verifyAPISecurity(config) err := build.JoinErrors([]error{err1, err2, err3}, ", and ") if err != nil { return Config{}, err } return config, nil } // startDaemon uses the config parameters to initialize Sia modules and start // siad. func startDaemon(config Config) (err error) { // Prompt user for API password. if config.Siad.AuthenticateAPI { config.APIPassword, err = speakeasy.Ask("Enter API password: ") if err != nil { return err } if config.APIPassword == "" { return errors.New("password cannot be blank") } } // Process the config variables after they are parsed by cobra. config, err = processConfig(config) if err != nil { return err } // Print a startup message. fmt.Println("Loading...") loadStart := time.Now() // Create the server and start serving daemon routes immediately. fmt.Printf("(0/%d) Loading siad...\n", len(config.Siad.Modules)) srv, err := NewServer(config.Siad.APIaddr, config.Siad.RequiredUserAgent, config.APIPassword) if err != nil { return err } servErrs := make(chan error) go func() { servErrs <- srv.Serve() }() // Initialize the Sia modules i := 0 var g modules.Gateway if strings.Contains(config.Siad.Modules, "g") { i++ fmt.Printf("(%d/%d) Loading gateway...\n", i, len(config.Siad.Modules)) g, err = gateway.New(config.Siad.RPCaddr, !config.Siad.NoBootstrap, filepath.Join(config.Siad.SiaDir, modules.GatewayDir)) if err != nil { return err } defer func() { fmt.Println("Closing gateway...") err := g.Close() if err != nil { fmt.Println("Error during gateway shutdown:", err) } }() } var cs modules.ConsensusSet if strings.Contains(config.Siad.Modules, "c") { i++ fmt.Printf("(%d/%d) Loading consensus...\n", i, len(config.Siad.Modules)) cs, err = consensus.New(g, !config.Siad.NoBootstrap, filepath.Join(config.Siad.SiaDir, modules.ConsensusDir)) if err != nil { return err } defer func() { fmt.Println("Closing consensus...") err := cs.Close() if err != nil { fmt.Println("Error during consensus set shutdown:", err) } }() } var e modules.Explorer if strings.Contains(config.Siad.Modules, "e") { i++ fmt.Printf("(%d/%d) Loading explorer...\n", i, len(config.Siad.Modules)) e, err = explorer.New(cs, filepath.Join(config.Siad.SiaDir, modules.ExplorerDir)) if err != nil { return err } defer func() { fmt.Println("Closing explorer...") err := e.Close() if err != nil { fmt.Println("Error during explorer shutdown:", err) } }() } var tpool modules.TransactionPool if strings.Contains(config.Siad.Modules, "t") { i++ fmt.Printf("(%d/%d) Loading transaction pool...\n", i, len(config.Siad.Modules)) tpool, err = transactionpool.New(cs, g, filepath.Join(config.Siad.SiaDir, modules.TransactionPoolDir)) if 
err != nil {
			return err
		}
		defer func() {
			fmt.Println("Closing transaction pool...")
			err := tpool.Close()
			if err != nil {
				fmt.Println("Error during transaction pool shutdown:", err)
			}
		}()
	}
	var w modules.Wallet
	if strings.Contains(config.Siad.Modules, "w") {
		i++
		fmt.Printf("(%d/%d) Loading wallet...\n", i, len(config.Siad.Modules))
		w, err = wallet.New(cs, tpool, filepath.Join(config.Siad.SiaDir, modules.WalletDir))
		if err != nil {
			return err
		}
		defer func() {
			fmt.Println("Closing wallet...")
			err := w.Close()
			if err != nil {
				fmt.Println("Error during wallet shutdown:", err)
			}
		}()
	}
	var m modules.Miner
	if strings.Contains(config.Siad.Modules, "m") {
		i++
		fmt.Printf("(%d/%d) Loading miner...\n", i, len(config.Siad.Modules))
		m, err = miner.New(cs, tpool, w, filepath.Join(config.Siad.SiaDir, modules.MinerDir))
		if err != nil {
			return err
		}
		defer func() {
			fmt.Println("Closing miner...")
			err := m.Close()
			if err != nil {
				fmt.Println("Error during miner shutdown:", err)
			}
		}()
	}
	var h modules.Host
	if strings.Contains(config.Siad.Modules, "h") {
		i++
		fmt.Printf("(%d/%d) Loading host...\n", i, len(config.Siad.Modules))
		h, err = host.New(cs, tpool, w, config.Siad.HostAddr, filepath.Join(config.Siad.SiaDir, modules.HostDir))
		if err != nil {
			return err
		}
		defer func() {
			fmt.Println("Closing host...")
			err := h.Close()
			if err != nil {
				fmt.Println("Error during host shutdown:", err)
			}
		}()
	}
	var r modules.Renter
	if strings.Contains(config.Siad.Modules, "r") {
		i++
		fmt.Printf("(%d/%d) Loading renter...\n", i, len(config.Siad.Modules))
		r, err = renter.New(g, cs, w, tpool, filepath.Join(config.Siad.SiaDir, modules.RenterDir))
		if err != nil {
			return err
		}
		defer func() {
			fmt.Println("Closing renter...")
			err := r.Close()
			if err != nil {
				fmt.Println("Error during renter shutdown:", err)
			}
		}()
	}

	// Create the Sia API
	a := api.New(
		config.Siad.RequiredUserAgent,
		config.APIPassword,
		cs, e, g, h, m, r, tpool, w,
	)

	// connect the API to the server
	srv.mux.Handle("/", a)

	// stop the server if a kill signal is caught
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, os.Interrupt, os.Kill)
	go func() {
		<-sigChan
		fmt.Println("\rCaught stop signal, quitting...")
		srv.Close()
	}()

	// Print a 'startup complete' message.
	startupTime := time.Since(loadStart)
	fmt.Println("Finished loading in", startupTime.Seconds(), "seconds")

	err = <-servErrs
	if err != nil {
		build.Critical(err)
	}

	return nil
}

// startDaemonCmd is a passthrough function for startDaemon.
func startDaemonCmd(cmd *cobra.Command, _ []string) {
	var profileCPU, profileMem, profileTrace bool
	profileCPU = strings.Contains(globalConfig.Siad.Profile, "c")
	profileMem = strings.Contains(globalConfig.Siad.Profile, "m")
	profileTrace = strings.Contains(globalConfig.Siad.Profile, "t")
	if build.DEBUG {
		profileCPU = true
		profileMem = true
	}
	if profileCPU || profileMem || profileTrace {
		go profile.StartContinuousProfile(globalConfig.Siad.ProfileDir, profileCPU, profileMem, profileTrace)
	}

	// Start siad. startDaemon will only return when it is shutting down.
	err := startDaemon(globalConfig)
	if err != nil {
		die(err)
	}

	// Daemon seems to have closed cleanly. Print a 'closed' message.
	fmt.Println("Shutdown complete.")
}
Sia-1.3.0/siad/daemon_test.go000066400000000000000000000137601313565667000160230ustar00rootroot00000000000000package main

import (
	"testing"
)

// TestUnitProcessNetAddr probes the 'processNetAddr' function.
func TestUnitProcessNetAddr(t *testing.T) { testVals := struct { inputs []string expectedOutputs []string }{ inputs: []string{"9980", ":9980", "localhost:9980", "test.com:9980", "192.168.14.92:9980"}, expectedOutputs: []string{":9980", ":9980", "localhost:9980", "test.com:9980", "192.168.14.92:9980"}, } for i, input := range testVals.inputs { output := processNetAddr(input) if output != testVals.expectedOutputs[i] { t.Error("unexpected result", i) } } } // TestUnitProcessModules tests that processModules correctly processes modules // passed to the -M / --modules flag. func TestUnitProcessModules(t *testing.T) { // Test valid modules. testVals := []struct { in string out string }{ {"cghmrtwe", "cghmrtwe"}, {"CGHMRTWE", "cghmrtwe"}, {"c", "c"}, {"g", "g"}, {"h", "h"}, {"m", "m"}, {"r", "r"}, {"t", "t"}, {"w", "w"}, {"e", "e"}, {"C", "c"}, {"G", "g"}, {"H", "h"}, {"M", "m"}, {"R", "r"}, {"T", "t"}, {"W", "w"}, {"E", "e"}, } for _, testVal := range testVals { out, err := processModules(testVal.in) if err != nil { t.Error("processModules failed with error:", err) } if out != testVal.out { t.Errorf("processModules returned incorrect modules: expected %s, got %s\n", testVal.out, out) } } // Test invalid modules. invalidModules := []string{"abdfijklnopqsuvxyz", "cghmrtwez", "cz", "z", "cc", "ccz", "ccm", "cmm", "ccmm"} for _, invalidModule := range invalidModules { _, err := processModules(invalidModule) if err == nil { t.Error("processModules didn't error on invalid module:", invalidModule) } } } // TestUnitProcessProfile tests that processProfileFlags correctly processes profiles // passed to the --profile flag. func TestUnitProcessProfile(t *testing.T) { // Test valid profiles. testVals := []struct { in string out string }{ {"cmt", "cmt"}, {"CMT", "cmt"}, {"c", "c"}, {"m", "m"}, {"t", "t"}, {"C", "c"}, {"M", "m"}, {"T", "t"}, } for _, testVal := range testVals { out, err := processProfileFlags(testVal.in) if err != nil { t.Error("processProfileFlags failed with error:", err) } if out != testVal.out { t.Errorf("processProfileFlags returned incorrect modules: expected %s, got %s\n", testVal.out, out) } } // Test invalid modules. invalidProfiles := []string{"abdfijklnopqsuvxyz", "cghmrtwez", "cz", "z", "cc", "ccz", "ccm", "cmm", "ccmm", "g", "h", "cghmrtwe", "CGHMRTWE", "mts"} for _, invalidProfiles := range invalidProfiles { _, err := processProfileFlags(invalidProfiles) if err == nil { t.Error("processProfileFlags didn't error on invalid profile:", invalidProfiles) } } } // TestUnitProcessConfig probes the 'processConfig' function. func TestUnitProcessConfig(t *testing.T) { // Test valid configs. 
testVals := struct { inputs [][]string expectedOutputs [][]string }{ inputs: [][]string{ {"localhost:9980", "localhost:9981", "localhost:9982", "cghmrtwe"}, {"localhost:9980", "localhost:9981", "localhost:9982", "CGHMRTWE"}, }, expectedOutputs: [][]string{ {"localhost:9980", "localhost:9981", "localhost:9982", "cghmrtwe"}, {"localhost:9980", "localhost:9981", "localhost:9982", "cghmrtwe"}, }, } var config Config for i := range testVals.inputs { config.Siad.APIaddr = testVals.inputs[i][0] config.Siad.RPCaddr = testVals.inputs[i][1] config.Siad.HostAddr = testVals.inputs[i][2] config, err := processConfig(config) if err != nil { t.Error("processConfig failed with error:", err) } if config.Siad.APIaddr != testVals.expectedOutputs[i][0] { t.Error("processing failure at check", i, 0) } if config.Siad.RPCaddr != testVals.expectedOutputs[i][1] { t.Error("processing failure at check", i, 1) } if config.Siad.HostAddr != testVals.expectedOutputs[i][2] { t.Error("processing failure at check", i, 2) } } // Test invalid configs. invalidModule := "z" config.Siad.Modules = invalidModule _, err := processConfig(config) if err == nil { t.Error("processModules didn't error on invalid module:", invalidModule) } } // TestVerifyAPISecurity checks that the verifyAPISecurity function is // correctly banning the use of a non-loopback address without the // --disable-security flag, and that the --disable-security flag cannot be used // without an api password. func TestVerifyAPISecurity(t *testing.T) { // Check that the loopback address is accepted when security is enabled. var securityOnLoopback Config securityOnLoopback.Siad.APIaddr = "127.0.0.1:9980" err := verifyAPISecurity(securityOnLoopback) if err != nil { t.Error("loopback + securityOn was rejected") } // Check that the blank address is rejected when security is enabled. var securityOnBlank Config securityOnBlank.Siad.APIaddr = ":9980" err = verifyAPISecurity(securityOnBlank) if err == nil { t.Error("blank + securityOn was accepted") } // Check that a public hostname is rejected when security is enabled. var securityOnPublic Config securityOnPublic.Siad.APIaddr = "sia.tech:9980" err = verifyAPISecurity(securityOnPublic) if err == nil { t.Error("public + securityOn was accepted") } // Check that a public hostname is rejected when security is disabled and // there is no api password. var securityOffPublic Config securityOffPublic.Siad.APIaddr = "sia.tech:9980" securityOffPublic.Siad.AllowAPIBind = true err = verifyAPISecurity(securityOffPublic) if err == nil { t.Error("public + securityOff was accepted without authentication") } // Check that a public hostname is accepted when security is disabled and // there is an api password. var securityOffPublicAuthenticated Config securityOffPublicAuthenticated.Siad.APIaddr = "sia.tech:9980" securityOffPublicAuthenticated.Siad.AllowAPIBind = true securityOffPublicAuthenticated.Siad.AuthenticateAPI = true err = verifyAPISecurity(securityOffPublicAuthenticated) if err != nil { t.Error("public + securityOff with authentication was rejected:", err) } } Sia-1.3.0/siad/main.go000066400000000000000000000145141313565667000144430ustar00rootroot00000000000000package main import ( "fmt" "os" "github.com/spf13/cobra" "github.com/NebulousLabs/Sia/build" ) var ( // globalConfig is used by the cobra package to fill out the configuration // variables. globalConfig Config ) // exit codes // inspired by sysexits.h const ( exitCodeGeneral = 1 // Not in sysexits.h, but is standard practice. 
	exitCodeUsage = 64 // EX_USAGE in sysexits.h
)

// The Config struct contains all configurable variables for siad. It is
// compatible with gcfg.
type Config struct {
	// The APIPassword is input by the user after the daemon starts up, if the
	// --authenticate-api flag is set.
	APIPassword string

	// The Siad variables are referenced directly by cobra, and are set
	// according to the flags.
	Siad struct {
		APIaddr  string
		RPCaddr  string
		HostAddr string

		AllowAPIBind bool

		Modules           string
		NoBootstrap       bool
		RequiredUserAgent string
		AuthenticateAPI   bool

		Profile    string
		ProfileDir string
		SiaDir     string
	}
}

// die prints its arguments to stderr, then exits the program with the default
// error code.
func die(args ...interface{}) {
	fmt.Fprintln(os.Stderr, args...)
	os.Exit(exitCodeGeneral)
}

// versionCmd is a cobra command that prints the version of siad.
func versionCmd(*cobra.Command, []string) {
	switch build.Release {
	case "dev":
		fmt.Println("Sia Daemon v" + build.Version + "-dev")
	case "standard":
		fmt.Println("Sia Daemon v" + build.Version)
	case "testing":
		fmt.Println("Sia Daemon v" + build.Version + "-testing")
	default:
		fmt.Println("Sia Daemon v" + build.Version + "-???")
	}
}

// modulesCmd is a cobra command that prints help info about modules.
func modulesCmd(*cobra.Command, []string) {
	fmt.Println(`Use the -M or --modules flag to run only specific modules. Modules are
independent components of Sia. This flag should only be used by developers or
people who want to reduce overhead from unused modules. Modules are specified
by their first letter. If the -M or --modules flag is not specified the default
modules are run. The default modules are:
	gateway, consensus set, host, renter, transaction pool, wallet
This is equivalent to:
	siad -M cghrtw
Below is a list of all the modules available.

Gateway (g):
	The gateway maintains a peer to peer connection to the network and
	enables other modules to perform RPC calls on peers.
	The gateway is required by all other modules.
	Example:
		siad -M g
Consensus Set (c):
	The consensus set manages everything related to consensus and keeps the
	blockchain in sync with the rest of the network.
	The consensus set requires the gateway.
	Example:
		siad -M gc
Transaction Pool (t):
	The transaction pool manages unconfirmed transactions.
	The transaction pool requires the consensus set.
	Example:
		siad -M gct
Wallet (w):
	The wallet stores and manages siacoins and siafunds.
	The wallet requires the consensus set and transaction pool.
	Example:
		siad -M gctw
Renter (r):
	The renter manages the user's files on the network.
	The renter requires the consensus set, transaction pool, and wallet.
	Example:
		siad -M gctwr
Host (h):
	The host provides storage from local disks to the network. The host
	negotiates file contracts with remote renters to earn money for storing
	other users' files.
	The host requires the consensus set, transaction pool, and wallet.
	Example:
		siad -M gctwh
Miner (m):
	The miner provides a basic CPU mining implementation as well as an API
	for external miners to use.
	The miner requires the consensus set, transaction pool, and wallet.
	Example:
		siad -M gctwm
Explorer (e):
	The explorer provides statistics about the blockchain and can be
	queried for information about specific transactions or other objects on
	the blockchain.
	The explorer requires the consensus set.
	Example:
		siad -M gce`)
}

// main establishes a set of commands and flags using the cobra package.
func main() { if build.DEBUG { fmt.Println("Running with debugging enabled") } root := &cobra.Command{ Use: os.Args[0], Short: "Sia Daemon v" + build.Version, Long: "Sia Daemon v" + build.Version, Run: startDaemonCmd, } root.AddCommand(&cobra.Command{ Use: "version", Short: "Print version information", Long: "Print version information about the Sia Daemon", Run: versionCmd, }) root.AddCommand(&cobra.Command{ Use: "modules", Short: "List available modules for use with -M, --modules flag", Long: "List available modules for use with -M, --modules flag and their uses", Run: modulesCmd, }) // Set default values, which have the lowest priority. root.Flags().StringVarP(&globalConfig.Siad.RequiredUserAgent, "agent", "", "Sia-Agent", "required substring for the user agent") root.Flags().StringVarP(&globalConfig.Siad.HostAddr, "host-addr", "", ":9982", "which port the host listens on") root.Flags().StringVarP(&globalConfig.Siad.ProfileDir, "profile-directory", "", "profiles", "location of the profiling directory") root.Flags().StringVarP(&globalConfig.Siad.APIaddr, "api-addr", "", "localhost:9980", "which host:port the API server listens on") root.Flags().StringVarP(&globalConfig.Siad.SiaDir, "sia-directory", "d", "", "location of the sia directory") root.Flags().BoolVarP(&globalConfig.Siad.NoBootstrap, "no-bootstrap", "", false, "disable bootstrapping on this run") root.Flags().StringVarP(&globalConfig.Siad.Profile, "profile", "", "", "enable profiling with flags 'cmt' for CPU, memory, trace") root.Flags().StringVarP(&globalConfig.Siad.RPCaddr, "rpc-addr", "", ":9981", "which port the gateway listens on") root.Flags().StringVarP(&globalConfig.Siad.Modules, "modules", "M", "cghrtw", "enabled modules, see 'siad modules' for more info") root.Flags().BoolVarP(&globalConfig.Siad.AuthenticateAPI, "authenticate-api", "", false, "enable API password protection") root.Flags().BoolVarP(&globalConfig.Siad.AllowAPIBind, "disable-api-security", "", false, "allow siad to listen on a non-localhost address (DANGEROUS)") // Parse cmdline flags, overwriting both the default values and the config // file values. if err := root.Execute(); err != nil { // Since no commands return errors (all commands set Command.Run instead of // Command.RunE), Command.Execute() should only return an error on an // invalid command or flag. Therefore Command.Usage() was called (assuming // Command.SilenceUsage is false) and we should exit with exitCodeUsage. os.Exit(exitCodeUsage) } } Sia-1.3.0/siad/server.go000066400000000000000000000335071313565667000150300ustar00rootroot00000000000000package main import ( "archive/zip" "bytes" "encoding/json" "errors" "fmt" "io" "io/ioutil" "math/big" "net" "net/http" "path" "path/filepath" "runtime" "sort" "strings" "time" "github.com/NebulousLabs/Sia/api" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/types" "github.com/inconshreveable/go-update" "github.com/julienschmidt/httprouter" "github.com/kardianos/osext" ) var errEmptyUpdateResponse = errors.New("API call to https://api.github.com/repos/NebulousLabs/Sia/releases/latest is returning an empty response") type ( // Server creates and serves a HTTP server that offers communication with a // Sia API. Server struct { httpServer *http.Server mux *http.ServeMux listener net.Listener } // SiaConstants is a struct listing all of the constants in use. 
SiaConstants struct { BlockFrequency types.BlockHeight `json:"blockfrequency"` BlockSizeLimit uint64 `json:"blocksizelimit"` ExtremeFutureThreshold types.Timestamp `json:"extremefuturethreshold"` FutureThreshold types.Timestamp `json:"futurethreshold"` GenesisTimestamp types.Timestamp `json:"genesistimestamp"` MaturityDelay types.BlockHeight `json:"maturitydelay"` MedianTimestampWindow uint64 `json:"mediantimestampwindow"` SiafundCount types.Currency `json:"siafundcount"` SiafundPortion *big.Rat `json:"siafundportion"` TargetWindow types.BlockHeight `json:"targetwindow"` InitialCoinbase uint64 `json:"initialcoinbase"` MinimumCoinbase uint64 `json:"minimumcoinbase"` RootTarget types.Target `json:"roottarget"` RootDepth types.Target `json:"rootdepth"` MaxAdjustmentUp *big.Rat `json:"maxadjustmentup"` MaxAdjustmentDown *big.Rat `json:"maxadjustmentdown"` SiacoinPrecision types.Currency `json:"siacoinprecision"` } DaemonVersion struct { Version string `json:"version"` } // UpdateInfo indicates whether an update is available, and to what // version. UpdateInfo struct { Available bool `json:"available"` Version string `json:"version"` } // githubRelease represents some of the JSON returned by the GitHub release API // endpoint. Only the fields relevant to updating are included. githubRelease struct { TagName string `json:"tag_name"` Assets []struct { Name string `json:"name"` DownloadURL string `json:"browser_download_url"` } `json:"assets"` } ) const ( // The developer key is used to sign updates and other important Sia- // related information. developerKey = `-----BEGIN PUBLIC KEY----- MIIEIjANBgkqhkiG9w0BAQEFAAOCBA8AMIIECgKCBAEAsoQHOEU6s/EqMDtw5HvA YPTUaBgnviMFbG3bMsRqSCD8ug4XJYh+Ik6WP0xgq+OPDehPiaXK8ghAtBiW1EJK mBRwlABXAzREZg8wRfG4l8Zj6ckAPJOgLn0jobXy6/SCQ+jZSWh4Y8DYr+LA3Mn3 EOga7Jvhpc3fTZ232GBGJ1BobuNfRfYmwxSphv+T4vzIA3JUjVfa8pYZGIjh5XbJ 5M8Lef0Xa9eqr6lYm5kQoOIXeOW56ImqI2BKg/I9NGw9phSPbwaFfy1V2kfHp5Xy DtKnyj/O9zDi+qUKjoIivnEoV+3DkioHUWv7Fpf7yx/9cPyckwvaBsTd9Cfp4uBx qJ5Qyv69VZQiD6DikNwgzjGbIjiLwfTObhInKZUoYl48yzgkR80ja5TW0SoidNvO 4WTbWcLolOl522VarTs7wlgbq0Ad7yrNVnHzo447v2iT20ILH2oeAcZqvpcvRmTl U6uKoaVmBH3D3Y19dPluOjK53BrqfQ5L8RFli2wEJktPsi5fUTd4UI9BgnUieuDz S7h/VH9bv9ZVvyjpu/uVjdvaikT3zbIy9J6wS6uE5qPLPhI4B9HgbrQ03muDGpql gZrMiL3GdYrBiqpIbaWHfM0eMWEK3ZScUdtCgUXMMrkvaUJ4g9wEgbONFVVOMIV+ YubIuzBFqug6WyxN/EAM/6Fss832AwVPcYM0NDTVGVdVplLMdN8YNjrYuaPngBCG e8QaTWtHzLujyBIkVdAHqfkRS65jp7JLLMx7jUA74/E/v+0cNew3Y1p2gt3iQH8t w93xn9IPUfQympc4h3KerP/Yn6P/qAh68jQkOiMMS+VbCq/BOn8Q3GbR+8rQ8dmk qVoGA7XrPQ6bymKBTghk2Ek+ZjxrpAoj0xYoYyzWf0kuxeOT8kAjlLLmfQ8pm75S QHLqH49FyfeETIU02rkw2oMOX/EYdJzZukHuouwbpKSElpRx+xTnaSemMJo+U7oX xVjma3Zynh9w12abnFWkZKtrxwXv7FCSzb0UZmMWUqWzCS03Rrlur21jp4q2Wl71 Vt92xe5YbC/jbh386F1e/qGq6p+D1AmBynIpp/HE6fPsc9LWgJDDkREZcp7hthGW IdYPeP3CesFHnsZMueZRib0i7lNUkBSRneO1y/C9poNv1vOeTCNEE0jvhp/XOJuc yCQtrUSNALsvm7F+bnwP2F7K34k7MOlOgnTGqCqW+9WwBcjR44B0HI+YERCcRmJ8 krBuVo9OBMV0cYBWpjo3UI9j3lHESCYhLnCz7SPap7C1yORc2ydJh+qjKqdLBHom t+JydcdJLbIG+kb3jB9QIIu5A4TlSGlHV6ewtxIWLS1473jEkITiVTt0Y5k+VLfW bwIDAQAB -----END PUBLIC KEY-----` ) // version returns the version number of a non-LTS release. This assumes that // tag names will always be of the form "vX.Y.Z". func (r *githubRelease) version() string { return strings.TrimPrefix(r.TagName, "v") } // byVersion sorts non-LTS releases by their version string, placing the highest // version number first. 
type byVersion []githubRelease func (rs byVersion) Len() int { return len(rs) } func (rs byVersion) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] } func (rs byVersion) Less(i, j int) bool { // we want the higher version number to be reported as "less" so that it is // placed first in the slice return build.VersionCmp(rs[i].version(), rs[j].version()) >= 0 } // latestRelease returns the latest non-LTS release, given a set of arbitrary // releases. func latestRelease(releases []githubRelease) (githubRelease, error) { // filter the releases to exclude LTS releases nonLTS := releases[:0] for _, r := range releases { if !strings.Contains(r.TagName, "lts") && build.IsVersion(r.version()) { nonLTS = append(nonLTS, r) } } // sort by version sort.Sort(byVersion(nonLTS)) // return the latest release if len(nonLTS) == 0 { return githubRelease{}, errEmptyUpdateResponse } return nonLTS[0], nil } // fetchLatestRelease returns metadata about the most recent non-LTS GitHub // release. func fetchLatestRelease() (githubRelease, error) { req, err := http.NewRequest("GET", "https://api.github.com/repos/NebulousLabs/Sia/releases", nil) if err != nil { return githubRelease{}, err } req.Header.Set("Accept", "application/vnd.github.v3+json") resp, err := http.DefaultClient.Do(req) if err != nil { return githubRelease{}, err } defer resp.Body.Close() var releases []githubRelease err = json.NewDecoder(resp.Body).Decode(&releases) if err != nil { return githubRelease{}, err } return latestRelease(releases) } // updateToRelease updates siad and siac to the release specified. siac is // assumed to be in the same folder as siad. func updateToRelease(release githubRelease) error { updateOpts := update.Options{ Verifier: update.NewRSAVerifier(), } err := updateOpts.SetPublicKeyPEM([]byte(developerKey)) if err != nil { // should never happen return err } binaryFolder, err := osext.ExecutableFolder() if err != nil { return err } // construct release filename releaseName := fmt.Sprintf("Sia-%s-%s-%s.zip", release.TagName, runtime.GOOS, runtime.GOARCH) // find release var downloadURL string for _, asset := range release.Assets { if asset.Name == releaseName { downloadURL = asset.DownloadURL break } } if downloadURL == "" { return errors.New("couldn't find download URL for " + releaseName) } // download release archive resp, err := http.Get(downloadURL) if err != nil { return err } // release should be small enough to store in memory (<10 MiB); use // LimitReader to ensure we don't download more than 32 MiB content, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<25)) resp.Body.Close() if err != nil { return err } r := bytes.NewReader(content) z, err := zip.NewReader(r, r.Size()) if err != nil { return err } // process zip, finding siad/siac binaries and signatures for _, binary := range []string{"siad", "siac"} { var binData io.ReadCloser var signature []byte var binaryName string // needed for TargetPath below for _, zf := range z.File { switch base := path.Base(zf.Name); base { case binary, binary + ".exe": binaryName = base binData, err = zf.Open() if err != nil { return err } defer binData.Close() case binary + ".sig", binary + ".exe.sig": sigFile, err := zf.Open() if err != nil { return err } defer sigFile.Close() signature, err = ioutil.ReadAll(sigFile) if err != nil { return err } } } if binData == nil { return errors.New("could not find " + binary + " binary") } else if signature == nil { return errors.New("could not find " + binary + " signature") } // apply update updateOpts.Signature = signature
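// TargetPath below names the binary on disk that go-update will replace, // and TargetMode re-applies the executable permission bits to the new file.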
updateOpts.TargetMode = 0775 // executable updateOpts.TargetPath = filepath.Join(binaryFolder, binaryName) err = update.Apply(binData, updateOpts) if err != nil { return err } } return nil } // daemonUpdateHandlerGET handles the API call that checks for an update. func (srv *Server) daemonUpdateHandlerGET(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { release, err := fetchLatestRelease() if err != nil { api.WriteError(w, api.Error{Message: "Failed to fetch latest release: " + err.Error()}, http.StatusInternalServerError) return } latestVersion := release.TagName[1:] // delete leading 'v' api.WriteJSON(w, UpdateInfo{ Available: build.VersionCmp(latestVersion, build.Version) > 0, Version: latestVersion, }) } // daemonUpdateHandlerPOST handles the API call that updates siad and siac. // There is no safeguard to prevent "updating" to the same release, so callers // should always check the latest version via daemonUpdateHandlerGET first. // TODO: add support for specifying version to update to. func (srv *Server) daemonUpdateHandlerPOST(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { release, err := fetchLatestRelease() if err != nil { api.WriteError(w, api.Error{Message: "Failed to fetch latest release: " + err.Error()}, http.StatusInternalServerError) return } err = updateToRelease(release) if err != nil { if rerr := update.RollbackError(err); rerr != nil { api.WriteError(w, api.Error{Message: "Serious error: Failed to rollback from bad update: " + rerr.Error()}, http.StatusInternalServerError) } else { api.WriteError(w, api.Error{Message: "Failed to apply update: " + err.Error()}, http.StatusInternalServerError) } return } api.WriteSuccess(w) } // daemonConstantsHandler writes a JSON object containing all of the constants. func (srv *Server) daemonConstantsHandler(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { sc := SiaConstants{ BlockFrequency: types.BlockFrequency, BlockSizeLimit: types.BlockSizeLimit, ExtremeFutureThreshold: types.ExtremeFutureThreshold, FutureThreshold: types.FutureThreshold, GenesisTimestamp: types.GenesisTimestamp, MaturityDelay: types.MaturityDelay, MedianTimestampWindow: types.MedianTimestampWindow, SiafundCount: types.SiafundCount, SiafundPortion: types.SiafundPortion, TargetWindow: types.TargetWindow, InitialCoinbase: types.InitialCoinbase, MinimumCoinbase: types.MinimumCoinbase, RootTarget: types.RootTarget, RootDepth: types.RootDepth, MaxAdjustmentUp: types.MaxAdjustmentUp, MaxAdjustmentDown: types.MaxAdjustmentDown, SiacoinPrecision: types.SiacoinPrecision, } api.WriteJSON(w, sc) } // daemonVersionHandler handles the API call that requests the daemon's version. func (srv *Server) daemonVersionHandler(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { api.WriteJSON(w, DaemonVersion{Version: build.Version}) } // daemonStopHandler handles the API call to stop the daemon cleanly. func (srv *Server) daemonStopHandler(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { // can't write after we stop the server, so lie a bit.
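// The "lie" is that success is reported before Close has actually run; if // Close subsequently fails, the error is routed to build.Critical below // rather than into the HTTP response.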
api.WriteSuccess(w) // need to flush the response before shutting down the server f, ok := w.(http.Flusher) if !ok { panic("Server does not support flushing") } f.Flush() if err := srv.Close(); err != nil { build.Critical(err) } } func (srv *Server) daemonHandler(password string) http.Handler { router := httprouter.New() router.GET("/daemon/constants", srv.daemonConstantsHandler) router.GET("/daemon/version", srv.daemonVersionHandler) router.GET("/daemon/update", srv.daemonUpdateHandlerGET) router.POST("/daemon/update", srv.daemonUpdateHandlerPOST) router.GET("/daemon/stop", api.RequirePassword(srv.daemonStopHandler, password)) return router } // NewServer creates a new net/http server listening on bindAddr. Only the // /daemon/ routes are registered by this func; additional routes can be // registered later by calling srv.mux.Handle. func NewServer(bindAddr, requiredUserAgent, requiredPassword string) (*Server, error) { // Create the listener for the server l, err := net.Listen("tcp", bindAddr) if err != nil { return nil, err } // Create the Server mux := http.NewServeMux() srv := &Server{ mux: mux, listener: l, httpServer: &http.Server{ Handler: mux, // set reasonable timeout windows for requests, to prevent the Sia API // server from leaking file descriptors due to slow, disappearing, or // unreliable API clients. // ReadTimeout defines the maximum amount of time allowed to fully read // the request body. This timeout is applied to every handler in the // server. ReadTimeout: time.Minute * 5, // ReadHeaderTimeout defines the amount of time allowed to fully read the // request headers. ReadHeaderTimeout: time.Minute * 2, // IdleTimeout defines the maximum duration an HTTP Keep-Alive connection // to the API is kept open with no activity before closing. IdleTimeout: time.Minute * 5, }, } // Register siad routes srv.mux.Handle("/daemon/", api.RequireUserAgent(srv.daemonHandler(requiredPassword), requiredUserAgent)) return srv, nil } func (srv *Server) Serve() error { // The server will run until an error is encountered or the listener is // closed, via either the Close method or the signal handling above. // Closing the listener will result in the benign error handled below. err := srv.httpServer.Serve(srv.listener) if err != nil && !strings.HasSuffix(err.Error(), "use of closed network connection") { return err } return nil } // Close closes the Server's listener, causing the HTTP server to shut down. func (srv *Server) Close() error { // Close the listener, which will cause Server.Serve() to return. if err := srv.listener.Close(); err != nil { return err } return nil } Sia-1.3.0/siad/server_test.go000066400000000000000000000027021313565667000160600ustar00rootroot00000000000000package main import "testing" // TestLatestRelease tests that the latestRelease function properly processes a // set of GitHub releases, returning the release with the highest version // number.
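// Tags prefixed with "lts" and tags that are not valid version strings are // filtered out before sorting; if nothing survives the filter, // errEmptyUpdateResponse is expected.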
func TestLatestRelease(t *testing.T) { tests := []struct { releases []githubRelease expectedTag string }{ { releases: []githubRelease{ {TagName: "lts-v1.0.4"}, {TagName: "v3.0.7"}, {TagName: "lts-v2.0.0"}, }, expectedTag: "v3.0.7", }, { releases: []githubRelease{ {TagName: "lts-v1.0.4"}, {TagName: "v3.0.7"}, {TagName: "v5.2.2"}, }, expectedTag: "v5.2.2", }, { releases: []githubRelease{ {TagName: "lts-v1.0.4"}, {TagName: "lts-v1.0.7"}, {TagName: "lts-v1.0.5"}, }, expectedTag: "", // no non-LTS versions }, { releases: []githubRelease{ {TagName: "v1.0.4"}, {TagName: "v1.0.7"}, {TagName: "v1.0.5"}, }, expectedTag: "v1.0.7", }, { releases: []githubRelease{ {TagName: "v1.0.4"}, {TagName: "v1.0.4.1"}, {TagName: "v1.0.4-patch1"}, }, expectedTag: "v1.0.4.1", // -patch is invalid }, { releases: []githubRelease{ {TagName: "abc"}, {TagName: "def"}, {TagName: "ghi"}, }, expectedTag: "", // invalid version strings }, } for i, test := range tests { r, _ := latestRelease(test.releases) if r.TagName != test.expectedTag { t.Errorf("test %v failed: expected %q, got %q", i, test.expectedTag, r.TagName) } } } Sia-1.3.0/snap/000077500000000000000000000000001313565667000132045ustar00rootroot00000000000000Sia-1.3.0/snap/snapcraft.yaml000066400000000000000000000014271313565667000160550ustar00rootroot00000000000000name: sia version: git summary: Blockchain-based marketplace for file storage description: | Sia is a new decentralized cloud storage platform that radically alters the landscape of cloud storage. By leveraging smart contracts, client-side encryption, and sophisticated redundancy (via Reed-Solomon codes), Sia allows users to safely store their data with hosts that they do not know or trust. grade: devel # must be 'stable' to release into candidate/stable channels confinement: strict apps: daemon: command: siad plugs: [network, network-bind] aliases: [siad] client: command: siac plugs: [network] aliases: [siac] parts: sia: source: . plugin: go go-importpath: github.com/NebulousLabs/Sia after: [go] go: source-tag: go1.8 Sia-1.3.0/sync/000077500000000000000000000000001313565667000132175ustar00rootroot00000000000000Sia-1.3.0/sync/lock.go000066400000000000000000000103471313565667000145030ustar00rootroot00000000000000package sync import ( "fmt" "os" "runtime" "sync" "time" ) // RWMutex provides locking functions, and an ability to detect and remove // deadlocks. type RWMutex struct { openLocks map[int]lockInfo openLocksCounter int openLocksMutex sync.Mutex callDepth int maxLockTime time.Duration mu sync.RWMutex } // lockInfo contains information about when and how a lock call was made. type lockInfo struct { // When the lock was called. lockTime time.Time // Whether it was a RLock or a Lock. read bool // Call stack of the caller. callingFiles []string callingLines []int } // New takes a maxLockTime and a callDepth, and returns a lock. The lock will // never stay locked for more than maxLockTime, instead printing an error and // unlocking after maxLockTime has passed. func New(maxLockTime time.Duration, callDepth int) *RWMutex { rwm := &RWMutex{ openLocks: make(map[int]lockInfo), maxLockTime: maxLockTime, callDepth: callDepth, } go rwm.threadedDeadlockFinder() return rwm } // threadedDeadlockFinder occasionally freezes the mutexes and scans all open mutexes, // reporting any that have exceeded their time limit. func (rwm *RWMutex) threadedDeadlockFinder() { for { rwm.openLocksMutex.Lock() for id, info := range rwm.openLocks { // Check if the lock has been held for longer than 'maxLockTime'.
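// If it has, dump the recorded call stack to stderr, forcibly release // the mutex, and drop the entry from the tracking map so that other // threads can make progress.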
if time.Now().Sub(info.lockTime) > rwm.maxLockTime { str := fmt.Sprintf("A lock was held for too long, id '%v'. Call stack:\n", id) for i := 0; i <= rwm.callDepth; i++ { str += fmt.Sprintf("\tFile: '%v:%v'\n", info.callingFiles[i], info.callingLines[i]) } os.Stderr.WriteString(str) os.Stderr.Sync() // Undo the deadlock and delete the entry from the map. if info.read { rwm.mu.RUnlock() } else { rwm.mu.Unlock() } delete(rwm.openLocks, id) } } rwm.openLocksMutex.Unlock() time.Sleep(rwm.maxLockTime) } } // safeLock is the generic function for doing safe locking. If the read flag is // set, then a readlock will be used, otherwise a lock will be used. func (rwm *RWMutex) safeLock(read bool) int { // Get the call stack. var li lockInfo li.read = read li.callingFiles = make([]string, rwm.callDepth+1) li.callingLines = make([]int, rwm.callDepth+1) for i := 0; i <= rwm.callDepth; i++ { _, li.callingFiles[i], li.callingLines[i], _ = runtime.Caller(2 + i) } // Lock the mutex. if read { rwm.mu.RLock() } else { rwm.mu.Lock() } // Safely register that a lock has been triggered. rwm.openLocksMutex.Lock() li.lockTime = time.Now() id := rwm.openLocksCounter rwm.openLocks[id] = li rwm.openLocksCounter++ rwm.openLocksMutex.Unlock() return id } // safeUnlock is the generic function for doing safe unlocking. If the lock had // to be removed because a deadlock was detected, an error is printed. func (rwm *RWMutex) safeUnlock(read bool, id int) { rwm.openLocksMutex.Lock() defer rwm.openLocksMutex.Unlock() // Check if a deadlock has been detected and fixed manually. _, exists := rwm.openLocks[id] if !exists { // Get the call stack. callingFiles := make([]string, rwm.callDepth+1) callingLines := make([]int, rwm.callDepth+1) for i := 0; i <= rwm.callDepth; i++ { _, callingFiles[i], callingLines[i], _ = runtime.Caller(2 + i) } fmt.Printf("A lock was held until deadlock, subsequent call to unlock failed. id '%v'. Call stack:\n", id) for i := 0; i <= rwm.callDepth; i++ { fmt.Printf("\tFile: '%v:%v'\n", callingFiles[i], callingLines[i]) } return } // Remove the lock and delete the entry from the map. if read { rwm.mu.RUnlock() } else { rwm.mu.Unlock() } delete(rwm.openLocks, id) } // RLock will read lock the RWMutex. The return value must be used as input // when calling RUnlock. func (rwm *RWMutex) RLock() int { return rwm.safeLock(true) } // RUnlock will read unlock the RWMutex. The return value of calling RLock must // be used as input. func (rwm *RWMutex) RUnlock(id int) { rwm.safeUnlock(true, id) } // Lock will lock the RWMutex. The return value must be used as input when // calling Unlock. func (rwm *RWMutex) Lock() int { return rwm.safeLock(false) } // Unlock will unlock the RWMutex. The return value of calling Lock must be // used as input. func (rwm *RWMutex) Unlock(id int) { rwm.safeUnlock(false, id) } Sia-1.3.0/sync/lock_test.go000066400000000000000000000075371313565667000155470ustar00rootroot00000000000000package sync import ( "runtime" "sync" "testing" "time" ) // TestLowThreadLocking checks that locks are functional in the safelock // mechanism; only 2 threads are used to try and trigger a race condition. func TestLowThreadLocking(t *testing.T) { if testing.Short() { t.SkipNow() } // Create a value and lock a mutex to protect the value. value := 0 safeLock := New(time.Second, 1) outerID := safeLock.Lock() go func() { // Lock a mutex and read the value.
Value should be 1, since the old // mutex was not released until after value was set. innerID := safeLock.Lock() defer safeLock.Unlock(innerID) if value != 1 { t.Fatal("Lock was grabbed incorrectly") } }() // After spawning the other thread, increment value. value = 1 safeLock.Unlock(outerID) } // TestHighThreadLocking tries to trigger race conditions while using lots of // threads and sleep tactics. func TestHighThreadLocking(t *testing.T) { if testing.Short() { t.SkipNow() } // Try to trigger a race condition by using lots of threads. for i := 0; i < 50; i++ { go func() { // Create a value and lock a mutex to protect the value. value := 0 safeLock := New(time.Second, 1) outerID := safeLock.Lock() go func() { // Lock a mutex and read the value. Value should be 1, since // the old mutex was not released until after value was set. innerID := safeLock.Lock() defer safeLock.Unlock(innerID) if value != 1 { t.Fatal("Lock was grabbed incorrectly") } }() // Some sleeps and a call to gosched to try and give the thread // control to the spawned thread. time.Sleep(time.Millisecond * 25) runtime.Gosched() time.Sleep(time.Millisecond * 25) value = 1 safeLock.Unlock(outerID) }() } } // TestReadLocking checks that the readlocks can overlap without interference // from a writelock. func TestReadLocking(t *testing.T) { if testing.Short() { t.SkipNow() } startTime := time.Now().Unix() value := 0 safeLock := New(time.Second, 1) writeID := safeLock.Lock() readThreads := 100 var wg sync.WaitGroup wg.Add(readThreads) for i := 0; i < readThreads; i++ { go func() { readID := safeLock.RLock() defer safeLock.RUnlock(readID) if value != 1 { t.Error("reading is not happening correctly") } // Sleep 250 milliseconds after grabbing the readlock. Because // there are a bunch of threads, if the readlocks are not grabbing // the lock in parallel the test will take a long time. time.Sleep(time.Millisecond * 250) wg.Done() }() } value = 1 // A combination of sleep and gosched to give priority to the other // threads. time.Sleep(time.Millisecond * 100) runtime.Gosched() time.Sleep(time.Millisecond * 100) safeLock.Unlock(writeID) // Wait for all of the threads to finish sleeping. wg.Wait() // Check that the whole test took under 3 seconds. If the readlocks were // efficiently being grabbed in parallel, the test should take substantially // less than 3 seconds. if time.Now().Unix()-startTime > 3 { t.Error("test took too long to complete") } } // TestLockSafety checks that a safelock correctly unwinds a deadlock. func TestLockSafety(t *testing.T) { if testing.Short() { t.SkipNow() } startTime := time.Now().Unix() safeLock := New(time.Millisecond*25, 1) // Trigger a deadlock by writelocking twice. The deadlock detector should // resolve the issue. outerWrite := safeLock.Lock() innerWrite := safeLock.Lock() safeLock.Unlock(outerWrite) safeLock.Unlock(innerWrite) // Trigger a deadlock by readlocking and then writelocking. The deadlock // detector should resolve the issue. readID := safeLock.RLock() writeID := safeLock.Lock() safeLock.RUnlock(readID) safeLock.Unlock(writeID) // Check that the whole test took under 3 seconds. If the deadlock detector // is working, the time elapsed should be much less than 3 seconds. if time.Now().Unix()-startTime > 2 { t.Error("test took too long to complete") } } Sia-1.3.0/sync/threadgroup.go000066400000000000000000000105211313565667000160710ustar00rootroot00000000000000package sync import ( "errors" "sync" ) // ErrStopped is returned by ThreadGroup methods if Stop has already been // called.
var ErrStopped = errors.New("ThreadGroup already stopped") // A ThreadGroup is a one-time-use object to manage the life cycle of a group // of threads. It is a sync.WaitGroup that provides functions for coordinating // actions and shutting down threads. After Stop() is called, the thread group // is no longer useful. // // It is safe to call Add(), Done(), and Stop() concurrently, however it is not // safe to nest calls to Add(). A simple example of a nested call to add would // be: // tg.Add() // tg.Add() // tg.Done() // tg.Done() type ThreadGroup struct { onStopFns []func() afterStopFns []func() once sync.Once stopChan chan struct{} bmu sync.Mutex // Ensures blocking between calls to 'Add', 'Flush', and 'Stop' mu sync.Mutex // Protects the 'onStopFns' and 'afterStopFns' variable wg sync.WaitGroup } // init creates the stop channel for the thread group. func (tg *ThreadGroup) init() { tg.stopChan = make(chan struct{}) } // isStopped will return true if Stop() has been called on the thread group. func (tg *ThreadGroup) isStopped() bool { tg.once.Do(tg.init) select { case <-tg.stopChan: return true default: return false } } // Add increments the thread group counter. func (tg *ThreadGroup) Add() error { tg.bmu.Lock() defer tg.bmu.Unlock() if tg.isStopped() { return ErrStopped } tg.wg.Add(1) return nil } // AfterStop ensures that a function will be called after Stop() has been // called and after all running routines have called Done(). The functions will // be called in reverse order to how they were added, similar to defer. If // Stop() has already been called, the input function will be called // immediately. // // The primary use of AfterStop is to allow code that opens and closes // resources to be positioned next to each other. The purpose is similar to // `defer`, except for resources that outlive the function which creates them. func (tg *ThreadGroup) AfterStop(fn func()) { tg.mu.Lock() defer tg.mu.Unlock() if tg.isStopped() { fn() return } tg.afterStopFns = append(tg.afterStopFns, fn) } // OnStop ensures that a function will be called after Stop() has been called, // and before blocking until all running routines have called Done(). It is // safe to use OnStop to coordinate the closing of long-running threads. The // OnStop functions will be called in the reverse order in which they were // added, similar to defer. If Stop() has already been called, the input // function will be called immediately. func (tg *ThreadGroup) OnStop(fn func()) { tg.mu.Lock() defer tg.mu.Unlock() if tg.isStopped() { fn() return } tg.onStopFns = append(tg.onStopFns, fn) } // Done decrements the thread group counter. func (tg *ThreadGroup) Done() { tg.wg.Done() } // Flush will block all calls to 'tg.Add' until all current routines have // called 'tg.Done'. This in effect 'flushes' the module, letting it complete // any tasks that are open before taking on new ones. func (tg *ThreadGroup) Flush() error { tg.bmu.Lock() defer tg.bmu.Unlock() if tg.isStopped() { return ErrStopped } tg.wg.Wait() return nil } // Stop will close the stop channel of the thread group, then call all 'OnStop' // functions in reverse order, then will wait until the thread group counter // reaches zero, then will call all of the 'AfterStop' functions in reverse // order. After Stop is called, most actions will return ErrStopped. func (tg *ThreadGroup) Stop() error { // Establish that Stop has been called. 
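// The shutdown sequence below is: close stopChan, run the OnStop functions // in reverse order, wait for all outstanding Done calls, then run the // AfterStop functions in reverse order.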
tg.bmu.Lock() defer tg.bmu.Unlock() if tg.isStopped() { return ErrStopped } close(tg.stopChan) tg.mu.Lock() for i := len(tg.onStopFns) - 1; i >= 0; i-- { tg.onStopFns[i]() } tg.onStopFns = nil tg.mu.Unlock() tg.wg.Wait() // After waiting for all resources to release the thread group, iterate // through the stop functions and call them in reverse order. tg.mu.Lock() for i := len(tg.afterStopFns) - 1; i >= 0; i-- { tg.afterStopFns[i]() } tg.afterStopFns = nil tg.mu.Unlock() return nil } // StopChan provides read-only access to the ThreadGroup's stopChan. Callers // should select on StopChan in order to interrupt long-running reads (such as // time.After). func (tg *ThreadGroup) StopChan() <-chan struct{} { tg.once.Do(tg.init) return tg.stopChan } Sia-1.3.0/sync/threadgroup_test.go000066400000000000000000000262371313565667000171370ustar00rootroot00000000000000package sync import ( "net" "os" "path/filepath" "sync" "testing" "time" "github.com/NebulousLabs/Sia/build" ) // TestThreadGroupStopEarly tests that a thread group can correctly interrupt // an ongoing process. func TestThreadGroupStopEarly(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() var tg ThreadGroup for i := 0; i < 10; i++ { err := tg.Add() if err != nil { t.Fatal(err) } go func() { defer tg.Done() select { case <-time.After(1 * time.Second): case <-tg.StopChan(): } }() } start := time.Now() err := tg.Stop() elapsed := time.Since(start) if err != nil { t.Fatal(err) } else if elapsed > 100*time.Millisecond { t.Fatal("Stop did not interrupt goroutines") } } // TestThreadGroupWait tests that a thread group will correctly wait for // existing processes to halt. func TestThreadGroupWait(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() var tg ThreadGroup for i := 0; i < 10; i++ { err := tg.Add() if err != nil { t.Fatal(err) } go func() { defer tg.Done() time.Sleep(time.Second) }() } start := time.Now() err := tg.Stop() elapsed := time.Since(start) if err != nil { t.Fatal(err) } else if elapsed < time.Millisecond*950 { t.Fatal("Stop did not wait for goroutines:", elapsed) } } // TestThreadGroupStop tests the behavior of a ThreadGroup after Stop has been // called. func TestThreadGroupStop(t *testing.T) { // Create a thread group and stop it. var tg ThreadGroup // Create an array to track the order of execution for OnStop and AfterStop // calls. var stopCalls []int // isStopped should return false if tg.isStopped() { t.Error("isStopped returns true on unstopped ThreadGroup") } // The channel provided by StopChan should be open. select { case <-tg.StopChan(): t.Error("stop chan appears to be closed") default: } // OnStop and AfterStop should queue their functions, but not call them. // 'Add' and 'Done' are set up around the OnStop functions, to make sure // that the OnStop functions are called before waiting for all calls to // 'Done' to come through. // // Note: the practice of calling Add outside of OnStop and Done inside of // OnStop is a bad one - any call to tg.Flush() will cause a deadlock // because the stop functions will not be called but tg.Flush will be // waiting for the thread group counter to reach zero. err := tg.Add() if err != nil { t.Fatal(err) } err = tg.Add() if err != nil { t.Fatal(err) } tg.OnStop(func() { tg.Done() stopCalls = append(stopCalls, 1) }) tg.OnStop(func() { tg.Done() stopCalls = append(stopCalls, 2) }) tg.AfterStop(func() { stopCalls = append(stopCalls, 10) }) tg.AfterStop(func() { stopCalls = append(stopCalls, 20) }) // None of the stop calls should have been called yet.
if len(stopCalls) != 0 { t.Fatal("Stop calls were called too early") } // Stop the thread group. err = tg.Stop() if err != nil { t.Fatal(err) } // isStopped should return true. if !tg.isStopped() { t.Error("isStopped returns false on stopped ThreadGroup") } // The channel provided by StopChan should be closed. select { case <-tg.StopChan(): default: t.Error("stop chan appears to be open") } // The OnStop calls should have been called first, in reverse order, and // the AfterStop calls should have been called second, in reverse order. if len(stopCalls) != 4 { t.Fatal("Stop did not call the stopping functions correctly") } if stopCalls[0] != 2 { t.Error("Stop called the stopping functions in the wrong order") } if stopCalls[1] != 1 { t.Error("Stop called the stopping functions in the wrong order") } if stopCalls[2] != 20 { t.Error("Stop called the stopping functions in the wrong order") } if stopCalls[3] != 10 { t.Error("Stop called the stopping functions in the wrong order") } // Add and Stop should return errors. err = tg.Add() if err != ErrStopped { t.Error("expected ErrStopped, got", err) } err = tg.Stop() if err != ErrStopped { t.Error("expected ErrStopped, got", err) } // OnStop and AfterStop should call their functions immediately now that // the thread group has stopped. onStopCalled := false tg.OnStop(func() { onStopCalled = true }) if !onStopCalled { t.Error("OnStop function not called immediately despite the thread group being closed already.") } afterStopCalled := false tg.AfterStop(func() { afterStopCalled = true }) if !afterStopCalled { t.Error("AfterStop function not called immediately despite the thread group being closed already.") } } // TestThreadGroupConcurrentAdd tests that Add can be called concurrently with Stop. func TestThreadGroupConcurrentAdd(t *testing.T) { if testing.Short() { t.SkipNow() } var tg ThreadGroup for i := 0; i < 10; i++ { go func() { err := tg.Add() if err != nil { return } defer tg.Done() select { case <-time.After(1 * time.Second): case <-tg.StopChan(): } }() } time.Sleep(10 * time.Millisecond) // wait for at least one Add err := tg.Stop() if err != nil { t.Fatal(err) } } // TestThreadGroupOnce tests that a zero-valued ThreadGroup's stopChan is // properly initialized. func TestThreadGroupOnce(t *testing.T) { tg := new(ThreadGroup) if tg.stopChan != nil { t.Error("expected nil stopChan") } // these methods should cause stopChan to be initialized tg.StopChan() if tg.stopChan == nil { t.Error("stopChan should have been initialized by StopChan") } tg = new(ThreadGroup) tg.isStopped() if tg.stopChan == nil { t.Error("stopChan should have been initialized by isStopped") } tg = new(ThreadGroup) tg.Add() if tg.stopChan == nil { t.Error("stopChan should have been initialized by Add") } tg = new(ThreadGroup) tg.Stop() if tg.stopChan == nil { t.Error("stopChan should have been initialized by Stop") } } // TestThreadGroupOnStop tests that Stop calls functions registered with // OnStop. func TestThreadGroupOnStop(t *testing.T) { if testing.Short() { t.SkipNow() } l, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatal(err) } // create ThreadGroup and register the closer var tg ThreadGroup tg.OnStop(func() { l.Close() }) // send on channel when listener is closed var closed bool tg.Add() go func() { defer tg.Done() _, err := l.Accept() closed = err != nil }() tg.Stop() if !closed { t.Fatal("Stop did not close listener") } } // TestThreadGroupRace tests that calling ThreadGroup methods concurrently // does not trigger the race detector.
func TestThreadGroupRace(t *testing.T) { var tg ThreadGroup go tg.StopChan() go func() { if tg.Add() == nil { tg.Done() } }() err := tg.Stop() if err != nil { t.Fatal(err) } } // TestThreadGroupClosedAfterStop checks that an AfterStop function is // correctly called after the thread is stopped. func TestThreadGroupClosedAfterStop(t *testing.T) { var tg ThreadGroup var closed bool tg.AfterStop(func() { closed = true }) if closed { t.Fatal("close function should not have been called yet") } if err := tg.Stop(); err != nil { t.Fatal(err) } if !closed { t.Fatal("close function should have been called") } // Stop has already been called, so the close function should be called // immediately closed = false tg.AfterStop(func() { closed = true }) if !closed { t.Fatal("close function should have been called immediately") } } // TestThreadGroupSiaExample tries to use a thread group as it might be // expected to be used by a module of Sia. func TestThreadGroupSiaExample(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() testDir := build.TempDir("sync", t.Name()) err := os.MkdirAll(testDir, 0700) if err != nil { t.Fatal(err) } var tg ThreadGroup // Open an example file. The file is expected to be used throughout the // lifetime of the module, and should not be closed until 'AfterStop' is // called. fileClosed := false file, err := os.Create(filepath.Join(testDir, "exampleFile.txt")) if err != nil { t.Fatal(err) } tg.AfterStop(func() { fileClosed = true err := file.Close() if err != nil { t.Fatal(err) } }) // Open a listener. The listener and handler thread should be closed before // the file is closed. listenerCleanedUp := false listener, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatal(err) } // Open a thread to accept calls from the listener. handlerFinishedChan := make(chan struct{}) go func() { for { _, err := listener.Accept() if err != nil { break } } handlerFinishedChan <- struct{}{} }() tg.OnStop(func() { err := listener.Close() if err != nil { t.Fatal(err) } <-handlerFinishedChan if fileClosed { t.Error("file should be open while the listener is shutting down") } listenerCleanedUp = true }) // Create a thread that does some stuff which takes time, and then closes. // Use Flush to clear out the process without closing the resources. threadFinished := false err = tg.Add() if err != nil { t.Fatal(err) } go func() { time.Sleep(time.Second) threadFinished = true tg.Done() }() tg.Flush() if !threadFinished { t.Error("call to Flush should have allowed the working thread to finish") } if listenerCleanedUp || fileClosed { t.Error("call to Flush resulted in permanent resources being closed") } // Create a thread that does some stuff which takes time, and then closes. // Use Stop to wait for the thread to finish and then check that all // resources have closed. threadFinished2 := false err = tg.Add() if err != nil { t.Fatal(err) } go func() { time.Sleep(time.Second) threadFinished2 = true tg.Done() }() tg.Stop() if !threadFinished2 || !listenerCleanedUp || !fileClosed { t.Error("stop did not block until all running resources had closed") } } // TestAddOnStop checks that you can safely call OnStop from under the // protection of an Add call.
func TestAddOnStop(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() var tg ThreadGroup var data int addChan := make(chan struct{}) stopChan := make(chan struct{}) tg.OnStop(func() { close(stopChan) }) go func() { err := tg.Add() if err != nil { t.Fatal(err) } close(addChan) // Wait for the call to 'Stop' to be called in the parent thread, and // then queue a bunch of 'OnStop' and 'AfterStop' functions before // calling 'Done'. <-stopChan for i := 0; i < 10; i++ { tg.OnStop(func() { data++ }) tg.AfterStop(func() { data++ }) } tg.Done() }() // Wait for 'Add' to be called in the above thread, to guarantee that // OnStop and AfterStop will be called after 'Add' and 'Stop' have been // called together. <-addChan err := tg.Stop() if err != nil { t.Fatal(err) } if data != 20 { t.Error("20 calls were made to increment data, but value is", data) } } // BenchmarkThreadGroup times how long it takes to add a ton of threads and // trigger goroutines that call Done. func BenchmarkThreadGroup(b *testing.B) { var tg ThreadGroup for i := 0; i < b.N; i++ { tg.Add() go tg.Done() } tg.Stop() } // BenchmarkWaitGroup times how long it takes to add a ton of threads to a wait // group and trigger goroutines that call Done. func BenchmarkWaitGroup(b *testing.B) { var wg sync.WaitGroup for i := 0; i < b.N; i++ { wg.Add(1) go wg.Done() } wg.Wait() } Sia-1.3.0/sync/trymutex.go000066400000000000000000000024451313565667000154540ustar00rootroot00000000000000package sync import ( "sync" "time" ) // TryMutex provides a mutex that allows you to attempt to grab a mutex, and // then fail if the mutex is either not grabbed immediately or is not grabbed // by the specified duration. type TryMutex struct { once sync.Once lock chan struct{} } // init will create the channel that manages the lock. func (tm *TryMutex) init() { tm.lock = make(chan struct{}, 1) tm.lock <- struct{}{} } // Lock grabs a lock on the TryMutex, blocking until the lock is obtained. func (tm *TryMutex) Lock() { tm.once.Do(tm.init) <-tm.lock } // TryLock grabs a lock on the TryMutex, returning false if the mutex is // already locked. func (tm *TryMutex) TryLock() bool { tm.once.Do(tm.init) select { case <-tm.lock: return true default: return false } } // TryLockTimed grabs a lock on the TryMutex, returning false if the mutex // is not grabbed after the provided duration. func (tm *TryMutex) TryLockTimed(t time.Duration) bool { tm.once.Do(tm.init) select { case <-tm.lock: return true case <-time.After(t): return false } } // Unlock releases a lock on the TryMutex. func (tm *TryMutex) Unlock() { tm.once.Do(tm.init) select { case tm.lock <- struct{}{}: // Success - do nothing. default: panic("unlock called when TryMutex is not locked") } } Sia-1.3.0/sync/trymutex_test.go000066400000000000000000000075151313565667000165160ustar00rootroot00000000000000package sync import ( "sync" "testing" "time" ) // TestTryMutexBasicMutex verifies that Lock and Unlock work the same as a // normal mutex would. func TestTryMutexBasicMutex(t *testing.T) { // Check that two calls to lock will execute in the correct order. var tm TryMutex var data int tm.Lock() go func() { data = 15 tm.Unlock() }() tm.Lock() if data != 15 { t.Error("Locking did not safely protect the data") } tm.Unlock() } // TestTryMutexConcurrentLocking checks that doing lots of concurrent locks is // handled as expected. func TestTryMutexConcurrentLocking(t *testing.T) { if testing.Short() { t.SkipNow() } // Try executing multiple additions concurrently.
var tm TryMutex var data int var wg sync.WaitGroup for i := 0; i < 250; i++ { wg.Add(1) go func() { tm.Lock() data++ tm.Unlock() wg.Done() }() } wg.Wait() if data != 250 { t.Error("Locking did not safely protect the data") } } // TestTryMutexBasicTryLock checks that a TryLock will succeed if nobody is // holding a lock, and will fail if the lock is being held. func TestTryMutexBasicTryLock(t *testing.T) { // Lock and then TryLock. var tm TryMutex tm.Lock() if tm.TryLock() { t.Error("TryLock should have failed") } tm.Unlock() tm.Lock() tm.Unlock() // TryLock and then TryLock. if !tm.TryLock() { t.Error("Could not get a blank lock") } if tm.TryLock() { t.Error("should not have been able to get the lock") } tm.Unlock() } // TestTryMutexConcurrentTries attempts to grab locks from many threads, giving // the race detector a chance to detect any issues. func TestTryMutexConcurrentTries(t *testing.T) { if testing.Short() { t.SkipNow() } // Try executing multiple additions concurrently. var tm TryMutex var data int var wg sync.WaitGroup for i := 0; i < 250; i++ { wg.Add(1) go func() { for !tm.TryLock() { } data++ tm.Unlock() wg.Done() }() } wg.Wait() if data != 250 { t.Error("Locking did not safely protect the data") } } // TestTryMutexTimed checks that a timed lock will correctly time out if it // cannot grab a lock. func TestTryMutexTimed(t *testing.T) { if testing.Short() { t.SkipNow() } var tm TryMutex tm.Lock() startTime := time.Now() if tm.TryLockTimed(time.Millisecond * 500) { t.Error("was able to grab a locked lock") } wait := time.Now().Sub(startTime) if wait < time.Millisecond*450 { t.Error("lock did not wait the correct amount of time before timing out", wait) } if wait > time.Millisecond*900 { t.Error("lock waited too long before timing out", wait) } tm.Unlock() if !tm.TryLockTimed(time.Millisecond * 1) { t.Error("Unable to get an unlocked lock") } tm.Unlock() } // TestTryMutexTimedConcurrent checks that a timed lock will correctly time out // if it cannot grab a lock. func TestTryMutexTimedConcurrent(t *testing.T) { if testing.Short() { t.SkipNow() } var tm TryMutex // Engage a lock and launch a gothread to wait for a lock, fail, and then // call unlock. tm.Lock() go func() { startTime := time.Now() if tm.TryLockTimed(time.Millisecond * 500) { t.Error("was able to grab a locked lock") } wait := time.Now().Sub(startTime) if wait < time.Millisecond*450 { t.Error("lock did not wait the correct amount of time before timing out:", wait) } if wait > time.Millisecond*900 { t.Error("lock waited too long before timing out", wait) } tm.Unlock() }() // Try to get a lock, but don't wait long enough. if tm.TryLockTimed(time.Millisecond * 250) { // Lock should time out because the gothread responsible for releasing // the lock will be idle for 500 milliseconds. t.Error("Lock should have timed out") } if !tm.TryLockTimed(time.Millisecond * 950) { // Lock should be successful - the above thread should finish in under // 950 milliseconds. t.Error("Lock should have been successful") } tm.Unlock() } Sia-1.3.0/sync/tryrwmutex.go000066400000000000000000000054111313565667000160210ustar00rootroot00000000000000package sync import ( "runtime" "sync" "sync/atomic" ) const ( readOffset = uint64(1) writeOffset = uint64(1 << 20) tryOffset = uint64(1 << 40) tryMask = uint64(1099511627775) // equivalent to setting the first 40 bits to '1'. ) // TryRWMutex allows you to try to grab a RWMutex, failing if the mutex is // unavailable. Standard blocking RLock and Lock calls also available.
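// // The lock field packs three counters into a single uint64: waiting readers // in the low bits (readOffset), waiting writers starting at bit 20 // (writeOffset), and in-flight Try calls starting at bit 40 (tryOffset).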
// // Note that there will be inconsistencies if there are more than 1 << 20 // operations active at once. type TryRWMutex struct { lock uint64 mu sync.RWMutex } // Lock blocks until the mutex is available, and then locks it. func (tm *TryRWMutex) Lock() { // Signal that a write lock is waiting. v := atomic.AddUint64(&tm.lock, writeOffset) // Spin until there is no contention from a Try call. for v > tryOffset { runtime.Gosched() v = atomic.LoadUint64(&tm.lock) } // Grab the lock. tm.mu.Lock() } // RLock blocks until the mutex is available, then grabs a read lock. func (tm *TryRWMutex) RLock() { // Signal that a read lock is waiting. v := atomic.AddUint64(&tm.lock, readOffset) // Spin until there is no contention from a Try call. for v > tryOffset { runtime.Gosched() v = atomic.LoadUint64(&tm.lock) } // Grab the lock. tm.mu.RLock() } // RUnlock releases a read lock on the mutex. func (tm *TryRWMutex) RUnlock() { // Release the lock, then signal that the read lock is no longer waiting. tm.mu.RUnlock() atomic.AddUint64(&tm.lock, ^(readOffset - 1)) } // TryLock grabs a lock on the mutex, returning false if the mutex is // unavailable. func (tm *TryRWMutex) TryLock() bool { // If there are no readlocks waiting, and no writelocks waiting, signal // that a writelock is waiting and that there is contention from a Try call. if atomic.CompareAndSwapUint64(&tm.lock, 0, writeOffset+tryOffset) { tm.mu.Lock() // Signal that the Try call contention is resolved. atomic.AddUint64(&tm.lock, ^(tryOffset - 1)) return true } return false } // TryRLock grabs a read lock on the mutex, returning false if the mutex is // already locked. func (tm *TryRWMutex) TryRLock() bool { // Signal that a read lock is waiting, and that there is contention from a // Try call. v := atomic.AddUint64(&tm.lock, readOffset+tryOffset) // Mask the try offset when performing the comparison. v = v & tryMask if v > writeOffset { // If there is a write lock waiting, revert the signal and return // false. atomic.AddUint64(&tm.lock, ^(readOffset + tryOffset - 1)) return false } // Grab the read lock and return true. tm.mu.RLock() // Signal that the Try call contention is resolved. atomic.AddUint64(&tm.lock, ^(tryOffset - 1)) return true } // Unlock releases a lock on the mutex. func (tm *TryRWMutex) Unlock() { tm.mu.Unlock() atomic.AddUint64(&tm.lock, ^(writeOffset - 1)) } Sia-1.3.0/sync/tryrwmutex_test.go000066400000000000000000000077121313565667000170660ustar00rootroot00000000000000package sync import ( "runtime" "sync" "testing" ) // TestTryRWMutexBasicMutex verifies that Lock and Unlock work the same as a // normal mutex would. func TestTryRWMutexBasicMutex(t *testing.T) { // Check that two calls to lock will execute in the correct order. var tm TryRWMutex var data int tm.Lock() go func() { data = 15 tm.Unlock() }() tm.Lock() if data != 15 { t.Error("Locking did not safely protect the data") } tm.Unlock() } // TestTryRWMutexConcurrentLocking checks that doing lots of concurrent locks // is handled as expected. func TestTryRWMutexConcurrentLocking(t *testing.T) { if testing.Short() { t.SkipNow() } // Try executing multiple additions concurrently. var tm TryRWMutex var data int var wg sync.WaitGroup for i := 0; i < 250; i++ { wg.Add(1) go func() { tm.Lock() data++ tm.Unlock() wg.Done() }() } wg.Wait() if data != 250 { t.Error("Locking did not safely protect the data") } } // TestTryRWMutexBasicTryLock checks that a TryLock will succeed if nobody is // holding a lock, and will fail if the lock is being held.
func TestTryRWMutexBasicTryLock(t *testing.T) { // Lock and then TryLock. var tm TryRWMutex tm.Lock() if tm.TryLock() { t.Error("TryLock should have failed") } tm.Unlock() tm.Lock() tm.Unlock() // TryLock and then TryLock. if !tm.TryLock() { t.Error("Could not get a blank lock") } if tm.TryLock() { t.Error("should not have been able to get the lock") } tm.Unlock() } // TestTryRWMutexConcurrentTries attempts to grab locks from many threads, // giving the race detector a chance to detect any issues. func TestTryRWMutexConcurrentTries(t *testing.T) { if testing.Short() { t.SkipNow() } // Try executing multiple additions concurrently. var tm TryRWMutex var data int var wg sync.WaitGroup for i := 0; i < 250; i++ { wg.Add(1) go func() { for !tm.TryLock() { } data++ tm.Unlock() wg.Done() }() } wg.Wait() if data != 250 { t.Error("Locking did not safely protect the data") } } // TestTryRWMutexReadAvailable will try to acquire a read lock on the mutex // when it is supposed to be available. func TestTryRWMutexReadAvailable(t *testing.T) { var tm TryRWMutex if !tm.TryRLock() { t.Fatal("Unable to get readlock on a fresh TryRWMutex") } // Grab the lock and increment the data in a goroutine. var data int var wg sync.WaitGroup wg.Add(2) go func() { defer wg.Done() tm.Lock() data++ tm.Unlock() }() runtime.Gosched() go func() { defer wg.Done() tm.Lock() data++ tm.Unlock() }() runtime.Gosched() // Read the data, readlock should be held. if data != 0 { t.Fatal("Data should not have changed while under readlock") } // Release the lock and wait for the other locks to finish their // modifications. tm.RUnlock() wg.Wait() // Try to grab another readlock. It should succeed. The data should have // changed. if !tm.TryRLock() { t.Fatal("Unable to get readlock on available TryRWMutex") } if data != 2 { t.Error("Data does not seem to have been altered correctly") } tm.RUnlock() } // TestTryRWMutexReadUnavailable will try to acquire a read lock on the mutex // when it is supposed to be unavailable. func TestTryRWMutexReadUnavailable(t *testing.T) { var tm TryRWMutex if !tm.TryRLock() { t.Fatal("Unable to get readlock on a fresh TryRWMutex") } // Grab the lock and increment the data in a goroutine. var data int var wg sync.WaitGroup wg.Add(2) go func() { defer wg.Done() tm.Lock() data++ tm.Unlock() }() runtime.Gosched() go func() { defer wg.Done() tm.Lock() data++ tm.Unlock() }() runtime.Gosched() // Read the data, readlock should be held. if data != 0 { t.Fatal("Data should not have changed while under readlock") } // Try to grab another readlock. It should not succeed. if tm.TryRLock() { t.Fatal("Able to get readlock on unavailable TryRWMutex") } // Release the lock and wait for the other locks to finish their // modifications. tm.RUnlock() wg.Wait() } Sia-1.3.0/types/000077500000000000000000000000001313565667000134075ustar00rootroot00000000000000Sia-1.3.0/types/block.go000066400000000000000000000114241313565667000150320ustar00rootroot00000000000000package types // block.go defines the Block type for Sia, and provides some helper functions // for working with blocks. import ( "bytes" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" ) const ( // BlockHeaderSize is the size, in bytes, of a block header. // 32 (ParentID) + 8 (Nonce) + 8 (Timestamp) + 32 (MerkleRoot) BlockHeaderSize = 80 ) type ( // A Block is a summary of changes to the state that have occurred since the // previous block.
Blocks reference the ID of the previous block (their // "parent"), creating the linked-list commonly known as the blockchain. Their // primary function is to bundle together transactions on the network. Blocks // are created by "miners," who collect transactions from other nodes, and // then try to pick a Nonce that results in a block whose BlockID is below a // given Target. Block struct { ParentID BlockID `json:"parentid"` Nonce BlockNonce `json:"nonce"` Timestamp Timestamp `json:"timestamp"` MinerPayouts []SiacoinOutput `json:"minerpayouts"` Transactions []Transaction `json:"transactions"` } // A BlockHeader, when encoded, is an 80-byte constant size field // containing enough information to do headers-first block downloading. // Hashing the header results in the block ID. BlockHeader struct { ParentID BlockID `json:"parentid"` Nonce BlockNonce `json:"nonce"` Timestamp Timestamp `json:"timestamp"` MerkleRoot crypto.Hash `json:"merkleroot"` } BlockHeight uint64 BlockID crypto.Hash BlockNonce [8]byte ) // CalculateCoinbase calculates the coinbase for a given height. The coinbase // equation is: // // coinbase := max(InitialCoinbase - height, MinimumCoinbase) * SiacoinPrecision func CalculateCoinbase(height BlockHeight) Currency { base := InitialCoinbase - uint64(height) if uint64(height) > InitialCoinbase || base < MinimumCoinbase { base = MinimumCoinbase } return NewCurrency64(base).Mul(SiacoinPrecision) } // CalculateNumSiacoins calculates the number of siacoins in circulation at a // given height. func CalculateNumSiacoins(height BlockHeight) Currency { deflationBlocks := BlockHeight(InitialCoinbase - MinimumCoinbase) avgDeflationSiacoins := CalculateCoinbase(0).Add(CalculateCoinbase(height)).Div(NewCurrency64(2)) if height <= deflationBlocks { deflationSiacoins := avgDeflationSiacoins.Mul(NewCurrency64(uint64(height + 1))) return deflationSiacoins } deflationSiacoins := avgDeflationSiacoins.Mul(NewCurrency64(uint64(deflationBlocks + 1))) trailingSiacoins := NewCurrency64(uint64(height - deflationBlocks)).Mul(CalculateCoinbase(height)) return deflationSiacoins.Add(trailingSiacoins) } // ID returns the ID of a Block, which is calculated by hashing the header. func (h BlockHeader) ID() BlockID { return BlockID(crypto.HashObject(h)) } // CalculateSubsidy takes a block and a height and determines the block // subsidy. func (b Block) CalculateSubsidy(height BlockHeight) Currency { subsidy := CalculateCoinbase(height) for _, txn := range b.Transactions { for _, fee := range txn.MinerFees { subsidy = subsidy.Add(fee) } } return subsidy } // Header returns the header of a block. func (b Block) Header() BlockHeader { return BlockHeader{ ParentID: b.ParentID, Nonce: b.Nonce, Timestamp: b.Timestamp, MerkleRoot: b.MerkleRoot(), } } // ID returns the ID of a Block, which is calculated by hashing the // concatenation of the block's parent's ID, nonce, and the result of the // b.MerkleRoot(). It is equivalent to calling block.Header().ID() func (b Block) ID() BlockID { return b.Header().ID() } // MerkleRoot calculates the Merkle root of a Block. The leaves of the Merkle // tree are composed of the miner outputs (one leaf per payout), and the // transactions (one leaf per transaction). 
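// Each leaf is the MarshalSia encoding of the corresponding object, pushed // in order: miner payouts first, then transactions.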
func (b Block) MerkleRoot() crypto.Hash { tree := crypto.NewTree() var buf bytes.Buffer for _, payout := range b.MinerPayouts { payout.MarshalSia(&buf) tree.Push(buf.Bytes()) buf.Reset() } for _, txn := range b.Transactions { txn.MarshalSia(&buf) tree.Push(buf.Bytes()) buf.Reset() } // Sanity check - verify that this root is the same as the root provided in // the old implementation. if build.DEBUG { verifyTree := crypto.NewTree() for _, payout := range b.MinerPayouts { verifyTree.PushObject(payout) } for _, txn := range b.Transactions { verifyTree.PushObject(txn) } if tree.Root() != verifyTree.Root() { panic("Block MerkleRoot implementation is broken") } } return tree.Root() } // MinerPayoutID returns the ID of the miner payout at the given index, which // is calculated by hashing the concatenation of the BlockID and the payout // index. func (b Block) MinerPayoutID(i uint64) SiacoinOutputID { return SiacoinOutputID(crypto.HashAll( b.ID(), i, )) } Sia-1.3.0/types/block_bench_test.go000066400000000000000000000013411313565667000172250ustar00rootroot00000000000000package types import ( "testing" "github.com/NebulousLabs/Sia/encoding" ) // BenchmarkEncodeBlock benchmarks encoding an empty block. // // i5-4670K, 9a90f86: 48 MB/s func BenchmarkEncodeBlock(b *testing.B) { var block Block b.SetBytes(int64(len(encoding.Marshal(block)))) for i := 0; i < b.N; i++ { encoding.Marshal(block) } } // BenchmarkDecodeEmptyBlock benchmarks decoding an empty block. // // i7-4770, b0b162d: 38 MB/s // i5-4670K, 9a90f86: 55 MB/s func BenchmarkDecodeEmptyBlock(b *testing.B) { var block Block encodedBlock := encoding.Marshal(block) b.SetBytes(int64(len(encodedBlock))) for i := 0; i < b.N; i++ { err := encoding.Unmarshal(encodedBlock, &block) if err != nil { b.Fatal(err) } } } Sia-1.3.0/types/block_test.go000066400000000000000000000160141313565667000160710ustar00rootroot00000000000000package types import ( "testing" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" ) // TestCalculateCoinbase probes the CalculateCoinbase function. The test code // is probably too similar to the function code to be of value. func TestCalculateCoinbase(t *testing.T) { c := CalculateCoinbase(0) if c.Cmp(NewCurrency64(InitialCoinbase).Mul(SiacoinPrecision)) != 0 { t.Error("Unexpected CalculateCoinbase result") } c = CalculateCoinbase(1) if c.Cmp(NewCurrency64(InitialCoinbase-1).Mul(SiacoinPrecision)) != 0 { t.Error("Unexpected CalculateCoinbase result") } c = CalculateCoinbase(295000) if c.Cmp(NewCurrency64(MinimumCoinbase).Mul(SiacoinPrecision)) != 0 { t.Error(c) t.Error(NewCurrency64(MinimumCoinbase).Mul(SiacoinPrecision)) t.Error("Unexpected CalculateCoinbase result") } c = CalculateCoinbase(1000000000) if c.Cmp(NewCurrency64(MinimumCoinbase).Mul(SiacoinPrecision)) != 0 { t.Error(c) t.Error(NewCurrency64(MinimumCoinbase).Mul(SiacoinPrecision)) t.Error("Unexpected CalculateCoinbase result") } } // TestCalculateNumSiacoins checks that the siacoin calculator is correctly // determining the number of siacoins in circulation. The check is performed by // doing a naive computation, instead of by doing the optimized computation.
func TestCalculateNumSiacoins(t *testing.T) { c := CalculateNumSiacoins(0) if c.Cmp(CalculateCoinbase(0)) != 0 { t.Error("unexpected circulation result for value 0, got", c) } if testing.Short() { t.SkipNow() } totalCoins := NewCurrency64(0) for i := BlockHeight(0); i < 500e3; i++ { totalCoins = totalCoins.Add(CalculateCoinbase(i)) if totalCoins.Cmp(CalculateNumSiacoins(i)) != 0 { t.Fatal("coin miscalculation", i, totalCoins, CalculateNumSiacoins(i)) } } } // TestBlockHeader checks that BlockHeader returns the correct value, and that // the hash is consistent with the old method for obtaining the hash. func TestBlockHeader(t *testing.T) { var b Block b.ParentID[1] = 1 b.Nonce[2] = 2 b.Timestamp = 3 b.MinerPayouts = []SiacoinOutput{{Value: NewCurrency64(4)}} b.Transactions = []Transaction{{ArbitraryData: [][]byte{{'5'}}}} id1 := b.ID() id2 := BlockID(crypto.HashBytes(encoding.Marshal(b.Header()))) id3 := BlockID(crypto.HashAll( b.ParentID, b.Nonce, b.Timestamp, b.MerkleRoot(), )) if id1 != id2 || id2 != id3 || id3 != id1 { t.Error("Methods for getting block id don't return the same results") } } // TestBlockID probes the ID function of the block type. func TestBlockID(t *testing.T) { // Create a bunch of different blocks and check that all of them have // unique ids. var b Block var ids []BlockID ids = append(ids, b.ID()) b.ParentID[0] = 1 ids = append(ids, b.ID()) b.Nonce[0] = 45 ids = append(ids, b.ID()) b.Timestamp = CurrentTimestamp() ids = append(ids, b.ID()) b.MinerPayouts = append(b.MinerPayouts, SiacoinOutput{Value: CalculateCoinbase(0)}) ids = append(ids, b.ID()) b.MinerPayouts = append(b.MinerPayouts, SiacoinOutput{Value: CalculateCoinbase(0)}) ids = append(ids, b.ID()) b.Transactions = append(b.Transactions, Transaction{MinerFees: []Currency{CalculateCoinbase(1)}}) ids = append(ids, b.ID()) b.Transactions = append(b.Transactions, Transaction{MinerFees: []Currency{CalculateCoinbase(1)}}) ids = append(ids, b.ID()) knownIDs := make(map[BlockID]struct{}) for i, id := range ids { _, exists := knownIDs[id] if exists { t.Error("id repeat for index", i) } knownIDs[id] = struct{}{} } } // TestHeaderID probes the ID function of the BlockHeader type. func TestHeaderID(t *testing.T) { // Create a bunch of different blocks and check that all of them have // unique ids. var blocks []Block var b Block blocks = append(blocks, b) b.ParentID[0] = 1 blocks = append(blocks, b) b.Nonce[0] = 45 blocks = append(blocks, b) b.Timestamp = CurrentTimestamp() blocks = append(blocks, b) b.MinerPayouts = append(b.MinerPayouts, SiacoinOutput{Value: CalculateCoinbase(0)}) blocks = append(blocks, b) b.MinerPayouts = append(b.MinerPayouts, SiacoinOutput{Value: CalculateCoinbase(0)}) blocks = append(blocks, b) b.Transactions = append(b.Transactions, Transaction{MinerFees: []Currency{CalculateCoinbase(1)}}) blocks = append(blocks, b) b.Transactions = append(b.Transactions, Transaction{MinerFees: []Currency{CalculateCoinbase(1)}}) blocks = append(blocks, b) knownIDs := make(map[BlockID]struct{}) for i, block := range blocks { blockID := block.ID() headerID := block.Header().ID() if blockID != headerID { t.Error("headerID does not match blockID for index", i) } _, exists := knownIDs[headerID] if exists { t.Error("id repeat for index", i) } knownIDs[headerID] = struct{}{} } } // TestBlockCalculateSubsidy probes the CalculateSubsidy function of the block // type. func TestBlockCalculateSubsidy(t *testing.T) { // All tests are done at height = 0. 
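// (CalculateCoinbase(0) is InitialCoinbase * SiacoinPrecision, i.e. 300,000 SC.)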
coinbase := CalculateCoinbase(0) // Calculate the subsidy on a block with 0 fees at height 0. Result should // be 300,000. var b Block if b.CalculateSubsidy(0).Cmp(coinbase) != 0 { t.Error("subsidy is miscalculated for an empty block") } // Calculate when there is a fee in a transaction. expected := coinbase.Add(NewCurrency64(123)) txn := Transaction{ MinerFees: []Currency{NewCurrency64(123)}, } b.Transactions = append(b.Transactions, txn) if b.CalculateSubsidy(0).Cmp(expected) != 0 { t.Error("subsidy is miscalculated for a block with a single transaction") } // Add a single no-fee transaction and check again. txn = Transaction{ ArbitraryData: [][]byte{{'6'}}, } b.Transactions = append(b.Transactions, txn) if b.CalculateSubsidy(0).Cmp(expected) != 0 { t.Error("subsidy is miscalculated with empty transactions.") } // Add a transaction with multiple fees. expected = expected.Add(NewCurrency64(1 + 2 + 3)) txn = Transaction{ MinerFees: []Currency{ NewCurrency64(1), NewCurrency64(2), NewCurrency64(3), }, } b.Transactions = append(b.Transactions, txn) if b.CalculateSubsidy(0).Cmp(expected) != 0 { t.Error("subsidy is miscalculated for a block with multiple fee-bearing transactions") } // Add an empty transaction to the beginning. txn = Transaction{ ArbitraryData: [][]byte{{'7'}}, } b.Transactions = append([]Transaction{txn}, b.Transactions...) if b.CalculateSubsidy(0).Cmp(expected) != 0 { t.Error("subsidy is miscalculated with empty transactions.") } } // TestBlockMinerPayoutID probes the MinerPayoutID function of the block type. func TestBlockMinerPayoutID(t *testing.T) { // Create a block with 2 miner payouts, and check that each payout has a // different id, and that the id is dependent on the block id. var ids []SiacoinOutputID b := Block{ MinerPayouts: []SiacoinOutput{ {Value: CalculateCoinbase(0)}, {Value: CalculateCoinbase(0)}, }, } ids = append(ids, b.MinerPayoutID(1), b.MinerPayoutID(2)) b.ParentID[0] = 1 ids = append(ids, b.MinerPayoutID(1), b.MinerPayoutID(2)) knownIDs := make(map[SiacoinOutputID]struct{}) for i, id := range ids { _, exists := knownIDs[id] if exists { t.Error("id repeat for index", i) } knownIDs[id] = struct{}{} } } Sia-1.3.0/types/constants.go000066400000000000000000000504221313565667000157550ustar00rootroot00000000000000package types // constants.go contains the Sia constants. Depending on which build tags are // used, the constants will be initialized to different values. // // CONTRIBUTE: We don't have a way to check that the non-test constants are all // sane, plus we have no coverage for them. import ( "math/big" "github.com/NebulousLabs/Sia/build" ) var ( BlockSizeLimit = uint64(2e6) RootDepth = Target{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255} BlockFrequency BlockHeight MaturityDelay BlockHeight GenesisTimestamp Timestamp RootTarget Target MedianTimestampWindow = uint64(11) TargetWindow BlockHeight MaxAdjustmentUp *big.Rat MaxAdjustmentDown *big.Rat FutureThreshold Timestamp ExtremeFutureThreshold Timestamp SiafundCount = NewCurrency64(10000) SiafundPortion = big.NewRat(39, 1000) SiacoinPrecision = NewCurrency(new(big.Int).Exp(big.NewInt(10), big.NewInt(24), nil)) InitialCoinbase = uint64(300e3) MinimumCoinbase uint64 GenesisSiafundAllocation []SiafundOutput GenesisBlock Block // The GenesisID is used in many places. Calculating it once saves lots of // redundant computation. GenesisID BlockID // Oak hardfork constants.
Oak is the name of the difficulty algorithm for // Sia following a hardfork at block 135e3. OakHardforkBlock BlockHeight OakDecayNum int64 OakDecayDenom int64 OakMaxRise *big.Rat OakMaxDrop *big.Rat OakHardforkTxnSizeLimit = uint64(64e3) // 64 KB ) // init checks which build constant is in place and initializes the variables // accordingly. func init() { if build.Release == "dev" { // 'dev' settings are for small developer testnets, usually on the same // computer. Settings are slow enough that a small team of developers // can coordinate their actions over the developer testnets, but fast // enough that there isn't much time wasted on waiting for things to // happen. BlockFrequency = 12 // 12 seconds: slow enough for developers to see ~each block, fast enough that blocks don't waste time. MaturityDelay = 10 // 120 seconds before a delayed output matures. GenesisTimestamp = Timestamp(1424139000) // Change as necessary. RootTarget = Target{0, 0, 2} // Standard developer CPUs will be able to mine blocks with the race library activated. TargetWindow = 20 // Difficulty is adjusted based on prior 20 blocks. MaxAdjustmentUp = big.NewRat(120, 100) // Difficulty adjusts quickly. MaxAdjustmentDown = big.NewRat(100, 120) // Difficulty adjusts quickly. FutureThreshold = 2 * 60 // 2 minutes. ExtremeFutureThreshold = 4 * 60 // 4 minutes. MinimumCoinbase = 30e3 OakHardforkBlock = 100 OakDecayNum = 985 OakDecayDenom = 1000 OakMaxRise = big.NewRat(102, 100) OakMaxDrop = big.NewRat(100, 102) GenesisSiafundAllocation = []SiafundOutput{ { Value: NewCurrency64(2000), UnlockHash: UnlockHash{214, 166, 197, 164, 29, 201, 53, 236, 106, 239, 10, 158, 127, 131, 20, 138, 63, 221, 230, 16, 98, 247, 32, 77, 210, 68, 116, 12, 241, 89, 27, 223}, }, { Value: NewCurrency64(7000), UnlockHash: UnlockHash{209, 246, 228, 60, 248, 78, 242, 110, 9, 8, 227, 248, 225, 216, 163, 52, 142, 93, 47, 176, 103, 41, 137, 80, 212, 8, 132, 58, 241, 189, 2, 17}, }, { Value: NewCurrency64(1000), UnlockHash: UnlockConditions{}.UnlockHash(), }, } } else if build.Release == "testing" { // 'testing' settings are for automatic testing, and create much faster // environments than a human can interact with. BlockFrequency = 1 // As fast as possible MaturityDelay = 3 GenesisTimestamp = CurrentTimestamp() - 1e6 RootTarget = Target{128} // Takes an expected 2 hashes; very fast for testing but still probes 'bad hash' code. // A restrictive difficulty clamp prevents the difficulty from climbing // during testing, as the resolution on the difficulty adjustment is // only 1 second and testing mining should be happening substantially // faster than that. TargetWindow = 200 MaxAdjustmentUp = big.NewRat(10001, 10000) MaxAdjustmentDown = big.NewRat(9999, 10000) FutureThreshold = 3 // 3 seconds ExtremeFutureThreshold = 6 // 6 seconds MinimumCoinbase = 299990 // Minimum coinbase is hit after 10 blocks to make testing minimum-coinbase code easier. // Do not let the difficulty change rapidly - blocks will be getting // mined far faster than the difficulty can adjust to.
OakHardforkBlock = 20 OakDecayNum = 9999 OakDecayDenom = 10e3 OakMaxRise = big.NewRat(10001, 10e3) OakMaxDrop = big.NewRat(10e3, 10001) GenesisSiafundAllocation = []SiafundOutput{ { Value: NewCurrency64(2000), UnlockHash: UnlockHash{214, 166, 197, 164, 29, 201, 53, 236, 106, 239, 10, 158, 127, 131, 20, 138, 63, 221, 230, 16, 98, 247, 32, 77, 210, 68, 116, 12, 241, 89, 27, 223}, }, { Value: NewCurrency64(7000), UnlockHash: UnlockHash{209, 246, 228, 60, 248, 78, 242, 110, 9, 8, 227, 248, 225, 216, 163, 52, 142, 93, 47, 176, 103, 41, 137, 80, 212, 8, 132, 58, 241, 189, 2, 17}, }, { Value: NewCurrency64(1000), UnlockHash: UnlockConditions{}.UnlockHash(), }, } } else if build.Release == "standard" { // 'standard' settings are for the full network. They are slow enough // that the network is secure in a real-world byzantine environment. // A block time of 1 block per 10 minutes is chosen to follow Bitcoin's // example. The security lost by lowering the block time is not // insignificant, and the convenience gained by lowering the block time // even down to 90 seconds is not significant. I do feel that 10 // minutes could even be too short, but it has worked well for Bitcoin. BlockFrequency = 600 // Payouts take 1 day to mature. This is to prevent a class of double // spending attacks where parties unintentionally spend coins that will stop // existing after a blockchain reorganization. There are multiple // classes of payouts in Sia that depend on a previous block - if that // block changes, then the output changes and the previously existing // output ceases to exist. This delay stops both unintentional double // spending and stops a small set of long-range mining attacks. MaturityDelay = 144 // The genesis timestamp is set to June 6th, because that is when the // 100-block developer premine started. The trailing zeroes are a // bonus, and make the timestamp easier to memorize. GenesisTimestamp = Timestamp(1433600000) // June 6th, 2015 @ 2:13pm UTC. // The RootTarget was set such that the developers could reasonably // premine 100 blocks in a day. It was known to the developers at launch // that this was at least one and perhaps two orders of magnitude too // small. RootTarget = Target{0, 0, 0, 0, 32} // When the difficulty is adjusted, it is adjusted by looking at the // timestamp of the 1000th previous block. This minimizes the abilities // of miners to attack the network using rogue timestamps. TargetWindow = 1e3 // The difficulty adjustment is clamped to 2.5x every 500 blocks. This // corresponds to 6.25x every 2 weeks, which can be compared to // Bitcoin's clamp of 4x every 2 weeks. The difficulty clamp is // primarily to stop difficulty raising attacks. Sia's safety margin is // similar to Bitcoin's despite the looser clamp because Sia's // difficulty is adjusted four times as often. This does result in // greater difficulty oscillation, a tradeoff that was chosen to be // acceptable due to Sia's more vulnerable position as an altcoin. MaxAdjustmentUp = big.NewRat(25, 10) MaxAdjustmentDown = big.NewRat(10, 25) // Blocks will not be accepted if their timestamp is more than 3 hours // into the future, but will be accepted as soon as they are no longer // 3 hours into the future. Blocks that are greater than 5 hours into // the future are rejected outright, as it is assumed that by the time // 2 hours have passed, those blocks will no longer be on the longest // chain. Blocks cannot be kept forever because this opens a DoS // vector. FutureThreshold = 3 * 60 * 60 // 3 hours.
ExtremeFutureThreshold = 5 * 60 * 60 // 5 hours. // The minimum coinbase is set to 30,000. Because the coinbase // decreases by 1 every time, it means that Sia's coinbase will have an // increasingly potent dropoff for about 5 years, until inflation more // or less permanently settles around 2%. MinimumCoinbase = 30e3 // The oak difficulty adjustment hardfork is set to trigger at block // 135,000, which is just under 6 months after the hardfork was first // released as beta software to the network. This hopefully gives // everyone plenty of time to upgrade and adopt the hardfork, while also // being earlier than the most optimistic shipping dates for the miners // that would otherwise be very disruptive to the network. OakHardforkBlock = 135e3 // The decay is kept at 995/1000, or a decay of about 0.5% each block. // This puts the halflife of a block's relevance at about 1 day. This // allows the difficulty to adjust rapidly if the hashrate is adjusting // rapidly, while still keeping a relatively strong insulation against // random variance. OakDecayNum = 995 OakDecayDenom = 1e3 // The max rise and max drop for the difficulty is kept at 0.4% per // block, which means that in 1008 blocks the difficulty can move a // maximum of about 55x. This is significant, and means that dramatic // hashrate changes can be responded to quickly, while still forcing an // attacker to do a significant amount of work in order to execute a // difficulty raising attack, and minimizing the chance that an attacker // can get lucky and fake a ton of work. OakMaxRise = big.NewRat(1004, 1e3) OakMaxDrop = big.NewRat(1e3, 1004) GenesisSiafundAllocation = []SiafundOutput{ { Value: NewCurrency64(2), UnlockHash: UnlockHash{4, 57, 229, 188, 127, 20, 204, 245, 211, 167, 232, 130, 208, 64, 146, 62, 69, 98, 81, 102, 221, 7, 123, 100, 70, 107, 199, 113, 121, 26, 198, 252}, }, { Value: NewCurrency64(6), UnlockHash: UnlockHash{4, 158, 29, 42, 105, 119, 43, 5, 138, 72, 190, 190, 101, 114, 79, 243, 189, 248, 208, 151, 30, 187, 233, 148, 225, 233, 28, 159, 19, 232, 75, 244}, }, { Value: NewCurrency64(7), UnlockHash: UnlockHash{8, 7, 66, 250, 25, 74, 247, 108, 162, 79, 220, 151, 202, 228, 241, 11, 130, 138, 13, 248, 193, 167, 136, 197, 65, 63, 234, 174, 205, 216, 71, 230}, }, { Value: NewCurrency64(8), UnlockHash: UnlockHash{44, 106, 239, 51, 138, 102, 242, 19, 204, 197, 248, 178, 219, 122, 152, 251, 19, 20, 52, 32, 175, 32, 4, 156, 73, 33, 163, 165, 222, 184, 217, 218}, }, { Value: NewCurrency64(3), UnlockHash: UnlockHash{44, 163, 31, 233, 74, 103, 55, 132, 230, 159, 97, 78, 149, 147, 65, 110, 164, 211, 105, 173, 158, 29, 202, 43, 85, 217, 85, 75, 83, 37, 205, 223}, }, { Value: NewCurrency64(1), UnlockHash: UnlockHash{51, 151, 146, 84, 199, 7, 59, 89, 111, 172, 227, 200, 62, 55, 165, 253, 238, 186, 28, 145, 47, 137, 200, 15, 70, 199, 187, 125, 243, 104, 179, 240}, }, { Value: NewCurrency64(10), UnlockHash: UnlockHash{53, 118, 253, 229, 254, 229, 28, 131, 233, 156, 108, 58, 197, 152, 17, 160, 74, 252, 11, 49, 112, 240, 66, 119, 40, 98, 114, 251, 5, 86, 233, 117}, }, { Value: NewCurrency64(50), UnlockHash: UnlockHash{56, 219, 3, 50, 28, 3, 166, 95, 141, 163, 202, 35, 60, 199, 219, 10, 151, 176, 228, 97, 176, 133, 189, 33, 211, 202, 83, 197, 31, 208, 254, 193}, }, { Value: NewCurrency64(75), UnlockHash: UnlockHash{68, 190, 140, 87, 96, 232, 150, 32, 161, 177, 204, 65, 228, 223, 87, 217, 134, 90, 25, 56, 51, 45, 72, 107, 129, 12, 29, 202, 6, 7, 50, 13}, }, { Value: NewCurrency64(10), UnlockHash: UnlockHash{69, 14, 201, 200, 90, 73, 
245, 45, 154, 94, 161, 19, 199, 241, 203, 56, 13, 63, 5, 220, 121, 245, 247, 52, 194, 181, 252, 76, 130, 6, 114, 36}, }, { Value: NewCurrency64(10), UnlockHash: UnlockHash{72, 128, 253, 207, 169, 48, 1, 26, 237, 205, 169, 102, 196, 224, 42, 186, 95, 151, 59, 226, 203, 136, 251, 223, 165, 38, 88, 110, 47, 213, 121, 224}, }, { Value: NewCurrency64(50), UnlockHash: UnlockHash{72, 130, 164, 227, 218, 28, 60, 15, 56, 151, 212, 242, 77, 131, 232, 131, 42, 57, 132, 173, 113, 118, 66, 183, 38, 79, 96, 178, 105, 108, 26, 247}, }, { Value: NewCurrency64(10), UnlockHash: UnlockHash{74, 210, 58, 228, 111, 69, 253, 120, 53, 195, 110, 26, 115, 76, 211, 202, 199, 159, 204, 14, 78, 92, 14, 131, 250, 22, 141, 236, 154, 44, 39, 135}, }, { Value: NewCurrency64(15), UnlockHash: UnlockHash{85, 198, 154, 41, 196, 116, 226, 114, 202, 94, 214, 147, 87, 84, 247, 164, 195, 79, 58, 123, 26, 33, 68, 65, 116, 79, 181, 241, 241, 208, 215, 184}, }, { Value: NewCurrency64(121), UnlockHash: UnlockHash{87, 239, 83, 125, 152, 14, 19, 22, 203, 136, 46, 192, 203, 87, 224, 190, 77, 236, 125, 18, 142, 223, 146, 70, 16, 23, 252, 19, 100, 69, 91, 111}, }, { Value: NewCurrency64(222), UnlockHash: UnlockHash{91, 201, 101, 11, 188, 40, 35, 111, 236, 133, 31, 124, 97, 246, 140, 136, 143, 245, 152, 174, 111, 245, 188, 124, 21, 125, 187, 192, 203, 92, 253, 57}, }, { Value: NewCurrency64(10), UnlockHash: UnlockHash{110, 240, 238, 173, 78, 138, 185, 138, 179, 227, 135, 153, 54, 132, 46, 62, 226, 206, 204, 35, 174, 107, 156, 15, 142, 2, 93, 132, 163, 60, 50, 89}, }, { Value: NewCurrency64(3), UnlockHash: UnlockHash{114, 58, 147, 44, 64, 69, 72, 184, 65, 178, 213, 94, 157, 44, 88, 106, 92, 31, 145, 193, 215, 200, 215, 233, 99, 116, 36, 197, 160, 70, 79, 153}, }, { Value: NewCurrency64(1), UnlockHash: UnlockHash{123, 106, 229, 101, 220, 252, 50, 203, 38, 183, 133, 152, 250, 167, 210, 155, 252, 102, 150, 29, 187, 3, 178, 53, 11, 145, 143, 33, 166, 115, 250, 40}, }, { Value: NewCurrency64(5), UnlockHash: UnlockHash{124, 101, 207, 175, 50, 119, 207, 26, 62, 15, 247, 141, 150, 174, 73, 247, 238, 28, 77, 255, 222, 104, 166, 244, 112, 86, 227, 80, 215, 45, 69, 143}, }, { Value: NewCurrency64(10), UnlockHash: UnlockHash{130, 184, 72, 15, 227, 79, 217, 205, 120, 254, 67, 69, 10, 49, 76, 194, 222, 30, 242, 62, 88, 179, 51, 117, 27, 166, 140, 6, 7, 22, 222, 185}, }, { Value: NewCurrency64(25), UnlockHash: UnlockHash{134, 137, 198, 172, 96, 54, 45, 10, 100, 128, 91, 225, 226, 134, 143, 108, 31, 70, 187, 228, 54, 212, 70, 229, 149, 57, 64, 166, 153, 123, 238, 180}, }, { Value: NewCurrency64(1), UnlockHash: UnlockHash{143, 253, 118, 229, 109, 181, 141, 224, 91, 144, 123, 160, 203, 221, 119, 104, 172, 13, 105, 77, 171, 185, 122, 54, 229, 168, 6, 130, 160, 130, 182, 151}, }, { Value: NewCurrency64(8), UnlockHash: UnlockHash{147, 108, 249, 16, 36, 249, 108, 184, 196, 212, 241, 120, 219, 63, 45, 184, 86, 53, 96, 207, 130, 96, 210, 251, 136, 9, 193, 160, 131, 198, 221, 185}, }, { Value: NewCurrency64(58), UnlockHash: UnlockHash{155, 79, 89, 28, 69, 71, 239, 198, 246, 2, 198, 254, 92, 59, 192, 205, 229, 152, 36, 186, 110, 122, 233, 221, 76, 143, 3, 238, 89, 231, 192, 23}, }, { Value: NewCurrency64(2), UnlockHash: UnlockHash{156, 32, 76, 105, 213, 46, 66, 50, 27, 85, 56, 9, 106, 193, 80, 145, 19, 101, 84, 177, 145, 4, 125, 28, 79, 252, 43, 83, 118, 110, 206, 247}, }, { Value: NewCurrency64(23), UnlockHash: UnlockHash{157, 169, 134, 24, 254, 22, 58, 188, 119, 87, 201, 238, 55, 168, 194, 131, 88, 18, 39, 168, 37, 2, 198, 194, 93, 202, 116, 146, 189, 17, 108, 
44}, }, { Value: NewCurrency64(10), UnlockHash: UnlockHash{158, 51, 104, 36, 242, 114, 67, 16, 168, 230, 4, 111, 241, 72, 5, 14, 182, 102, 169, 156, 144, 220, 103, 117, 223, 8, 58, 187, 124, 102, 80, 44}, }, { Value: NewCurrency64(1), UnlockHash: UnlockHash{160, 175, 59, 33, 223, 30, 82, 60, 34, 110, 28, 203, 249, 93, 3, 16, 218, 12, 250, 206, 138, 231, 85, 67, 69, 191, 68, 198, 160, 87, 154, 68}, }, { Value: NewCurrency64(75), UnlockHash: UnlockHash{163, 94, 51, 220, 14, 144, 83, 112, 62, 10, 0, 173, 161, 234, 211, 176, 186, 84, 9, 189, 250, 111, 33, 231, 114, 87, 100, 75, 72, 217, 11, 26}, }, { Value: NewCurrency64(3), UnlockHash: UnlockHash{170, 7, 138, 116, 205, 20, 132, 197, 166, 251, 75, 93, 69, 6, 109, 244, 212, 119, 173, 114, 34, 18, 25, 21, 111, 203, 203, 253, 138, 104, 27, 36}, }, { Value: NewCurrency64(90), UnlockHash: UnlockHash{173, 120, 128, 104, 186, 86, 151, 140, 191, 23, 231, 193, 77, 245, 243, 104, 196, 55, 155, 243, 111, 15, 84, 139, 148, 187, 173, 47, 104, 69, 141, 39}, }, { Value: NewCurrency64(20), UnlockHash: UnlockHash{179, 185, 228, 166, 139, 94, 13, 193, 255, 227, 174, 99, 120, 105, 109, 221, 247, 4, 155, 243, 229, 37, 26, 98, 222, 12, 91, 80, 223, 33, 61, 56}, }, { Value: NewCurrency64(5), UnlockHash: UnlockHash{193, 49, 103, 20, 170, 135, 182, 85, 149, 18, 159, 194, 152, 120, 162, 208, 49, 158, 220, 188, 114, 79, 1, 131, 62, 27, 86, 57, 244, 46, 64, 66}, }, { Value: NewCurrency64(1), UnlockHash: UnlockHash{196, 71, 45, 222, 0, 21, 12, 121, 197, 224, 101, 65, 40, 57, 19, 119, 112, 205, 166, 23, 2, 91, 75, 231, 69, 143, 221, 68, 245, 75, 7, 52}, }, { Value: NewCurrency64(44), UnlockHash: UnlockHash{196, 214, 236, 211, 227, 216, 152, 127, 164, 2, 235, 14, 235, 46, 142, 231, 83, 38, 7, 131, 208, 29, 179, 189, 62, 88, 129, 180, 119, 158, 214, 97}, }, { Value: NewCurrency64(23), UnlockHash: UnlockHash{206, 58, 114, 148, 131, 49, 87, 197, 86, 18, 216, 26, 62, 79, 152, 175, 33, 4, 132, 160, 108, 231, 53, 200, 48, 76, 125, 94, 156, 85, 32, 130}, }, { Value: NewCurrency64(80), UnlockHash: UnlockHash{200, 103, 135, 126, 197, 2, 203, 63, 241, 6, 245, 195, 220, 102, 27, 74, 232, 249, 201, 86, 207, 34, 51, 26, 180, 151, 136, 108, 112, 56, 132, 72}, }, { Value: NewCurrency64(2), UnlockHash: UnlockHash{200, 249, 245, 218, 58, 253, 76, 250, 88, 114, 70, 239, 14, 2, 250, 123, 10, 192, 198, 61, 187, 155, 247, 152, 165, 174, 198, 24, 142, 39, 177, 119}, }, { Value: NewCurrency64(1), UnlockHash: UnlockHash{209, 1, 199, 184, 186, 57, 21, 137, 33, 252, 219, 184, 130, 38, 32, 98, 63, 252, 250, 79, 70, 146, 169, 78, 180, 161, 29, 93, 38, 45, 175, 176}, }, { Value: NewCurrency64(2), UnlockHash: UnlockHash{212, 107, 233, 43, 185, 138, 79, 253, 12, 237, 214, 17, 219, 198, 151, 92, 81, 129, 17, 120, 139, 58, 66, 119, 126, 220, 132, 136, 3, 108, 57, 58}, }, { Value: NewCurrency64(3), UnlockHash: UnlockHash{214, 244, 146, 173, 173, 80, 33, 185, 29, 133, 77, 167, 185, 1, 38, 23, 111, 179, 104, 150, 105, 162, 120, 26, 245, 63, 114, 119, 52, 1, 44, 222}, }, { Value: NewCurrency64(1), UnlockHash: UnlockHash{217, 218, 172, 16, 53, 134, 160, 226, 44, 138, 93, 53, 181, 62, 4, 209, 190, 27, 0, 93, 105, 17, 169, 61, 98, 145, 131, 112, 121, 55, 97, 184}, }, { Value: NewCurrency64(1), UnlockHash: UnlockHash{223, 162, 172, 55, 54, 193, 37, 142, 200, 213, 230, 48, 186, 145, 184, 206, 15, 225, 167, 19, 37, 70, 38, 48, 135, 87, 205, 81, 187, 237, 181, 180}, }, { Value: NewCurrency64(1), UnlockHash: UnlockHash{241, 46, 139, 41, 40, 63, 47, 169, 131, 173, 124, 246, 228, 213, 102, 44, 100, 217, 62, 237, 133, 
154, 248, 69, 228, 2, 36, 206, 47, 250, 249, 170}, }, { Value: NewCurrency64(50), UnlockHash: UnlockHash{241, 50, 229, 211, 66, 32, 115, 241, 117, 87, 180, 239, 76, 246, 14, 129, 105, 181, 153, 105, 105, 203, 229, 237, 23, 130, 193, 170, 100, 201, 38, 71}, }, { Value: NewCurrency64(8841), UnlockHash: UnlockHash{125, 12, 68, 247, 102, 78, 45, 52, 229, 62, 253, 224, 102, 26, 111, 98, 142, 201, 38, 71, 133, 174, 142, 60, 215, 201, 115, 232, 209, 144, 195, 201}, }, } } // Create the genesis block. GenesisBlock = Block{ Timestamp: GenesisTimestamp, Transactions: []Transaction{ {SiafundOutputs: GenesisSiafundAllocation}, }, } // Calculate the genesis ID. GenesisID = GenesisBlock.ID() } Sia-1.3.0/types/constants_test.go000066400000000000000000000011311313565667000170060ustar00rootroot00000000000000package types import ( "testing" "github.com/NebulousLabs/Sia/build" ) // TestCheckBuildConstants checks that the required build constants have been // set. func TestCheckBuildConstants(t *testing.T) { // Verify that the build has been set to 'testing'. if build.Release != "testing" { t.Error("build.Release needs to be set to \"testing\"") t.Error(build.Release) } if testing.Short() { t.SkipNow() } // Verify that, for the longer tests, the 'debug' build tag has been used. if !build.DEBUG { t.Error("DEBUG needs to be enabled for testing to work.") t.Error(build.DEBUG) } } Sia-1.3.0/types/currency.go000066400000000000000000000127171313565667000156000ustar00rootroot00000000000000package types // currency.go defines the internal currency object. One design goal of the // currency type is immutability: the currency type should be safe to pass // directly to other objects and packages. The currency object should never // have a negative value. The currency should never overflow. There is a // maximum size value that can be encoded (approximately 10^614, as encoded // values are capped at 255 bytes), however exceeding this value will not // result in overflow. import ( "errors" "math" "math/big" "github.com/NebulousLabs/Sia/build" ) type ( // A Currency represents a number of siacoins or siafunds. Internally, a // Currency value is unbounded; however, Currency values sent over the wire // protocol are subject to a maximum size of 255 bytes (approximately 10^614). // Unlike the math/big library, whose methods modify their receiver, all // arithmetic Currency methods return a new value. Currency cannot be negative. Currency struct { i big.Int } ) var ( // ZeroCurrency defines a currency of value zero. ZeroCurrency = NewCurrency64(0) // ErrNegativeCurrency is the error that is returned if performing an // operation results in a negative currency. ErrNegativeCurrency = errors.New("negative currency not allowed") // ErrUint64Overflow is the error that is returned if converting to a // uint64 would cause an overflow. ErrUint64Overflow = errors.New("cannot return the uint64 of this currency - result is an overflow") ) // NewCurrency creates a Currency value from a big.Int. Undefined behavior // occurs if a negative input is used. func NewCurrency(b *big.Int) (c Currency) { if b.Sign() < 0 { build.Critical(ErrNegativeCurrency) } else { c.i = *b } return } // NewCurrency64 creates a Currency value from a uint64. func NewCurrency64(x uint64) (c Currency) { c.i.SetUint64(x) return } // Add returns a new Currency value c = x + y. func (x Currency) Add(y Currency) (c Currency) { c.i.Add(&x.i, &y.i) return } // Big returns the value of c as a *big.Int. Importantly, it does not provide // access to c's internal big.Int object, only a copy.
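// // A minimal usage sketch of the copy semantics (illustrative, not part of the // original file): // // b := c.Big() // b.Add(b, big.NewInt(1)) // mutates only the copy; c itself is unchanged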
func (c Currency) Big() *big.Int { return new(big.Int).Set(&c.i) } // Cmp compares two Currency values. The return value follows the convention // of math/big. func (x Currency) Cmp(y Currency) int { return x.i.Cmp(&y.i) } // Cmp64 compares x to a uint64. The return value follows the convention of // math/big. func (x Currency) Cmp64(y uint64) int { return x.Cmp(NewCurrency64(y)) } // Div returns a new Currency value c = x / y. func (x Currency) Div(y Currency) (c Currency) { c.i.Div(&x.i, &y.i) return } // Div64 returns a new Currency value c = x / y. func (x Currency) Div64(y uint64) (c Currency) { c.i.Div(&x.i, new(big.Int).SetUint64(y)) return } // Equals returns true if x and y have the same value. func (x Currency) Equals(y Currency) bool { return x.Cmp(y) == 0 } // Equals64 returns true if x and y have the same value. func (x Currency) Equals64(y uint64) bool { return x.Cmp64(y) == 0 } // Mul returns a new Currency value c = x * y. func (x Currency) Mul(y Currency) (c Currency) { c.i.Mul(&x.i, &y.i) return } // Mul64 returns a new Currency value c = x * y. func (x Currency) Mul64(y uint64) (c Currency) { c.i.Mul(&x.i, new(big.Int).SetUint64(y)) return } // COMPATv0.4.0 - until the first 10e3 blocks have been archived, MulFloat is // needed while verifying the first set of blocks. // // MulFloat returns a new Currency value c = x * y, where y is a float64. // Behavior is undefined when y is negative. func (x Currency) MulFloat(y float64) (c Currency) { if y < 0 { build.Critical(ErrNegativeCurrency) } else { cRat := new(big.Rat).Mul( new(big.Rat).SetInt(&x.i), new(big.Rat).SetFloat64(y), ) c.i.Div(cRat.Num(), cRat.Denom()) } return } // MulRat returns a new Currency value c = x * y, where y is a big.Rat. func (x Currency) MulRat(y *big.Rat) (c Currency) { if y.Sign() < 0 { build.Critical(ErrNegativeCurrency) } else { c.i.Mul(&x.i, y.Num()) c.i.Div(&c.i, y.Denom()) } return } // MulTax returns a new Currency value c = x * 0.039, where 0.039 is the // siafund tax rate (SiafundPortion). func (x Currency) MulTax() (c Currency) { c.i.Mul(&x.i, big.NewInt(39)) c.i.Div(&c.i, big.NewInt(1000)) return c } // RoundDown returns the largest multiple of y <= x. func (x Currency) RoundDown(y Currency) (c Currency) { diff := new(big.Int).Mod(&x.i, &y.i) c.i.Sub(&x.i, diff) return } // IsZero returns true if the value is 0, false otherwise. func (c Currency) IsZero() bool { return c.i.Sign() <= 0 } // Sqrt returns a new Currency value c = sqrt(x). The result is rounded down to // the nearest integer. func (x Currency) Sqrt() (c Currency) { f, _ := new(big.Rat).SetInt(&x.i).Float64() sqrt := new(big.Rat).SetFloat64(math.Sqrt(f)) c.i.Div(sqrt.Num(), sqrt.Denom()) return } // Sub returns a new Currency value c = x - y. Behavior is undefined when // x < y. func (x Currency) Sub(y Currency) (c Currency) { if x.Cmp(y) < 0 { c = x build.Critical(ErrNegativeCurrency) } else { c.i.Sub(&x.i, &y.i) } return } // Uint64 converts a Currency to a uint64. An error is returned because this // function is sometimes called on values that can be determined by users - // rather than have all user-facing points do input checking, the input // checking should happen at the base type. This minimizes the chances of a // rogue user causing a build.Critical to be triggered.
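// // Usage sketch (illustrative, mirroring TestCurrencyUint64): // // u, err := NewCurrency64(42).Uint64() // u == 42, err == nil // _, err = NewCurrency64(math.MaxUint64).Mul64(2).Uint64() // err == ErrUint64Overflow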
func (c Currency) Uint64() (u uint64, err error) { if c.Cmp(NewCurrency64(math.MaxUint64)) > 0 { return 0, ErrUint64Overflow } return c.Big().Uint64(), nil } Sia-1.3.0/types/currency_test.go000066400000000000000000000213531313565667000166330ustar00rootroot00000000000000package types import ( "math" "math/big" "testing" ) // TestNewCurrency initializes a standard new currency. func TestNewCurrency(t *testing.T) { b := big.NewInt(481) c := NewCurrency(b) if b.String() != c.String() { t.Error("NewCurrency doesn't seem to work properly") } } // TestCurrencyAdd probes the addition function of the currency type. func TestCurrencyAdd(t *testing.T) { c7 := NewCurrency64(7) c12 := NewCurrency64(12) c19 := NewCurrency64(19) if c7.Add(c12).Cmp(c19) != 0 { t.Error("Add doesn't seem to work right") } } // TestCurrencyToBig tests the Big method for the currency type func TestCurrencyToBig(t *testing.T) { c := NewCurrency64(125) cb := c.Big() b := big.NewInt(125) if b.Cmp(cb) != 0 { t.Error("currency to big has failed") } } // TestCurrencyCmp tests the Cmp method for the currency type func TestCurrencyCmp(t *testing.T) { tests := []struct { x, y Currency exp int }{ {NewCurrency64(0), NewCurrency64(0), 0}, {NewCurrency64(0), NewCurrency64(1), -1}, {NewCurrency64(1), NewCurrency64(0), 1}, {NewCurrency64(100), NewCurrency64(7), 1}, {NewCurrency64(777), NewCurrency(big.NewInt(777)), 0}, {NewCurrency(big.NewInt(7)), NewCurrency(big.NewInt(8)), -1}, } for _, test := range tests { if c := test.x.Cmp(test.y); c != test.exp { t.Errorf("expected %v.Cmp(%v) == %v, got %v", test.x, test.y, test.exp, c) } else if bc := test.x.Big().Cmp(test.y.Big()); c != bc { t.Errorf("Currency.Cmp (%v) does not match big.Int.Cmp (%v) for %v.Cmp(%v)", c, bc, test.x, test.y) } } } // TestCurrencyCmp64 tests the Cmp64 method for the currency type func TestCurrencyCmp64(t *testing.T) { tests := []struct { x Currency y uint64 exp int }{ {NewCurrency64(0), 0, 0}, {NewCurrency64(0), 1, -1}, {NewCurrency64(1), 0, 1}, {NewCurrency64(100), 7, 1}, {NewCurrency64(777), 777, 0}, {NewCurrency(big.NewInt(7)), 8, -1}, } for _, test := range tests { if c := test.x.Cmp64(test.y); c != test.exp { t.Errorf("expected %v.Cmp64(%v) == %v, got %v", test.x, test.y, test.exp, c) } else if bc := test.x.Big().Cmp(big.NewInt(int64(test.y))); c != bc { t.Errorf("Currency.Cmp64 (%v) does not match big.Int.Cmp (%v) for %v.Cmp64(%v)", c, bc, test.x, test.y) } } } // TestCurrencyDiv checks that the div function has been correctly implemented. func TestCurrencyDiv(t *testing.T) { c9 := NewCurrency64(9) c10 := NewCurrency64(10) c90 := NewCurrency64(90) c97 := NewCurrency64(97) c90D10 := c90.Div(c10) if c90D10.Cmp(c9) != 0 { t.Error("Dividing 90 by 10 should produce 9") } c97D10 := c97.Div(c10) if c97D10.Cmp(c9) != 0 { t.Error("Dividing 97 by 10 should produce 9") } } // TestCurrencyDiv64 checks that the Div64 function has been correctly implemented.
func TestCurrencyDiv64(t *testing.T) { c9 := NewCurrency64(9) u10 := uint64(10) c90 := NewCurrency64(90) c97 := NewCurrency64(97) c90D10 := c90.Div64(u10) if c90D10.Cmp(c9) != 0 { t.Error("Dividing 90 by 10 should produce 9") } c97D10 := c97.Div64(u10) if c97D10.Cmp(c9) != 0 { t.Error("Dividing 97 by 10 should produce 9") } } // TestCurrencyEquals tests the Equals method for the currency type func TestCurrencyEquals(t *testing.T) { tests := []struct { x, y Currency exp bool }{ {NewCurrency64(0), NewCurrency64(0), true}, {NewCurrency64(0), NewCurrency64(1), false}, {NewCurrency64(1), NewCurrency64(0), false}, {NewCurrency64(100), NewCurrency64(7), false}, {NewCurrency64(777), NewCurrency(big.NewInt(777)), true}, {NewCurrency(big.NewInt(7)), NewCurrency(big.NewInt(8)), false}, } for _, test := range tests { if eq := test.x.Equals(test.y); eq != test.exp { t.Errorf("expected %v.Equals(%v) == %v, got %v", test.x, test.y, test.exp, eq) } else if bc := test.x.Big().Cmp(test.y.Big()); (bc == 0) != eq { t.Errorf("Currency.Equals (%v) does not match big.Int.Cmp (%v) for %v.Equals(%v)", eq, bc, test.x, test.y) } } } // TestCurrencyEquals64 tests the Equals64 method for the currency type func TestCurrencyEquals64(t *testing.T) { tests := []struct { x Currency y uint64 exp bool }{ {NewCurrency64(0), 0, true}, {NewCurrency64(0), 1, false}, {NewCurrency64(1), 0, false}, {NewCurrency64(100), 7, false}, {NewCurrency64(777), 777, true}, {NewCurrency(big.NewInt(7)), 8, false}, } for _, test := range tests { if eq := test.x.Equals64(test.y); eq != test.exp { t.Errorf("expected %v.Equals64(%v) == %v, got %v", test.x, test.y, test.exp, eq) } else if bc := test.x.Big().Cmp(big.NewInt(int64(test.y))); (bc == 0) != eq { t.Errorf("Currency.Equals64 (%v) does not match big.Int.Cmp (%v) for %v.Equals64(%v)", eq, bc, test.x, test.y) } } } // TestCurrencyMul probes the Mul function of the currency type. func TestCurrencyMul(t *testing.T) { c5 := NewCurrency64(5) c6 := NewCurrency64(6) c30 := NewCurrency64(30) if c5.Mul(c6).Cmp(c30) != 0 { t.Error("Multiplying 5 by 6 should equal 30") } } // TestCurrencyMul64 probes the Mul64 function of the currency type. func TestCurrencyMul64(t *testing.T) { c5 := NewCurrency64(5) u6 := uint64(6) c30 := NewCurrency64(30) if c5.Mul64(u6).Cmp(c30) != 0 { t.Error("Multiplying 5 by 6 should equal 30") } } // TestCurrencyMulRat probes the MulRat function of the currency type. func TestCurrencyMulRat(t *testing.T) { c5 := NewCurrency64(5) c7 := NewCurrency64(7) c10 := NewCurrency64(10) if c5.MulRat(big.NewRat(2, 1)).Cmp(c10) != 0 { t.Error("Multiplying 5 by 2 should return 10") } if c5.MulRat(big.NewRat(3, 2)).Cmp(c7) != 0 { t.Error("Multiplying 5 by 1.5 should return 7") } } // TestCurrencyRoundDown probes the RoundDown function of the currency type. func TestCurrencyRoundDown(t *testing.T) { // 10,000 is chosen because that's how many siafunds there usually are. c40000 := NewCurrency64(40000) c45000 := NewCurrency64(45000) if c45000.RoundDown(NewCurrency64(10000)).Cmp(c40000) != 0 { t.Error("rounding down 45000 to the nearest 10000 didn't work") } } // TestCurrencyIsZero probes the IsZero function of the currency type. func TestCurrencyIsZero(t *testing.T) { c0 := NewCurrency64(0) c1 := NewCurrency64(1) if !c0.IsZero() { t.Error("IsZero returns wrong value for 0") } if c1.IsZero() { t.Error("IsZero returns wrong value for 1") } } // TestCurrencySqrt probes the Sqrt function of the currency type. 
func TestCurrencySqrt(t *testing.T) { c8 := NewCurrency64(8) c64 := NewCurrency64(64) c80 := NewCurrency64(80) sqrt64 := c64.Sqrt() sqrt80 := c80.Sqrt() if c8.Cmp(sqrt64) != 0 { t.Error("square root of 64 should be 8") } if c8.Cmp(sqrt80) != 0 { t.Error("square root of 80 should be 8") } } // TestCurrencySub probes the Sub function of the currency type. func TestCurrencySub(t *testing.T) { c3 := NewCurrency64(3) c13 := NewCurrency64(13) c16 := NewCurrency64(16) if c16.Sub(c3).Cmp(c13) != 0 { t.Error("16 minus 3 should equal 13") } } // TestNegativeCurrencyMulRat checks that negative numbers are rejected when // calling MulRat on the currency type. func TestNegativeCurrencyMulRat(t *testing.T) { // In debug mode, attempting to get a negative currency results in a panic. defer func() { r := recover() if r == nil { t.Error("no panic occurred when trying to create a negative currency") } }() c := NewCurrency64(12) _ = c.MulRat(big.NewRat(-1, 1)) } // TestNegativeCurrencySub checks that negative numbers are prevented when // using subtraction on the currency type. func TestNegativeCurrencySub(t *testing.T) { // In debug mode, attempting to get a negative currency results in a panic. defer func() { r := recover() if r == nil { t.Error("no panic occurred when trying to create a negative currency") } }() c1 := NewCurrency64(1) c2 := NewCurrency64(2) _ = c1.Sub(c2) } // TestNegativeNewCurrency tries to create a currency from a negative number. func TestNegativeNewCurrency(t *testing.T) { // In debug mode, attempting to get a negative currency results in a panic. defer func() { r := recover() if r == nil { t.Error("no panic occurred when trying to create a negative currency") } }() // Try to create a new currency from a negative number. negBig := big.NewInt(-1) _ = NewCurrency(negBig) } // TestCurrencyUint64 tests that a currency is correctly converted to a uint64. func TestCurrencyUint64(t *testing.T) { // Try a set of valid values. values := []uint64{0, 1, 2, 3, 4, 25e3, math.MaxUint64 - 1e6, math.MaxUint64} for _, value := range values { c := NewCurrency64(value) result, err := c.Uint64() if err != nil { t.Error(err) } if value != result { t.Error("uint64 conversion failed") } } // Try an overflow. c := NewCurrency64(math.MaxUint64) c = c.Mul(NewCurrency64(2)) result, err := c.Uint64() if err != ErrUint64Overflow { t.Error(err) } if result != 0 { t.Error("result is not being zeroed in the event of an error") } } Sia-1.3.0/types/encoding.go000066400000000000000000000466701313565667000155410ustar00rootroot00000000000000package types import ( "bytes" "encoding/hex" "encoding/json" "fmt" "io" "math/big" "strings" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" ) // sanityCheckWriter checks that the bytes written to w exactly match the // bytes in buf. type sanityCheckWriter struct { w io.Writer buf *bytes.Buffer } func (s sanityCheckWriter) Write(p []byte) (int, error) { if !bytes.Equal(p, s.buf.Next(len(p))) { panic("encoding mismatch") } return s.w.Write(p) } // MarshalSia implements the encoding.SiaMarshaler interface.
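// // The wire layout, as read from the implementation below: parent ID (32 // bytes), nonce (8 bytes), timestamp (8 bytes), length-prefixed miner // payouts, length-prefixed transactions.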
func (b Block) MarshalSia(w io.Writer) error { if build.DEBUG { // Sanity check: compare against the old encoding buf := new(bytes.Buffer) encoding.NewEncoder(buf).EncodeAll( b.ParentID, b.Nonce, b.Timestamp, b.MinerPayouts, b.Transactions, ) w = sanityCheckWriter{w, buf} } w.Write(b.ParentID[:]) w.Write(b.Nonce[:]) encoding.WriteUint64(w, uint64(b.Timestamp)) encoding.WriteInt(w, len(b.MinerPayouts)) for i := range b.MinerPayouts { b.MinerPayouts[i].MarshalSia(w) } encoding.WriteInt(w, len(b.Transactions)) for i := range b.Transactions { if err := b.Transactions[i].MarshalSia(w); err != nil { return err } } return nil } // UnmarshalSia implements the encoding.SiaUnmarshaler interface. func (b *Block) UnmarshalSia(r io.Reader) error { io.ReadFull(r, b.ParentID[:]) io.ReadFull(r, b.Nonce[:]) tsBytes := make([]byte, 8) io.ReadFull(r, tsBytes) b.Timestamp = Timestamp(encoding.DecUint64(tsBytes)) return encoding.NewDecoder(r).DecodeAll(&b.MinerPayouts, &b.Transactions) } // MarshalJSON marshals a block id as a hex string. func (bid BlockID) MarshalJSON() ([]byte, error) { return json.Marshal(bid.String()) } // String prints the block id in hex. func (bid BlockID) String() string { return fmt.Sprintf("%x", bid[:]) } // UnmarshalJSON decodes the json hex string of the block id. func (bid *BlockID) UnmarshalJSON(b []byte) error { return (*crypto.Hash)(bid).UnmarshalJSON(b) } // MarshalSia implements the encoding.SiaMarshaler interface. func (cf CoveredFields) MarshalSia(w io.Writer) error { if cf.WholeTransaction { w.Write([]byte{1}) } else { w.Write([]byte{0}) } fields := [][]uint64{ cf.SiacoinInputs, cf.SiacoinOutputs, cf.FileContracts, cf.FileContractRevisions, cf.StorageProofs, cf.SiafundInputs, cf.SiafundOutputs, cf.MinerFees, cf.ArbitraryData, cf.TransactionSignatures, } for _, f := range fields { encoding.WriteInt(w, len(f)) for _, u := range f { if err := encoding.WriteUint64(w, u); err != nil { return err } } } return nil } // MarshalSiaSize returns the encoded size of cf. func (cf CoveredFields) MarshalSiaSize() (size int) { size += 1 // WholeTransaction size += 8 + len(cf.SiacoinInputs)*8 size += 8 + len(cf.SiacoinOutputs)*8 size += 8 + len(cf.FileContracts)*8 size += 8 + len(cf.FileContractRevisions)*8 size += 8 + len(cf.StorageProofs)*8 size += 8 + len(cf.SiafundInputs)*8 size += 8 + len(cf.SiafundOutputs)*8 size += 8 + len(cf.MinerFees)*8 size += 8 + len(cf.ArbitraryData)*8 size += 8 + len(cf.TransactionSignatures)*8 return } // MarshalJSON implements the json.Marshaler interface. func (c Currency) MarshalJSON() ([]byte, error) { // Must enclose the value in quotes; otherwise JS will convert it to a // double and lose precision. return []byte(`"` + c.String() + `"`), nil } // UnmarshalJSON implements the json.Unmarshaler interface. An error is // returned if a negative number is provided. func (c *Currency) UnmarshalJSON(b []byte) error { // UnmarshalJSON does not expect quotes b = bytes.Trim(b, `"`) err := c.i.UnmarshalJSON(b) if err != nil { return err } if c.i.Sign() < 0 { c.i = *big.NewInt(0) return ErrNegativeCurrency } return nil } // MarshalSia implements the encoding.SiaMarshaler interface. It writes the // byte-slice representation of the Currency's internal big.Int to w. Note // that as the bytes of the big.Int correspond to the absolute value of the // integer, there is no way to marshal a negative Currency. func (c Currency) MarshalSia(w io.Writer) error { return encoding.WritePrefix(w, c.i.Bytes()) } // MarshalSiaSize returns the encoded size of c.
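// // Worked example (illustrative): NewCurrency64(256).MarshalSiaSize() == 10, // because 256 encodes to the two bytes {1, 0} behind an 8-byte length prefix.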
func (c Currency) MarshalSiaSize() int { // from math/big/arith.go const ( _m = ^big.Word(0) _logS = _m>>8&1 + _m>>16&1 + _m>>32&1 _S = 1 << _logS // number of bytes per big.Word ) // start with the number of Words * number of bytes per Word, then // subtract trailing bytes that are 0 bits := c.i.Bits() size := len(bits) * _S zeros: for i := len(bits) - 1; i >= 0; i-- { for j := _S - 1; j >= 0; j-- { if (bits[i] >> uintptr(j*8)) != 0 { break zeros } size-- } } return 8 + size // account for length prefix } // UnmarshalSia implements the encoding.SiaUnmarshaler interface. func (c *Currency) UnmarshalSia(r io.Reader) error { b, err := encoding.ReadPrefix(r, 256) if err != nil { return err } var dec Currency dec.i.SetBytes(b) *c = dec return nil } // HumanString prints the Currency using human readable units. The unit used // will be the largest unit that results in a value greater than 1. The value is // rounded to 4 significant digits. func (c Currency) HumanString() string { pico := SiacoinPrecision.Div64(1e12) if c.Cmp(pico) < 0 { return c.String() + " H" } // iterate until we find a unit greater than c mag := pico unit := "" for _, unit = range []string{"pS", "nS", "uS", "mS", "SC", "KS", "MS", "GS", "TS"} { if c.Cmp(mag.Mul64(1e3)) < 0 { break } else if unit != "TS" { // don't want to perform this multiply on the last iter; that // would give us 1.235 TS instead of 1235 TS mag = mag.Mul64(1e3) } } num := new(big.Rat).SetInt(c.Big()) denom := new(big.Rat).SetInt(mag.Big()) res, _ := new(big.Rat).Mul(num, denom.Inv(denom)).Float64() return fmt.Sprintf("%.4g %s", res, unit) } // String implements the fmt.Stringer interface. func (c Currency) String() string { return c.i.String() } // Scan implements the fmt.Scanner interface, allowing Currency values to be // scanned from text. func (c *Currency) Scan(s fmt.ScanState, ch rune) error { var dec Currency err := dec.i.Scan(s, ch) if err != nil { return err } if dec.i.Sign() < 0 { return ErrNegativeCurrency } *c = dec return nil } // MarshalSia implements the encoding.SiaMarshaler interface. func (fc FileContract) MarshalSia(w io.Writer) error { encoding.WriteUint64(w, fc.FileSize) w.Write(fc.FileMerkleRoot[:]) encoding.WriteUint64(w, uint64(fc.WindowStart)) encoding.WriteUint64(w, uint64(fc.WindowEnd)) fc.Payout.MarshalSia(w) encoding.WriteInt(w, len(fc.ValidProofOutputs)) for _, sco := range fc.ValidProofOutputs { sco.MarshalSia(w) } encoding.WriteInt(w, len(fc.MissedProofOutputs)) for _, sco := range fc.MissedProofOutputs { sco.MarshalSia(w) } w.Write(fc.UnlockHash[:]) return encoding.WriteUint64(w, fc.RevisionNumber) } // MarshalSiaSize returns the encoded size of fc. func (fc FileContract) MarshalSiaSize() (size int) { size += 8 // FileSize size += len(fc.FileMerkleRoot) size += 8 + 8 // WindowStart + WindowEnd size += fc.Payout.MarshalSiaSize() size += 8 for _, sco := range fc.ValidProofOutputs { size += sco.Value.MarshalSiaSize() size += len(sco.UnlockHash) } size += 8 for _, sco := range fc.MissedProofOutputs { size += sco.Value.MarshalSiaSize() size += len(sco.UnlockHash) } size += len(fc.UnlockHash) size += 8 // RevisionNumber return } // MarshalSia implements the encoding.SiaMarshaler interface. 
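// // Fields are written in the order shown; MarshalSiaSize below mirrors this // layout field-for-field.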
func (fcr FileContractRevision) MarshalSia(w io.Writer) error { w.Write(fcr.ParentID[:]) fcr.UnlockConditions.MarshalSia(w) encoding.WriteUint64(w, fcr.NewRevisionNumber) encoding.WriteUint64(w, fcr.NewFileSize) w.Write(fcr.NewFileMerkleRoot[:]) encoding.WriteUint64(w, uint64(fcr.NewWindowStart)) encoding.WriteUint64(w, uint64(fcr.NewWindowEnd)) encoding.WriteInt(w, len(fcr.NewValidProofOutputs)) for _, sco := range fcr.NewValidProofOutputs { sco.MarshalSia(w) } encoding.WriteInt(w, len(fcr.NewMissedProofOutputs)) for _, sco := range fcr.NewMissedProofOutputs { sco.MarshalSia(w) } _, err := w.Write(fcr.NewUnlockHash[:]) return err } // MarshalSiaSize returns the encoded size of fcr. func (fcr FileContractRevision) MarshalSiaSize() (size int) { size += len(fcr.ParentID) size += fcr.UnlockConditions.MarshalSiaSize() size += 8 // NewRevisionNumber size += 8 // NewFileSize size += len(fcr.NewFileMerkleRoot) size += 8 + 8 // NewWindowStart + NewWindowEnd size += 8 for _, sco := range fcr.NewValidProofOutputs { size += sco.Value.MarshalSiaSize() size += len(sco.UnlockHash) } size += 8 for _, sco := range fcr.NewMissedProofOutputs { size += sco.Value.MarshalSiaSize() size += len(sco.UnlockHash) } size += len(fcr.NewUnlockHash) return } // MarshalJSON marshals an id as a hex string. func (fcid FileContractID) MarshalJSON() ([]byte, error) { return json.Marshal(fcid.String()) } // String prints the id in hex. func (fcid FileContractID) String() string { return fmt.Sprintf("%x", fcid[:]) } // UnmarshalJSON decodes the json hex string of the id. func (fcid *FileContractID) UnmarshalJSON(b []byte) error { return (*crypto.Hash)(fcid).UnmarshalJSON(b) } // MarshalJSON marshals an id as a hex string. func (oid OutputID) MarshalJSON() ([]byte, error) { return json.Marshal(oid.String()) } // String prints the id in hex. func (oid OutputID) String() string { return fmt.Sprintf("%x", oid[:]) } // UnmarshalJSON decodes the json hex string of the id. func (oid *OutputID) UnmarshalJSON(b []byte) error { return (*crypto.Hash)(oid).UnmarshalJSON(b) } // MarshalSia implements the encoding.SiaMarshaler interface. func (sci SiacoinInput) MarshalSia(w io.Writer) error { w.Write(sci.ParentID[:]) return sci.UnlockConditions.MarshalSia(w) } // MarshalSia implements the encoding.SiaMarshaler interface. func (sco SiacoinOutput) MarshalSia(w io.Writer) error { sco.Value.MarshalSia(w) _, err := w.Write(sco.UnlockHash[:]) return err } // MarshalJSON marshals an id as a hex string. func (scoid SiacoinOutputID) MarshalJSON() ([]byte, error) { return json.Marshal(scoid.String()) } // String prints the id in hex. func (scoid SiacoinOutputID) String() string { return fmt.Sprintf("%x", scoid[:]) } // UnmarshalJSON decodes the json hex string of the id. func (scoid *SiacoinOutputID) UnmarshalJSON(b []byte) error { return (*crypto.Hash)(scoid).UnmarshalJSON(b) } // MarshalSia implements the encoding.SiaMarshaler interface. func (sfi SiafundInput) MarshalSia(w io.Writer) error { w.Write(sfi.ParentID[:]) sfi.UnlockConditions.MarshalSia(w) _, err := w.Write(sfi.ClaimUnlockHash[:]) return err } // MarshalSia implements the encoding.SiaMarshaler interface. func (sfo SiafundOutput) MarshalSia(w io.Writer) error { sfo.Value.MarshalSia(w) w.Write(sfo.UnlockHash[:]) return sfo.ClaimStart.MarshalSia(w) } // MarshalJSON marshals an id as a hex string. func (sfoid SiafundOutputID) MarshalJSON() ([]byte, error) { return json.Marshal(sfoid.String()) } // String prints the id in hex. 
func (sfoid SiafundOutputID) String() string { return fmt.Sprintf("%x", sfoid[:]) } // UnmarshalJSON decodes the json hex string of the id. func (sfoid *SiafundOutputID) UnmarshalJSON(b []byte) error { return (*crypto.Hash)(sfoid).UnmarshalJSON(b) } // MarshalSia implements the encoding.SiaMarshaler interface. func (spk SiaPublicKey) MarshalSia(w io.Writer) error { w.Write(spk.Algorithm[:]) return encoding.WritePrefix(w, spk.Key) } // LoadString is the inverse of SiaPublicKey.String(). func (spk *SiaPublicKey) LoadString(s string) { parts := strings.Split(s, ":") if len(parts) != 2 { return } var err error spk.Key, err = hex.DecodeString(parts[1]) if err != nil { spk.Key = nil return } copy(spk.Algorithm[:], []byte(parts[0])) } // String defines how to print a SiaPublicKey - hex is used to keep things // compact during logging. The key type prefix and lack of a checksum help to // separate it from a sia address. func (spk *SiaPublicKey) String() string { return spk.Algorithm.String() + ":" + fmt.Sprintf("%x", spk.Key) } // MarshalJSON marshals a specifier as a string. func (s Specifier) MarshalJSON() ([]byte, error) { return json.Marshal(s.String()) } // String returns the specifier as a string, trimming any trailing zeros. func (s Specifier) String() string { var i int for i = range s { if s[i] == 0 { break } } return string(s[:i]) } // UnmarshalJSON decodes the json string of the specifier. func (s *Specifier) UnmarshalJSON(b []byte) error { var str string if err := json.Unmarshal(b, &str); err != nil { return err } copy(s[:], str) return nil } // MarshalSia implements the encoding.SiaMarshaler interface. func (sp *StorageProof) MarshalSia(w io.Writer) error { w.Write(sp.ParentID[:]) w.Write(sp.Segment[:]) encoding.WriteInt(w, len(sp.HashSet)) for i := range sp.HashSet { if _, err := w.Write(sp.HashSet[i][:]); err != nil { return err } } return nil } // MarshalSia implements the encoding.SiaMarshaler interface. func (t Transaction) MarshalSia(w io.Writer) error { if build.DEBUG { // Sanity check: compare against the old encoding buf := new(bytes.Buffer) encoding.NewEncoder(buf).EncodeAll( t.SiacoinInputs, t.SiacoinOutputs, t.FileContracts, t.FileContractRevisions, t.StorageProofs, t.SiafundInputs, t.SiafundOutputs, t.MinerFees, t.ArbitraryData, t.TransactionSignatures, ) w = sanityCheckWriter{w, buf} } t.marshalSiaNoSignatures(w) encoding.WriteInt(w, len((t.TransactionSignatures))) for i := range t.TransactionSignatures { err := t.TransactionSignatures[i].MarshalSia(w) if err != nil { return err } } return nil } // marshalSiaNoSignatures is a helper function for calculating certain hashes // that do not include the transaction's signatures. 
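// // The output matches MarshalSia exactly, except that the trailing // TransactionSignatures slice is omitted.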
func (t Transaction) marshalSiaNoSignatures(w io.Writer) { encoding.WriteInt(w, len((t.SiacoinInputs))) for i := range t.SiacoinInputs { t.SiacoinInputs[i].MarshalSia(w) } encoding.WriteInt(w, len((t.SiacoinOutputs))) for i := range t.SiacoinOutputs { t.SiacoinOutputs[i].MarshalSia(w) } encoding.WriteInt(w, len((t.FileContracts))) for i := range t.FileContracts { t.FileContracts[i].MarshalSia(w) } encoding.WriteInt(w, len((t.FileContractRevisions))) for i := range t.FileContractRevisions { t.FileContractRevisions[i].MarshalSia(w) } encoding.WriteInt(w, len((t.StorageProofs))) for i := range t.StorageProofs { t.StorageProofs[i].MarshalSia(w) } encoding.WriteInt(w, len((t.SiafundInputs))) for i := range t.SiafundInputs { t.SiafundInputs[i].MarshalSia(w) } encoding.WriteInt(w, len((t.SiafundOutputs))) for i := range t.SiafundOutputs { t.SiafundOutputs[i].MarshalSia(w) } encoding.WriteInt(w, len((t.MinerFees))) for i := range t.MinerFees { t.MinerFees[i].MarshalSia(w) } encoding.WriteInt(w, len((t.ArbitraryData))) for i := range t.ArbitraryData { encoding.WritePrefix(w, t.ArbitraryData[i]) } } // MarshalSiaSize returns the encoded size of t. func (t Transaction) MarshalSiaSize() (size int) { size += 8 for _, sci := range t.SiacoinInputs { size += len(sci.ParentID) size += sci.UnlockConditions.MarshalSiaSize() } size += 8 for _, sco := range t.SiacoinOutputs { size += sco.Value.MarshalSiaSize() size += len(sco.UnlockHash) } size += 8 for i := range t.FileContracts { size += t.FileContracts[i].MarshalSiaSize() } size += 8 for i := range t.FileContractRevisions { size += t.FileContractRevisions[i].MarshalSiaSize() } size += 8 for _, sp := range t.StorageProofs { size += len(sp.ParentID) size += len(sp.Segment) size += 8 + len(sp.HashSet)*crypto.HashSize } size += 8 for _, sfi := range t.SiafundInputs { size += len(sfi.ParentID) size += len(sfi.ClaimUnlockHash) size += sfi.UnlockConditions.MarshalSiaSize() } size += 8 for _, sfo := range t.SiafundOutputs { size += sfo.Value.MarshalSiaSize() size += len(sfo.UnlockHash) size += sfo.ClaimStart.MarshalSiaSize() } size += 8 for i := range t.MinerFees { size += t.MinerFees[i].MarshalSiaSize() } size += 8 for i := range t.ArbitraryData { size += 8 + len(t.ArbitraryData[i]) } size += 8 for _, ts := range t.TransactionSignatures { size += len(ts.ParentID) size += 8 // ts.PublicKeyIndex size += 8 // ts.Timelock size += ts.CoveredFields.MarshalSiaSize() size += 8 + len(ts.Signature) } // Sanity check against the slower method. if build.DEBUG { expectedSize := len(encoding.Marshal(t)) if expectedSize != size { panic("Transaction size different from expected size.") } } return } // MarshalJSON marshals an id as a hex string. func (tid TransactionID) MarshalJSON() ([]byte, error) { return json.Marshal(tid.String()) } // String prints the id in hex. func (tid TransactionID) String() string { return fmt.Sprintf("%x", tid[:]) } // UnmarshalJSON decodes the json hex string of the id. func (tid *TransactionID) UnmarshalJSON(b []byte) error { return (*crypto.Hash)(tid).UnmarshalJSON(b) } // MarshalSia implements the encoding.SiaMarshaler interface. func (ts TransactionSignature) MarshalSia(w io.Writer) error { w.Write(ts.ParentID[:]) encoding.WriteUint64(w, ts.PublicKeyIndex) encoding.WriteUint64(w, uint64(ts.Timelock)) ts.CoveredFields.MarshalSia(w) return encoding.WritePrefix(w, ts.Signature) } // MarshalSia implements the encoding.SiaMarshaler interface. 
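// // Size sketch (illustrative): conditions holding a single ed25519 key encode // to 8 (timelock) + 8 (key count) + 16 + 8 + 32 (algorithm specifier, length // prefix, key bytes) + 8 (signatures required) = 80 bytes, matching // MarshalSiaSize below.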
func (uc UnlockConditions) MarshalSia(w io.Writer) error { encoding.WriteUint64(w, uint64(uc.Timelock)) encoding.WriteInt(w, len(uc.PublicKeys)) for _, spk := range uc.PublicKeys { spk.MarshalSia(w) } return encoding.WriteUint64(w, uc.SignaturesRequired) } // MarshalSiaSize returns the encoded size of uc. func (uc UnlockConditions) MarshalSiaSize() (size int) { size += 8 // Timelock size += 8 // length prefix for PublicKeys for _, spk := range uc.PublicKeys { size += len(spk.Algorithm) size += 8 + len(spk.Key) } size += 8 // SignaturesRequired return } // MarshalJSON is implemented on the unlock hash to always produce a hex string // upon marshalling. func (uh UnlockHash) MarshalJSON() ([]byte, error) { return json.Marshal(uh.String()) } // UnmarshalJSON is implemented on the unlock hash to recover an unlock hash // that has been encoded to a hex string. func (uh *UnlockHash) UnmarshalJSON(b []byte) error { // Check the length of b. if len(b) != crypto.HashSize*2+UnlockHashChecksumSize*2+2 && len(b) != crypto.HashSize*2+2 { return ErrUnlockHashWrongLen } return uh.LoadString(string(b[1 : len(b)-1])) } // String returns the hex representation of the unlock hash as a string - this // includes a checksum. func (uh UnlockHash) String() string { uhChecksum := crypto.HashObject(uh) return fmt.Sprintf("%x%x", uh[:], uhChecksum[:UnlockHashChecksumSize]) } // LoadString loads a hex representation (including checksum) of an unlock hash // into an unlock hash object. An error is returned if the string is invalid or // fails the checksum. func (uh *UnlockHash) LoadString(strUH string) error { // Check the length of strUH. if len(strUH) != crypto.HashSize*2+UnlockHashChecksumSize*2 { return ErrUnlockHashWrongLen } // Decode the unlock hash. var byteUnlockHash []byte var checksum []byte _, err := fmt.Sscanf(strUH[:crypto.HashSize*2], "%x", &byteUnlockHash) if err != nil { return err } // Decode and verify the checksum. _, err = fmt.Sscanf(strUH[crypto.HashSize*2:], "%x", &checksum) if err != nil { return err } expectedChecksum := crypto.HashBytes(byteUnlockHash) if !bytes.Equal(expectedChecksum[:UnlockHashChecksumSize], checksum) { return ErrInvalidUnlockHashChecksum } copy(uh[:], byteUnlockHash[:]) return nil } Sia-1.3.0/types/encoding_test.go000066400000000000000000000415071313565667000165720ustar00rootroot00000000000000package types import ( "bytes" "encoding/json" "fmt" "math/big" "strings" "testing" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/fastrand" ) func hashStr(v interface{}) string { h := crypto.HashObject(v) return fmt.Sprintf("%x", h[:]) } // TestBlockEncoding probes the MarshalSia and UnmarshalSia methods of the // Block type. func TestBlockEncoding(t *testing.T) { b := Block{ MinerPayouts: []SiacoinOutput{ {Value: CalculateCoinbase(0)}, {Value: CalculateCoinbase(0)}, }, } var decB Block err := encoding.Unmarshal(encoding.Marshal(b), &decB) if err != nil { t.Fatal(err) } if len(decB.MinerPayouts) != len(b.MinerPayouts) || decB.MinerPayouts[0].Value.Cmp(b.MinerPayouts[0].Value) != 0 || decB.MinerPayouts[1].Value.Cmp(b.MinerPayouts[1].Value) != 0 { t.Fatal("block changed after encode/decode:", b, decB) } } // TestCurrencyMarshalJSON probes the MarshalJSON and UnmarshalJSON functions // of the currency type.
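// // Round-trip sketch (illustrative): marshalling NewCurrency64(30) produces the // quoted JSON string "30", and unmarshalling that string yields a Currency // equal to NewCurrency64(30).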
func TestCurrencyMarshalJSON(t *testing.T) { b30 := big.NewInt(30) c30 := NewCurrency64(30) bMar30, err := b30.MarshalJSON() if err != nil { t.Fatal(err) } cMar30, err := c30.MarshalJSON() if err != nil { t.Fatal(err) } if !bytes.Equal(bMar30, bytes.Trim(cMar30, `"`)) { t.Error("Currency does not match the marshalling of its math/big equivalent") } var cUmar30 Currency err = cUmar30.UnmarshalJSON(cMar30) if err != nil { t.Fatal(err) } if c30.Cmp(cUmar30) != 0 { t.Error("Incorrect unmarshalling of currency type.") } cMar30[0] = 0 err = cUmar30.UnmarshalJSON(cMar30) if err == nil { t.Error("JSON decoded nonsense input") } } // TestCurrencyMarshalSia probes the MarshalSia and UnmarshalSia functions of // the currency type. func TestCurrencyMarshalSia(t *testing.T) { c := NewCurrency64(1656) buf := new(bytes.Buffer) err := c.MarshalSia(buf) if err != nil { t.Fatal(err) } var cUmar Currency cUmar.UnmarshalSia(buf) if c.Cmp(cUmar) != 0 { t.Error("marshal and unmarshal mismatch for currency type") } } // TestCurrencyString probes the String function of the currency type. func TestCurrencyString(t *testing.T) { b := big.NewInt(7135) c := NewCurrency64(7135) if b.String() != c.String() { t.Error("string function not behaving as expected") } } // TestCurrencyScan probes the Scan function of the currency type. func TestCurrencyScan(t *testing.T) { var c0 Currency c1 := NewCurrency64(81293) _, err := fmt.Sscan("81293", &c0) if err != nil { t.Fatal(err) } if c0.Cmp(c1) != 0 { t.Error("scanned number does not equal expected value") } _, err = fmt.Sscan("z", &c0) if err == nil { t.Fatal("scan is accepting garbage input") } } // TestCurrencyEncoding checks that a currency can encode and decode without // error. func TestCurrencyEncoding(t *testing.T) { c := NewCurrency64(351) cMar := encoding.Marshal(c) var cUmar Currency err := encoding.Unmarshal(cMar, &cUmar) if err != nil { t.Error("Error unmarshalling a currency:", err) } if cUmar.Cmp(c) != 0 { t.Error("Marshalling and Unmarshalling a currency did not work correctly") } } // TestNegativeCurrencyUnmarshalJSON tries to unmarshal a negative number from // JSON. func TestNegativeCurrencyUnmarshalJSON(t *testing.T) { // Marshal a 2 digit number. c := NewCurrency64(35) cMar, err := c.MarshalJSON() if err != nil { t.Fatal(err) } // Change the first digit to a negative character. cMar[0] = 45 // Try unmarshalling the negative currency. var cNeg Currency err = cNeg.UnmarshalJSON(cMar) if err != ErrNegativeCurrency { t.Error("expecting ErrNegativeCurrency:", err) } if cNeg.i.Sign() < 0 { t.Error("negative currency returned") } } // TestNegativeCurrencyScan tries to scan in a negative number and checks for // an error. func TestNegativeCurrencyScan(t *testing.T) { var c Currency _, err := fmt.Sscan("-23", &c) if err != ErrNegativeCurrency { t.Error("expecting ErrNegativeCurrency:", err) } } // TestCurrencyUnsafeDecode tests that decoding into an existing Currency // value does not overwrite its contents. 
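// // The hazard being guarded against (sketch): after c := SiacoinPrecision, the // two values briefly share a big.Int backing array, so a later // fmt.Sscan("7", &c) must replace c wholesale rather than mutate the shared // words.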
func TestCurrencyUnsafeDecode(t *testing.T) { // Scan backup := SiacoinPrecision.Mul64(1) c := SiacoinPrecision _, err := fmt.Sscan("7", &c) if err != nil { t.Error(err) } else if !SiacoinPrecision.Equals(backup) { t.Errorf("Scan changed value of SiacoinPrecision: %v -> %v", backup, SiacoinPrecision) } // UnmarshalSia c = SiacoinPrecision err = encoding.Unmarshal(encoding.Marshal(NewCurrency64(7)), &c) if err != nil { t.Error(err) } else if !SiacoinPrecision.Equals(backup) { t.Errorf("UnmarshalSia changed value of SiacoinPrecision: %v -> %v", backup, SiacoinPrecision) } } // TestTransactionEncoding tests that optimizations applied to the encoding of // the Transaction type do not change its encoding. func TestTransactionEncoding(t *testing.T) { var txn Transaction if h := hashStr(txn); h != "143aa0da2b6a4ca39eee3ee50a6536d75eedff3b5ef0229a6d603afa7854d5b8" { t.Error("encoding mismatch:", h) } txn = Transaction{ SiacoinInputs: []SiacoinInput{{}}, SiacoinOutputs: []SiacoinOutput{{}}, FileContracts: []FileContract{{}}, FileContractRevisions: []FileContractRevision{{}}, StorageProofs: []StorageProof{{}}, SiafundInputs: []SiafundInput{{}}, SiafundOutputs: []SiafundOutput{{}}, MinerFees: []Currency{{}}, ArbitraryData: [][]byte{{}}, TransactionSignatures: []TransactionSignature{{}}, } if h := hashStr(txn); h != "a6c0f41cb89aaede0682ab06c1e757e12d662a0156ec878f85b935bc219fb3ca" { t.Error("encoding mismatch:", h) } } // TestSiacoinInputEncoding tests that optimizations applied to the encoding // of the SiacoinInput type do not change its encoding. func TestSiacoinInputEncoding(t *testing.T) { var sci SiacoinInput if h := hashStr(sci); h != "2f806f905436dc7c5079ad8062467266e225d8110a3c58d17628d609cb1c99d0" { t.Error("encoding mismatch:", h) } sci = SiacoinInput{ ParentID: SiacoinOutputID{1, 2, 3}, UnlockConditions: UnlockConditions{}, } if h := hashStr(sci); h != "f172a8f5892bb2b63eff32de6fd83c132be5ad134d1227d8881632bd809ae075" { t.Error("encoding mismatch:", h) } } // TestSiacoinOutputEncoding tests that optimizations applied to the encoding // of the SiacoinOutput type do not change its encoding. func TestSiacoinOutputEncoding(t *testing.T) { var sco SiacoinOutput if h := hashStr(sco); h != "4a1931803561f431decab002e7425f0a8531d5e456a1a47fd9998a2530c0f800" { t.Error("encoding mismatch:", h) } sco = SiacoinOutput{ Value: NewCurrency64(0), UnlockHash: UnlockHash{1, 2, 3}, } if h := hashStr(sco); h != "32fb94ae64201f3e0a373947382367666bcf205d47a58ece9260c459986ae6fd" { t.Error("encoding mismatch:", h) } } // TestSiafundInputEncoding tests that optimizations applied to the encoding // of the SiafundInput type do not change its encoding. func TestSiafundInputEncoding(t *testing.T) { var sci SiafundInput if h := hashStr(sci); h != "978a948b1a92bcddcea382bafc7718a25f8cc49b8fb11db5d9159afa960cf70a" { t.Error("encoding mismatch:", h) } sci = SiafundInput{ ParentID: SiafundOutputID{1, 2, 3}, UnlockConditions: UnlockConditions{1, nil, 3}, ClaimUnlockHash: UnlockHash{1, 2, 3}, } if h := hashStr(sci); h != "1a6781ca002262e1def98e294f86dd81f866e2db9029954c64a36d20d0c6b46f" { t.Error("encoding mismatch:", h) } } // TestSiafundOutputEncoding tests that optimizations applied to the encoding // of the SiafundOutput type do not change its encoding. 
func TestSiafundOutputEncoding(t *testing.T) { var sco SiafundOutput if h := hashStr(sco); h != "df69a516de12056d0895fdea7a0274c5aba67091543238670513104c1af69c1f" { t.Error("encoding mismatch:", h) } sco = SiafundOutput{ Value: NewCurrency64(0), UnlockHash: UnlockHash{1, 2, 3}, ClaimStart: NewCurrency64(4), } if h := hashStr(sco); h != "9524d2250b21adc76967e9f86d26a68982727329e5c42a6bf5e62504891a5176" { t.Error("encoding mismatch:", h) } } // TestCoveredFieldsEncoding tests that optimizations applied to the encoding // of the CoveredFields type do not change its encoding. func TestCoveredFieldsEncoding(t *testing.T) { var cf CoveredFields if h := hashStr(cf); h != "aecfdceb8b630b5b00668d229221f876b3be1630703c4615a642db2c666b4fd7" { t.Error("encoding mismatch:", h) } cf = CoveredFields{ WholeTransaction: true, SiacoinInputs: []uint64{0}, SiacoinOutputs: []uint64{1}, FileContracts: []uint64{2}, FileContractRevisions: []uint64{3}, StorageProofs: []uint64{4}, SiafundInputs: []uint64{5}, SiafundOutputs: []uint64{6}, MinerFees: []uint64{7}, ArbitraryData: []uint64{8}, TransactionSignatures: []uint64{9, 10}, } if h := hashStr(cf); h != "5b10cd6b50b09447aae02829643e62b513ce99b969a80aeb620f74e77ca9bbba" { t.Error("encoding mismatch:", h) } } // TestSiaPublicKeyEncoding tests that optimizations applied to the encoding // of the SiaPublicKey type do not change its encoding. func TestSiaPublicKeyEncoding(t *testing.T) { var spk SiaPublicKey if h := hashStr(spk); h != "19ea4a516c66775ea1f648d71f6b8fa227e8b0c1a0c9203f82c33b89c4e759b5" { t.Error("encoding mismatch:", h) } spk = SiaPublicKey{ Algorithm: Specifier{1, 2, 3}, Key: []byte{4, 5, 6}, } if h := hashStr(spk); h != "9c781bbeebc23a1885d00e778c358f0a4bc81a82b48191449129752a380adc03" { t.Error("encoding mismatch:", h) } } // TestSiaPublicKeyLoadString checks that the LoadString method is the proper // inverse of the String() method, also checks that there are no stupid panics // or severe errors. func TestSiaPublicKeyLoadString(t *testing.T) { spk := SiaPublicKey{ Algorithm: SignatureEd25519, Key: fastrand.Bytes(32), } spkString := spk.String() var loadedSPK SiaPublicKey loadedSPK.LoadString(spkString) if !bytes.Equal(loadedSPK.Algorithm[:], spk.Algorithm[:]) { t.Error("SiaPublicKey is not loading correctly") } if !bytes.Equal(loadedSPK.Key, spk.Key) { t.Log(loadedSPK.Key, spk.Key) t.Error("SiaPublicKey is not loading correctly") } // Try loading crappy strings. parts := strings.Split(spkString, ":") spk.LoadString(parts[0]) spk.LoadString(parts[0][1:]) spk.LoadString(parts[0][:1]) spk.LoadString(parts[1]) spk.LoadString(parts[1][1:]) spk.LoadString(parts[1][:1]) spk.LoadString(parts[0] + parts[1]) } // TestSiaPublicKeyString does a quick check to verify that the String method // on the SiaPublicKey is producing the expected output. func TestSiaPublicKeyString(t *testing.T) { spk := SiaPublicKey{ Algorithm: SignatureEd25519, Key: make([]byte, 32), } if spk.String() != "ed25519:0000000000000000000000000000000000000000000000000000000000000000" { t.Error("got wrong value for spk.String():", spk.String()) } } // TestSpecifierMarshaling tests the marshaling methods of the specifier // type. 
func TestSpecifierMarshaling(t *testing.T) { s1 := SpecifierClaimOutput b, err := json.Marshal(s1) if err != nil { t.Fatal(err) } var s2 Specifier err = json.Unmarshal(b, &s2) if err != nil { t.Fatal(err) } else if s2 != s1 { t.Fatal("mismatch:", s1, s2) } // invalid json x := 3 b, _ = json.Marshal(x) err = json.Unmarshal(b, &s2) if err == nil { t.Fatal("Unmarshal should have failed") } } // TestTransactionSignatureEncoding tests that optimizations applied to the // encoding of the TransactionSignature type do not change its encoding. func TestTransactionSignatureEncoding(t *testing.T) { var ts TransactionSignature if h := hashStr(ts); h != "5801097b0ae98fe7cedd4569afc11c0a433f284681ad4d66dd7181293f6d2bba" { t.Error("encoding mismatch:", h) } ts = TransactionSignature{ ParentID: crypto.Hash{1, 2, 3}, PublicKeyIndex: 4, Timelock: 5, CoveredFields: CoveredFields{}, Signature: []byte{6, 7, 8}, } if h := hashStr(ts); h != "a3ce36fd8e1d6b7e5b030cdc2630d24a44472072bbd06e94d32d11132d817db0" { t.Error("encoding mismatch:", h) } } // TestUnlockConditionsEncoding tests that optimizations applied to the // encoding of the UnlockConditions type do not change its encoding. func TestUnlockConditionsEncoding(t *testing.T) { var uc UnlockConditions if h := hashStr(uc); h != "19ea4a516c66775ea1f648d71f6b8fa227e8b0c1a0c9203f82c33b89c4e759b5" { t.Error("encoding mismatch:", h) } uc = UnlockConditions{ Timelock: 1, PublicKeys: []SiaPublicKey{{}}, SignaturesRequired: 3, } if h := hashStr(uc); h != "164d3741bd274d5333ab1fe8ab641b9d25cb0e0bed8e1d7bc466b5fffc956d96" { t.Error("encoding mismatch:", h) } } // TestUnlockHashJSONMarshalling checks that when an unlock hash is marshalled // and unmarshalled using JSON, the result is what is expected. func TestUnlockHashJSONMarshalling(t *testing.T) { // Create an unlock hash. uc := UnlockConditions{ Timelock: 5, SignaturesRequired: 3, } uh := uc.UnlockHash() // Marshal the unlock hash. marUH, err := json.Marshal(uh) if err != nil { t.Fatal(err) } // Unmarshal the unlock hash and compare to the original. var umarUH UnlockHash err = json.Unmarshal(marUH, &umarUH) if err != nil { t.Fatal(err) } if umarUH != uh { t.Error("Marshalled and unmarshalled unlock hash are not equivalent") } // Corrupt the checksum. marUH[36]++ err = umarUH.UnmarshalJSON(marUH) if err != ErrInvalidUnlockHashChecksum { t.Error("expecting an invalid checksum:", err) } marUH[36]-- // Try an input that's not correct hex. marUH[7] += 100 err = umarUH.UnmarshalJSON(marUH) if err == nil { t.Error("Expecting error after corrupting input") } marUH[7] -= 100 // Try an input of the wrong length. err = (&umarUH).UnmarshalJSON(marUH[2:]) if err != ErrUnlockHashWrongLen { t.Error("Got wrong error:", err) } } // TestUnlockHashStringMarshalling checks that when an unlock hash is // marshalled and unmarshalled using String and LoadString, the result is what // is expected. func TestUnlockHashStringMarshalling(t *testing.T) { // Create an unlock hash. uc := UnlockConditions{ Timelock: 2, SignaturesRequired: 7, } uh := uc.UnlockHash() // Marshal the unlock hash. marUH := uh.String() // Unmarshal the unlock hash and compare to the original. var umarUH UnlockHash err := umarUH.LoadString(marUH) if err != nil { t.Fatal(err) } if umarUH != uh { t.Error("Marshalled and unmarshalled unlock hash are not equivalent") } // Corrupt the checksum. 
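// For reference (a sketch of the layout, derived from String above): the
// string form of an unlock hash is 64 hex characters of crypto.HashSize
// hash followed by UnlockHashChecksumSize*2 = 12 hex characters of
// checksum. Index 36 therefore falls inside the hash portion, so flipping
// that byte makes the stored checksum stale and LoadString must return
// ErrInvalidUnlockHashChecksum.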
byteMarUH := []byte(marUH) byteMarUH[36]++ err = umarUH.LoadString(string(byteMarUH)) if err != ErrInvalidUnlockHashChecksum { t.Error("expecting an invalid checksum:", err) } byteMarUH[36]-- // Try an input that's not correct hex. byteMarUH[7] += 100 err = umarUH.LoadString(string(byteMarUH)) if err == nil { t.Error("Expecting error after corrupting input") } byteMarUH[7] -= 100 // Try an input of the wrong length. err = umarUH.LoadString(string(byteMarUH[2:])) if err != ErrUnlockHashWrongLen { t.Error("Got wrong error:", err) } } // TestCurrencyUnits checks that the HumanString method of the currency // type is correctly formatting values. func TestCurrencyUnits(t *testing.T) { tests := []struct { in Currency out string }{ {NewCurrency64(1), "1 H"}, {NewCurrency64(1000), "1000 H"}, {NewCurrency64(100000000000), "100000000000 H"}, {NewCurrency64(1000000000000), "1 pS"}, {NewCurrency64(1234560000000), "1.235 pS"}, {NewCurrency64(12345600000000), "12.35 pS"}, {NewCurrency64(123456000000000), "123.5 pS"}, {NewCurrency64(1000000000000000), "1 nS"}, {NewCurrency64(1000000000000000000), "1 uS"}, {NewCurrency64(1000000000).Mul64(1000000000000), "1 mS"}, {NewCurrency64(1).Mul(SiacoinPrecision), "1 SC"}, {NewCurrency64(1000).Mul(SiacoinPrecision), "1 KS"}, {NewCurrency64(1000000).Mul(SiacoinPrecision), "1 MS"}, {NewCurrency64(1000000000).Mul(SiacoinPrecision), "1 GS"}, {NewCurrency64(1000000000000).Mul(SiacoinPrecision), "1 TS"}, {NewCurrency64(1234560000000).Mul(SiacoinPrecision), "1.235 TS"}, {NewCurrency64(1234560000000000).Mul(SiacoinPrecision), "1235 TS"}, } for _, test := range tests { if test.in.HumanString() != test.out { t.Errorf("currencyUnits(%v): expected %v, got %v", test.in, test.out, test.in.HumanString()) } } } // TestTransactionMarshalSiaSize tests that the txn.MarshalSiaSize method is // always consistent with len(encoding.Marshal(txn)). func TestTransactionMarshalSiaSize(t *testing.T) { txn := Transaction{ SiacoinInputs: []SiacoinInput{{}}, SiacoinOutputs: []SiacoinOutput{{}}, FileContracts: []FileContract{{}}, FileContractRevisions: []FileContractRevision{{}}, StorageProofs: []StorageProof{{}}, SiafundInputs: []SiafundInput{{}}, SiafundOutputs: []SiafundOutput{{}}, MinerFees: []Currency{{}}, ArbitraryData: [][]byte{{}}, TransactionSignatures: []TransactionSignature{{}}, } if txn.MarshalSiaSize() != len(encoding.Marshal(txn)) { t.Errorf("sizes do not match: expected %v, got %v", len(encoding.Marshal(txn)), txn.MarshalSiaSize()) } } Sia-1.3.0/types/filecontracts.go000066400000000000000000000130211313565667000165740ustar00rootroot00000000000000package types // filecontracts.go contains the basic structs and helper functions for file // contracts. import ( "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" ) var ( ProofValid ProofStatus = true ProofMissed ProofStatus = false ) type ( // A FileContract is a public record of a storage agreement between a "host" // and a "renter." It mandates that a host must submit a storage proof to the // network, proving that they still possess the file they have agreed to // store. // // The party must submit the storage proof in a block that is between // 'WindowStart' and 'WindowEnd'. Upon submitting the proof, the outputs // for 'ValidProofOutputs' are created. If the party does not submit a // storage proof by 'WindowEnd', then the outputs for 'MissedProofOutputs' // are created instead. 
The sum of 'MissedProofOutputs' must equal // 'Payout', and the sum of 'ValidProofOutputs' must equal 'Payout' plus // the siafund fee. This fee is sent to the siafund pool, which is a set // of siacoins only spendable by siafund owners. // // Under normal circumstances, the payout will be funded by both the host and // the renter, which gives the host incentive not to lose the file. The // 'ValidProofUnlockHash' will typically be spendable by the host, and the // 'MissedProofUnlockHash' will either be spendable by the renter or by // nobody (the ZeroUnlockHash). // // A contract can be terminated early by submitting a FileContractTermination // whose UnlockConditions hash to 'TerminationHash'. FileContract struct { FileSize uint64 `json:"filesize"` FileMerkleRoot crypto.Hash `json:"filemerkleroot"` WindowStart BlockHeight `json:"windowstart"` WindowEnd BlockHeight `json:"windowend"` Payout Currency `json:"payout"` ValidProofOutputs []SiacoinOutput `json:"validproofoutputs"` MissedProofOutputs []SiacoinOutput `json:"missedproofoutputs"` UnlockHash UnlockHash `json:"unlockhash"` RevisionNumber uint64 `json:"revisionnumber"` } // A FileContractRevision revises an existing file contract. The ParentID // points to the file contract that is being revised. The UnlockConditions // are the conditions under which the revision is valid, and must match the // UnlockHash of the parent file contract. The Payout of the file contract // cannot be changed, but all other fields are allowed to be changed. The // sum of the outputs must match the original payout (taking into account // the fee for the proof payouts.) A revision number is included. When // getting accepted, the revision number of the revision must be higher // than any previously seen revision number for that file contract. // // FileContractRevisions enable trust-free modifications to existing file // contracts. FileContractRevision struct { ParentID FileContractID `json:"parentid"` UnlockConditions UnlockConditions `json:"unlockconditions"` NewRevisionNumber uint64 `json:"newrevisionnumber"` NewFileSize uint64 `json:"newfilesize"` NewFileMerkleRoot crypto.Hash `json:"newfilemerkleroot"` NewWindowStart BlockHeight `json:"newwindowstart"` NewWindowEnd BlockHeight `json:"newwindowend"` NewValidProofOutputs []SiacoinOutput `json:"newvalidproofoutputs"` NewMissedProofOutputs []SiacoinOutput `json:"newmissedproofoutputs"` NewUnlockHash UnlockHash `json:"newunlockhash"` } // A StorageProof fulfills a FileContract. The proof contains a specific // segment of the file, along with a set of hashes from the file's Merkle // tree. In combination, these can be used to prove that the segment came // from the file. To prevent abuse, the segment must be chosen randomly, so // the ID of block 'WindowStart' - 1 is used as a seed value; see // StorageProofSegment for the exact implementation. // // A transaction with a StorageProof cannot have any SiacoinOutputs, // SiafundOutputs, or FileContracts. This is because a mundane reorg can // invalidate the proof, and with it the rest of the transaction. StorageProof struct { ParentID FileContractID `json:"parentid"` Segment [crypto.SegmentSize]byte `json:"segment"` HashSet []crypto.Hash `json:"hashset"` } ProofStatus bool ) // StorageProofOutputID returns the ID of an output created by a file // contract, given the status of the storage proof. 
The ID is calculated by // hashing the concatenation of the StorageProofOutput Specifier, the ID of // the file contract that the proof is for, a boolean indicating whether the // proof was valid (true) or missed (false), and the index of the output // within the file contract. func (fcid FileContractID) StorageProofOutputID(proofStatus ProofStatus, i uint64) SiacoinOutputID { return SiacoinOutputID(crypto.HashAll( SpecifierStorageProofOutput, fcid, proofStatus, i, )) } // PostTax returns the amount of currency remaining in a file contract payout // after tax. func PostTax(height BlockHeight, payout Currency) Currency { return payout.Sub(Tax(height, payout)) } // Tax returns the amount of Currency that will be taxed from fc. func Tax(height BlockHeight, payout Currency) Currency { // COMPATv0.4.0 - until the first 20,000 blocks have been archived, they // will need to be handled in a special way. if (height < 21e3 && build.Release == "standard") || (height < 10 && build.Release == "testing") { return payout.MulFloat(0.039).RoundDown(SiafundCount) } return payout.MulTax().RoundDown(SiafundCount) } Sia-1.3.0/types/filecontracts_test.go000066400000000000000000000017631313565667000176430ustar00rootroot00000000000000package types import ( "testing" ) // TestTax probes the Tax function. func TestTax(t *testing.T) { // Test explicit values for post-hardfork tax values. if Tax(1e9, NewCurrency64(125e9)).Cmp(NewCurrency64(4875e6)) != 0 { t.Error("tax is being calculated incorrectly") } if PostTax(1e9, NewCurrency64(125e9)).Cmp(NewCurrency64(120125e6)) != 0 { t.Error("tax is being calculated incorrectly") } // Test equivalency for a series of values. if testing.Short() { t.SkipNow() } // COMPATv0.4.0 - check at height 0. for i := uint64(0); i < 10e3; i++ { val := NewCurrency64((1e3 * i) + i) tax := Tax(0, val) postTax := PostTax(0, val) if val.Cmp(tax.Add(postTax)) != 0 { t.Error("tax calculation inconsistent for", i) } } // Check at height 1e9 for i := uint64(0); i < 10e3; i++ { val := NewCurrency64((1e3 * i) + i) tax := Tax(1e9, val) postTax := PostTax(1e9, val) if val.Cmp(tax.Add(postTax)) != 0 { t.Error("tax calculation inconsistent for", i) } } } Sia-1.3.0/types/siag0of1of1.siakey000066400000000000000000000002571313565667000166410ustar00rootroot00000000000000[binary siag1.0 keyfile: one ed25519 key] Sia-1.3.0/types/siag0of2of3.siakey000066400000000000000000000004371313565667000166440ustar00rootroot00000000000000[binary siag1.0 keyfile: three ed25519 keys] Sia-1.3.0/types/siag1of2of3.siakey000066400000000000000000000004371313565667000166450ustar00rootroot00000000000000[binary siag1.0 keyfile: three ed25519 keys] Sia-1.3.0/types/siag2of2of3.siakey000066400000000000000000000004371313565667000166460ustar00rootroot00000000000000[binary siag1.0 keyfile: three ed25519 keys] Sia-1.3.0/types/signatures.go000066400000000000000000000337361313565667000161320ustar00rootroot00000000000000package types // signatures.go contains all of the types and functions related to creating // and verifying transaction signatures. There are a lot of rules surrounding // the correct use of signatures. Signatures can cover part or all of a // transaction, can use multiple different algorithms, and must satisfy a field // called 'UnlockConditions'. 
import ( "errors" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" ) var ( // These Specifiers enumerate the types of signatures that are recognized // by this implementation. If a signature's type is unrecognized, the // signature is treated as valid. Signatures using the special "entropy" // type are always treated as invalid; see Consensus.md for more details. SignatureEntropy = Specifier{'e', 'n', 't', 'r', 'o', 'p', 'y'} SignatureEd25519 = Specifier{'e', 'd', '2', '5', '5', '1', '9'} ErrEntropyKey = errors.New("transaction tries to sign an entproy public key") ErrFrivolousSignature = errors.New("transaction contains a frivolous signature") ErrInvalidPubKeyIndex = errors.New("transaction contains a signature that points to a nonexistent public key") ErrInvalidUnlockHashChecksum = errors.New("provided unlock hash has an invalid checksum") ErrMissingSignatures = errors.New("transaction has inputs with missing signatures") ErrPrematureSignature = errors.New("timelock on signature has not expired") ErrPublicKeyOveruse = errors.New("public key was used multiple times while signing transaction") ErrSortedUniqueViolation = errors.New("sorted unique violation") ErrUnlockHashWrongLen = errors.New("marshalled unlock hash is the wrong length") ErrWholeTransactionViolation = errors.New("covered fields violation") // FullCoveredFields is a covered fileds object where the // 'WholeTransaction' field has been set to true. The primary purpose of // this variable is syntactic sugar. FullCoveredFields = CoveredFields{WholeTransaction: true} ) type ( // CoveredFields indicates which fields in a transaction have been covered by // the signature. (Note that the signature does not sign the fields // themselves, but rather their combined hash; see SigHash.) Each slice // corresponds to a slice in the Transaction type, indicating which indices of // the slice have been signed. The indices must be valid, i.e. within the // bounds of the slice. In addition, they must be sorted and unique. // // As a convenience, a signature of the entire transaction can be indicated by // the 'WholeTransaction' field. If 'WholeTransaction' == true, all other // fields must be empty (except for the Signatures field, since a signature // cannot sign itself). CoveredFields struct { WholeTransaction bool `json:"wholetransaction"` SiacoinInputs []uint64 `json:"siacoininputs"` SiacoinOutputs []uint64 `json:"siacoinoutputs"` FileContracts []uint64 `json:"filecontracts"` FileContractRevisions []uint64 `json:"filecontractrevisions"` StorageProofs []uint64 `json:"storageproofs"` SiafundInputs []uint64 `json:"siafundinputs"` SiafundOutputs []uint64 `json:"siafundoutputs"` MinerFees []uint64 `json:"minerfees"` ArbitraryData []uint64 `json:"arbitrarydata"` TransactionSignatures []uint64 `json:"transactionsignatures"` } // A SiaPublicKey is a public key prefixed by a Specifier. The Specifier // indicates the algorithm used for signing and verification. Unrecognized // algorithms will always verify, which allows new algorithms to be added to // the protocol via a soft-fork. SiaPublicKey struct { Algorithm Specifier `json:"algorithm"` Key []byte `json:"key"` } // A TransactionSignature is a signature that is included in the transaction. // The signature should correspond to a public key in one of the // UnlockConditions of the transaction. This key is specified first by // 'ParentID', which specifies the UnlockConditions, and then // 'PublicKeyIndex', which indicates the key in the UnlockConditions. 
There // are three types that use UnlockConditions: SiacoinInputs, SiafundInputs, // and FileContractTerminations. Each of these types also references a // ParentID, and this is the hash that 'ParentID' must match. The 'Timelock' // prevents the signature from being used until a certain height. // 'CoveredFields' indicates which parts of the transaction are being signed; // see CoveredFields. TransactionSignature struct { ParentID crypto.Hash `json:"parentid"` PublicKeyIndex uint64 `json:"publickeyindex"` Timelock BlockHeight `json:"timelock"` CoveredFields CoveredFields `json:"coveredfields"` Signature []byte `json:"signature"` } // UnlockConditions are a set of conditions which must be met to execute // certain actions, such as spending a SiacoinOutput or terminating a // FileContract. // // The simplest requirement is that the block containing the UnlockConditions // must have a height >= 'Timelock'. // // 'PublicKeys' specifies the set of keys that can be used to satisfy the // UnlockConditions; of these, at least 'SignaturesRequired' unique keys must sign // the transaction. The keys do not need to use the same cryptographic // algorithm. // // If 'SignaturesRequired' == 0, the UnlockConditions are effectively "anyone can // unlock." If 'SignaturesRequired' > len('PublicKeys'), then the UnlockConditions // cannot be fulfilled under any circumstances. UnlockConditions struct { Timelock BlockHeight `json:"timelock"` PublicKeys []SiaPublicKey `json:"publickeys"` SignaturesRequired uint64 `json:"signaturesrequired"` } // Each input has a list of public keys and a required number of signatures. // inputSignatures keeps track of which public keys have been used and how many // more signatures are needed. inputSignatures struct { remainingSignatures uint64 possibleKeys []SiaPublicKey usedKeys map[uint64]struct{} index int } ) // Ed25519PublicKey returns pk as a SiaPublicKey, denoting its algorithm as // Ed25519. func Ed25519PublicKey(pk crypto.PublicKey) SiaPublicKey { return SiaPublicKey{ Algorithm: SignatureEd25519, Key: pk[:], } } // UnlockHash calculates the root hash of a Merkle tree of the // UnlockConditions object. The leaves of this tree are formed by taking the // hash of the timelock, the hash of the public keys (one leaf each), and the // hash of the number of signatures. The keys are put in the middle because // Timelock and SignaturesRequired are both low entropy fields; they can be // protected by having random public keys next to them. func (uc UnlockConditions) UnlockHash() UnlockHash { tree := crypto.NewTree() tree.PushObject(uc.Timelock) for i := range uc.PublicKeys { tree.PushObject(uc.PublicKeys[i]) } tree.PushObject(uc.SignaturesRequired) return UnlockHash(tree.Root()) } // SigHash returns the hash of the fields in a transaction covered by a given // signature. See CoveredFields for more details. 
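// A typical signing flow, sketched after the tests in this package (sk and
// txn are assumptions here: an ed25519 secret key from
// crypto.GenerateKeyPair and a transaction whose signature i already names
// its ParentID and covered fields):
//
//	sigHash := txn.SigHash(i)           // hash of the fields covered by signature i
//	sig := crypto.SignHash(sigHash, sk) // sign that hash
//	txn.TransactionSignatures[i].Signature = sig[:]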
func (t Transaction) SigHash(i int) (hash crypto.Hash) { cf := t.TransactionSignatures[i].CoveredFields h := crypto.NewHash() if cf.WholeTransaction { t.marshalSiaNoSignatures(h) h.Write(t.TransactionSignatures[i].ParentID[:]) encoding.WriteUint64(h, t.TransactionSignatures[i].PublicKeyIndex) encoding.WriteUint64(h, uint64(t.TransactionSignatures[i].Timelock)) } else { for _, input := range cf.SiacoinInputs { t.SiacoinInputs[input].MarshalSia(h) } for _, output := range cf.SiacoinOutputs { t.SiacoinOutputs[output].MarshalSia(h) } for _, contract := range cf.FileContracts { t.FileContracts[contract].MarshalSia(h) } for _, revision := range cf.FileContractRevisions { t.FileContractRevisions[revision].MarshalSia(h) } for _, storageProof := range cf.StorageProofs { t.StorageProofs[storageProof].MarshalSia(h) } for _, siafundInput := range cf.SiafundInputs { t.SiafundInputs[siafundInput].MarshalSia(h) } for _, siafundOutput := range cf.SiafundOutputs { t.SiafundOutputs[siafundOutput].MarshalSia(h) } for _, minerFee := range cf.MinerFees { t.MinerFees[minerFee].MarshalSia(h) } for _, arbData := range cf.ArbitraryData { encoding.WritePrefix(h, t.ArbitraryData[arbData]) } } for _, sig := range cf.TransactionSignatures { t.TransactionSignatures[sig].MarshalSia(h) } h.Sum(hash[:0]) return } // sortedUnique checks that 'elems' is sorted, contains no repeats, and that no // element is larger than or equal to 'max'. func sortedUnique(elems []uint64, max int) bool { if len(elems) == 0 { return true } biggest := elems[0] for _, elem := range elems[1:] { if elem <= biggest { return false } biggest = elem } if biggest >= uint64(max) { return false } return true } // validCoveredFields makes sure that all covered fields objects in the // signatures follow the rules. This means that if 'WholeTransaction' is set to // true, all fields except for 'Signatures' must be empty. All fields must be // sorted numerically, and there can be no repeats. func (t Transaction) validCoveredFields() error { for _, sig := range t.TransactionSignatures { // convenience variables cf := sig.CoveredFields fieldMaxs := []struct { field []uint64 max int }{ {cf.SiacoinInputs, len(t.SiacoinInputs)}, {cf.SiacoinOutputs, len(t.SiacoinOutputs)}, {cf.FileContracts, len(t.FileContracts)}, {cf.FileContractRevisions, len(t.FileContractRevisions)}, {cf.StorageProofs, len(t.StorageProofs)}, {cf.SiafundInputs, len(t.SiafundInputs)}, {cf.SiafundOutputs, len(t.SiafundOutputs)}, {cf.MinerFees, len(t.MinerFees)}, {cf.ArbitraryData, len(t.ArbitraryData)}, {cf.TransactionSignatures, len(t.TransactionSignatures)}, } // Check that all fields are empty if 'WholeTransaction' is set, except // for the Signatures field which isn't affected. if cf.WholeTransaction { // 'WholeTransaction' does not check signatures. for _, fieldMax := range fieldMaxs[:len(fieldMaxs)-1] { if len(fieldMax.field) != 0 { return ErrWholeTransactionViolation } } } // Check that all fields are sorted, and without repeat values, and // that all elements point to objects that exist within the // transaction. If there are repeats, it means a transaction is trying // to sign the same object twice. This is unnecessary, and opens up a // DoS vector where the transaction asks the verifier to verify many GB // of data. for _, fieldMax := range fieldMaxs { if !sortedUnique(fieldMax.field, fieldMax.max) { return ErrSortedUniqueViolation } } } return nil } // validSignatures checks the validity of all signatures in a transaction. 
func (t *Transaction) validSignatures(currentHeight BlockHeight) error { // Check that all covered fields objects follow the rules. err := t.validCoveredFields() if err != nil { return err } // Create the inputSignatures object for each input. sigMap := make(map[crypto.Hash]*inputSignatures) for i, input := range t.SiacoinInputs { id := crypto.Hash(input.ParentID) _, exists := sigMap[id] if exists { return ErrDoubleSpend } sigMap[id] = &inputSignatures{ remainingSignatures: input.UnlockConditions.SignaturesRequired, possibleKeys: input.UnlockConditions.PublicKeys, usedKeys: make(map[uint64]struct{}), index: i, } } for i, revision := range t.FileContractRevisions { id := crypto.Hash(revision.ParentID) _, exists := sigMap[id] if exists { return ErrDoubleSpend } sigMap[id] = &inputSignatures{ remainingSignatures: revision.UnlockConditions.SignaturesRequired, possibleKeys: revision.UnlockConditions.PublicKeys, usedKeys: make(map[uint64]struct{}), index: i, } } for i, input := range t.SiafundInputs { id := crypto.Hash(input.ParentID) _, exists := sigMap[id] if exists { return ErrDoubleSpend } sigMap[id] = &inputSignatures{ remainingSignatures: input.UnlockConditions.SignaturesRequired, possibleKeys: input.UnlockConditions.PublicKeys, usedKeys: make(map[uint64]struct{}), index: i, } } // Check all of the signatures for validity. for i, sig := range t.TransactionSignatures { // Check that sig corresponds to an entry in sigMap. inSig, exists := sigMap[crypto.Hash(sig.ParentID)] if !exists || inSig.remainingSignatures == 0 { return ErrFrivolousSignature } // Check that sig's key hasn't already been used. _, exists = inSig.usedKeys[sig.PublicKeyIndex] if exists { return ErrPublicKeyOveruse } // Check that the public key index refers to an existing public key. if sig.PublicKeyIndex >= uint64(len(inSig.possibleKeys)) { return ErrInvalidPubKeyIndex } // Check that the timelock has expired. if sig.Timelock > currentHeight { return ErrPrematureSignature } // Check that the signature verifies. Multiple signature schemes are // supported. publicKey := inSig.possibleKeys[sig.PublicKeyIndex] switch publicKey.Algorithm { case SignatureEntropy: // Entropy cannot ever be used to sign a transaction. return ErrEntropyKey case SignatureEd25519: // Decode the public key and signature. var edPK crypto.PublicKey err := encoding.Unmarshal([]byte(publicKey.Key), &edPK) if err != nil { return err } var edSig [crypto.SignatureSize]byte err = encoding.Unmarshal([]byte(sig.Signature), &edSig) if err != nil { return err } cryptoSig := crypto.Signature(edSig) sigHash := t.SigHash(i) err = crypto.VerifyHash(sigHash, edPK, cryptoSig) if err != nil { return err } default: // If the identifier is not recognized, assume that the signature // is valid. This allows more signature types to be added via soft // forking. } inSig.usedKeys[sig.PublicKeyIndex] = struct{}{} inSig.remainingSignatures-- } // Check that all inputs have been sufficiently signed. for _, reqSigs := range sigMap { if reqSigs.remainingSignatures != 0 { return ErrMissingSignatures } } return nil } Sia-1.3.0/types/signatures_test.go000066400000000000000000000253021313565667000171630ustar00rootroot00000000000000package types import ( "bytes" "testing" "github.com/NebulousLabs/Sia/crypto" ) // TestEd25519PublicKey tests the Ed25519PublicKey function. 
func TestEd25519PublicKey(t *testing.T) { _, pk := crypto.GenerateKeyPair() spk := Ed25519PublicKey(pk) if spk.Algorithm != SignatureEd25519 { t.Error("Ed25519PublicKey created key with wrong algorithm specifier:", spk.Algorithm) } if !bytes.Equal(spk.Key, pk[:]) { t.Error("Ed25519PublicKey created key with wrong data") } } // TestUnlockHash runs the UnlockHash code. func TestUnlockHash(t *testing.T) { uc := UnlockConditions{ Timelock: 1, PublicKeys: []SiaPublicKey{ { Algorithm: SignatureEntropy, Key: []byte{'f', 'a', 'k', 'e'}, }, }, SignaturesRequired: 3, } _ = uc.UnlockHash() } // TestSigHash runs the SigHash function of the transaction type. func TestSigHash(t *testing.T) { txn := Transaction{ SiacoinInputs: []SiacoinInput{{}}, SiacoinOutputs: []SiacoinOutput{{}}, FileContracts: []FileContract{{}}, FileContractRevisions: []FileContractRevision{{}}, StorageProofs: []StorageProof{{}}, SiafundInputs: []SiafundInput{{}}, SiafundOutputs: []SiafundOutput{{}}, MinerFees: []Currency{{}}, ArbitraryData: [][]byte{{'o'}, {'t'}}, TransactionSignatures: []TransactionSignature{ { CoveredFields: CoveredFields{ WholeTransaction: true, }, }, { CoveredFields: CoveredFields{ SiacoinInputs: []uint64{0}, SiacoinOutputs: []uint64{0}, FileContracts: []uint64{0}, FileContractRevisions: []uint64{0}, StorageProofs: []uint64{0}, SiafundInputs: []uint64{0}, SiafundOutputs: []uint64{0}, MinerFees: []uint64{0}, ArbitraryData: []uint64{0}, TransactionSignatures: []uint64{0}, }, }, }, } _ = txn.SigHash(0) _ = txn.SigHash(1) } // TestSortedUnique probes the sortedUnique function. func TestSortedUnique(t *testing.T) { su := []uint64{3, 5, 6, 8, 12} if !sortedUnique(su, 13) { t.Error("sortedUnique rejected a valid array") } if sortedUnique(su, 12) { t.Error("sortedUnique accepted an invalid max") } if sortedUnique(su, 11) { t.Error("sortedUnique accepted an invalid max") } unsorted := []uint64{3, 5, 3} if sortedUnique(unsorted, 6) { t.Error("sortedUnique accepted an unsorted array") } repeats := []uint64{2, 4, 4, 7} if sortedUnique(repeats, 8) { t.Error("sortedUnique accepted an array with repeats") } bothFlaws := []uint64{2, 3, 4, 5, 6, 6, 4} if sortedUnique(bothFlaws, 7) { t.Error("sortedUnique accepted an array with multiple flaws") } } // TestTransactionValidCoveredFields probes the validCoveredFields method of // the transaction type. func TestTransactionValidCoveredFields(t *testing.T) { if testing.Short() { t.SkipNow() } // Create a transaction with all fields filled in minimally. The first // check has a legal CoveredFields object with 'WholeTransaction' set. txn := Transaction{ SiacoinInputs: []SiacoinInput{{}}, SiacoinOutputs: []SiacoinOutput{{}}, FileContracts: []FileContract{{}}, FileContractRevisions: []FileContractRevision{{}}, StorageProofs: []StorageProof{{}}, SiafundInputs: []SiafundInput{{}}, SiafundOutputs: []SiafundOutput{{}}, MinerFees: []Currency{{}}, ArbitraryData: [][]byte{{'o'}, {'t'}}, TransactionSignatures: []TransactionSignature{ { CoveredFields: CoveredFields{ WholeTransaction: true, }, }, }, } err := txn.validCoveredFields() if err != nil { t.Error(err) } // Second check has CoveredFields object where 'WholeTransaction' is not // set. txn.TransactionSignatures = append(txn.TransactionSignatures, TransactionSignature{ CoveredFields: CoveredFields{ SiacoinOutputs: []uint64{0}, MinerFees: []uint64{0}, ArbitraryData: []uint64{0}, FileContractRevisions: []uint64{0}, }, }) err = txn.validCoveredFields() if err != nil { t.Error(err) } // Add signature coverage to the first signature. 
This should not violate // any rules. txn.TransactionSignatures[0].CoveredFields.TransactionSignatures = []uint64{1} err = txn.validCoveredFields() if err != nil { t.Error(err) } // Add siacoin output coverage to the first signature. This should violate // rules, as the fields are not allowed to be set when 'WholeTransaction' // is set. txn.TransactionSignatures[0].CoveredFields.SiacoinOutputs = []uint64{0} err = txn.validCoveredFields() if err != ErrWholeTransactionViolation { t.Error("Expecting ErrWholeTransactionViolation, got", err) } // Create a SortedUnique violation instead of a WholeTransactionViolation. txn.TransactionSignatures[0].CoveredFields.SiacoinOutputs = nil txn.TransactionSignatures[0].CoveredFields.TransactionSignatures = []uint64{1, 2} err = txn.validCoveredFields() if err != ErrSortedUniqueViolation { t.Error("Expecting ErrSortedUniqueViolation, got", err) } } // TestTransactionValidSignatures probes the validSignatures method of the // Transaction type. func TestTransactionValidSignatures(t *testing.T) { // Create keys for use in signing and verifying. sk, pk := crypto.GenerateKeyPair() // Create UnlockConditions with 3 keys, 2 of which are required. The first // possible key is a standard signature. The second key is an unknown // signature type, which should always be accepted. The final type is an // entropy type, which should never be accepted. uc := UnlockConditions{ PublicKeys: []SiaPublicKey{ {Algorithm: SignatureEd25519, Key: pk[:]}, {}, {Algorithm: SignatureEntropy}, }, SignaturesRequired: 2, } // Create a transaction with each type of unlock condition. txn := Transaction{ SiacoinInputs: []SiacoinInput{ {UnlockConditions: uc}, }, FileContractRevisions: []FileContractRevision{ {UnlockConditions: uc}, }, SiafundInputs: []SiafundInput{ {UnlockConditions: uc}, }, } txn.FileContractRevisions[0].ParentID[0] = 1 // can't overlap with other objects txn.SiafundInputs[0].ParentID[0] = 2 // can't overlap with other objects // Create the signatures that spend the output. txn.TransactionSignatures = []TransactionSignature{ // First signatures use cryptography. { Timelock: 5, CoveredFields: CoveredFields{WholeTransaction: true}, }, { CoveredFields: CoveredFields{WholeTransaction: true}, }, { CoveredFields: CoveredFields{WholeTransaction: true}, }, // The second signatures should always work for being unrecognized // types. {PublicKeyIndex: 1}, {PublicKeyIndex: 1}, {PublicKeyIndex: 1}, } txn.TransactionSignatures[1].ParentID[0] = 1 txn.TransactionSignatures[2].ParentID[0] = 2 txn.TransactionSignatures[4].ParentID[0] = 1 txn.TransactionSignatures[5].ParentID[0] = 2 sigHash0 := txn.SigHash(0) sigHash1 := txn.SigHash(1) sigHash2 := txn.SigHash(2) sig0 := crypto.SignHash(sigHash0, sk) sig1 := crypto.SignHash(sigHash1, sk) sig2 := crypto.SignHash(sigHash2, sk) txn.TransactionSignatures[0].Signature = sig0[:] txn.TransactionSignatures[1].Signature = sig1[:] txn.TransactionSignatures[2].Signature = sig2[:] // Check that the signing was successful. err := txn.validSignatures(10) if err != nil { t.Error(err) } // Corrupt one of the signatures. sig0[0]++ txn.TransactionSignatures[0].Signature = sig0[:] err = txn.validSignatures(10) if err == nil { t.Error("Corrupted a signature but the txn was still accepted as valid!") } sig0[0]-- txn.TransactionSignatures[0].Signature = sig0[:] // Fail the validCoveredFields check. 
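// For the check below: index 33 is out of bounds for the single siacoin
// input in this transaction, so sortedUnique fails its max check and
// validCoveredFields returns an error before any signature is examined.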
txn.TransactionSignatures[0].CoveredFields.SiacoinInputs = []uint64{33} err = txn.validSignatures(10) if err == nil { t.Error("failed to flunk the validCoveredFields check") } txn.TransactionSignatures[0].CoveredFields.SiacoinInputs = nil // Double spend a SiacoinInput, FileContractTermination, and SiafundInput. txn.SiacoinInputs = append(txn.SiacoinInputs, SiacoinInput{UnlockConditions: UnlockConditions{}}) err = txn.validSignatures(10) if err == nil { t.Error("failed to double spend a siacoin input") } txn.SiacoinInputs = txn.SiacoinInputs[:len(txn.SiacoinInputs)-1] txn.FileContractRevisions = append(txn.FileContractRevisions, FileContractRevision{UnlockConditions: UnlockConditions{}}) err = txn.validSignatures(10) if err == nil { t.Error("failed to double spend a file contract termination") } txn.FileContractRevisions = txn.FileContractRevisions[:len(txn.FileContractRevisions)-1] txn.SiafundInputs = append(txn.SiafundInputs, SiafundInput{UnlockConditions: UnlockConditions{}}) err = txn.validSignatures(10) if err == nil { t.Error("failed to double spend a siafund input") } txn.SiafundInputs = txn.SiafundInputs[:len(txn.SiafundInputs)-1] // Add a frivolous signature txn.TransactionSignatures = append(txn.TransactionSignatures, TransactionSignature{}) err = txn.validSignatures(10) if err != ErrFrivolousSignature { t.Error(err) } txn.TransactionSignatures = txn.TransactionSignatures[:len(txn.TransactionSignatures)-1] // Replace one of the cryptography signatures with an always-accepted // signature. This should get rejected because the always-accepted // signature has already been used. tmpTxn0 := txn.TransactionSignatures[0] txn.TransactionSignatures[0] = TransactionSignature{PublicKeyIndex: 1} err = txn.validSignatures(10) if err != ErrPublicKeyOveruse { t.Error(err) } txn.TransactionSignatures[0] = tmpTxn0 // Fail the timelock check for signatures. err = txn.validSignatures(4) if err != ErrPrematureSignature { t.Error(err) } // Try to spend an entropy signature. txn.TransactionSignatures[0] = TransactionSignature{PublicKeyIndex: 2} err = txn.validSignatures(10) if err != ErrEntropyKey { t.Error(err) } txn.TransactionSignatures[0] = tmpTxn0 // Try to point to a nonexistent public key. txn.TransactionSignatures[0] = TransactionSignature{PublicKeyIndex: 5} err = txn.validSignatures(10) if err != ErrInvalidPubKeyIndex { t.Error(err) } txn.TransactionSignatures[0] = tmpTxn0 // Insert a malformed public key into the transaction. txn.SiacoinInputs[0].UnlockConditions.PublicKeys[0].Key = []byte{'b', 'a', 'd'} err = txn.validSignatures(10) if err == nil { t.Error(err) } txn.SiacoinInputs[0].UnlockConditions.PublicKeys[0].Key = pk[:] // Insert a malformed signature into the transaction. txn.TransactionSignatures[0].Signature = []byte{'m', 'a', 'l'} err = txn.validSignatures(10) if err == nil { t.Error(err) } txn.TransactionSignatures[0] = tmpTxn0 // Try to spend a transaction when not every required signature is // available. txn.TransactionSignatures = txn.TransactionSignatures[1:] err = txn.validSignatures(10) if err != ErrMissingSignatures { t.Error(err) } } Sia-1.3.0/types/target.go000066400000000000000000000063101313565667000152240ustar00rootroot00000000000000package types // target.go defines the target type and implements a few helper functions for // manipulating the target type. 
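// As a rough guide to the arithmetic below (a gloss, derived from the
// Difficulty method that follows): the difficulty of a target t is
// RootDepth / t using integer division, so smaller targets are harder to
// hit, and halving a target roughly doubles its difficulty.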
import ( "errors" "math/big" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" ) type ( // A Target is a hash that a block's ID must be "less than" in order for // the block to be considered valid. Miners vary the block's 'Nonce' field // in order to brute-force such an ID. The inverse of a Target is called // the "difficulty," because it is proportional to the amount of time // required to brute-force the Target. Target crypto.Hash ) var ( ErrNegativeTarget = errors.New("negative value used when converting to target") ) // AddDifficulties returns the resulting target with the difficulty of 'x' and // 'y' are added together. Note that the difficulty is the inverse of the // target. The sum is defined by: // sum(x, y) = 1/(1/x + 1/y) func (x Target) AddDifficulties(y Target) (t Target) { sumDifficulty := new(big.Rat).Add(x.Inverse(), y.Inverse()) return RatToTarget(new(big.Rat).Inv(sumDifficulty)) } // Cmp compares the difficulties of two targets. Note that the difficulty is // the inverse of the target. The results are as follows: // -1 if x < y // 0 if x == y // +1 if x > y func (x Target) Cmp(y Target) int { return x.Int().Cmp(y.Int()) } // Difficulty returns the difficulty associated with a given target. func (t Target) Difficulty() Currency { if t == (Target{}) { return NewCurrency(RootDepth.Int()) } return NewCurrency(new(big.Int).Div(RootDepth.Int(), t.Int())) } // Int converts a Target to a big.Int. func (t Target) Int() *big.Int { return new(big.Int).SetBytes(t[:]) } // IntToTarget converts a big.Int to a Target. Negative inputs trigger a panic. func IntToTarget(i *big.Int) (t Target) { // Check for negatives. if i.Sign() < 0 { if build.DEBUG { panic(ErrNegativeTarget) } } else { // In the event of overflow, return the maximum. if i.BitLen() > 256 { return RootDepth } b := i.Bytes() offset := len(t[:]) - len(b) copy(t[offset:], b) } return } // Inverse returns the inverse of a Target as a big.Rat func (t Target) Inverse() *big.Rat { return new(big.Rat).Inv(t.Rat()) } // Mul multiplies the difficulty of a target by y. The product is defined by: // y / x func (x Target) MulDifficulty(y *big.Rat) (t Target) { product := new(big.Rat).Mul(y, x.Inverse()) product = product.Inv(product) return RatToTarget(product) } // Rat converts a Target to a big.Rat. func (t Target) Rat() *big.Rat { return new(big.Rat).SetInt(t.Int()) } // RatToTarget converts a big.Rat to a Target. func RatToTarget(r *big.Rat) (t Target) { if r.Num().Sign() < 0 { if build.DEBUG { panic(ErrNegativeTarget) } } else { i := new(big.Int).Div(r.Num(), r.Denom()) t = IntToTarget(i) } return } // SubtractDifficulties returns the resulting target with the difficulty of 'x' // is subtracted from the target with difficulty 'y'. Note that the difficulty // is the inverse of the target. The difference is defined by: // sum(x, y) = 1/(1/x - 1/y) func (x Target) SubtractDifficulties(y Target) (t Target) { sumDifficulty := new(big.Rat).Sub(x.Inverse(), y.Inverse()) return RatToTarget(new(big.Rat).Inv(sumDifficulty)) } Sia-1.3.0/types/target_test.go000066400000000000000000000123771313565667000162750ustar00rootroot00000000000000package types import ( "math/big" "testing" "github.com/NebulousLabs/Sia/crypto" ) // TestTargetAdd probes the Add function of the target type. 
func TestTargetAdd(t *testing.T) { var target3, target5, target10 Target target3[crypto.HashSize-1] = 3 target5[crypto.HashSize-1] = 5 target10[crypto.HashSize-1] = 10 expect5 := target10.AddDifficulties(target10) if expect5 != target5 { t.Error("Target.Add not working as expected") } expect3 := target10.AddDifficulties(target5) if expect3 != target3 { t.Error("Target.Add not working as expected") } } // TestTargetCmp probes the Cmp function of the target type. func TestTargetCmp(t *testing.T) { var target1, target2 Target target1[crypto.HashSize-1] = 1 target2[crypto.HashSize-1] = 2 if target1.Cmp(target2) != -1 { t.Error("Target.Cmp not behaving as expected") } if target2.Cmp(target2) != 0 { t.Error("Target.Cmp not behaving as expected") } if target2.Cmp(target1) != 1 { t.Error("Target.Cmp not behaving as expected") } } // TestTargetDifficulty probes the Difficulty function of the target type. func TestTargetDifficulty(t *testing.T) { var target1, target2, target3 Target target2[crypto.HashSize-1] = 1 target3[crypto.HashSize-1] = 2 expDifficulty1 := NewCurrency(RootDepth.Int()) expDifficulty2 := NewCurrency(RootDepth.Int()) expDifficulty3 := NewCurrency(RootDepth.Int()).Div(NewCurrency64(2)) if difficulty := target1.Difficulty(); difficulty.Cmp(expDifficulty1) != 0 { t.Errorf("Expected difficulty %v, got %v", expDifficulty1, difficulty) } if difficulty := target2.Difficulty(); difficulty.Cmp(expDifficulty2) != 0 { t.Errorf("Expected difficulty %v, got %v", expDifficulty2, difficulty) } if difficulty := target3.Difficulty(); difficulty.Cmp(expDifficulty3) != 0 { t.Errorf("Expected difficulty %v, got %v", expDifficulty3, difficulty) } } // TestTargetInt probes the Int function of the target type. func TestTargetInt(t *testing.T) { var target Target target[crypto.HashSize-1] = 1 b := target.Int() if b.Cmp(big.NewInt(1)) != 0 { t.Error("Target.Int did not work correctly") } } // TestTargetIntToTarget probes the IntToTarget function of the target type. func TestTargetIntToTarget(t *testing.T) { var target Target target[crypto.HashSize-1] = 5 b := big.NewInt(5) if IntToTarget(b) != target { t.Error("IntToTarget not working as expected") } } // TestTargetInverse probes the Inverse function of the target type. func TestTargetInverse(t *testing.T) { var target Target target[crypto.HashSize-1] = 2 r := target.Inverse() if r.Num().Cmp(big.NewInt(1)) != 0 { t.Error("Target.Rat did not work as expected") } if r.Denom().Cmp(big.NewInt(2)) != 0 { t.Error("Target.Rat did not work as expected") } } // TestTargetMul probes the Mul function of the target type. func TestTargetMul(t *testing.T) { var target2, target6, target10, target14, target20 Target target2[crypto.HashSize-1] = 2 target6[crypto.HashSize-1] = 6 target10[crypto.HashSize-1] = 10 target14[crypto.HashSize-1] = 14 target20[crypto.HashSize-1] = 20 // Multiplying the difficulty of a target at '10' by 5 will yield a target // of '2'. Similar math follows for the remaining checks. 
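// Working through the remaining cases, the new target is the old target
// divided by the difficulty multiplier: 10/(3/2) = 20/3 truncates to 6,
// 10/(7/10) = 100/7 truncates to 14, and 10/(1/2) = 20 exactly.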
expect2 := target10.MulDifficulty(big.NewRat(5, 1)) if expect2 != target2 { t.Error(expect2) t.Error(target2) t.Error("Target.Mul did not work as expected") } expect6 := target10.MulDifficulty(big.NewRat(3, 2)) if expect6 != target6 { t.Error("Target.Mul did not work as expected") } expect14 := target10.MulDifficulty(big.NewRat(7, 10)) if expect14 != target14 { t.Error("Target.Mul did not work as expected") } expect20 := target10.MulDifficulty(big.NewRat(1, 2)) if expect20 != target20 { t.Error("Target.Mul did not work as expected") } } // TestTargetRat probes the Rat function of the target type. func TestTargetRat(t *testing.T) { var target Target target[crypto.HashSize-1] = 3 r := target.Rat() if r.Num().Cmp(big.NewInt(3)) != 0 { t.Error("Target.Rat did not work as expected") } if r.Denom().Cmp(big.NewInt(1)) != 0 { t.Error("Target.Rat did not work as expected") } } // TestTargetOverflow checks that IntToTarget will return a maximum target if // there is an overflow. func TestTargetOverflow(t *testing.T) { largeInt := new(big.Int).Lsh(big.NewInt(1), 260) expectRoot := IntToTarget(largeInt) if expectRoot != RootDepth { t.Error("IntToTarget does not properly handle overflows") } } // TestTargetNegativeIntToTarget tries to create a negative target using // IntToTarget. func TestTargetNegativeIntToTarget(t *testing.T) { if testing.Short() { t.SkipNow() } // In debug mode, attempting to create a negative target should trigger a // panic. defer func() { r := recover() if r != ErrNegativeTarget { t.Error("no panic occurred when trying to create a negative target") } }() b := big.NewInt(-3) _ = IntToTarget(b) } // TestTargetNegativeRatToTarget tries to create a negative target using // RatToTarget. func TestTargetNegativeRatToTarget(t *testing.T) { if testing.Short() { t.SkipNow() } // In debug mode, attempting to create a negative target should trigger a // panic. defer func() { r := recover() if r != ErrNegativeTarget { t.Error("no panic occurred when trying to create a negative target") } }() r := big.NewRat(3, -5) _ = RatToTarget(r) } Sia-1.3.0/types/timestamp.go000066400000000000000000000017311313565667000157430ustar00rootroot00000000000000package types // timestamp.go defines the timestamp type and implements sort.Interface // interface for slices of timestamps. import ( "time" ) type ( Timestamp uint64 TimestampSlice []Timestamp ) // CurrentTimestamp returns the current time as a Timestamp. func CurrentTimestamp() Timestamp { return Timestamp(time.Now().Unix()) } // Len is part of sort.Interface func (ts TimestampSlice) Len() int { return len(ts) } // Less is part of sort.Interface func (ts TimestampSlice) Less(i, j int) bool { return ts[i] < ts[j] } // Swap is part of sort.Interface func (ts TimestampSlice) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] } // Clock allows clients to retrieve the current time. type Clock interface { Now() Timestamp } // StdClock is an implementation of Clock that retrieves the current time using // the system time. type StdClock struct{} // Now retrieves the current timestamp. func (c StdClock) Now() Timestamp { return Timestamp(time.Now().Unix()) } Sia-1.3.0/types/timestamp_test.go000066400000000000000000000010341313565667000167760ustar00rootroot00000000000000package types import ( "sort" "testing" ) // TestTimestampSorting verifies that using sort.Sort accurately sorts // timestamps. 
func TestTimestampSorting(t *testing.T) { ts := TimestampSlice{ CurrentTimestamp(), CurrentTimestamp() - 5, CurrentTimestamp() + 5, CurrentTimestamp() + 12, CurrentTimestamp() - 3, CurrentTimestamp() - 25, } sort.Sort(ts) currentTime := ts[0] for _, timestamp := range ts { if timestamp < currentTime { t.Error("timestamp slice not properly sorted") } currentTime = timestamp } } Sia-1.3.0/types/transaction_helpers.go000066400000000000000000000061361313565667000200130ustar00rootroot00000000000000// +build testing package types import ( "errors" ) // TransactionGraphEdge defines an edge in a TransactionGraph, containing a // source transaction, a destination transaction, a value, and a miner fee. type TransactionGraphEdge struct { Dest int Fee Currency Source int Value Currency } // TransactionGraph will return a set of valid transactions that all spend // outputs according to the input graph. Each [source, dest] pair defines an // edge of the graph. The graph must be fully connected and the grandparent of // the graph must be the sourceOutput. '0' refers to an edge from the source // output. Each edge also specifies a value for the output, and an amount of // fees. If the fees are zero, no fees will be added for that edge. 'sources' // must be sorted. // // Example of acceptable input: // // sourceOutput: // a valid siacoin output spending to UnlockConditions{}.UnlockHash() // // Sources: [0, 0, 1, 2, 3, 3, 3, 4] // Dests: [1, 2, 3, 3, 4, 4, 5, 6] // // Resulting Graph: // // o // / \ // o o // \ / // o // /|\ // \| \ // o x // 'x' transactions are symbolic, not actually created // | // x // func TransactionGraph(sourceOutput SiacoinOutputID, edges []TransactionGraphEdge) ([]Transaction, error) { // Basic input validation. if len(edges) < 1 { return nil, errors.New("no graph specified") } // Check that the first value of 'sources' is zero, and that the rest of the // array is sorted. if edges[0].Source != 0 { return nil, errors.New("first edge must specify node 0 as the parent") } if edges[0].Dest != 1 { return nil, errors.New("first edge must specify node 1 as the child") } latest := edges[0].Source for _, edge := range edges { if edge.Source < latest { return nil, errors.New("'sources' input is not sorted") } latest = edge.Source } // Create the set of output ids, and fill out the input ids for the source // transaction. biggest := 0 for _, edge := range edges { if edge.Dest > biggest { biggest = edge.Dest } } txnInputs := make([][]SiacoinOutputID, biggest+1) txnInputs[0] = []SiacoinOutputID{sourceOutput} // Go through the nodes bit by bit and create outputs. // Fill out the outputs for the source. i, j := 0, 0 ts := make([]Transaction, edges[len(edges)-1].Source+1) for i < len(edges) { var t Transaction // Grab the inputs for this transaction. for _, outputID := range txnInputs[j] { t.SiacoinInputs = append(t.SiacoinInputs, SiacoinInput{ ParentID: outputID, }) } // Grab the outputs for this transaction. startingPoint := i current := edges[i].Source for i < len(edges) && edges[i].Source == current { t.SiacoinOutputs = append(t.SiacoinOutputs, SiacoinOutput{ Value: edges[i].Value, UnlockHash: UnlockConditions{}.UnlockHash(), }) if !edges[i].Fee.IsZero() { t.MinerFees = append(t.MinerFees, edges[i].Fee) } i++ } // Record the inputs for the next transactions. 
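// Each edge k in [startingPoint, i) produced output index k-startingPoint of
// this transaction, so its SiacoinOutputID is queued as an input for the
// destination node edges[k].Dest.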
for k := startingPoint; k < i; k++ { txnInputs[edges[k].Dest] = append(txnInputs[edges[k].Dest], t.SiacoinOutputID(uint64(k-startingPoint))) } ts[j] = t j++ } return ts, nil } Sia-1.3.0/types/transactions.go000066400000000000000000000272631313565667000164560ustar00rootroot00000000000000package types // transaction.go defines the transaction type and all of the sub-fields of the // transaction, as well as providing helper functions for working with // transactions. The various IDs are designed such that, in a legal blockchain, // it is cryptographically unlikely that any two objects would share an id. import ( "errors" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" ) const ( SpecifierLen = 16 // UnlockHashChecksumSize is the size of the checksum used to verify // human-readable addresses. It is not a cryptographically secure // checksum, it's merely intended to prevent typos. 6 is chosen because it // brings the total size of the address to 38 bytes, leaving 2 bytes for // potential version additions in the future. UnlockHashChecksumSize = 6 ) // These Specifiers are used internally when calculating a type's ID. See // Specifier for more details. var ( SpecifierMinerPayout = Specifier{'m', 'i', 'n', 'e', 'r', ' ', 'p', 'a', 'y', 'o', 'u', 't'} SpecifierSiacoinInput = Specifier{'s', 'i', 'a', 'c', 'o', 'i', 'n', ' ', 'i', 'n', 'p', 'u', 't'} SpecifierSiacoinOutput = Specifier{'s', 'i', 'a', 'c', 'o', 'i', 'n', ' ', 'o', 'u', 't', 'p', 'u', 't'} SpecifierFileContract = Specifier{'f', 'i', 'l', 'e', ' ', 'c', 'o', 'n', 't', 'r', 'a', 'c', 't'} SpecifierFileContractRevision = Specifier{'f', 'i', 'l', 'e', ' ', 'c', 'o', 'n', 't', 'r', 'a', 'c', 't', ' ', 'r', 'e'} SpecifierStorageProof = Specifier{'s', 't', 'o', 'r', 'a', 'g', 'e', ' ', 'p', 'r', 'o', 'o', 'f'} SpecifierStorageProofOutput = Specifier{'s', 't', 'o', 'r', 'a', 'g', 'e', ' ', 'p', 'r', 'o', 'o', 'f'} SpecifierSiafundInput = Specifier{'s', 'i', 'a', 'f', 'u', 'n', 'd', ' ', 'i', 'n', 'p', 'u', 't'} SpecifierSiafundOutput = Specifier{'s', 'i', 'a', 'f', 'u', 'n', 'd', ' ', 'o', 'u', 't', 'p', 'u', 't'} SpecifierClaimOutput = Specifier{'c', 'l', 'a', 'i', 'm', ' ', 'o', 'u', 't', 'p', 'u', 't'} SpecifierMinerFee = Specifier{'m', 'i', 'n', 'e', 'r', ' ', 'f', 'e', 'e'} ErrTransactionIDWrongLen = errors.New("input has wrong length to be an encoded transaction id") ) type ( // A Specifier is a fixed-length byte-array that serves two purposes. In // the wire protocol, they are used to identify a particular encoding // algorithm, signature algorithm, etc. This allows nodes to communicate on // their own terms; for example, to reduce bandwidth costs, a node might // only accept compressed messages. // // Internally, Specifiers are used to guarantee unique IDs. Various // consensus types have an associated ID, calculated by hashing the data // contained in the type. By prepending the data with Specifier, we can // guarantee that distinct types will never produce the same hash. Specifier [SpecifierLen]byte // IDs are used to refer to a type without revealing its contents. They // are constructed by hashing specific fields of the type, along with a // Specifier. While all of these types are hashes, defining type aliases // gives us type safety and makes the code more readable. TransactionID crypto.Hash SiacoinOutputID crypto.Hash SiafundOutputID crypto.Hash FileContractID crypto.Hash OutputID crypto.Hash // A Transaction is an atomic component of a block. 
Transactions can contain // inputs and outputs, file contracts, storage proofs, and even arbitrary // data. They can also contain signatures to prove that a given party has // approved the transaction, or at least a particular subset of it. // // Transactions can depend on other previous transactions in the same block, // but transactions cannot spend outputs that they create or otherwise be // self-dependent. Transaction struct { SiacoinInputs []SiacoinInput `json:"siacoininputs"` SiacoinOutputs []SiacoinOutput `json:"siacoinoutputs"` FileContracts []FileContract `json:"filecontracts"` FileContractRevisions []FileContractRevision `json:"filecontractrevisions"` StorageProofs []StorageProof `json:"storageproofs"` SiafundInputs []SiafundInput `json:"siafundinputs"` SiafundOutputs []SiafundOutput `json:"siafundoutputs"` MinerFees []Currency `json:"minerfees"` ArbitraryData [][]byte `json:"arbitrarydata"` TransactionSignatures []TransactionSignature `json:"transactionsignatures"` } // A SiacoinInput consumes a SiacoinOutput and adds the siacoins to the set of // siacoins that can be spent in the transaction. The ParentID points to the // output that is getting consumed, and the UnlockConditions contain the rules // for spending the output. The UnlockConditions must match the UnlockHash of // the output. SiacoinInput struct { ParentID SiacoinOutputID `json:"parentid"` UnlockConditions UnlockConditions `json:"unlockconditions"` } // A SiacoinOutput holds a volume of siacoins. Outputs must be spent // atomically; that is, they must all be spent in the same transaction. The // UnlockHash is the hash of the UnlockConditions that must be fulfilled // in order to spend the output. SiacoinOutput struct { Value Currency `json:"value"` UnlockHash UnlockHash `json:"unlockhash"` } // A SiafundInput consumes a SiafundOutput and adds the siafunds to the set of // siafunds that can be spent in the transaction. The ParentID points to the // output that is getting consumed, and the UnlockConditions contain the rules // for spending the output. The UnlockConditions must match the UnlockHash of // the output. SiafundInput struct { ParentID SiafundOutputID `json:"parentid"` UnlockConditions UnlockConditions `json:"unlockconditions"` ClaimUnlockHash UnlockHash `json:"claimunlockhash"` } // A SiafundOutput holds a volume of siafunds. Outputs must be spent // atomically; that is, they must all be spent in the same transaction. The // UnlockHash is the hash of a set of UnlockConditions that must be fulfilled // in order to spend the output. // // When the SiafundOutput is spent, a SiacoinOutput is created, where: // // SiacoinOutput.Value := (SiafundPool - ClaimStart) / 10,000 // SiacoinOutput.UnlockHash := SiafundOutput.ClaimUnlockHash // // When a SiafundOutput is put into a transaction, the ClaimStart must always // equal zero. While the transaction is being processed, the ClaimStart is set // to the value of the SiafundPool. SiafundOutput struct { Value Currency `json:"value"` UnlockHash UnlockHash `json:"unlockhash"` ClaimStart Currency `json:"claimstart"` } // An UnlockHash is a specially constructed hash of the UnlockConditions type. // "Locked" values can be unlocked by providing the UnlockConditions that hash // to a given UnlockHash. See UnlockConditions.UnlockHash for details on how the // UnlockHash is constructed. UnlockHash crypto.Hash ) // ID returns the id of a transaction, which is taken by marshalling all of the // fields except for the signatures and taking the hash of the result. 
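// // A minimal illustrative sketch (the values here are hypothetical): // // txn := Transaction{SiacoinOutputs: []SiacoinOutput{{Value: NewCurrency64(1)}}} // tid := txn.ID() // hash over every field except TransactionSignatures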
func (t Transaction) ID() TransactionID { // Get the transaction id by hashing all data minus the signatures. var txid TransactionID h := crypto.NewHash() t.marshalSiaNoSignatures(h) h.Sum(txid[:0]) // Sanity check in debug builds to make sure that the ids are going to be // the same. if build.DEBUG { verify := TransactionID(crypto.HashAll( t.SiacoinInputs, t.SiacoinOutputs, t.FileContracts, t.FileContractRevisions, t.StorageProofs, t.SiafundInputs, t.SiafundOutputs, t.MinerFees, t.ArbitraryData, )) if verify != txid { panic("TransactionID is not marshalling correctly") } } return txid } // SiacoinOutputID returns the ID of a siacoin output at the given index, // which is calculated by hashing the concatenation of the SiacoinOutput // Specifier, all of the fields in the transaction (except the signatures), // and output index. func (t Transaction) SiacoinOutputID(i uint64) SiacoinOutputID { // Create the id. var id SiacoinOutputID h := crypto.NewHash() h.Write(SpecifierSiacoinOutput[:]) t.marshalSiaNoSignatures(h) // Encode non-signature fields into hash. encoding.WriteUint64(h, i) // Writes index of this output. h.Sum(id[:0]) // Sanity check - verify that the optimized code is always returning the // same ids as the unoptimized code. if build.DEBUG { verificationID := SiacoinOutputID(crypto.HashAll( SpecifierSiacoinOutput, t.SiacoinInputs, t.SiacoinOutputs, t.FileContracts, t.FileContractRevisions, t.StorageProofs, t.SiafundInputs, t.SiafundOutputs, t.MinerFees, t.ArbitraryData, i, )) if id != verificationID { panic("SiacoinOutputID is not marshalling correctly") } } return id } // FileContractID returns the ID of a file contract at the given index, which // is calculated by hashing the concatenation of the FileContract Specifier, // all of the fields in the transaction (except the signatures), and the // contract index. func (t Transaction) FileContractID(i uint64) FileContractID { var id FileContractID h := crypto.NewHash() h.Write(SpecifierFileContract[:]) t.marshalSiaNoSignatures(h) // Encode non-signature fields into hash. encoding.WriteUint64(h, i) // Writes index of this output. h.Sum(id[:0]) // Sanity check - verify that the optimized code is always returning the // same ids as the unoptimized code. if build.DEBUG { verificationID := FileContractID(crypto.HashAll( SpecifierFileContract, t.SiacoinInputs, t.SiacoinOutputs, t.FileContracts, t.FileContractRevisions, t.StorageProofs, t.SiafundInputs, t.SiafundOutputs, t.MinerFees, t.ArbitraryData, i, )) if id != verificationID { panic("FileContractID is not marshalling correctly") } } return id } // SiafundOutputID returns the ID of a SiafundOutput at the given index, which // is calculated by hashing the concatenation of the SiafundOutput Specifier, // all of the fields in the transaction (except the signatures), and output // index. func (t Transaction) SiafundOutputID(i uint64) SiafundOutputID { var id SiafundOutputID h := crypto.NewHash() h.Write(SpecifierSiafundOutput[:]) t.marshalSiaNoSignatures(h) // Encode non-signature fields into hash. encoding.WriteUint64(h, i) // Writes index of this output. h.Sum(id[:0]) // Sanity check - verify that the optimized code is always returning the // same ids as the unoptimized code. 
if build.DEBUG { verificationID := SiafundOutputID(crypto.HashAll( SpecifierSiafundOutput, t.SiacoinInputs, t.SiacoinOutputs, t.FileContracts, t.FileContractRevisions, t.StorageProofs, t.SiafundInputs, t.SiafundOutputs, t.MinerFees, t.ArbitraryData, i, )) if id != verificationID { panic("SiafundOutputID is not marshalling correctly") } } return id } // SiacoinOutputSum returns the sum of all the siacoin outputs in the // transaction, which must match the sum of all the siacoin inputs. Siacoin // outputs created by storage proofs and siafund outputs are not considered, as // they were considered when the contract responsible for funding them was // created. func (t Transaction) SiacoinOutputSum() (sum Currency) { // Add the siacoin outputs. for _, sco := range t.SiacoinOutputs { sum = sum.Add(sco.Value) } // Add the file contract payouts. for _, fc := range t.FileContracts { sum = sum.Add(fc.Payout) } // Add the miner fees. for _, fee := range t.MinerFees { sum = sum.Add(fee) } return } // SiaClaimOutputID returns the ID of the SiacoinOutput that is created when // the siafund output is spent. The ID is the hash of the SiafundOutputID. func (id SiafundOutputID) SiaClaimOutputID() SiacoinOutputID { return SiacoinOutputID(crypto.HashObject(id)) } Sia-1.3.0/types/transactions_test.go000066400000000000000000000033311313565667000175060ustar00rootroot00000000000000package types import ( "testing" "github.com/NebulousLabs/Sia/crypto" ) // TestIDs probes all of the ID functions of the Transaction type. func TestIDs(t *testing.T) { // Create every type of ID using empty fields. txn := Transaction{ SiacoinOutputs: []SiacoinOutput{{}}, FileContracts: []FileContract{{}}, SiafundOutputs: []SiafundOutput{{}}, } tid := txn.ID() scoid := txn.SiacoinOutputID(0) fcid := txn.FileContractID(0) spidT := fcid.StorageProofOutputID(ProofValid, 0) spidF := fcid.StorageProofOutputID(ProofMissed, 0) sfoid := txn.SiafundOutputID(0) scloid := sfoid.SiaClaimOutputID() // Put all of the ids into a slice. var ids []crypto.Hash ids = append(ids, crypto.Hash(tid), crypto.Hash(scoid), crypto.Hash(fcid), crypto.Hash(spidT), crypto.Hash(spidF), crypto.Hash(sfoid), crypto.Hash(scloid), ) // Check that each id is unique. knownIDs := make(map[crypto.Hash]struct{}) for i, id := range ids { _, exists := knownIDs[id] if exists { t.Error("id repeat for index", i) } knownIDs[id] = struct{}{} } } // TestTransactionSiacoinOutputSum probes the SiacoinOutputSum method of the // Transaction type. func TestTransactionSiacoinOutputSum(t *testing.T) { // Create a transaction with all types of siacoin outputs. txn := Transaction{ SiacoinOutputs: []SiacoinOutput{ {Value: NewCurrency64(1)}, {Value: NewCurrency64(20)}, }, FileContracts: []FileContract{ {Payout: NewCurrency64(300)}, {Payout: NewCurrency64(4000)}, }, MinerFees: []Currency{ NewCurrency64(50000), NewCurrency64(600000), }, } if txn.SiacoinOutputSum().Cmp(NewCurrency64(654321)) != 0 { t.Error("wrong siacoin output sum was calculated, got:", txn.SiacoinOutputSum()) } } Sia-1.3.0/types/validtransaction.go000066400000000000000000000235311313565667000173070ustar00rootroot00000000000000package types // validtransaction.go has functions for checking whether a transaction is // valid outside of the context of a consensus set. This means checking the // size of the transaction, the content of the signatures, and a large set of // other rules that are inherent to how a transaction should be constructed.
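// // A typical standalone check, sketched (currentHeight is assumed to come from // the caller's view of the chain): // // if err := txn.StandaloneValid(currentHeight); err != nil { // // reject the transaction // }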
import ( "errors" ) var ( ErrDoubleSpend = errors.New("transaction uses a parent object twice") ErrFileContractWindowEndViolation = errors.New("file contract window must end at least one block after it starts") ErrFileContractWindowStartViolation = errors.New("file contract window must start in the future") ErrFileContractOutputSumViolation = errors.New("file contract has invalid output sums") ErrNonZeroClaimStart = errors.New("transaction has a siafund output with a non-zero siafund claim") ErrNonZeroRevision = errors.New("new file contract has a nonzero revision number") ErrStorageProofWithOutputs = errors.New("transaction has both a storage proof and other outputs") ErrTimelockNotSatisfied = errors.New("timelock has not been met") ErrTransactionTooLarge = errors.New("transaction is too large to fit in a block") ErrZeroMinerFee = errors.New("transaction has a zero value miner fee") ErrZeroOutput = errors.New("transaction cannot have an output or payout that has zero value") ErrZeroRevision = errors.New("transaction has a file contract revision with RevisionNumber=0") ) // correctFileContracts checks that the file contracts adhere to the file // contract rules. func (t Transaction) correctFileContracts(currentHeight BlockHeight) error { // Check that FileContract rules are being followed. for _, fc := range t.FileContracts { // Check that start and expiration are reasonable values. if fc.WindowStart <= currentHeight { return ErrFileContractWindowStartViolation } if fc.WindowEnd <= fc.WindowStart { return ErrFileContractWindowEndViolation } // Check that the proof outputs sum to the payout after the // siafund fee has been applied. var validProofOutputSum, missedProofOutputSum Currency for _, output := range fc.ValidProofOutputs { /* - Future hardforking code. if output.Value.IsZero() { return ErrZeroOutput } */ validProofOutputSum = validProofOutputSum.Add(output.Value) } for _, output := range fc.MissedProofOutputs { /* - Future hardforking code. if output.Value.IsZero() { return ErrZeroOutput } */ missedProofOutputSum = missedProofOutputSum.Add(output.Value) } outputPortion := PostTax(currentHeight, fc.Payout) if validProofOutputSum.Cmp(outputPortion) != 0 { return ErrFileContractOutputSumViolation } if missedProofOutputSum.Cmp(outputPortion) != 0 { return ErrFileContractOutputSumViolation } } return nil } // correctFileContractRevisions checks that any file contract revisions adhere // to the revision rules. func (t Transaction) correctFileContractRevisions(currentHeight BlockHeight) error { for _, fcr := range t.FileContractRevisions { // Check that start and expiration are reasonable values. if fcr.NewWindowStart <= currentHeight { return ErrFileContractWindowStartViolation } if fcr.NewWindowEnd <= fcr.NewWindowStart { return ErrFileContractWindowEndViolation } // Check that the valid outputs and missed outputs sum to the same // value. var validProofOutputSum, missedProofOutputSum Currency for _, output := range fcr.NewValidProofOutputs { /* - Future hardforking code. if output.Value.IsZero() { return ErrZeroOutput } */ validProofOutputSum = validProofOutputSum.Add(output.Value) } for _, output := range fcr.NewMissedProofOutputs { /* - Future hardforking code. if output.Value.IsZero() { return ErrZeroOutput } */ missedProofOutputSum = missedProofOutputSum.Add(output.Value) } if validProofOutputSum.Cmp(missedProofOutputSum) != 0 { return ErrFileContractOutputSumViolation } } return nil } // fitsInABlock checks if the transaction is likely to fit in a block. 
After // OakHardforkBlock, transactions must be no larger than OakHardforkTxnSizeLimit. func (t Transaction) fitsInABlock(currentHeight BlockHeight) error { // Check that the transaction will fit inside of a block, leaving 5kb for // overhead. size := uint64(t.MarshalSiaSize()) if size > BlockSizeLimit-5e3 { return ErrTransactionTooLarge } if currentHeight >= OakHardforkBlock { if size > OakHardforkTxnSizeLimit { return ErrTransactionTooLarge } } return nil } // followsMinimumValues checks that all outputs adhere to the rules for the // minimum allowed value (generally 1). func (t Transaction) followsMinimumValues() error { for _, sco := range t.SiacoinOutputs { if sco.Value.IsZero() { return ErrZeroOutput } } for _, fc := range t.FileContracts { if fc.Payout.IsZero() { return ErrZeroOutput } } for _, sfo := range t.SiafundOutputs { // SiafundOutputs are special in that they have a reserved field, the // ClaimStart, which gets sent over the wire but must always be set to // 0. The Value must always be greater than 0. if !sfo.ClaimStart.IsZero() { return ErrNonZeroClaimStart } if sfo.Value.IsZero() { return ErrZeroOutput } } for _, fee := range t.MinerFees { if fee.IsZero() { return ErrZeroMinerFee } } return nil } // followsStorageProofRules checks that a transaction follows the limitations // placed on transactions that have storage proofs. func (t Transaction) followsStorageProofRules() error { // No storage proofs, no problems. if len(t.StorageProofs) == 0 { return nil } // If there are storage proofs, there can be no siacoin outputs, siafund // outputs, new file contracts, or file contract terminations. These // restrictions are in place because a storage proof can be invalidated by // a simple reorg, which will also invalidate the rest of the transaction. // These restrictions minimize blockchain turbulence. These other types // cannot be invalidated by a simple reorg, and must instead be replaced by // a conflicting transaction. if len(t.SiacoinOutputs) != 0 { return ErrStorageProofWithOutputs } if len(t.FileContracts) != 0 { return ErrStorageProofWithOutputs } if len(t.FileContractRevisions) != 0 { return ErrStorageProofWithOutputs } if len(t.SiafundOutputs) != 0 { return ErrStorageProofWithOutputs } return nil } // noRepeats checks that a transaction does not spend the same output twice, // submit two valid storage proofs for the same file contract, etc. We // frivolously check that a file contract termination and storage proof don't // act on the same file contract. There is very little overhead for doing so, // and the check is only frivolous because of the current rule that file // contract terminations are not valid after the proof window opens. func (t Transaction) noRepeats() error { // Check that there are no repeat instances of siacoin outputs, storage // proofs, contract terminations, or siafund outputs.
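// A map keyed by parent ID gives constant-time duplicate detection within each category below.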
siacoinInputs := make(map[SiacoinOutputID]struct{}) for _, sci := range t.SiacoinInputs { _, exists := siacoinInputs[sci.ParentID] if exists { return ErrDoubleSpend } siacoinInputs[sci.ParentID] = struct{}{} } doneFileContracts := make(map[FileContractID]struct{}) for _, sp := range t.StorageProofs { _, exists := doneFileContracts[sp.ParentID] if exists { return ErrDoubleSpend } doneFileContracts[sp.ParentID] = struct{}{} } for _, fcr := range t.FileContractRevisions { _, exists := doneFileContracts[fcr.ParentID] if exists { return ErrDoubleSpend } doneFileContracts[fcr.ParentID] = struct{}{} } siafundInputs := make(map[SiafundOutputID]struct{}) for _, sfi := range t.SiafundInputs { _, exists := siafundInputs[sfi.ParentID] if exists { return ErrDoubleSpend } siafundInputs[sfi.ParentID] = struct{}{} } return nil } // validUnlockConditions checks that the conditions of uc have been met. The // height is taken as input so that modules that might be at a different height // can do the verification without needing to use their own function. // Additionally, it means that the function does not need to be a method of the // consensus set. func validUnlockConditions(uc UnlockConditions, currentHeight BlockHeight) (err error) { if uc.Timelock > currentHeight { return ErrTimelockNotSatisfied } return } // validUnlockConditions checks that all of the unlock conditions in the // transaction are valid. func (t Transaction) validUnlockConditions(currentHeight BlockHeight) (err error) { for _, sci := range t.SiacoinInputs { err = validUnlockConditions(sci.UnlockConditions, currentHeight) if err != nil { return } } for _, fcr := range t.FileContractRevisions { err = validUnlockConditions(fcr.UnlockConditions, currentHeight) if err != nil { return } } for _, sfi := range t.SiafundInputs { err = validUnlockConditions(sfi.UnlockConditions, currentHeight) if err != nil { return } } return } // StandaloneValid returns an error if a transaction is not valid in any // context, for example if the same output is spent twice in the same // transaction. StandaloneValid will not check that all outputs being spent are // legal outputs, as it has no confirmed or unconfirmed set to look at.
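// // The checks are ordered so that the cheap structural checks run first; validSignatures, which performs the expensive cryptographic work, runs last.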
func (t Transaction) StandaloneValid(currentHeight BlockHeight) (err error) { err = t.fitsInABlock(currentHeight) if err != nil { return } err = t.followsStorageProofRules() if err != nil { return } err = t.noRepeats() if err != nil { return } err = t.followsMinimumValues() if err != nil { return } err = t.correctFileContracts(currentHeight) if err != nil { return } err = t.correctFileContractRevisions(currentHeight) if err != nil { return } err = t.validUnlockConditions(currentHeight) if err != nil { return } err = t.validSignatures(currentHeight) if err != nil { return } return } Sia-1.3.0/types/validtransaction_bench_test.go000066400000000000000000000026351313565667000215070ustar00rootroot00000000000000package types import ( "testing" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" ) // BenchmarkStandaloneValid times how long it takes to verify a single // large transaction, with a certain number of signatures. func BenchmarkStandaloneValid(b *testing.B) { numSigs := 7 // make a transaction with numSigs valid inputs, each with a valid signature b.ReportAllocs() txn := Transaction{} sk := make([]crypto.SecretKey, numSigs) pk := make([]crypto.PublicKey, numSigs) for i := 0; i < numSigs; i++ { s, p := crypto.GenerateKeyPair() sk[i] = s pk[i] = p uc := UnlockConditions{ PublicKeys: []SiaPublicKey{ {Algorithm: SignatureEd25519, Key: pk[i][:]}, }, SignaturesRequired: 1, } txn.SiacoinInputs = append(txn.SiacoinInputs, SiacoinInput{ UnlockConditions: uc, }) copy(txn.SiacoinInputs[i].ParentID[:], encoding.Marshal(i)) txn.TransactionSignatures = append(txn.TransactionSignatures, TransactionSignature{ CoveredFields: CoveredFields{WholeTransaction: true}, }) copy(txn.TransactionSignatures[i].ParentID[:], encoding.Marshal(i)) } // Transaction must be constructed before signing for i := 0; i < numSigs; i++ { sigHash := txn.SigHash(i) sig0 := crypto.SignHash(sigHash, sk[i]) txn.TransactionSignatures[i].Signature = sig0[:] } b.ResetTimer() for i := 0; i < b.N; i++ { err := txn.StandaloneValid(10) if err != nil { b.Fatal(err) } } } Sia-1.3.0/types/validtransaction_test.go000066400000000000000000000323361313565667000203510ustar00rootroot00000000000000package types import ( "testing" ) // TestTransactionCorrectFileContracts probes the correctFileContracts function // of the Transaction type. func TestTransactionCorrectFileContracts(t *testing.T) { // Try a transaction with a FileContract that is correct. txn := Transaction{ FileContracts: []FileContract{ { WindowStart: 35, WindowEnd: 40, Payout: NewCurrency64(1e6), ValidProofOutputs: []SiacoinOutput{ {Value: NewCurrency64(70e3)}, {Value: NewCurrency64(900e3)}, }, MissedProofOutputs: []SiacoinOutput{ {Value: NewCurrency64(70e3)}, {Value: NewCurrency64(900e3)}, }, }, }, } err := txn.correctFileContracts(30) if err != nil { t.Error(err) } // Try when the start height was missed. err = txn.correctFileContracts(35) if err != ErrFileContractWindowStartViolation { t.Error(err) } err = txn.correctFileContracts(135) if err != ErrFileContractWindowStartViolation { t.Error(err) } // Try when the expiration is equal to, and then less than, the start. txn.FileContracts[0].WindowEnd = 35 err = txn.correctFileContracts(30) if err != ErrFileContractWindowEndViolation { t.Error(err) } txn.FileContracts[0].WindowEnd = 30 err = txn.correctFileContracts(30) if err != ErrFileContractWindowEndViolation { t.Error(err) } txn.FileContracts[0].WindowEnd = 40 // Attempt under and over output sums.
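// The valid and missed outputs above each sum exactly to PostTax(30, fc.Payout), so nudging any single output by 1e3 in either direction must trip ErrFileContractOutputSumViolation.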
txn.FileContracts[0].ValidProofOutputs[0].Value = NewCurrency64(69e3) err = txn.correctFileContracts(30) if err != ErrFileContractOutputSumViolation { t.Error(err) } txn.FileContracts[0].ValidProofOutputs[0].Value = NewCurrency64(71e3) err = txn.correctFileContracts(30) if err != ErrFileContractOutputSumViolation { t.Error(err) } txn.FileContracts[0].ValidProofOutputs[0].Value = NewCurrency64(70e3) txn.FileContracts[0].MissedProofOutputs[0].Value = NewCurrency64(69e3) err = txn.correctFileContracts(30) if err != ErrFileContractOutputSumViolation { t.Error(err) } txn.FileContracts[0].MissedProofOutputs[0].Value = NewCurrency64(71e3) err = txn.correctFileContracts(30) if err != ErrFileContractOutputSumViolation { t.Error(err) } txn.FileContracts[0].MissedProofOutputs[0].Value = NewCurrency64(70e3) // Try the payouts when the value of the contract is too low to incur a // fee. txn.FileContracts = append(txn.FileContracts, FileContract{ WindowStart: 35, WindowEnd: 40, Payout: NewCurrency64(1e3), ValidProofOutputs: []SiacoinOutput{ {Value: NewCurrency64(1e3)}, }, MissedProofOutputs: []SiacoinOutput{ {Value: NewCurrency64(1e3)}, }, }) err = txn.correctFileContracts(30) if err != nil { t.Error(err) } } // TestCorrectFileContractRevisions probes the correctFileContractRevisions // method of the Transaction type. func TestCorrectFileContractRevisions(t *testing.T) { // Try a revision that starts in the past. txn := Transaction{ FileContractRevisions: []FileContractRevision{{}}, } err := txn.correctFileContractRevisions(0) if err != ErrFileContractWindowStartViolation { t.Error(err) } // Try a revision that has a window which ends before it starts. txn = Transaction{ FileContractRevisions: []FileContractRevision{ {NewWindowStart: 1}, }, } err = txn.correctFileContractRevisions(0) if err != ErrFileContractWindowEndViolation { t.Error(err) } // Try a revision with misaligned payouts. txn.FileContractRevisions = []FileContractRevision{ { NewWindowStart: 1, NewWindowEnd: 2, NewMissedProofOutputs: []SiacoinOutput{ {Value: NewCurrency64(10)}, }, }, } err = txn.correctFileContractRevisions(0) if err != ErrFileContractOutputSumViolation { t.Error("Expecting ErrFileContractOutputSumViolation:", err) } } // TestTransactionFitsInABlock probes the fitsInABlock method of the // Transaction type. func TestTransactionFitsInABlock(t *testing.T) { // Try a transaction that will fit in a block, followed by one that won't. data := make([]byte, BlockSizeLimit/2) txn := Transaction{ArbitraryData: [][]byte{data}} err := txn.fitsInABlock(0) if err != nil { t.Error(err) } data = make([]byte, BlockSizeLimit) txn.ArbitraryData[0] = data err = txn.fitsInABlock(0) if err != ErrTransactionTooLarge { t.Error(err) } // Try a too-large transaction before and after the hardfork height. data = make([]byte, OakHardforkTxnSizeLimit+1) txn.ArbitraryData[0] = data err = txn.fitsInABlock(0) if err != nil { t.Error(err) } err = txn.fitsInABlock(OakHardforkBlock) if err != ErrTransactionTooLarge { t.Error(err) } } // TestTransactionFollowsMinimumValues probes the followsMinimumValues method // of the Transaction type. func TestTransactionFollowsMinimumValues(t *testing.T) { // Start with a transaction that follows all of the minimum-value rules.
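// Each field below carries NewCurrency64(1), the smallest value that followsMinimumValues accepts.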
txn := Transaction{ SiacoinOutputs: []SiacoinOutput{{Value: NewCurrency64(1)}}, FileContracts: []FileContract{{Payout: NewCurrency64(1)}}, SiafundOutputs: []SiafundOutput{{Value: NewCurrency64(1)}}, MinerFees: []Currency{NewCurrency64(1)}, } err := txn.followsMinimumValues() if err != nil { t.Error(err) } // Try a zero value for each type. txn.SiacoinOutputs[0].Value = ZeroCurrency err = txn.followsMinimumValues() if err != ErrZeroOutput { t.Error(err) } txn.SiacoinOutputs[0].Value = NewCurrency64(1) txn.FileContracts[0].Payout = ZeroCurrency err = txn.followsMinimumValues() if err != ErrZeroOutput { t.Error(err) } txn.FileContracts[0].Payout = NewCurrency64(1) txn.SiafundOutputs[0].Value = ZeroCurrency err = txn.followsMinimumValues() if err != ErrZeroOutput { t.Error(err) } txn.SiafundOutputs[0].Value = NewCurrency64(1) txn.MinerFees[0] = ZeroCurrency err = txn.followsMinimumValues() if err != ErrZeroMinerFee { t.Error(err) } txn.MinerFees[0] = NewCurrency64(1) // Try a non-zero value for the ClaimStart field of a siafund output. txn.SiafundOutputs[0].ClaimStart = NewCurrency64(1) err = txn.followsMinimumValues() if err != ErrNonZeroClaimStart { t.Error(err) } txn.SiafundOutputs[0].ClaimStart = ZeroCurrency } // TestTransactionFollowsStorageProofRules probes the followsStorageProofRules // method of the Transaction type. func TestTransactionFollowsStorageProofRules(t *testing.T) { // Try a transaction with no storage proofs. txn := Transaction{} err := txn.followsStorageProofRules() if err != nil { t.Error(err) } // Try a transaction with a legal storage proof. txn.StorageProofs = append(txn.StorageProofs, StorageProof{}) err = txn.followsStorageProofRules() if err != nil { t.Error(err) } // Try a transaction with a storage proof and a SiacoinOutput. txn.SiacoinOutputs = append(txn.SiacoinOutputs, SiacoinOutput{}) err = txn.followsStorageProofRules() if err != ErrStorageProofWithOutputs { t.Error(err) } txn.SiacoinOutputs = nil // Try a transaction with a storage proof and a FileContract. txn.FileContracts = append(txn.FileContracts, FileContract{}) err = txn.followsStorageProofRules() if err != ErrStorageProofWithOutputs { t.Error(err) } txn.FileContracts = nil // Try a transaction with a storage proof and a FileContractRevision. txn.FileContractRevisions = append(txn.FileContractRevisions, FileContractRevision{}) err = txn.followsStorageProofRules() if err != ErrStorageProofWithOutputs { t.Error(err) } txn.FileContractRevisions = nil // Try a transaction with a storage proof and a SiafundOutput. txn.SiafundOutputs = append(txn.SiafundOutputs, SiafundOutput{}) err = txn.followsStorageProofRules() if err != ErrStorageProofWithOutputs { t.Error(err) } txn.SiafundOutputs = nil } // TestTransactionNoRepeats probes the noRepeats method of the Transaction // type. func TestTransactionNoRepeats(t *testing.T) { // Try a transaction with all the repeatable types but no conflicts. txn := Transaction{ SiacoinInputs: []SiacoinInput{{}}, StorageProofs: []StorageProof{{}}, FileContractRevisions: []FileContractRevision{{}}, SiafundInputs: []SiafundInput{{}}, } txn.FileContractRevisions[0].ParentID[0] = 1 // Otherwise it will conflict with the storage proof. err := txn.noRepeats() if err != nil { t.Error(err) } // Try a transaction double spending a siacoin output.
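// Both inputs share the zero-valued ParentID, so appending a second empty SiacoinInput is itself the double spend.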
txn.SiacoinInputs = append(txn.SiacoinInputs, SiacoinInput{}) err = txn.noRepeats() if err != ErrDoubleSpend { t.Error(err) } txn.SiacoinInputs = txn.SiacoinInputs[:1] // Try double spending a file contract, checking that both storage proofs // and terminations can conflict with each other. txn.StorageProofs = append(txn.StorageProofs, StorageProof{}) err = txn.noRepeats() if err != ErrDoubleSpend { t.Error(err) } txn.StorageProofs = txn.StorageProofs[:1] // Have the storage proof conflict with the file contract termination. txn.StorageProofs[0].ParentID[0] = 1 err = txn.noRepeats() if err != ErrDoubleSpend { t.Error(err) } txn.StorageProofs[0].ParentID[0] = 0 // Have the file contract termination conflict with itself. txn.FileContractRevisions = append(txn.FileContractRevisions, FileContractRevision{}) txn.FileContractRevisions[1].ParentID[0] = 1 err = txn.noRepeats() if err != ErrDoubleSpend { t.Error(err) } txn.FileContractRevisions = txn.FileContractRevisions[:1] // Try a transaction double spending a siafund output. txn.SiafundInputs = append(txn.SiafundInputs, SiafundInput{}) err = txn.noRepeats() if err != ErrDoubleSpend { t.Error(err) } txn.SiafundInputs = txn.SiafundInputs[:1] } // TestValidUnlockConditions probes the validUnlockConditions function. func TestValidUnlockConditions(t *testing.T) { // The only thing to check is the timelock. uc := UnlockConditions{Timelock: 3} err := validUnlockConditions(uc, 2) if err != ErrTimelockNotSatisfied { t.Error(err) } err = validUnlockConditions(uc, 3) if err != nil { t.Error(err) } err = validUnlockConditions(uc, 4) if err != nil { t.Error(err) } } // TestTransactionValidUnlockConditions probes the validUnlockConditions method // of the transaction type. func TestTransactionValidUnlockConditions(t *testing.T) { // Create a transaction with each type of valid unlock condition. txn := Transaction{ SiacoinInputs: []SiacoinInput{ {UnlockConditions: UnlockConditions{Timelock: 3}}, }, FileContractRevisions: []FileContractRevision{ {UnlockConditions: UnlockConditions{Timelock: 3}}, }, SiafundInputs: []SiafundInput{ {UnlockConditions: UnlockConditions{Timelock: 3}}, }, } err := txn.validUnlockConditions(4) if err != nil { t.Error(err) } // Try with illegal conditions in the siacoin inputs. txn.SiacoinInputs[0].UnlockConditions.Timelock = 5 err = txn.validUnlockConditions(4) if err == nil { t.Error(err) } txn.SiacoinInputs[0].UnlockConditions.Timelock = 3 // Try with illegal conditions in the file contract revisions. txn.FileContractRevisions[0].UnlockConditions.Timelock = 5 err = txn.validUnlockConditions(4) if err == nil { t.Error(err) } txn.FileContractRevisions[0].UnlockConditions.Timelock = 3 // Try with illegal conditions in the siafund inputs. txn.SiafundInputs[0].UnlockConditions.Timelock = 5 err = txn.validUnlockConditions(4) if err == nil { t.Error(err) } txn.SiafundInputs[0].UnlockConditions.Timelock = 3 } // TestTransactionStandaloneValid probes the StandaloneValid method of the // Transaction type. func TestTransactionStandaloneValid(t *testing.T) { // Build a working transaction. var txn Transaction err := txn.StandaloneValid(0) if err != nil { t.Error(err) } // Violate fitsInABlock.
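// ArbitraryData of BlockSizeLimit bytes leaves no room for the 5e3-byte overhead margin that fitsInABlock reserves.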
data := make([]byte, BlockSizeLimit) txn.ArbitraryData = [][]byte{data} err = txn.StandaloneValid(0) if err == nil { t.Error("failed to trigger fitsInABlock error") } txn.ArbitraryData = nil // Violate followsStorageProofRules txn.StorageProofs = []StorageProof{{}} txn.SiacoinOutputs = []SiacoinOutput{{}} txn.SiacoinOutputs[0].Value = NewCurrency64(1) err = txn.StandaloneValid(0) if err == nil { t.Error("failed to trigger followsStorageProofRules error") } txn.StorageProofs = nil txn.SiacoinOutputs = nil // Violate noRepeats txn.SiacoinInputs = []SiacoinInput{{}, {}} err = txn.StandaloneValid(0) if err == nil { t.Error("failed to trigger noRepeats error") } txn.SiacoinInputs = nil // Violate followsMinimumValues txn.SiacoinOutputs = []SiacoinOutput{{}} err = txn.StandaloneValid(0) if err == nil { t.Error("failed to trigger followsMinimumValues error") } txn.SiacoinOutputs = nil // Violate correctFileContracts txn.FileContracts = []FileContract{ { Payout: NewCurrency64(1), WindowStart: 5, WindowEnd: 5, }, } err = txn.StandaloneValid(0) if err == nil { t.Error("failed to trigger correctFileContracts error") } txn.FileContracts = nil // Violate correctFileContractRevisions txn.FileContractRevisions = []FileContractRevision{{}} err = txn.StandaloneValid(0) if err == nil { t.Error("failed to trigger correctFileContractRevisions error") } txn.FileContractRevisions = nil // Violate validUnlockConditions txn.SiacoinInputs = []SiacoinInput{{}} txn.SiacoinInputs[0].UnlockConditions.Timelock = 1 err = txn.StandaloneValid(0) if err == nil { t.Error("failed to trigger validUnlockConditions error") } txn.SiacoinInputs = nil // Violate validSignatures txn.TransactionSignatures = []TransactionSignature{{}} err = txn.StandaloneValid(0) if err == nil { t.Error("failed to trigger validSignatures error") } txn.TransactionSignatures = nil } Sia-1.3.0/vendor/000077500000000000000000000000001313565667000135405ustar00rootroot00000000000000Sia-1.3.0/vendor/github.com/000077500000000000000000000000001313565667000155775ustar00rootroot00000000000000Sia-1.3.0/vendor/github.com/pkg/000077500000000000000000000000001313565667000163605ustar00rootroot00000000000000Sia-1.3.0/vendor/github.com/pkg/errors/000077500000000000000000000000001313565667000176745ustar00rootroot00000000000000Sia-1.3.0/vendor/github.com/pkg/errors/.gitignore000066400000000000000000000004121313565667000216610ustar00rootroot00000000000000# Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so # Folders _obj _test # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe *.test *.prof Sia-1.3.0/vendor/github.com/pkg/errors/.travis.yml000066400000000000000000000002171313565667000220050ustar00rootroot00000000000000language: go go_import_path: github.com/pkg/errors go: - 1.4.x - 1.5.x - 1.6.x - 1.7.x - 1.8.x - tip script: - go test -v ./... Sia-1.3.0/vendor/github.com/pkg/errors/LICENSE000066400000000000000000000024401313565667000207010ustar00rootroot00000000000000Copyright (c) 2015, Dave Cheney All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Sia-1.3.0/vendor/github.com/pkg/errors/README.md000066400000000000000000000043021313565667000211520ustar00rootroot00000000000000# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) Package errors provides simple error handling primitives. `go get github.com/pkg/errors` The traditional error handling idiom in Go is roughly akin to ```go if err != nil { return err } ``` which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. ## Adding context to an error The errors.Wrap function returns a new error that adds context to the original error. For example ```go _, err := ioutil.ReadAll(r) if err != nil { return errors.Wrap(err, "read failed") } ``` ## Retrieving the cause of an error Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. ```go type causer interface { Cause() error } ``` `errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: ```go switch err := errors.Cause(err).(type) { case *MyError: // handle specifically default: // unknown error } ``` [Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). ## Contributing We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. Before proposing a change, please discuss your change by raising an issue. 
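## Example A minimal end-to-end sketch combining `Wrap` and `Cause` (the `readConfig` helper and its caller are hypothetical): ```go func readConfig(path string) error { _, err := ioutil.ReadFile(path) if err != nil { return errors.Wrap(err, "read config failed") } return nil } ``` Callers that need the original error (for example an `*os.PathError`) can recover it with `errors.Cause(err)`.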
## Licence BSD-2-Clause Sia-1.3.0/vendor/github.com/pkg/errors/appveyor.yml000066400000000000000000000011771313565667000222700ustar00rootroot00000000000000version: build-{build}.{branch} clone_folder: C:\gopath\src\github.com\pkg\errors shallow_clone: true # for startup speed environment: GOPATH: C:\gopath platform: - x64 # http://www.appveyor.com/docs/installed-software install: # some helpful output for debugging builds - go version - go env # pre-installed MinGW at C:\MinGW is 32bit only # but MSYS2 at C:\msys64 has mingw64 - set PATH=C:\msys64\mingw64\bin;%PATH% - gcc --version - g++ --version build_script: - go install -v ./... test_script: - set PATH=C:\gopath\bin;%PATH% - go test -v ./... #artifacts: # - path: '%GOPATH%\bin\*.exe' deploy: off Sia-1.3.0/vendor/github.com/pkg/errors/bench_test.go000066400000000000000000000020411313565667000223400ustar00rootroot00000000000000// +build go1.7 package errors import ( "fmt" "testing" stderrors "errors" ) func noErrors(at, depth int) error { if at >= depth { return stderrors.New("no error") } return noErrors(at+1, depth) } func yesErrors(at, depth int) error { if at >= depth { return New("ye error") } return yesErrors(at+1, depth) } // GlobalE is an exported global to store the result of benchmark results, // preventing the compiler from optimising the benchmark functions away. var GlobalE error func BenchmarkErrors(b *testing.B) { type run struct { stack int std bool } runs := []run{ {10, false}, {10, true}, {100, false}, {100, true}, {1000, false}, {1000, true}, } for _, r := range runs { part := "pkg/errors" if r.std { part = "errors" } name := fmt.Sprintf("%s-stack-%d", part, r.stack) b.Run(name, func(b *testing.B) { var err error f := yesErrors if r.std { f = noErrors } b.ReportAllocs() for i := 0; i < b.N; i++ { err = f(0, r.stack) } b.StopTimer() GlobalE = err }) } } Sia-1.3.0/vendor/github.com/pkg/errors/errors.go000066400000000000000000000152661313565667000215500ustar00rootroot00000000000000// Package errors provides simple error handling primitives. // // The traditional error handling idiom in Go is roughly akin to // // if err != nil { // return err // } // // which applied recursively up the call stack results in error reports // without context or debugging information. The errors package allows // programmers to add context to the failure path in their code in a way // that does not destroy the original value of the error. // // Adding context to an error // // The errors.Wrap function returns a new error that adds context to the // original error by recording a stack trace at the point Wrap is called, // and the supplied message. For example // // _, err := ioutil.ReadAll(r) // if err != nil { // return errors.Wrap(err, "read failed") // } // // If additional control is required, the errors.WithStack and errors.WithMessage // functions destructure errors.Wrap into its component operations of annotating // an error with a stack trace and a message, respectively. // // Retrieving the cause of an error // // Using errors.Wrap constructs a stack of errors, adding context to the // preceding error. Depending on the nature of the error it may be necessary // to reverse the operation of errors.Wrap to retrieve the original error // for inspection. Any error value which implements this interface // // type causer interface { // Cause() error // } // // can be inspected by errors.Cause. errors.Cause will recursively retrieve // the topmost error which does not implement causer, which is assumed to be // the original cause.
For example: // // switch err := errors.Cause(err).(type) { // case *MyError: // // handle specifically // default: // // unknown error // } // // causer interface is not exported by this package, but is considered a part // of stable public API. // // Formatted printing of errors // // All error values returned from this package implement fmt.Formatter and can // be formatted by the fmt package. The following verbs are supported // // %s print the error. If the error has a Cause it will be // printed recursively // %v see %s // %+v extended format. Each Frame of the error's StackTrace will // be printed in detail. // // Retrieving the stack trace of an error or wrapper // // New, Errorf, Wrap, and Wrapf record a stack trace at the point they are // invoked. This information can be retrieved with the following interface. // // type stackTracer interface { // StackTrace() errors.StackTrace // } // // Where errors.StackTrace is defined as // // type StackTrace []Frame // // The Frame type represents a call site in the stack trace. Frame supports // the fmt.Formatter interface that can be used for printing information about // the stack trace of this error. For example: // // if err, ok := err.(stackTracer); ok { // for _, f := range err.StackTrace() { // fmt.Printf("%+s:%d", f) // } // } // // stackTracer interface is not exported by this package, but is considered a part // of stable public API. // // See the documentation for Frame.Format for more details. package errors import ( "fmt" "io" ) // New returns an error with the supplied message. // New also records the stack trace at the point it was called. func New(message string) error { return &fundamental{ msg: message, stack: callers(), } } // Errorf formats according to a format specifier and returns the string // as a value that satisfies error. // Errorf also records the stack trace at the point it was called. func Errorf(format string, args ...interface{}) error { return &fundamental{ msg: fmt.Sprintf(format, args...), stack: callers(), } } // fundamental is an error that has a message and a stack, but no caller. type fundamental struct { msg string *stack } func (f *fundamental) Error() string { return f.msg } func (f *fundamental) Format(s fmt.State, verb rune) { switch verb { case 'v': if s.Flag('+') { io.WriteString(s, f.msg) f.stack.Format(s, verb) return } fallthrough case 's': io.WriteString(s, f.msg) case 'q': fmt.Fprintf(s, "%q", f.msg) } } // WithStack annotates err with a stack trace at the point WithStack was called. // If err is nil, WithStack returns nil. func WithStack(err error) error { if err == nil { return nil } return &withStack{ err, callers(), } } type withStack struct { error *stack } func (w *withStack) Cause() error { return w.error } func (w *withStack) Format(s fmt.State, verb rune) { switch verb { case 'v': if s.Flag('+') { fmt.Fprintf(s, "%+v", w.Cause()) w.stack.Format(s, verb) return } fallthrough case 's': io.WriteString(s, w.Error()) case 'q': fmt.Fprintf(s, "%q", w.Error()) } } // Wrap returns an error annotating err with a stack trace // at the point Wrap is called, and the supplied message. // If err is nil, Wrap returns nil. func Wrap(err error, message string) error { if err == nil { return nil } err = &withMessage{ cause: err, msg: message, } return &withStack{ err, callers(), } } // Wrapf returns an error annotating err with a stack trace // at the point Wrapf is called, and the format specifier. // If err is nil, Wrapf returns nil.
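// // For example (illustrative only): // // return errors.Wrapf(err, "reading %s failed", path)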
func Wrapf(err error, format string, args ...interface{}) error { if err == nil { return nil } err = &withMessage{ cause: err, msg: fmt.Sprintf(format, args...), } return &withStack{ err, callers(), } } // WithMessage annotates err with a new message. // If err is nil, WithMessage returns nil. func WithMessage(err error, message string) error { if err == nil { return nil } return &withMessage{ cause: err, msg: message, } } type withMessage struct { cause error msg string } func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } func (w *withMessage) Cause() error { return w.cause } func (w *withMessage) Format(s fmt.State, verb rune) { switch verb { case 'v': if s.Flag('+') { fmt.Fprintf(s, "%+v\n", w.Cause()) io.WriteString(s, w.msg) return } fallthrough case 's', 'q': io.WriteString(s, w.Error()) } } // Cause returns the underlying cause of the error, if possible. // An error value has a cause if it implements the following // interface: // // type causer interface { // Cause() error // } // // If the error does not implement Cause, the original error will // be returned. If the error is nil, nil will be returned without further // investigation. func Cause(err error) error { type causer interface { Cause() error } for err != nil { cause, ok := err.(causer) if !ok { break } err = cause.Cause() } return err } Sia-1.3.0/vendor/github.com/pkg/errors/errors_test.go000066400000000000000000000113671313565667000226060ustar00rootroot00000000000000package errors import ( "errors" "fmt" "io" "reflect" "testing" ) func TestNew(t *testing.T) { tests := []struct { err string want error }{ {"", fmt.Errorf("")}, {"foo", fmt.Errorf("foo")}, {"foo", New("foo")}, {"string with format specifiers: %v", errors.New("string with format specifiers: %v")}, } for _, tt := range tests { got := New(tt.err) if got.Error() != tt.want.Error() { t.Errorf("New.Error(): got: %q, want %q", got, tt.want) } } } func TestWrapNil(t *testing.T) { got := Wrap(nil, "no error") if got != nil { t.Errorf("Wrap(nil, \"no error\"): got %#v, expected nil", got) } } func TestWrap(t *testing.T) { tests := []struct { err error message string want string }{ {io.EOF, "read error", "read error: EOF"}, {Wrap(io.EOF, "read error"), "client error", "client error: read error: EOF"}, } for _, tt := range tests { got := Wrap(tt.err, tt.message).Error() if got != tt.want { t.Errorf("Wrap(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want) } } } type nilError struct{} func (nilError) Error() string { return "nil error" } func TestCause(t *testing.T) { x := New("error") tests := []struct { err error want error }{{ // nil error is nil err: nil, want: nil, }, { // explicit nil error is nil err: (error)(nil), want: nil, }, { // typed nil is nil err: (*nilError)(nil), want: (*nilError)(nil), }, { // uncaused error is unaffected err: io.EOF, want: io.EOF, }, { // caused error returns cause err: Wrap(io.EOF, "ignored"), want: io.EOF, }, { err: x, // return from errors.New want: x, }, { WithMessage(nil, "whoops"), nil, }, { WithMessage(io.EOF, "whoops"), io.EOF, }, { WithStack(nil), nil, }, { WithStack(io.EOF), io.EOF, }} for i, tt := range tests { got := Cause(tt.err) if !reflect.DeepEqual(got, tt.want) { t.Errorf("test %d: got %#v, want %#v", i+1, got, tt.want) } } } func TestWrapfNil(t *testing.T) { got := Wrapf(nil, "no error") if got != nil { t.Errorf("Wrapf(nil, \"no error\"): got %#v, expected nil", got) } } func TestWrapf(t *testing.T) { tests := []struct { err error message string want string }{ {io.EOF, "read 
error", "read error: EOF"}, {Wrapf(io.EOF, "read error without format specifiers"), "client error", "client error: read error without format specifiers: EOF"}, {Wrapf(io.EOF, "read error with %d format specifier", 1), "client error", "client error: read error with 1 format specifier: EOF"}, } for _, tt := range tests { got := Wrapf(tt.err, tt.message).Error() if got != tt.want { t.Errorf("Wrapf(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want) } } } func TestErrorf(t *testing.T) { tests := []struct { err error want string }{ {Errorf("read error without format specifiers"), "read error without format specifiers"}, {Errorf("read error with %d format specifier", 1), "read error with 1 format specifier"}, } for _, tt := range tests { got := tt.err.Error() if got != tt.want { t.Errorf("Errorf(%v): got: %q, want %q", tt.err, got, tt.want) } } } func TestWithStackNil(t *testing.T) { got := WithStack(nil) if got != nil { t.Errorf("WithStack(nil): got %#v, expected nil", got) } } func TestWithStack(t *testing.T) { tests := []struct { err error want string }{ {io.EOF, "EOF"}, {WithStack(io.EOF), "EOF"}, } for _, tt := range tests { got := WithStack(tt.err).Error() if got != tt.want { t.Errorf("WithStack(%v): got: %v, want %v", tt.err, got, tt.want) } } } func TestWithMessageNil(t *testing.T) { got := WithMessage(nil, "no error") if got != nil { t.Errorf("WithMessage(nil, \"no error\"): got %#v, expected nil", got) } } func TestWithMessage(t *testing.T) { tests := []struct { err error message string want string }{ {io.EOF, "read error", "read error: EOF"}, {WithMessage(io.EOF, "read error"), "client error", "client error: read error: EOF"}, } for _, tt := range tests { got := WithMessage(tt.err, tt.message).Error() if got != tt.want { t.Errorf("WithMessage(%v, %q): got: %q, want %q", tt.err, tt.message, got, tt.want) } } } // errors.New, etc values are not expected to be compared by value // but the change in errors#27 made them incomparable. Assert that // various kinds of errors have a functional equality operator, even // if the result of that equality is always false. 
func TestErrorEquality(t *testing.T) { vals := []error{ nil, io.EOF, errors.New("EOF"), New("EOF"), Errorf("EOF"), Wrap(io.EOF, "EOF"), Wrapf(io.EOF, "EOF%d", 2), WithMessage(nil, "whoops"), WithMessage(io.EOF, "whoops"), WithStack(io.EOF), WithStack(nil), } for i := range vals { for j := range vals { _ = vals[i] == vals[j] // mustn't panic } } } Sia-1.3.0/vendor/github.com/pkg/errors/example_test.go000066400000000000000000000124751313565667000227260ustar00rootroot00000000000000package errors_test import ( "fmt" "github.com/pkg/errors" ) func ExampleNew() { err := errors.New("whoops") fmt.Println(err) // Output: whoops } func ExampleNew_printf() { err := errors.New("whoops") fmt.Printf("%+v", err) // Example output: // whoops // github.com/pkg/errors_test.ExampleNew_printf // /home/dfc/src/github.com/pkg/errors/example_test.go:17 // testing.runExample // /home/dfc/go/src/testing/example.go:114 // testing.RunExamples // /home/dfc/go/src/testing/example.go:38 // testing.(*M).Run // /home/dfc/go/src/testing/testing.go:744 // main.main // /github.com/pkg/errors/_test/_testmain.go:106 // runtime.main // /home/dfc/go/src/runtime/proc.go:183 // runtime.goexit // /home/dfc/go/src/runtime/asm_amd64.s:2059 } func ExampleWithMessage() { cause := errors.New("whoops") err := errors.WithMessage(cause, "oh noes") fmt.Println(err) // Output: oh noes: whoops } func ExampleWithStack() { cause := errors.New("whoops") err := errors.WithStack(cause) fmt.Println(err) // Output: whoops } func ExampleWithStack_printf() { cause := errors.New("whoops") err := errors.WithStack(cause) fmt.Printf("%+v", err) // Example Output: // whoops // github.com/pkg/errors_test.ExampleWithStack_printf // /home/fabstu/go/src/github.com/pkg/errors/example_test.go:55 // testing.runExample // /usr/lib/go/src/testing/example.go:114 // testing.RunExamples // /usr/lib/go/src/testing/example.go:38 // testing.(*M).Run // /usr/lib/go/src/testing/testing.go:744 // main.main // github.com/pkg/errors/_test/_testmain.go:106 // runtime.main // /usr/lib/go/src/runtime/proc.go:183 // runtime.goexit // /usr/lib/go/src/runtime/asm_amd64.s:2086 // github.com/pkg/errors_test.ExampleWithStack_printf // /home/fabstu/go/src/github.com/pkg/errors/example_test.go:56 // testing.runExample // /usr/lib/go/src/testing/example.go:114 // testing.RunExamples // /usr/lib/go/src/testing/example.go:38 // testing.(*M).Run // /usr/lib/go/src/testing/testing.go:744 // main.main // github.com/pkg/errors/_test/_testmain.go:106 // runtime.main // /usr/lib/go/src/runtime/proc.go:183 // runtime.goexit // /usr/lib/go/src/runtime/asm_amd64.s:2086 } func ExampleWrap() { cause := errors.New("whoops") err := errors.Wrap(cause, "oh noes") fmt.Println(err) // Output: oh noes: whoops } func fn() error { e1 := errors.New("error") e2 := errors.Wrap(e1, "inner") e3 := errors.Wrap(e2, "middle") return errors.Wrap(e3, "outer") } func ExampleCause() { err := fn() fmt.Println(err) fmt.Println(errors.Cause(err)) // Output: outer: middle: inner: error // error } func ExampleWrap_extended() { err := fn() fmt.Printf("%+v\n", err) // Example output: // error // github.com/pkg/errors_test.fn // /home/dfc/src/github.com/pkg/errors/example_test.go:47 // github.com/pkg/errors_test.ExampleCause_printf // /home/dfc/src/github.com/pkg/errors/example_test.go:63 // testing.runExample // /home/dfc/go/src/testing/example.go:114 // testing.RunExamples // /home/dfc/go/src/testing/example.go:38 // testing.(*M).Run // /home/dfc/go/src/testing/testing.go:744 // main.main // 
Sia-1.3.0/vendor/github.com/pkg/errors/format_test.go
package errors

import (
	"errors"
	"fmt"
	"io"
	"regexp"
	"strings"
	"testing"
)

func TestFormatNew(t *testing.T) {
	tests := []struct {
		error
		format string
		want   string
	}{{
		New("error"),
		"%s",
		"error",
	}, {
		New("error"),
		"%v",
		"error",
	}, {
		New("error"),
		"%+v",
		"error\n" +
			"github.com/pkg/errors.TestFormatNew\n" +
			"\t.+/github.com/pkg/errors/format_test.go:26",
	}, {
		New("error"),
		"%q",
		`"error"`,
	}}

	for i, tt := range tests {
		testFormatRegexp(t, i, tt.error, tt.format, tt.want)
	}
}

func TestFormatErrorf(t *testing.T) {
	tests := []struct {
		error
		format string
		want   string
	}{{
		Errorf("%s", "error"),
		"%s",
		"error",
	}, {
		Errorf("%s", "error"),
		"%v",
		"error",
	}, {
		Errorf("%s", "error"),
		"%+v",
		"error\n" +
			"github.com/pkg/errors.TestFormatErrorf\n" +
			"\t.+/github.com/pkg/errors/format_test.go:56",
	}}

	for i, tt := range tests {
		testFormatRegexp(t, i, tt.error, tt.format, tt.want)
	}
}

func TestFormatWrap(t *testing.T) {
	tests := []struct {
		error
		format string
		want   string
	}{{
		Wrap(New("error"), "error2"),
		"%s",
		"error2: error",
	}, {
		Wrap(New("error"), "error2"),
		"%v",
		"error2: error",
	}, {
		Wrap(New("error"), "error2"),
		"%+v",
		"error\n" +
			"github.com/pkg/errors.TestFormatWrap\n" +
			"\t.+/github.com/pkg/errors/format_test.go:82",
	}, {
		Wrap(io.EOF, "error"),
		"%s",
		"error: EOF",
	}, {
		Wrap(io.EOF, "error"),
		"%v",
		"error: EOF",
	}, {
		Wrap(io.EOF, "error"),
		"%+v",
		"EOF\n" +
			"error\n" +
			"github.com/pkg/errors.TestFormatWrap\n" +
			"\t.+/github.com/pkg/errors/format_test.go:96",
	}, {
		Wrap(Wrap(io.EOF, "error1"), "error2"),
		"%+v",
		"EOF\n" +
			"error1\n" +
			"github.com/pkg/errors.TestFormatWrap\n" +
			"\t.+/github.com/pkg/errors/format_test.go:103\n",
	}, {
		Wrap(New("error with space"), "context"),
		"%q",
		`"context: error with space"`,
	}}

	for i, tt := range tests {
		testFormatRegexp(t, i, tt.error, tt.format, tt.want)
	}
}

func TestFormatWrapf(t *testing.T) {
	tests := []struct {
		error
		format string
		want   string
	}{{
		Wrapf(io.EOF, "error%d", 2),
		"%s",
		"error2: EOF",
	}, {
		Wrapf(io.EOF, "error%d", 2),
		"%v",
		"error2: EOF",
	}, {
		Wrapf(io.EOF, "error%d", 2),
		"%+v",
		"EOF\n" +
			"error2\n" +
			"github.com/pkg/errors.TestFormatWrapf\n" +
			"\t.+/github.com/pkg/errors/format_test.go:134",
	}, {
		Wrapf(New("error"), "error%d", 2),
		"%s",
		"error2: error",
	}, {
		Wrapf(New("error"), "error%d", 2),
		"%v",
		"error2: error",
	}, {
		Wrapf(New("error"), "error%d", 2),
		"%+v",
		"error\n" +
			"github.com/pkg/errors.TestFormatWrapf\n" +
			"\t.+/github.com/pkg/errors/format_test.go:149",
	}}

	for i, tt := range tests {
		testFormatRegexp(t, i, tt.error, tt.format, tt.want)
	}
}

func TestFormatWithStack(t *testing.T) {
	tests := []struct {
		error
		format string
		want   []string
	}{{
		WithStack(io.EOF),
		"%s",
		[]string{"EOF"},
	}, {
		WithStack(io.EOF),
		"%v",
		[]string{"EOF"},
	}, {
		WithStack(io.EOF),
		"%+v",
		[]string{"EOF",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:175"},
	}, {
		WithStack(New("error")),
		"%s",
		[]string{"error"},
	}, {
		WithStack(New("error")),
		"%v",
		[]string{"error"},
	}, {
		WithStack(New("error")),
		"%+v",
		[]string{"error",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:189",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:189"},
	}, {
		WithStack(WithStack(io.EOF)),
		"%+v",
		[]string{"EOF",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:197",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:197"},
	}, {
		WithStack(WithStack(Wrapf(io.EOF, "message"))),
		"%+v",
		[]string{"EOF",
			"message",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:205",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:205",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:205"},
	}, {
		WithStack(Errorf("error%d", 1)),
		"%+v",
		[]string{"error1",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:216",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:216"},
	}}

	for i, tt := range tests {
		testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true)
	}
}

func TestFormatWithMessage(t *testing.T) {
	tests := []struct {
		error
		format string
		want   []string
	}{{
		WithMessage(New("error"), "error2"),
		"%s",
		[]string{"error2: error"},
	}, {
		WithMessage(New("error"), "error2"),
		"%v",
		[]string{"error2: error"},
	}, {
		WithMessage(New("error"), "error2"),
		"%+v",
		[]string{
			"error",
			"github.com/pkg/errors.TestFormatWithMessage\n" +
				"\t.+/github.com/pkg/errors/format_test.go:244",
			"error2"},
	}, {
		WithMessage(io.EOF, "addition1"),
		"%s",
		[]string{"addition1: EOF"},
	}, {
		WithMessage(io.EOF, "addition1"),
		"%v",
		[]string{"addition1: EOF"},
	}, {
		WithMessage(io.EOF, "addition1"),
		"%+v",
		[]string{"EOF", "addition1"},
	}, {
		WithMessage(WithMessage(io.EOF, "addition1"), "addition2"),
		"%v",
		[]string{"addition2: addition1: EOF"},
	}, {
		WithMessage(WithMessage(io.EOF, "addition1"), "addition2"),
		"%+v",
		[]string{"EOF", "addition1", "addition2"},
	}, {
		Wrap(WithMessage(io.EOF, "error1"), "error2"),
		"%+v",
		[]string{"EOF", "error1", "error2",
			"github.com/pkg/errors.TestFormatWithMessage\n" +
				"\t.+/github.com/pkg/errors/format_test.go:272"},
	}, {
		WithMessage(Errorf("error%d", 1), "error2"),
		"%+v",
		[]string{"error1",
			"github.com/pkg/errors.TestFormatWithMessage\n" +
				"\t.+/github.com/pkg/errors/format_test.go:278",
			"error2"},
	}, {
		WithMessage(WithStack(io.EOF), "error"),
		"%+v",
		[]string{
			"EOF",
			"github.com/pkg/errors.TestFormatWithMessage\n" +
				"\t.+/github.com/pkg/errors/format_test.go:285",
			"error"},
	}, {
		WithMessage(Wrap(WithStack(io.EOF), "inside-error"), "outside-error"),
		"%+v",
		[]string{
			"EOF",
			"github.com/pkg/errors.TestFormatWithMessage\n" +
				"\t.+/github.com/pkg/errors/format_test.go:293",
			"inside-error",
			"github.com/pkg/errors.TestFormatWithMessage\n" +
				"\t.+/github.com/pkg/errors/format_test.go:293",
			"outside-error"},
	}}

	for i, tt := range tests {
		testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true)
	}
}

func TestFormatGeneric(t *testing.T) {
	starts := []struct {
		err  error
		want []string
	}{
		{New("new-error"), []string{
			"new-error",
			"github.com/pkg/errors.TestFormatGeneric\n" +
				"\t.+/github.com/pkg/errors/format_test.go:315"},
		}, {Errorf("errorf-error"), []string{
			"errorf-error",
			"github.com/pkg/errors.TestFormatGeneric\n" +
				"\t.+/github.com/pkg/errors/format_test.go:319"},
		}, {errors.New("errors-new-error"), []string{
			"errors-new-error"},
		},
	}

	wrappers := []wrapper{
		{
			func(err error) error { return WithMessage(err, "with-message") },
			[]string{"with-message"},
		}, {
			func(err error) error { return WithStack(err) },
			[]string{
				"github.com/pkg/errors.(func·002|TestFormatGeneric.func2)\n\t" +
					".+/github.com/pkg/errors/format_test.go:333",
			},
		}, {
			func(err error) error { return Wrap(err, "wrap-error") },
			[]string{
				"wrap-error",
				"github.com/pkg/errors.(func·003|TestFormatGeneric.func3)\n\t" +
					".+/github.com/pkg/errors/format_test.go:339",
			},
		}, {
			func(err error) error { return Wrapf(err, "wrapf-error%d", 1) },
			[]string{
				"wrapf-error1",
				"github.com/pkg/errors.(func·004|TestFormatGeneric.func4)\n\t" +
					".+/github.com/pkg/errors/format_test.go:346",
			},
		},
	}

	for s := range starts {
		err := starts[s].err
		want := starts[s].want
		testFormatCompleteCompare(t, s, err, "%+v", want, false)
		testGenericRecursive(t, err, want, wrappers, 3)
	}
}

func testFormatRegexp(t *testing.T, n int, arg interface{}, format, want string) {
	got := fmt.Sprintf(format, arg)
	gotLines := strings.SplitN(got, "\n", -1)
	wantLines := strings.SplitN(want, "\n", -1)

	if len(wantLines) > len(gotLines) {
		t.Errorf("test %d: wantLines(%d) > gotLines(%d):\n got: %q\nwant: %q", n+1, len(wantLines), len(gotLines), got, want)
		return
	}

	for i, w := range wantLines {
		match, err := regexp.MatchString(w, gotLines[i])
		if err != nil {
			t.Fatal(err)
		}
		if !match {
			t.Errorf("test %d: line %d: fmt.Sprintf(%q, err):\n got: %q\nwant: %q", n+1, i+1, format, got, want)
		}
	}
}

var stackLineR = regexp.MustCompile(`\.`)

// parseBlocks parses input into a slice, where:
//   - if an entry contains a newline, it is a stack trace
//   - if an entry contains no newline, it is a solo line
//
// Detecting stack boundaries only works if the WithStack calls are
// on the same line; that is why it is optional here.
//
// Example use:
//
// for _, e := range blocks {
//   if strings.ContainsAny(e, "\n") {
//     // Match as stack
//   } else {
//     // Match as line
//   }
// }
func parseBlocks(input string, detectStackboundaries bool) ([]string, error) {
	var blocks []string

	stack := ""
	wasStack := false
	lines := map[string]bool{} // already found lines

	for _, l := range strings.Split(input, "\n") {
		isStackLine := stackLineR.MatchString(l)

		switch {
		case !isStackLine && wasStack:
			blocks = append(blocks, stack, l)
			stack = ""
			lines = map[string]bool{}
		case isStackLine:
			if wasStack {
				// Detecting two stacks after one another, possibly because lines
				// match in our tests due to WithStack(WithStack(io.EOF)) on the
				// same line.
				if detectStackboundaries {
					if lines[l] {
						if len(stack) == 0 {
							return nil, errors.New("len of block must not be zero here")
						}

						blocks = append(blocks, stack)
						stack = l
						lines = map[string]bool{l: true}
						continue
					}
				}

				stack = stack + "\n" + l
			} else {
				stack = l
			}
			lines[l] = true
		case !isStackLine && !wasStack:
			blocks = append(blocks, l)
		default:
			return nil, errors.New("must not happen")
		}

		wasStack = isStackLine
	}

	// Use up stack
	if stack != "" {
		blocks = append(blocks, stack)
	}
	return blocks, nil
}

func testFormatCompleteCompare(t *testing.T, n int, arg interface{}, format string, want []string, detectStackBoundaries bool) {
	gotStr := fmt.Sprintf(format, arg)

	got, err := parseBlocks(gotStr, detectStackBoundaries)
	if err != nil {
		t.Fatal(err)
	}

	if len(got) != len(want) {
		t.Fatalf("test %d: fmt.Sprintf(%s, err) -> wrong number of blocks: got(%d) want(%d)\n got: %s\nwant: %s\ngotStr: %q",
			n+1, format, len(got), len(want), prettyBlocks(got), prettyBlocks(want), gotStr)
	}

	for i := range got {
		if strings.ContainsAny(want[i], "\n") {
			// Match as stack
			match, err := regexp.MatchString(want[i], got[i])
			if err != nil {
				t.Fatal(err)
			}
			if !match {
				t.Fatalf("test %d: block %d: fmt.Sprintf(%q, err):\ngot:\n%q\nwant:\n%q\nall-got:\n%s\nall-want:\n%s\n",
					n+1, i+1, format, got[i], want[i], prettyBlocks(got), prettyBlocks(want))
			}
		} else {
			// Match as message
			if got[i] != want[i] {
				t.Fatalf("test %d: fmt.Sprintf(%s, err) at block %d got != want:\n got: %q\nwant: %q", n+1, format, i+1, got[i], want[i])
			}
		}
	}
}

type wrapper struct {
	wrap func(err error) error
	want []string
}

func prettyBlocks(blocks []string, prefix ...string) string {
	var out []string

	for _, b := range blocks {
		out = append(out, fmt.Sprintf("%v", b))
	}

	return " " + strings.Join(out, "\n ")
}

func testGenericRecursive(t *testing.T, beforeErr error, beforeWant []string, list []wrapper, maxDepth int) {
	if len(beforeWant) == 0 {
		panic("beforeWant must not be empty")
	}
	for _, w := range list {
		if len(w.want) == 0 {
			panic("want must not be empty")
		}

		err := w.wrap(beforeErr)

		// Copy required because append(beforeWant, ...) modifies beforeWant subtly.
		beforeCopy := make([]string, len(beforeWant))
		copy(beforeCopy, beforeWant)
		beforeWant := beforeCopy

		last := len(beforeWant) - 1
		var want []string

		// Merge two stacks behind each other.
		if strings.ContainsAny(beforeWant[last], "\n") && strings.ContainsAny(w.want[0], "\n") {
			want = append(beforeWant[:last], append([]string{beforeWant[last] + "((?s).*)" + w.want[0]}, w.want[1:]...)...)
		} else {
			want = append(beforeWant, w.want...)
		}

		testFormatCompleteCompare(t, maxDepth, err, "%+v", want, false)
		if maxDepth > 0 {
			testGenericRecursive(t, err, want, list, maxDepth-1)
		}
	}
}

Sia-1.3.0/vendor/github.com/pkg/errors/stack.go
package errors

import (
	"fmt"
	"io"
	"path"
	"runtime"
	"strings"
)

// Frame represents a program counter inside a stack frame.
type Frame uintptr

// pc returns the program counter for this frame;
// multiple frames may have the same PC value.
func (f Frame) pc() uintptr { return uintptr(f) - 1 }

// file returns the full path to the file that contains the
// function for this Frame's pc.
func (f Frame) file() string {
	fn := runtime.FuncForPC(f.pc())
	if fn == nil {
		return "unknown"
	}
	file, _ := fn.FileLine(f.pc())
	return file
}

// line returns the line number of source code of the
// function for this Frame's pc.
func (f Frame) line() int {
	fn := runtime.FuncForPC(f.pc())
	if fn == nil {
		return 0
	}
	_, line := fn.FileLine(f.pc())
	return line
}

// Format formats the frame according to the fmt.Formatter interface.
//
//    %s    source file
//    %d    source line
//    %n    function name
//    %v    equivalent to %s:%d
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
//    %+s   path of source file relative to the compile time GOPATH
//    %+v   equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
	switch verb {
	case 's':
		switch {
		case s.Flag('+'):
			pc := f.pc()
			fn := runtime.FuncForPC(pc)
			if fn == nil {
				io.WriteString(s, "unknown")
			} else {
				file, _ := fn.FileLine(pc)
				fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
			}
		default:
			io.WriteString(s, path.Base(f.file()))
		}
	case 'd':
		fmt.Fprintf(s, "%d", f.line())
	case 'n':
		name := runtime.FuncForPC(f.pc()).Name()
		io.WriteString(s, funcname(name))
	case 'v':
		f.Format(s, 's')
		io.WriteString(s, ":")
		f.Format(s, 'd')
	}
}

// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame

// Format formats the stack of Frames according to the fmt.Formatter interface.
//
//    %s    lists source files for each Frame in the stack
//    %v    lists the source file and line number for each Frame in the stack
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
//    %+v   Prints filename, function, and line number for each Frame in the stack.
func (st StackTrace) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		switch {
		case s.Flag('+'):
			for _, f := range st {
				fmt.Fprintf(s, "\n%+v", f)
			}
		case s.Flag('#'):
			fmt.Fprintf(s, "%#v", []Frame(st))
		default:
			fmt.Fprintf(s, "%v", []Frame(st))
		}
	case 's':
		fmt.Fprintf(s, "%s", []Frame(st))
	}
}

// stack represents a stack of program counters.
type stack []uintptr

func (s *stack) Format(st fmt.State, verb rune) {
	switch verb {
	case 'v':
		switch {
		case st.Flag('+'):
			for _, pc := range *s {
				f := Frame(pc)
				fmt.Fprintf(st, "\n%+v", f)
			}
		}
	}
}

func (s *stack) StackTrace() StackTrace {
	f := make([]Frame, len(*s))
	for i := 0; i < len(f); i++ {
		f[i] = Frame((*s)[i])
	}
	return f
}

func callers() *stack {
	const depth = 32
	var pcs [depth]uintptr
	n := runtime.Callers(3, pcs[:])
	var st stack = pcs[0:n]
	return &st
}

// funcname removes the path prefix component of a function's name reported by func.Name().
func funcname(name string) string {
	i := strings.LastIndex(name, "/")
	name = name[i+1:]
	i = strings.Index(name, ".")
	return name[i+1:]
}

func trimGOPATH(name, file string) string {
	// Here we want to get the source file path relative to the compile time
	// GOPATH. As of Go 1.6.x there is no direct way to know the compiled
	// GOPATH at runtime, but we can infer the number of path segments in the
	// GOPATH. We note that fn.Name() returns the function name qualified by
	// the import path, which does not include the GOPATH. Thus we can trim
	// segments from the beginning of the file path until the number of path
	// separators remaining is one more than the number of path separators in
	// the function name. For example, given:
	//
	//    GOPATH     /home/user
	//    file       /home/user/src/pkg/sub/file.go
	//    fn.Name()  pkg/sub.Type.Method
	//
	// We want to produce:
	//
	//    pkg/sub/file.go
	//
	// From this we can easily see that fn.Name() has one less path separator
	// than our desired output. We count separators from the end of the file
	// path until we find two more than in the function name, and then move
	// one character forward to preserve the initial path segment without a
	// leading separator.
	const sep = "/"
	goal := strings.Count(name, sep) + 2
	i := len(file)
	for n := 0; n < goal; n++ {
		i = strings.LastIndex(file[:i], sep)
		if i == -1 {
			// not enough separators found, set i so that the slice expression
			// below leaves file unmodified
			i = -len(sep)
			break
		}
	}
	// get back to 0 or trim the leading separator
	file = file[i+len(sep):]
	return file
}

Sia-1.3.0/vendor/github.com/pkg/errors/stack_test.go
package errors

import (
	"fmt"
	"runtime"
	"testing"
)

var initpc, _, _, _ = runtime.Caller(0)

func TestFrameLine(t *testing.T) {
	var tests = []struct {
		Frame
		want int
	}{{
		Frame(initpc),
		9,
	}, {
		func() Frame {
			var pc, _, _, _ = runtime.Caller(0)
			return Frame(pc)
		}(),
		20,
	}, {
		func() Frame {
			var pc, _, _, _ = runtime.Caller(1)
			return Frame(pc)
		}(),
		28,
	}, {
		Frame(0), // invalid PC
		0,
	}}

	for _, tt := range tests {
		got := tt.Frame.line()
		want := tt.want
		if want != got {
			t.Errorf("Frame(%v): want: %v, got: %v", uintptr(tt.Frame), want, got)
		}
	}
}

type X struct{}

func (x X) val() Frame {
	var pc, _, _, _ = runtime.Caller(0)
	return Frame(pc)
}

func (x *X) ptr() Frame {
	var pc, _, _, _ = runtime.Caller(0)
	return Frame(pc)
}

func TestFrameFormat(t *testing.T) {
	var tests = []struct {
		Frame
		format string
		want   string
	}{{
		Frame(initpc),
		"%s",
		"stack_test.go",
	}, {
		Frame(initpc),
		"%+s",
		"github.com/pkg/errors.init\n" +
			"\t.+/github.com/pkg/errors/stack_test.go",
	}, {
		Frame(0),
		"%s",
		"unknown",
	}, {
		Frame(0),
		"%+s",
		"unknown",
	}, {
		Frame(initpc),
		"%d",
		"9",
	}, {
		Frame(0),
		"%d",
		"0",
	}, {
		Frame(initpc),
		"%n",
		"init",
	}, {
		func() Frame {
			var x X
			return x.ptr()
		}(),
		"%n",
		`\(\*X\).ptr`,
	}, {
		func() Frame {
			var x X
			return x.val()
		}(),
		"%n",
		"X.val",
	}, {
		Frame(0),
		"%n",
		"",
	}, {
		Frame(initpc),
		"%v",
		"stack_test.go:9",
	}, {
		Frame(initpc),
		"%+v",
		"github.com/pkg/errors.init\n" +
			"\t.+/github.com/pkg/errors/stack_test.go:9",
	}, {
		Frame(0),
		"%v",
		"unknown:0",
	}}

	for i, tt := range tests {
		testFormatRegexp(t, i, tt.Frame, tt.format, tt.want)
	}
}

func TestFuncname(t *testing.T) {
	tests := []struct {
		name, want string
	}{
		{"", ""},
		{"runtime.main", "main"},
		{"github.com/pkg/errors.funcname", "funcname"},
		{"funcname", "funcname"},
		{"io.copyBuffer", "copyBuffer"},
		{"main.(*R).Write", "(*R).Write"},
	}

	for _, tt := range tests {
		got := funcname(tt.name)
		want := tt.want
		if got != want {
			t.Errorf("funcname(%q): want: %q, got %q", tt.name, want, got)
		}
	}
}

func TestTrimGOPATH(t *testing.T) {
	var tests = []struct {
		Frame
		want string
	}{{
		Frame(initpc),
		"github.com/pkg/errors/stack_test.go",
	}}

	for i, tt := range tests {
		pc := tt.Frame.pc()
		fn := runtime.FuncForPC(pc)
		file, _ := fn.FileLine(pc)
		got := trimGOPATH(fn.Name(), file)
		testFormatRegexp(t, i, got, "%s", tt.want)
	}
}

func TestStackTrace(t *testing.T) {
	tests := []struct {
		err  error
		want []string
	}{{
		New("ooh"), []string{
			"github.com/pkg/errors.TestStackTrace\n" +
				"\t.+/github.com/pkg/errors/stack_test.go:172",
		},
	}, {
		Wrap(New("ooh"), "ahh"), []string{
			"github.com/pkg/errors.TestStackTrace\n" +
				"\t.+/github.com/pkg/errors/stack_test.go:177", // this is the stack of Wrap, not New
		},
	}, {
		Cause(Wrap(New("ooh"), "ahh")), []string{
			"github.com/pkg/errors.TestStackTrace\n" +
				"\t.+/github.com/pkg/errors/stack_test.go:182", // this is the stack of New
		},
	}, {
		func() error { return New("ooh") }(), []string{
			`github.com/pkg/errors.(func·009|TestStackTrace.func1)` +
				"\n\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New
			"github.com/pkg/errors.TestStackTrace\n" +
				"\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New's caller
		},
	}, {
		Cause(func() error {
			return func() error {
				return Errorf("hello %s", fmt.Sprintf("world"))
			}()
		}()), []string{
			`github.com/pkg/errors.(func·010|TestStackTrace.func2.1)` +
				"\n\t.+/github.com/pkg/errors/stack_test.go:196", // this is the stack of Errorf
			`github.com/pkg/errors.(func·011|TestStackTrace.func2)` +
				"\n\t.+/github.com/pkg/errors/stack_test.go:197", // this is the stack of Errorf's caller
			"github.com/pkg/errors.TestStackTrace\n" +
				"\t.+/github.com/pkg/errors/stack_test.go:198", // this is the stack of Errorf's caller's caller
		},
	}}

	for i, tt := range tests {
		x, ok := tt.err.(interface {
			StackTrace() StackTrace
		})
		if !ok {
			t.Errorf("expected %#v to implement StackTrace() StackTrace", tt.err)
			continue
		}
		st := x.StackTrace()
		for j, want := range tt.want {
			testFormatRegexp(t, i, st[j], "%+v", want)
		}
	}
}

func stackTrace() StackTrace {
	const depth = 8
	var pcs [depth]uintptr
	n := runtime.Callers(1, pcs[:])
	var st stack = pcs[0:n]
	return st.StackTrace()
}

func TestStackTraceFormat(t *testing.T) {
	tests := []struct {
		StackTrace
		format string
		want   string
	}{{
		nil,
		"%s",
		`\[\]`,
	}, {
		nil,
		"%v",
		`\[\]`,
	}, {
		nil,
		"%+v",
		"",
	}, {
		nil,
		"%#v",
		`\[\]errors.Frame\(nil\)`,
	}, {
		make(StackTrace, 0),
		"%s",
		`\[\]`,
	}, {
		make(StackTrace, 0),
		"%v",
		`\[\]`,
	}, {
		make(StackTrace, 0),
		"%+v",
		"",
	}, {
		make(StackTrace, 0),
		"%#v",
		`\[\]errors.Frame{}`,
	}, {
		stackTrace()[:2],
		"%s",
		`\[stack_test.go stack_test.go\]`,
	}, {
		stackTrace()[:2],
		"%v",
		`\[stack_test.go:225 stack_test.go:272\]`,
	}, {
		stackTrace()[:2],
		"%+v",
		"\n" +
			"github.com/pkg/errors.stackTrace\n" +
			"\t.+/github.com/pkg/errors/stack_test.go:225\n" +
			"github.com/pkg/errors.TestStackTraceFormat\n" +
			"\t.+/github.com/pkg/errors/stack_test.go:276",
	}, {
		stackTrace()[:2],
		"%#v",
		`\[\]errors.Frame{stack_test.go:225, stack_test.go:284}`,
	}}

	for i, tt := range tests {
		testFormatRegexp(t, i, tt.StackTrace, tt.format, tt.want)
	}
}

Sia-1.3.0/vendor/github.com/xtaci/
Sia-1.3.0/vendor/github.com/xtaci/smux/
Sia-1.3.0/vendor/github.com/xtaci/smux/.gitignore
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
Sia-1.3.0/vendor/github.com/xtaci/smux/.travis.yml
language: go
go:
  - tip

before_install:
  - go get -t -v ./...

install:
  - go get github.com/xtaci/smux

script:
  - go test -coverprofile=coverage.txt -covermode=atomic -bench .

after_success:
  - bash <(curl -s https://codecov.io/bash)

Sia-1.3.0/vendor/github.com/xtaci/smux/LICENSE
MIT License

Copyright (c) 2016-2017 Daniel Fu

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

Sia-1.3.0/vendor/github.com/xtaci/smux/README.md
smux [![GoDoc][1]][2] [![MIT licensed][3]][4] [![Build Status][5]][6] [![Go Report Card][7]][8] [![Coverage Status][9]][10]

smux

[1]: https://godoc.org/github.com/xtaci/smux?status.svg
[2]: https://godoc.org/github.com/xtaci/smux
[3]: https://img.shields.io/badge/license-MIT-blue.svg
[4]: LICENSE
[5]: https://travis-ci.org/xtaci/smux.svg?branch=master
[6]: https://travis-ci.org/xtaci/smux
[7]: https://goreportcard.com/badge/github.com/xtaci/smux
[8]: https://goreportcard.com/report/github.com/xtaci/smux
[9]: https://codecov.io/gh/xtaci/smux/branch/master/graph/badge.svg
[10]: https://codecov.io/gh/xtaci/smux

## Introduction

Smux (**S**imple **MU**ltiple**X**ing) is a multiplexing library for Golang. It relies on an underlying connection to provide reliability and ordering, such as TCP or [KCP](https://github.com/xtaci/kcp-go), and provides stream-oriented multiplexing. The original intention of this library is to power the connection management for [kcp-go](https://github.com/xtaci/kcp-go).

## Features

1. Tiny, less than 600 LOC.
2. ***Token bucket*** controlled receiving, which provides a smoother bandwidth graph (see picture below).
3. Session-wide receive buffer, shared among streams, with tightly controlled overall memory usage.
4. Minimized header (8 bytes), maximized payload.
5. Well-tested on millions of devices in [kcptun](https://github.com/xtaci/kcptun).

![smooth bandwidth curve](curve.jpg)

## Documentation

For complete documentation, see the associated [Godoc](https://godoc.org/github.com/xtaci/smux).

## Specification

```
VERSION(1B) | CMD(1B) | LENGTH(2B) | STREAMID(4B) | DATA(LENGTH)
```

## Usage

The API of smux is mostly taken from [yamux](https://github.com/hashicorp/yamux).

```go
func client() {
	// Get a TCP connection
	conn, err := net.Dial(...)
	if err != nil {
		panic(err)
	}

	// Setup client side of smux
	session, err := smux.Client(conn, nil)
	if err != nil {
		panic(err)
	}

	// Open a new stream
	stream, err := session.OpenStream()
	if err != nil {
		panic(err)
	}

	// Stream implements io.ReadWriteCloser
	stream.Write([]byte("ping"))
}

func server() {
	// Accept a TCP connection
	conn, err := listener.Accept()
	if err != nil {
		panic(err)
	}

	// Setup server side of smux
	session, err := smux.Server(conn, nil)
	if err != nil {
		panic(err)
	}

	// Accept a stream
	stream, err := session.AcceptStream()
	if err != nil {
		panic(err)
	}

	// Listen for a message
	buf := make([]byte, 4)
	stream.Read(buf)
}
```

## Status

Stable

Sia-1.3.0/vendor/github.com/xtaci/smux/curve.jpg
[binary JPEG data omitted]
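For reference, here is a minimal, hypothetical sketch of how the 8-byte header from the README's Specification section could be encoded. The field offsets and little-endian byte order follow the rawHeader accessors in frame.go below; the helper encodeHeader and its hard-coded constants are illustrative only and are not part of the smux API.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeHeader lays out VERSION(1B) | CMD(1B) | LENGTH(2B) | STREAMID(4B)
// followed by DATA(LENGTH). Multi-byte fields are little-endian, matching
// the rawHeader accessors in frame.go. This is a hypothetical helper, not
// part of the smux package.
func encodeHeader(ver, cmd byte, sid uint32, data []byte) []byte {
	buf := make([]byte, 8+len(data))
	buf[0] = ver // VERSION
	buf[1] = cmd // CMD
	binary.LittleEndian.PutUint16(buf[2:4], uint16(len(data))) // LENGTH
	binary.LittleEndian.PutUint32(buf[4:8], sid)               // STREAMID
	copy(buf[8:], data) // DATA
	return buf
}

func main() {
	// version 1, cmdPSH (2 in frame.go), stream 3, payload "ping"
	fmt.Printf("% x\n", encodeHeader(1, 2, 3, []byte("ping")))
	// prints: 01 02 04 00 03 00 00 00 70 69 6e 67
}
```

Note that headerSize in frame.go is likewise 8 bytes (1+1+2+4), so the payload starts at offset 8.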
Sia-1.3.0/vendor/github.com/xtaci/smux/frame.go
package smux

import (
	"encoding/binary"
	"fmt"
)

const (
	version = 1
)

const ( // cmds
	cmdSYN byte = iota // stream open
	cmdFIN             // stream close, a.k.a EOF mark
	cmdPSH             // data push
	cmdNOP             // no operation
)

const (
	sizeOfVer    = 1
	sizeOfCmd    = 1
	sizeOfLength = 2
	sizeOfSid    = 4
	headerSize   = sizeOfVer + sizeOfCmd + sizeOfSid + sizeOfLength
)

// Frame defines a packet from or to be multiplexed into a single connection
type Frame struct {
	ver  byte
	cmd  byte
	sid  uint32
	data []byte
}

func newFrame(cmd byte, sid uint32) Frame {
	return Frame{ver: version, cmd: cmd, sid: sid}
}

type rawHeader []byte

func (h rawHeader) Version() byte {
	return h[0]
}

func (h rawHeader) Cmd() byte {
	return h[1]
}

func (h rawHeader) Length() uint16 {
	return binary.LittleEndian.Uint16(h[2:])
}

func (h rawHeader) StreamID() uint32 {
	return binary.LittleEndian.Uint32(h[4:])
}

func (h rawHeader) String() string {
	return fmt.Sprintf("Version:%d Cmd:%d StreamID:%d Length:%d",
		h.Version(), h.Cmd(), h.StreamID(), h.Length())
}

Sia-1.3.0/vendor/github.com/xtaci/smux/mux.go
package smux

import (
	"fmt"
	"io"
	"time"

	"github.com/pkg/errors"
)

// Config is used to tune the Smux session
type Config struct {
	// KeepAliveInterval is how often to send a NOP command to the remote
	KeepAliveInterval time.Duration

	// KeepAliveTimeout is how long the session waits
	// before it is closed if no data has arrived
	KeepAliveTimeout time.Duration

	// MaxFrameSize is used to control the maximum
	// frame size to send to the remote
	MaxFrameSize int

	// MaxReceiveBuffer is used to control the maximum
	// amount of data in the buffer pool
	MaxReceiveBuffer int
}

// DefaultConfig is used to return a default configuration
func DefaultConfig() *Config {
	return &Config{
		KeepAliveInterval: 10 * time.Second,
* time.Second, MaxFrameSize: 4096, MaxReceiveBuffer: 4194304, } } // VerifyConfig is used to verify the sanity of configuration func VerifyConfig(config *Config) error { if config.KeepAliveInterval == 0 { return errors.New("keep-alive interval must be positive") } if config.KeepAliveTimeout < config.KeepAliveInterval { return fmt.Errorf("keep-alive timeout must be larger than keep-alive interval") } if config.MaxFrameSize <= 0 { return errors.New("max frame size must be positive") } if config.MaxFrameSize > 65535 { return errors.New("max frame size must not be larger than 65535") } if config.MaxReceiveBuffer <= 0 { return errors.New("max receive buffer must be positive") } return nil } // Server is used to initialize a new server-side connection. func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) { if config == nil { config = DefaultConfig() } if err := VerifyConfig(config); err != nil { return nil, err } return newSession(config, conn, false), nil } // Client is used to initialize a new client-side connection. func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) { if config == nil { config = DefaultConfig() } if err := VerifyConfig(config); err != nil { return nil, err } return newSession(config, conn, true), nil } Sia-1.3.0/vendor/github.com/xtaci/smux/mux.jpg000066400000000000000000000140671313565667000212260ustar00rootroot00000000000000JFIFC    $.' ",#(7),01444'9=82<.342C  2!!22222222222222222222222222222222222222222222222222R 0i͊'.% 4FvB˜k@Bi`(@>^ fepYC4R1~3^ P 2HLjfh` x@ѧ{")Ɩpȍ \\2#E.zk@Rig 5X"SgyN,g! 2E &h\&px02r".s3u@>b<ˉ[&CHi-Ј&؎5#cu+wZd0<֔qU.{-Omш$N/(0f匬K1 M4 g: #4.l6K2AkDh d~CS0(3.d'4Rm&j ր(@TbU 9a-@3-'4VJq.@Saf,Mh BT YT-"U x0Ky{Q- 7@P0461!`ke%,}hL-7K%Vۍ,aJ'u/ >% * yw`~%C㕲c)d{r9F X0 .w Fko<"`#"z>7g\nθݝA@TSRVJ\\An3w<°dv39^ʋm}Kv(ƳXxb%e0#p,ɰ6a]000ʊEEm]8XXXI4wYIy\s,Z[^s[dXXXnѳLooe;0#OLW&+ƓWtm*ϱ* >4LW&*YԳusw}q;P-C%>sS߷k>'u]װh)'\$z]ku?EC_A2WS'Pu(p:hnO3/qcls i1-mKkwh%S:ɯLVy]\1w8(]3lnЍgq-c'K\0"xZ*jٛlRyu־7q|E ɾ]frr=rg^@ FG oڵ0w-.G%} " d8)%R3\$l"\݀>'ti-&WmS˧:3Gdm#UUTo0gץz)G0$x#)9KP4kƫƫƫƫƫƫƫƫƫƫƫƫƫƫƫƫƫƫƫƫƫƫƫƫƫƫƫƫƫƫƫƫƨp]+t0HpqD[#&ܬ)Wq &z2@eL5)~///1AOIM?ysI pbkɥV"2- g[KZm&فZXD_ݓ8Q3dspJpJpJ c5_ͺ?EC3gor%r%r%AC7Ux/K}_gvxWgzw]+t?ECJ]+tov 8{)9#xW Tͼj6?S lz?W UQUsþS؆ SF {KMN~z:ig᳊ 4BGk^b}-kV aaxkд| ^1B 8^M'hxl6qPLe6W l;ԤI}29Іczxޡ@VE !1Q "@APUa#02SqBRTr$4`cd ?b(K'b[n=O I>Lq -j&/Hn,7R3>jȶ }𿨋PrSIWQ*g`B̭͓]Ap>HRP(ME^ #c%TC'"5Pڋ(}*'z$`L[Yխ#ҮrisHO} 1xRw߀ĴڡW3sr!_]F, )u(Uq!b5 @RO|#:[pnFJդzU^#ҮrOL/\|>*<{!%䷯8iM)16"3&+Fy3'ė88cx5cj!T]]\) ]@  YuŮvc ` W{b3R_.: a6"VŚժt8ハ<6 V~?[!lShi.Gw4Oa#PkdgL4$5;|E[!lUOQe*9!ǁKȈ[PZJ;VҠ?#gxߜ ;KZ|ƺZJ;VҬme DV;FD_ȎqoCA;S&){\x'nTkZOfJnTkR4G-w `{9_78 $)Zn}򴠗0і2FCZjqC-7mHFJ FjbE>/(qSp IXr42RpQQ M;6!_$Wu0M4)W)GX55{]AZ[ bPj4(} Wۊ9x?HieNp5nTkZOfJnTkW]+jp _8[ej8 HMOfCH)A߳WWiLAK[IY~wVj$Vw4AtSnHم}s>#SB-J-V5xI q6 (}|Y{\j Hd= ). #x2,NI!N' %i+ u{VvX)1X2TS)0Myw'#>oMT4($Hb2AA%8Ѹ؁WtFCX -dZT*K1u k~39^[)7)hnAښ/)m--+IEg_+^CH*-N2髌d|zbcW۰— +z;UMdM kmʖY#`TҊ*<.O] 5BdкsY=t.O] 5BdкsY=t.O] 5BdкsY=t.O] 5BdкsY=t.O] 5BdкsY=t.O] 5BdкsY=t.O] 5BdкsY=t.O] 5BdкsY=t.O]4Mx[Q-lA'.(`k]~ 8T1Ĺ%q']p0HD;)eJ IHg 'j9NGzrrilED-Oӆ8vPמyҏ2RPsY=t.O] 5BNVK#ޓ7'pMVF ̆dT[PV ?JrbNCIƦi0 0ea(0О@go(:a>lIuҬ@ )[orPhu x5=ɫK-8Xl*VqW&a |dq%ZKIDGP==ߜ!yGvb+_؊XuIaH4H_Q#!yGw[)(vJm>ߚ<Fsj<FǍ(ܑ=ߜ ;Fl>n0Xlpmѫmk KokܣpD \ !LiX^Hd58p2jH? 
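For orientation, a minimal sketch (not part of the vendored sources) of how the Server/Client API above is typically driven over a TCP connection; the address and the panic-based error handling are illustrative only:

package main

import (
	"net"

	"github.com/xtaci/smux"
)

func main() {
	// Dial a plain TCP connection and layer a smux client session on it.
	conn, err := net.Dial("tcp", "127.0.0.1:9000") // illustrative address
	if err != nil {
		panic(err)
	}
	session, err := smux.Client(conn, nil) // nil selects DefaultConfig
	if err != nil {
		panic(err)
	}
	defer session.Close()

	// Open a logical stream; it behaves like a net.Conn.
	stream, err := session.OpenStream()
	if err != nil {
		panic(err)
	}
	stream.Write([]byte("ping"))
	stream.Close()
}

The server side is symmetric: wrap the accepted net.Conn with smux.Server and pull streams off with AcceptStream, as handleConnection in the tests below does.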
Sia-1.3.0/vendor/github.com/xtaci/smux/mux_test.go000066400000000000000000000021321313565667000221010ustar00rootroot00000000000000package smux

import (
	"bytes"
	"testing"
)

type buffer struct {
	bytes.Buffer
}

func (b *buffer) Close() error {
	b.Buffer.Reset()
	return nil
}

func TestConfig(t *testing.T) {
	VerifyConfig(DefaultConfig())

	config := DefaultConfig()
	config.KeepAliveInterval = 0
	err := VerifyConfig(config)
	t.Log(err)
	if err == nil {
		t.Fatal(err)
	}

	config = DefaultConfig()
	config.KeepAliveInterval = 10
	config.KeepAliveTimeout = 5
	err = VerifyConfig(config)
	t.Log(err)
	if err == nil {
		t.Fatal(err)
	}

	config = DefaultConfig()
	config.MaxFrameSize = 0
	err = VerifyConfig(config)
	t.Log(err)
	if err == nil {
		t.Fatal(err)
	}

	config = DefaultConfig()
	config.MaxFrameSize = 65536
	err = VerifyConfig(config)
	t.Log(err)
	if err == nil {
		t.Fatal(err)
	}

	config = DefaultConfig()
	config.MaxReceiveBuffer = 0
	err = VerifyConfig(config)
	t.Log(err)
	if err == nil {
		t.Fatal(err)
	}

	var bts buffer
	if _, err := Server(&bts, config); err == nil {
		t.Fatal("server started with wrong config")
	}
	if _, err := Client(&bts, config); err == nil {
		t.Fatal("client started with wrong config")
	}
}
Sia-1.3.0/vendor/github.com/xtaci/smux/session.go000066400000000000000000000172151313565667000217230ustar00rootroot00000000000000package smux

import (
	"encoding/binary"
	"io"
	"sync"
	"sync/atomic"
	"time"

	"github.com/pkg/errors"
)

const (
	defaultAcceptBacklog = 1024
)

const (
	errBrokenPipe      = "broken pipe"
	errInvalidProtocol = "invalid protocol version"
	errGoAway          = "stream id overflows, should start a new connection"
)

type writeRequest struct {
	frame  Frame
	result chan writeResult
}

type writeResult struct {
	n   int
	err error
}

// Session defines a multiplexed connection for streams
type Session struct {
	conn io.ReadWriteCloser

	config           *Config
	nextStreamID     uint32 // next stream identifier
	nextStreamIDLock sync.Mutex

	bucket       int32         // token bucket
	bucketNotify chan struct{} // used for waiting for tokens

	streams    map[uint32]*Stream // all streams in this session
	streamLock sync.Mutex         // locks streams

	die     chan struct{} // flag session has died
	dieLock sync.Mutex

	chAccepts chan *Stream

	dataReady int32 // flag data has arrived

	goAway int32 // flag id exhausted

	deadline atomic.Value

	writes chan writeRequest
}

func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session {
	s := new(Session)
	s.die = make(chan struct{})
	s.conn = conn
	s.config = config
	s.streams = make(map[uint32]*Stream)
	s.chAccepts = make(chan *Stream, defaultAcceptBacklog)
	s.bucket = int32(config.MaxReceiveBuffer)
	s.bucketNotify = make(chan struct{}, 1)
	s.writes = make(chan writeRequest)

	if client {
		s.nextStreamID = 1
	} else {
		s.nextStreamID = 0
	}

	go s.recvLoop()
	go s.sendLoop()
	go s.keepalive()
	return s
}

// OpenStream is used to create a new stream
func (s *Session) OpenStream() (*Stream, error) {
	if s.IsClosed() {
		return nil, errors.New(errBrokenPipe)
	}

	// generate stream id
	s.nextStreamIDLock.Lock()
	if s.goAway > 0 {
		s.nextStreamIDLock.Unlock()
		return nil, errors.New(errGoAway)
	}

	s.nextStreamID += 2
	sid := s.nextStreamID
	if sid == sid%2 { // stream-id overflows
		s.goAway = 1
		s.nextStreamIDLock.Unlock()
		return nil, errors.New(errGoAway)
	}
	s.nextStreamIDLock.Unlock()

	stream := newStream(sid, s.config.MaxFrameSize, s)

	if _, err := s.writeFrame(newFrame(cmdSYN, sid)); err != nil {
		return nil, errors.Wrap(err, "writeFrame")
	}

	s.streamLock.Lock()
	s.streams[sid] = stream
	s.streamLock.Unlock()
	return stream, nil
}

// AcceptStream is used to block until the next available stream
// is ready to be accepted.
func (s *Session) AcceptStream() (*Stream, error) {
	var deadline <-chan time.Time
	if d, ok := s.deadline.Load().(time.Time); ok && !d.IsZero() {
		timer := time.NewTimer(d.Sub(time.Now()))
		defer timer.Stop()
		deadline = timer.C
	}
	select {
	case stream := <-s.chAccepts:
		return stream, nil
	case <-deadline:
		return nil, errTimeout
	case <-s.die:
		return nil, errors.New(errBrokenPipe)
	}
}

// Close is used to close the session and all streams.
func (s *Session) Close() (err error) {
	s.dieLock.Lock()

	select {
	case <-s.die:
		s.dieLock.Unlock()
		return errors.New(errBrokenPipe)
	default:
		close(s.die)
		s.dieLock.Unlock()
		s.streamLock.Lock()
		for k := range s.streams {
			s.streams[k].sessionClose()
		}
		s.streamLock.Unlock()
		s.notifyBucket()
		return s.conn.Close()
	}
}

// notifyBucket notifies recvLoop that bucket is available
func (s *Session) notifyBucket() {
	select {
	case s.bucketNotify <- struct{}{}:
	default:
	}
}

// IsClosed does a safe check to see if we have shutdown
func (s *Session) IsClosed() bool {
	select {
	case <-s.die:
		return true
	default:
		return false
	}
}

// NumStreams returns the number of currently open streams
func (s *Session) NumStreams() int {
	if s.IsClosed() {
		return 0
	}
	s.streamLock.Lock()
	defer s.streamLock.Unlock()
	return len(s.streams)
}

// SetDeadline sets a deadline used by Accept* calls.
// A zero time value disables the deadline.
func (s *Session) SetDeadline(t time.Time) error {
	s.deadline.Store(t)
	return nil
}

// streamClosed notifies the session that a stream has closed
func (s *Session) streamClosed(sid uint32) {
	s.streamLock.Lock()
	if n := s.streams[sid].recycleTokens(); n > 0 { // return remaining tokens to the bucket
		if atomic.AddInt32(&s.bucket, int32(n)) > 0 {
			s.notifyBucket()
		}
	}
	delete(s.streams, sid)
	s.streamLock.Unlock()
}

// returnTokens is called by a stream to return tokens after a read
func (s *Session) returnTokens(n int) {
	if atomic.AddInt32(&s.bucket, int32(n)) > 0 {
		s.notifyBucket()
	}
}

// readFrame reads a frame from the underlying connection;
// its data points into the input buffer
func (s *Session) readFrame(buffer []byte) (f Frame, err error) {
	if _, err := io.ReadFull(s.conn, buffer[:headerSize]); err != nil {
		return f, errors.Wrap(err, "readFrame")
	}

	dec := rawHeader(buffer)
	if dec.Version() != version {
		return f, errors.New(errInvalidProtocol)
	}

	f.ver = dec.Version()
	f.cmd = dec.Cmd()
	f.sid = dec.StreamID()
	if length := dec.Length(); length > 0 {
		if _, err := io.ReadFull(s.conn, buffer[headerSize:headerSize+length]); err != nil {
			return f, errors.Wrap(err, "readFrame")
		}
		f.data = buffer[headerSize : headerSize+length]
	}
	return f, nil
}

// recvLoop keeps reading from the underlying connection while tokens are available
func (s *Session) recvLoop() {
	buffer := make([]byte, (1<<16)+headerSize)
	for {
		for atomic.LoadInt32(&s.bucket) <= 0 && !s.IsClosed() {
			<-s.bucketNotify
		}

		if f, err := s.readFrame(buffer); err == nil {
			atomic.StoreInt32(&s.dataReady, 1)

			switch f.cmd {
			case cmdNOP:
			case cmdSYN:
				s.streamLock.Lock()
				if _, ok := s.streams[f.sid]; !ok {
					stream := newStream(f.sid, s.config.MaxFrameSize, s)
					s.streams[f.sid] = stream
					select {
					case s.chAccepts <- stream:
					case <-s.die:
					}
				}
				s.streamLock.Unlock()
			case cmdFIN:
				s.streamLock.Lock()
				if stream, ok := s.streams[f.sid]; ok {
					stream.markRST()
					stream.notifyReadEvent()
				}
				s.streamLock.Unlock()
			case cmdPSH:
				s.streamLock.Lock()
				if stream, ok := s.streams[f.sid]; ok {
					atomic.AddInt32(&s.bucket, -int32(len(f.data)))
					stream.pushBytes(f.data)
					stream.notifyReadEvent()
				}
				s.streamLock.Unlock()
			default:
				s.Close()
				return
			}
		} else {
			s.Close()
			return
		}
	}
}

func (s *Session) keepalive() {
	tickerPing := time.NewTicker(s.config.KeepAliveInterval)
	tickerTimeout := time.NewTicker(s.config.KeepAliveTimeout)
	defer tickerPing.Stop()
	defer tickerTimeout.Stop()
	for {
		select {
		case <-tickerPing.C:
			s.writeFrame(newFrame(cmdNOP, 0))
			s.notifyBucket() // force a signal to the recvLoop
		case <-tickerTimeout.C:
			if !atomic.CompareAndSwapInt32(&s.dataReady, 1, 0) {
				s.Close()
				return
			}
		case <-s.die:
			return
		}
	}
}

func (s *Session) sendLoop() {
	buf := make([]byte, (1<<16)+headerSize)
	for {
		select {
		case <-s.die:
			return
		case request, ok := <-s.writes:
			if !ok {
				continue
			}
			buf[0] = request.frame.ver
			buf[1] = request.frame.cmd
			binary.LittleEndian.PutUint16(buf[2:], uint16(len(request.frame.data)))
			binary.LittleEndian.PutUint32(buf[4:], request.frame.sid)
			copy(buf[headerSize:], request.frame.data)
			n, err := s.conn.Write(buf[:headerSize+len(request.frame.data)])

			n -= headerSize
			if n < 0 {
				n = 0
			}

			result := writeResult{
				n:   n,
				err: err,
			}

			request.result <- result
			close(request.result)
		}
	}
}

// writeFrame writes the frame to the underlying connection
// and returns the number of bytes written if successful
func (s *Session) writeFrame(f Frame) (n int, err error) {
	req := writeRequest{
		frame:  f,
		result: make(chan writeResult, 1),
	}
	select {
	case <-s.die:
		return 0, errors.New(errBrokenPipe)
	case s.writes <- req:
	}

	result := <-req.result
	return result.n, result.err
}
Sia-1.3.0/vendor/github.com/xtaci/smux/session_test.go000066400000000000000000000322041313565667000227550ustar00rootroot00000000000000
package smux

import (
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	"io"
	"log"
	"math/rand"
	"net"
	"net/http"
	_ "net/http/pprof"
	"strings"
	"sync"
	"testing"
	"time"
)

func init() {
	go func() {
		log.Println(http.ListenAndServe("localhost:6060", nil))
	}()
	log.SetFlags(log.LstdFlags | log.Lshortfile)
	ln, err := net.Listen("tcp", "127.0.0.1:19999")
	if err != nil {
		// handle error
		panic(err)
	}
	go func() {
		for {
			conn, err := ln.Accept()
			if err != nil {
				// handle error
			}
			go handleConnection(conn)
		}
	}()
}

func handleConnection(conn net.Conn) {
	session, _ := Server(conn, nil)
	for {
		if stream, err := session.AcceptStream(); err == nil {
			go func(s io.ReadWriteCloser) {
				buf := make([]byte, 65536)
				for {
					n, err := s.Read(buf)
					if err != nil {
						return
					}
					s.Write(buf[:n])
				}
			}(stream)
		} else {
			return
		}
	}
}

func TestEcho(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	const N = 100
	buf := make([]byte, 10)
	var sent string
	var received string
	for i := 0; i < N; i++ {
		msg := fmt.Sprintf("hello%v", i)
		stream.Write([]byte(msg))
		sent += msg
		if n, err := stream.Read(buf); err != nil {
			t.Fatal(err)
		} else {
			received += string(buf[:n])
		}
	}
	if sent != received {
		t.Fatal("data mismatch")
	}
	session.Close()
}

func TestSpeed(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	t.Log(stream.LocalAddr(), stream.RemoteAddr())

	start := time.Now()
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		buf := make([]byte, 1024*1024)
		nrecv := 0
		for {
			n, err := stream.Read(buf)
			if err != nil {
				t.Fatal(err)
				break
			} else {
				nrecv += n
				if nrecv == 4096*4096 {
					break
				}
			}
		}
		stream.Close()
		t.Log("time for 16MB rtt", time.Since(start))
		wg.Done()
	}()
	msg := make([]byte, 8192)
	for i := 0; i < 2048; i++ {
		stream.Write(msg)
	}
	wg.Wait()
	session.Close()
}

func TestParallel(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)

	par := 1000
	messages := 100
	var wg sync.WaitGroup
	wg.Add(par)
	for i := 0; i < par; i++ {
		stream, _ := session.OpenStream()
		go func(s *Stream) {
			buf := make([]byte, 20)
			for j := 0; j < messages; j++ {
				msg := fmt.Sprintf("hello%v", j)
				s.Write([]byte(msg))
				if _, err := s.Read(buf); err != nil {
					break
				}
			}
			s.Close()
			wg.Done()
		}(stream)
	}
	t.Log("created", session.NumStreams(), "streams")
	wg.Wait()
	session.Close()
}

func TestCloseThenOpen(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	session.Close()
	if _, err := session.OpenStream(); err == nil {
		t.Fatal("opened after close")
	}
}

func TestStreamDoubleClose(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	stream.Close()
	if err := stream.Close(); err == nil {
		t.Log("double close doesn't return error")
	}
	session.Close()
}

func TestConcurrentClose(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	numStreams := 100
	streams := make([]*Stream, 0, numStreams)
	var wg sync.WaitGroup
	wg.Add(numStreams)
	for i := 0; i < 100; i++ {
		stream, _ := session.OpenStream()
		streams = append(streams, stream)
	}
	for _, s := range streams {
		stream := s
		go func() {
			stream.Close()
			wg.Done()
		}()
	}
	session.Close()
	wg.Wait()
}

func TestTinyReadBuffer(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	const N = 100
	tinybuf := make([]byte, 6)
	var sent string
	var received string
	for i := 0; i < N; i++ {
		msg := fmt.Sprintf("hello%v", i)
		sent += msg
		nsent, err := stream.Write([]byte(msg))
		if err != nil {
			t.Fatal("cannot write")
		}
		nrecv := 0
		for nrecv < nsent {
			if n, err := stream.Read(tinybuf); err == nil {
				nrecv += n
				received += string(tinybuf[:n])
			} else {
				t.Fatal("cannot read with tiny buffer")
			}
		}
	}

	if sent != received {
		t.Fatal("data mismatch")
	}
	session.Close()
}

func TestIsClose(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	session.Close()
	if session.IsClosed() != true {
		t.Fatal("still open after close")
	}
}

func TestKeepAliveTimeout(t *testing.T) {
	ln, err := net.Listen("tcp", "127.0.0.1:29999")
	if err != nil {
		// handle error
		panic(err)
	}
	go func() {
		ln.Accept()
	}()

	cli, err := net.Dial("tcp", "127.0.0.1:29999")
	if err != nil {
		t.Fatal(err)
	}

	config := DefaultConfig()
	config.KeepAliveInterval = time.Second
	config.KeepAliveTimeout = 2 * time.Second
	session, _ := Client(cli, config)
	<-time.After(3 * time.Second)
	if session.IsClosed() != true {
		t.Fatal("keepalive-timeout failed")
	}
}

func TestServerEcho(t *testing.T) {
	ln, err := net.Listen("tcp", "127.0.0.1:39999")
	if err != nil {
		// handle error
		panic(err)
	}
	go func() {
		if conn, err := ln.Accept(); err == nil {
			session, _ := Server(conn, nil)
			if stream, err := session.OpenStream(); err == nil {
				const N = 100
				buf := make([]byte, 10)
				for i := 0; i < N; i++ {
					msg := fmt.Sprintf("hello%v", i)
					stream.Write([]byte(msg))
					if n, err := stream.Read(buf); err != nil {
						t.Fatal(err)
					} else if string(buf[:n]) != msg {
						t.Fatal(err)
					}
				}
				stream.Close()
			} else {
				t.Fatal(err)
			}
		} else {
			t.Fatal(err)
		}
	}()

	cli, err := net.Dial("tcp", "127.0.0.1:39999")
	if err != nil {
		t.Fatal(err)
	}
	if session, err := Client(cli, nil); err == nil {
		if stream, err := session.AcceptStream(); err == nil {
			buf := make([]byte, 65536)
			for {
				n, err := stream.Read(buf)
				if err != nil {
					break
				}
				stream.Write(buf[:n])
			}
		} else {
			t.Fatal(err)
		}
	} else {
		t.Fatal(err)
	}
}

func TestSendWithoutRecv(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	const N = 100
	for i := 0; i < N; i++ {
		msg := fmt.Sprintf("hello%v", i)
		stream.Write([]byte(msg))
	}
	buf := make([]byte, 1)
	if _, err := stream.Read(buf); err != nil {
		t.Fatal(err)
	}
	stream.Close()
}

func TestWriteAfterClose(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	stream.Close()
	if _, err := stream.Write([]byte("write after close")); err == nil {
		t.Fatal("write after close failed")
	}
}

func TestReadStreamAfterSessionClose(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	session.Close()
	buf := make([]byte, 10)
	if _, err := stream.Read(buf); err != nil {
		t.Log(err)
	} else {
		t.Fatal("read stream after session close succeeded")
	}
}

func TestWriteStreamAfterConnectionClose(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	session.conn.Close()
	if _, err := stream.Write([]byte("write after connection close")); err == nil {
		t.Fatal("write after connection close failed")
	}
}

func TestNumStreamAfterClose(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	if _, err := session.OpenStream(); err == nil {
		if session.NumStreams() != 1 {
			t.Fatal("wrong number of streams after opened")
		}
		session.Close()
		if session.NumStreams() != 0 {
			t.Fatal("wrong number of streams after session closed")
		}
	} else {
		t.Fatal(err)
	}
	cli.Close()
}

func TestRandomFrame(t *testing.T) {
	// pure random
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	for i := 0; i < 100; i++ {
		rnd := make([]byte, rand.Uint32()%1024)
		io.ReadFull(crand.Reader, rnd)
		session.conn.Write(rnd)
	}
	cli.Close()

	// double syn
	cli, err = net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ = Client(cli, nil)
	for i := 0; i < 100; i++ {
		f := newFrame(cmdSYN, 1000)
		session.writeFrame(f)
	}
	cli.Close()

	// random cmds
	cli, err = net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	allcmds := []byte{cmdSYN, cmdFIN, cmdPSH, cmdNOP}
	session, _ = Client(cli, nil)
	for i := 0; i < 100; i++ {
		f := newFrame(allcmds[rand.Int()%len(allcmds)], rand.Uint32())
		session.writeFrame(f)
	}
	cli.Close()

	// random cmds & sids
	cli, err = net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ = Client(cli, nil)
	for i := 0; i < 100; i++ {
		f := newFrame(byte(rand.Uint32()), rand.Uint32())
		session.writeFrame(f)
	}
	cli.Close()

	// random version
	cli, err = net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ = Client(cli, nil)
	for i := 0; i < 100; i++ {
		f := newFrame(byte(rand.Uint32()), rand.Uint32())
		f.ver = byte(rand.Uint32())
		session.writeFrame(f)
	}
	cli.Close()

	// incorrect size
	cli, err = net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ = Client(cli, nil)

	f := newFrame(byte(rand.Uint32()), rand.Uint32())
	rnd := make([]byte, rand.Uint32()%1024)
	io.ReadFull(crand.Reader, rnd)
	f.data = rnd

	buf := make([]byte, headerSize+len(f.data))
	buf[0] = f.ver
	buf[1] = f.cmd
	binary.LittleEndian.PutUint16(buf[2:], uint16(len(rnd)+1)) /// incorrect size
	binary.LittleEndian.PutUint32(buf[4:], f.sid)
	copy(buf[headerSize:], f.data)

	session.conn.Write(buf)
	t.Log(rawHeader(buf))
	cli.Close()
}

func TestReadDeadline(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	const N = 100
	buf := make([]byte, 10)
	var readErr error
	for i := 0; i < N; i++ {
		msg := fmt.Sprintf("hello%v", i)
		stream.Write([]byte(msg))
		stream.SetReadDeadline(time.Now().Add(-1 * time.Minute))
		if _, readErr = stream.Read(buf); readErr != nil {
			break
		}
	}
	if readErr != nil {
		if !strings.Contains(readErr.Error(), "i/o timeout") {
			t.Fatalf("Wrong error: %v", readErr)
		}
	} else {
		t.Fatal("No error when reading with past deadline")
	}
	session.Close()
}

func TestWriteDeadline(t *testing.T) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		t.Fatal(err)
	}
	session, _ := Client(cli, nil)
	stream, _ := session.OpenStream()
	buf := make([]byte, 10)
	var writeErr error
	for {
		stream.SetWriteDeadline(time.Now().Add(-1 * time.Minute))
		if _, writeErr = stream.Write(buf); writeErr != nil {
			if !strings.Contains(writeErr.Error(), "i/o timeout") {
				t.Fatalf("Wrong error: %v", writeErr)
			}
			break
		}
	}
	session.Close()
}

func BenchmarkAcceptClose(b *testing.B) {
	cli, err := net.Dial("tcp", "127.0.0.1:19999")
	if err != nil {
		b.Fatal(err)
	}
	session, _ := Client(cli, nil)
	for i := 0; i < b.N; i++ {
		if stream, err := session.OpenStream(); err == nil {
			stream.Close()
		} else {
			b.Fatal(err)
		}
	}
}

func BenchmarkConnSmux(b *testing.B) {
	cs, ss, err := getSmuxStreamPair()
	if err != nil {
		b.Fatal(err)
	}
	defer cs.Close()
	defer ss.Close()
	bench(b, cs, ss)
}

func BenchmarkConnTCP(b *testing.B) {
	cs, ss, err := getTCPConnectionPair()
	if err != nil {
		b.Fatal(err)
	}
	defer cs.Close()
	defer ss.Close()
	bench(b, cs, ss)
}

func getSmuxStreamPair() (*Stream, *Stream, error) {
	c1, c2, err := getTCPConnectionPair()
	if err != nil {
		return nil, nil, err
	}

	s, err := Server(c2, nil)
	if err != nil {
		return nil, nil, err
	}
	c, err := Client(c1, nil)
	if err != nil {
		return nil, nil, err
	}

	var ss *Stream
	done := make(chan error)
	go func() {
		var rerr error
		ss, rerr = s.AcceptStream()
		done <- rerr
		close(done)
	}()
	cs, err := c.OpenStream()
	if err != nil {
		return nil, nil, err
	}
	err = <-done
	if err != nil {
		return nil, nil, err
	}

	return cs, ss, nil
}

func getTCPConnectionPair() (net.Conn, net.Conn, error) {
	lst, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, nil, err
	}

	var conn0 net.Conn
	var err0 error
	done := make(chan struct{})
	go func() {
		conn0, err0 = lst.Accept()
		close(done)
	}()

	conn1, err := net.Dial("tcp", lst.Addr().String())
	if err != nil {
		return nil, nil, err
	}

	<-done
	if err0 != nil {
		return nil, nil, err0
	}
	return conn0, conn1, nil
}

func bench(b *testing.B, rd io.Reader, wr io.Writer) {
	buf := make([]byte, 128*1024)
	buf2 := make([]byte, 128*1024)
	b.SetBytes(128 * 1024)
	b.ResetTimer()

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		count := 0
		for {
			n, _ := rd.Read(buf2)
			count += n
			if count == 128*1024*b.N {
				return
			}
		}
	}()
	for i := 0; i < b.N; i++ {
		wr.Write(buf)
	}
	wg.Wait()
}
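The tests above exercise the 8-byte header layout defined in frame.go: version (1 byte), command (1 byte), payload length (2 bytes, little-endian), stream ID (4 bytes, little-endian). A small round-trip sketch of that layout, mirroring sendLoop's serialization and rawHeader's decoding; encodeHeader is a hypothetical helper, not part of the vendored sources:

package smux

import (
	"encoding/binary"
	"fmt"
)

// encodeHeader is a hypothetical helper mirroring sendLoop's wire format:
// ver(1) | cmd(1) | length(2, little-endian) | sid(4, little-endian).
func encodeHeader(f Frame) []byte {
	buf := make([]byte, headerSize)
	buf[0] = f.ver
	buf[1] = f.cmd
	binary.LittleEndian.PutUint16(buf[2:], uint16(len(f.data)))
	binary.LittleEndian.PutUint32(buf[4:], f.sid)
	return buf
}

func ExampleRawHeader() {
	f := newFrame(cmdPSH, 3)
	f.data = []byte("hello")
	// rawHeader decodes the same bytes that encodeHeader produced.
	fmt.Println(rawHeader(encodeHeader(f)))
	// Output: Version:1 Cmd:2 StreamID:3 Length:5
}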
Sia-1.3.0/vendor/github.com/xtaci/smux/smux.png000066400000000000000000000232431313565667000214110ustar00rootroot00000000000000[binary PNG image data omitted]
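One design choice worth noting before the stream code: session.go funnels every outbound frame through the single sendLoop goroutine, so callers hand a writeRequest over a channel and block on a per-request result channel instead of locking the connection. A stripped-down, standalone sketch of that pattern (hypothetical names, not the vendored code):

package main

import "fmt"

type request struct {
	payload string
	result  chan error
}

func main() {
	writes := make(chan request)

	// A single writer goroutine owns the output resource,
	// so callers never need a shared lock around it.
	go func() {
		for req := range writes {
			fmt.Println("writing:", req.payload) // stand-in for conn.Write
			req.result <- nil
			close(req.result)
		}
	}()

	// A caller submits a request and blocks until the writer reports back.
	req := request{payload: "frame bytes", result: make(chan error, 1)}
	writes <- req
	if err := <-req.result; err != nil {
		panic(err)
	}
}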
Sia-1.3.0/vendor/github.com/xtaci/smux/stream.go000066400000000000000000000122541313565667000215310ustar00rootroot00000000000000package smux

import (
	"bytes"
	"io"
	"net"
	"sync"
	"sync/atomic"
	"time"

	"github.com/pkg/errors"
)

// Stream implements net.Conn
type Stream struct {
	id            uint32
	rstflag       int32
	sess          *Session
	buffer        bytes.Buffer
	bufferLock    sync.Mutex
	frameSize     int
	chReadEvent   chan struct{} // notify a read event
	die           chan struct{} // flag the stream has closed
	dieLock       sync.Mutex
	readDeadline  atomic.Value
	writeDeadline atomic.Value
}

// newStream initializes a Stream struct
func newStream(id uint32, frameSize int, sess *Session) *Stream {
	s := new(Stream)
	s.id = id
	s.chReadEvent = make(chan struct{}, 1)
	s.frameSize = frameSize
	s.sess = sess
	s.die = make(chan struct{})
	return s
}

// ID returns the unique stream ID.
func (s *Stream) ID() uint32 {
	return s.id
}

// Read implements net.Conn
func (s *Stream) Read(b []byte) (n int, err error) {
	var deadline <-chan time.Time
	if d, ok := s.readDeadline.Load().(time.Time); ok && !d.IsZero() {
		timer := time.NewTimer(d.Sub(time.Now()))
		defer timer.Stop()
		deadline = timer.C
	}

READ:
	s.bufferLock.Lock()
	n, err = s.buffer.Read(b)
	s.bufferLock.Unlock()

	if n > 0 {
		s.sess.returnTokens(n)
		return n, nil
	} else if atomic.LoadInt32(&s.rstflag) == 1 {
		_ = s.Close()
		return 0, io.EOF
	}

	select {
	case <-s.chReadEvent:
		goto READ
	case <-deadline:
		return n, errTimeout
	case <-s.die:
		return 0, errors.New(errBrokenPipe)
	}
}

// Write implements net.Conn
func (s *Stream) Write(b []byte) (n int, err error) {
	var deadline <-chan time.Time
	if d, ok := s.writeDeadline.Load().(time.Time); ok && !d.IsZero() {
		timer := time.NewTimer(d.Sub(time.Now()))
		defer timer.Stop()
		deadline = timer.C
	}

	select {
	case <-s.die:
		return 0, errors.New(errBrokenPipe)
	default:
	}

	frames := s.split(b, cmdPSH, s.id)
	sent := 0
	for k := range frames {
		req := writeRequest{
			frame:  frames[k],
			result: make(chan writeResult, 1),
		}

		select {
		case s.sess.writes <- req:
		case <-s.die:
			return sent, errors.New(errBrokenPipe)
		case <-deadline:
			return sent, errTimeout
		}

		select {
		case result := <-req.result:
			sent += result.n
			if result.err != nil {
				return sent, result.err
			}
		case <-s.die:
			return sent, errors.New(errBrokenPipe)
		case <-deadline:
			return sent, errTimeout
		}
	}
	return sent, nil
}

// Close implements net.Conn
func (s *Stream) Close() error {
	s.dieLock.Lock()

	select {
	case <-s.die:
		s.dieLock.Unlock()
		return errors.New(errBrokenPipe)
	default:
		close(s.die)
		s.dieLock.Unlock()
		s.sess.streamClosed(s.id)
		_, err := s.sess.writeFrame(newFrame(cmdFIN, s.id))
		return err
	}
}

// SetReadDeadline sets the read deadline as defined by
// net.Conn.SetReadDeadline.
// A zero time value disables the deadline.
func (s *Stream) SetReadDeadline(t time.Time) error {
	s.readDeadline.Store(t)
	return nil
}

// SetWriteDeadline sets the write deadline as defined by
// net.Conn.SetWriteDeadline.
// A zero time value disables the deadline.
func (s *Stream) SetWriteDeadline(t time.Time) error {
	s.writeDeadline.Store(t)
	return nil
}

// SetDeadline sets both read and write deadlines as defined by
// net.Conn.SetDeadline.
// A zero time value disables the deadlines.
func (s *Stream) SetDeadline(t time.Time) error {
	if err := s.SetReadDeadline(t); err != nil {
		return err
	}
	if err := s.SetWriteDeadline(t); err != nil {
		return err
	}
	return nil
}

// sessionClose closes the stream on behalf of the session
func (s *Stream) sessionClose() {
	s.dieLock.Lock()
	defer s.dieLock.Unlock()
	select {
	case <-s.die:
	default:
		close(s.die)
	}
}

// LocalAddr satisfies net.Conn interface
func (s *Stream) LocalAddr() net.Addr {
	if ts, ok := s.sess.conn.(interface {
		LocalAddr() net.Addr
	}); ok {
		return ts.LocalAddr()
	}
	return nil
}

// RemoteAddr satisfies net.Conn interface
func (s *Stream) RemoteAddr() net.Addr {
	if ts, ok := s.sess.conn.(interface {
		RemoteAddr() net.Addr
	}); ok {
		return ts.RemoteAddr()
	}
	return nil
}

// pushBytes appends a slice to the stream's read buffer
func (s *Stream) pushBytes(p []byte) {
	s.bufferLock.Lock()
	s.buffer.Write(p)
	s.bufferLock.Unlock()
}

// recycleTokens transforms the remaining buffered bytes into tokens
// (truncates the buffer)
func (s *Stream) recycleTokens() (n int) {
	s.bufferLock.Lock()
	n = s.buffer.Len()
	s.buffer.Reset()
	s.bufferLock.Unlock()
	return
}

// split splits a large byte buffer into smaller frames; the frames
// reference the original slice
func (s *Stream) split(bts []byte, cmd byte, sid uint32) []Frame {
	frames := make([]Frame, 0, len(bts)/s.frameSize+1)
	for len(bts) > s.frameSize {
		frame := newFrame(cmd, sid)
		frame.data = bts[:s.frameSize]
		bts = bts[s.frameSize:]
		frames = append(frames, frame)
	}
	if len(bts) > 0 {
		frame := newFrame(cmd, sid)
		frame.data = bts
		frames = append(frames, frame)
	}
	return frames
}

// notifyReadEvent signals a read event
func (s *Stream) notifyReadEvent() {
	select {
	case s.chReadEvent <- struct{}{}:
	default:
	}
}

// markRST marks this stream as reset
func (s *Stream) markRST() {
	atomic.StoreInt32(&s.rstflag, 1)
}

var errTimeout error = &timeoutError{}

type timeoutError struct{}

func (e *timeoutError) Error() string   { return "i/o timeout" }
func (e *timeoutError) Timeout() bool   { return true }
func (e *timeoutError) Temporary() bool { return true }
Sia-1.3.0/vendor/golang.org/000077500000000000000000000000001313565667000155755ustar00rootroot00000000000000Sia-1.3.0/vendor/golang.org/x/000077500000000000000000000000001313565667000160445ustar00rootroot00000000000000Sia-1.3.0/vendor/golang.org/x/crypto/000077500000000000000000000000001313565667000173645ustar00rootroot00000000000000Sia-1.3.0/vendor/golang.org/x/crypto/LICENSE000066400000000000000000000027071313565667000203770ustar00rootroot00000000000000
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Sia-1.3.0/vendor/golang.org/x/crypto/PATENTS000066400000000000000000000024271313565667000204320ustar00rootroot00000000000000Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute
or order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
Sia-1.3.0/vendor/golang.org/x/crypto/twofish/000077500000000000000000000000001313565667000210475ustar00rootroot00000000000000Sia-1.3.0/vendor/golang.org/x/crypto/twofish/twofish.go000066400000000000000000000272261313565667000230700ustar00rootroot00000000000000// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package twofish implements Bruce Schneier's Twofish encryption algorithm.
package twofish

// Twofish is defined in http://www.schneier.com/paper-twofish-paper.pdf [TWOFISH]

// This code is a port of the LibTom C implementation.
// See http://libtom.org/?page=features&newsitems=5&whatfile=crypt.
// LibTomCrypt is free for all purposes under the public domain.
// It was heavily inspired by the go blowfish package.

import "strconv"

// BlockSize is the constant block size of Twofish.
const BlockSize = 16

const mdsPolynomial = 0x169 // x^8 + x^6 + x^5 + x^3 + 1, see [TWOFISH] 4.2
const rsPolynomial = 0x14d  // x^8 + x^6 + x^3 + x^2 + 1, see [TWOFISH] 4.3

// A Cipher is an instance of Twofish encryption using a particular key.
type Cipher struct {
	s [4][256]uint32
	k [40]uint32
}

type KeySizeError int

func (k KeySizeError) Error() string {
	return "crypto/twofish: invalid key size " + strconv.Itoa(int(k))
}

// NewCipher creates and returns a Cipher.
// The key argument should be the Twofish key, 16, 24 or 32 bytes.
func NewCipher(key []byte) (*Cipher, error) {
	keylen := len(key)

	if keylen != 16 && keylen != 24 && keylen != 32 {
		return nil, KeySizeError(keylen)
	}

	// k is the number of 64 bit words in key
	k := keylen / 8

	// Create the S[..] words
	var S [4 * 4]byte
	for i := 0; i < k; i++ {
		// Computes [y0 y1 y2 y3] = rs . [x0 x1 x2 x3 x4 x5 x6 x7]
		for j, rsRow := range rs {
			for k, rsVal := range rsRow {
				S[4*i+j] ^= gfMult(key[8*i+k], rsVal, rsPolynomial)
			}
		}
	}

	// Calculate subkeys
	c := new(Cipher)
	var tmp [4]byte
	for i := byte(0); i < 20; i++ {
		// A = h(p * 2x, Me)
		for j := range tmp {
			tmp[j] = 2 * i
		}
		A := h(tmp[:], key, 0)

		// B = rolc(h(p * (2x + 1), Mo), 8)
		for j := range tmp {
			tmp[j] = 2*i + 1
		}
		B := h(tmp[:], key, 1)
		B = rol(B, 8)

		c.k[2*i] = A + B

		// K[2i+1] = (A + 2B) <<< 9
		c.k[2*i+1] = rol(2*B+A, 9)
	}

	// Calculate sboxes
	switch k {
	case 2:
		for i := range c.s[0] {
			c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][byte(i)]^S[0]]^S[4]], 0)
			c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][byte(i)]^S[1]]^S[5]], 1)
			c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][byte(i)]^S[2]]^S[6]], 2)
			c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][byte(i)]^S[3]]^S[7]], 3)
		}
	case 3:
		for i := range c.s[0] {
			c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][sbox[1][byte(i)]^S[0]]^S[4]]^S[8]], 0)
			c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][sbox[1][byte(i)]^S[1]]^S[5]]^S[9]], 1)
			c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][sbox[0][byte(i)]^S[2]]^S[6]]^S[10]], 2)
			c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][sbox[0][byte(i)]^S[3]]^S[7]]^S[11]], 3)
		}
	default:
		for i := range c.s[0] {
			c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][sbox[1][sbox[1][byte(i)]^S[0]]^S[4]]^S[8]]^S[12]], 0)
			c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][sbox[1][sbox[0][byte(i)]^S[1]]^S[5]]^S[9]]^S[13]], 1)
			c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][sbox[0][sbox[0][byte(i)]^S[2]]^S[6]]^S[10]]^S[14]], 2)
			c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][sbox[0][sbox[1][byte(i)]^S[3]]^S[7]]^S[11]]^S[15]], 3)
		}
	}

	return c, nil
}

// BlockSize returns the Twofish block size, 16 bytes.
func (c *Cipher) BlockSize() int {
	return BlockSize
}

// store32l stores src in dst in little-endian form.
func store32l(dst []byte, src uint32) {
	dst[0] = byte(src)
	dst[1] = byte(src >> 8)
	dst[2] = byte(src >> 16)
	dst[3] = byte(src >> 24)
	return
}

// load32l reads a little-endian uint32 from src.
func load32l(src []byte) uint32 {
	return uint32(src[0]) | uint32(src[1])<<8 | uint32(src[2])<<16 | uint32(src[3])<<24
}

// rol returns x after a left circular rotation of y bits.
func rol(x, y uint32) uint32 {
	return (x << (y & 31)) | (x >> (32 - (y & 31)))
}

// ror returns x after a right circular rotation of y bits.
func ror(x, y uint32) uint32 {
	return (x >> (y & 31)) | (x << (32 - (y & 31)))
}

// The RS matrix. See [TWOFISH] 4.3
var rs = [4][8]byte{
	{0x01, 0xA4, 0x55, 0x87, 0x5A, 0x58, 0xDB, 0x9E},
	{0xA4, 0x56, 0x82, 0xF3, 0x1E, 0xC6, 0x68, 0xE5},
	{0x02, 0xA1, 0xFC, 0xC1, 0x47, 0xAE, 0x3D, 0x19},
	{0xA4, 0x55, 0x87, 0x5A, 0x58, 0xDB, 0x9E, 0x03},
}

// sbox tables
var sbox = [2][256]byte{
	{
		0xa9, 0x67, 0xb3, 0xe8, 0x04, 0xfd, 0xa3, 0x76, 0x9a, 0x92, 0x80, 0x78, 0xe4, 0xdd, 0xd1, 0x38,
		0x0d, 0xc6, 0x35, 0x98, 0x18, 0xf7, 0xec, 0x6c, 0x43, 0x75, 0x37, 0x26, 0xfa, 0x13, 0x94, 0x48,
		0xf2, 0xd0, 0x8b, 0x30, 0x84, 0x54, 0xdf, 0x23, 0x19, 0x5b, 0x3d, 0x59, 0xf3, 0xae, 0xa2, 0x82,
		0x63, 0x01, 0x83, 0x2e, 0xd9, 0x51, 0x9b, 0x7c, 0xa6, 0xeb, 0xa5, 0xbe, 0x16, 0x0c, 0xe3, 0x61,
		0xc0, 0x8c, 0x3a, 0xf5, 0x73, 0x2c, 0x25, 0x0b, 0xbb, 0x4e, 0x89, 0x6b, 0x53, 0x6a, 0xb4, 0xf1,
		0xe1, 0xe6, 0xbd, 0x45, 0xe2, 0xf4, 0xb6, 0x66, 0xcc, 0x95, 0x03, 0x56, 0xd4, 0x1c, 0x1e, 0xd7,
		0xfb, 0xc3, 0x8e, 0xb5, 0xe9, 0xcf, 0xbf, 0xba, 0xea, 0x77, 0x39, 0xaf, 0x33, 0xc9, 0x62, 0x71,
		0x81, 0x79, 0x09, 0xad, 0x24, 0xcd, 0xf9, 0xd8, 0xe5, 0xc5, 0xb9, 0x4d, 0x44, 0x08, 0x86, 0xe7,
		0xa1, 0x1d, 0xaa, 0xed, 0x06, 0x70, 0xb2, 0xd2, 0x41, 0x7b, 0xa0, 0x11, 0x31, 0xc2, 0x27, 0x90,
		0x20, 0xf6, 0x60, 0xff, 0x96, 0x5c, 0xb1, 0xab, 0x9e, 0x9c, 0x52, 0x1b, 0x5f, 0x93, 0x0a, 0xef,
		0x91, 0x85, 0x49, 0xee, 0x2d, 0x4f, 0x8f, 0x3b, 0x47, 0x87, 0x6d, 0x46, 0xd6, 0x3e, 0x69, 0x64,
		0x2a, 0xce, 0xcb, 0x2f, 0xfc, 0x97, 0x05, 0x7a, 0xac, 0x7f, 0xd5, 0x1a, 0x4b, 0x0e, 0xa7, 0x5a,
		0x28, 0x14, 0x3f, 0x29, 0x88, 0x3c, 0x4c, 0x02, 0xb8, 0xda, 0xb0, 0x17, 0x55, 0x1f, 0x8a, 0x7d,
		0x57, 0xc7, 0x8d, 0x74, 0xb7, 0xc4, 0x9f, 0x72, 0x7e, 0x15, 0x22, 0x12, 0x58, 0x07, 0x99, 0x34,
		0x6e, 0x50, 0xde, 0x68, 0x65, 0xbc, 0xdb, 0xf8, 0xc8, 0xa8, 0x2b, 0x40, 0xdc, 0xfe, 0x32, 0xa4,
		0xca, 0x10, 0x21, 0xf0, 0xd3, 0x5d, 0x0f, 0x00, 0x6f, 0x9d, 0x36, 0x42, 0x4a, 0x5e, 0xc1, 0xe0,
	},
	{
		0x75, 0xf3, 0xc6, 0xf4, 0xdb, 0x7b, 0xfb, 0xc8, 0x4a, 0xd3, 0xe6, 0x6b, 0x45, 0x7d, 0xe8, 0x4b,
		0xd6, 0x32, 0xd8, 0xfd, 0x37, 0x71, 0xf1, 0xe1, 0x30, 0x0f, 0xf8, 0x1b, 0x87, 0xfa, 0x06, 0x3f,
		0x5e, 0xba, 0xae, 0x5b, 0x8a, 0x00, 0xbc, 0x9d, 0x6d, 0xc1, 0xb1, 0x0e, 0x80, 0x5d, 0xd2, 0xd5,
		0xa0, 0x84, 0x07, 0x14, 0xb5, 0x90, 0x2c, 0xa3, 0xb2, 0x73, 0x4c, 0x54, 0x92, 0x74, 0x36, 0x51,
		0x38, 0xb0, 0xbd, 0x5a, 0xfc, 0x60, 0x62, 0x96, 0x6c, 0x42, 0xf7, 0x10, 0x7c, 0x28, 0x27, 0x8c,
		0x13, 0x95, 0x9c, 0xc7, 0x24, 0x46, 0x3b, 0x70, 0xca, 0xe3, 0x85, 0xcb, 0x11, 0xd0, 0x93, 0xb8,
		0xa6, 0x83, 0x20, 0xff, 0x9f, 0x77, 0xc3, 0xcc, 0x03, 0x6f, 0x08, 0xbf, 0x40, 0xe7, 0x2b, 0xe2,
		0x79, 0x0c, 0xaa, 0x82, 0x41, 0x3a, 0xea, 0xb9, 0xe4, 0x9a, 0xa4, 0x97, 0x7e, 0xda, 0x7a, 0x17,
		0x66, 0x94, 0xa1, 0x1d, 0x3d, 0xf0, 0xde, 0xb3, 0x0b, 0x72, 0xa7, 0x1c, 0xef, 0xd1, 0x53, 0x3e,
		0x8f, 0x33, 0x26, 0x5f, 0xec, 0x76, 0x2a, 0x49, 0x81, 0x88, 0xee, 0x21, 0xc4, 0x1a, 0xeb, 0xd9,
		0xc5, 0x39, 0x99, 0xcd, 0xad, 0x31, 0x8b, 0x01, 0x18, 0x23, 0xdd, 0x1f, 0x4e, 0x2d, 0xf9, 0x48,
		0x4f, 0xf2, 0x65, 0x8e, 0x78, 0x5c, 0x58, 0x19, 0x8d, 0xe5, 0x98, 0x57, 0x67, 0x7f, 0x05, 0x64,
		0xaf, 0x63, 0xb6, 0xfe, 0xf5, 0xb7, 0x3c, 0xa5, 0xce, 0xe9, 0x68, 0x44, 0xe0, 0x4d, 0x43, 0x69,
		0x29, 0x2e, 0xac, 0x15, 0x59, 0xa8, 0x0a, 0x9e, 0x6e, 0x47, 0xdf, 0x34, 0x35, 0x6a, 0xcf, 0xdc,
		0x22, 0xc9, 0xc0, 0x9b, 0x89, 0xd4, 0xed, 0xab, 0x12, 0xa2, 0x0d, 0x52, 0xbb, 0x02, 0x2f, 0xa9,
		0xd7, 0x61, 0x1e, 0xb4, 0x50, 0x04, 0xf6, 0xc2, 0x16, 0x25, 0x86, 0x56, 0x55, 0x09, 0xbe, 0x91,
	},
}

// gfMult returns a·b in GF(2^8)/p
func gfMult(a, b byte, p uint32) byte {
	B := [2]uint32{0, uint32(b)}
	P := [2]uint32{0, p}
	var result uint32

	// branchless GF multiplier
	for i := 0; i < 7; i++ {
		result ^= B[a&1]
		a >>= 1
		B[1] = P[B[1]>>7] ^ (B[1] << 1)
	}
	result ^= B[a&1]
	return byte(result)
}

// mdsColumnMult calculates y{col} where [y0 y1 y2 y3] = MDS · [x0 0 0 0]
func mdsColumnMult(in byte, col int) uint32 {
	mul01 := in
	mul5B := gfMult(in, 0x5B, mdsPolynomial)
	mulEF := gfMult(in, 0xEF, mdsPolynomial)

	switch col {
	case 0:
		return uint32(mul01) | uint32(mul5B)<<8 | uint32(mulEF)<<16 | uint32(mulEF)<<24
	case 1:
		return uint32(mulEF) | uint32(mulEF)<<8 | uint32(mul5B)<<16 | uint32(mul01)<<24
	case 2:
		return uint32(mul5B) | uint32(mulEF)<<8 | uint32(mul01)<<16 | uint32(mulEF)<<24
	case 3:
		return uint32(mul5B) | uint32(mul01)<<8 | uint32(mulEF)<<16 | uint32(mul5B)<<24
	}

	panic("unreachable")
}

// h implements the S-box generation function. See [TWOFISH] 4.3.5
func h(in, key []byte, offset int) uint32 {
	var y [4]byte
	for x := range y {
		y[x] = in[x]
	}
	switch len(key) / 8 {
	case 4:
		y[0] = sbox[1][y[0]] ^ key[4*(6+offset)+0]
		y[1] = sbox[0][y[1]] ^ key[4*(6+offset)+1]
		y[2] = sbox[0][y[2]] ^ key[4*(6+offset)+2]
		y[3] = sbox[1][y[3]] ^ key[4*(6+offset)+3]
		fallthrough
	case 3:
		y[0] = sbox[1][y[0]] ^ key[4*(4+offset)+0]
		y[1] = sbox[1][y[1]] ^ key[4*(4+offset)+1]
		y[2] = sbox[0][y[2]] ^ key[4*(4+offset)+2]
		y[3] = sbox[0][y[3]] ^ key[4*(4+offset)+3]
		fallthrough
	case 2:
		y[0] = sbox[1][sbox[0][sbox[0][y[0]]^key[4*(2+offset)+0]]^key[4*(0+offset)+0]]
		y[1] = sbox[0][sbox[0][sbox[1][y[1]]^key[4*(2+offset)+1]]^key[4*(0+offset)+1]]
		y[2] = sbox[1][sbox[1][sbox[0][y[2]]^key[4*(2+offset)+2]]^key[4*(0+offset)+2]]
		y[3] = sbox[0][sbox[1][sbox[1][y[3]]^key[4*(2+offset)+3]]^key[4*(0+offset)+3]]
	}

	// [y0 y1 y2 y3] = MDS . [x0 x1 x2 x3]
	var mdsMult uint32
	for i := range y {
		mdsMult ^= mdsColumnMult(y[i], i)
	}
	return mdsMult
}

// Encrypt encrypts a 16-byte block from src to dst, which may overlap.
// Note that for amounts of data larger than a block,
// it is not safe to just call Encrypt on successive blocks;
// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
func (c *Cipher) Encrypt(dst, src []byte) {
	S1 := c.s[0]
	S2 := c.s[1]
	S3 := c.s[2]
	S4 := c.s[3]

	// Load input
	ia := load32l(src[0:4])
	ib := load32l(src[4:8])
	ic := load32l(src[8:12])
	id := load32l(src[12:16])

	// Pre-whitening
	ia ^= c.k[0]
	ib ^= c.k[1]
	ic ^= c.k[2]
	id ^= c.k[3]

	for i := 0; i < 8; i++ {
		k := c.k[8+i*4 : 12+i*4]
		t2 := S2[byte(ib)] ^ S3[byte(ib>>8)] ^ S4[byte(ib>>16)] ^ S1[byte(ib>>24)]
		t1 := S1[byte(ia)] ^ S2[byte(ia>>8)] ^ S3[byte(ia>>16)] ^ S4[byte(ia>>24)] + t2
		ic = ror(ic^(t1+k[0]), 1)
		id = rol(id, 1) ^ (t2 + t1 + k[1])

		t2 = S2[byte(id)] ^ S3[byte(id>>8)] ^ S4[byte(id>>16)] ^ S1[byte(id>>24)]
		t1 = S1[byte(ic)] ^ S2[byte(ic>>8)] ^ S3[byte(ic>>16)] ^ S4[byte(ic>>24)] + t2
		ia = ror(ia^(t1+k[2]), 1)
		ib = rol(ib, 1) ^ (t2 + t1 + k[3])
	}

	// Output with "undo last swap"
	ta := ic ^ c.k[4]
	tb := id ^ c.k[5]
	tc := ia ^ c.k[6]
	td := ib ^ c.k[7]

	store32l(dst[0:4], ta)
	store32l(dst[4:8], tb)
	store32l(dst[8:12], tc)
	store32l(dst[12:16], td)
}

// Decrypt decrypts a 16-byte block from src to dst, which may overlap.
func (c *Cipher) Decrypt(dst, src []byte) {
	S1 := c.s[0]
	S2 := c.s[1]
	S3 := c.s[2]
	S4 := c.s[3]

	// Load input
	ta := load32l(src[0:4])
	tb := load32l(src[4:8])
	tc := load32l(src[8:12])
	td := load32l(src[12:16])

	// Undo final swap
	ia := tc ^ c.k[6]
	ib := td ^ c.k[7]
	ic := ta ^ c.k[4]
	id := tb ^ c.k[5]

	for i := 8; i > 0; i-- {
		k := c.k[4+i*4 : 8+i*4]
		t2 := S2[byte(id)] ^ S3[byte(id>>8)] ^ S4[byte(id>>16)] ^ S1[byte(id>>24)]
		t1 := S1[byte(ic)] ^ S2[byte(ic>>8)] ^ S3[byte(ic>>16)] ^ S4[byte(ic>>24)] + t2
		ia = rol(ia, 1) ^ (t1 + k[2])
		ib = ror(ib^(t2+t1+k[3]), 1)

		t2 = S2[byte(ib)] ^ S3[byte(ib>>8)] ^ S4[byte(ib>>16)] ^ S1[byte(ib>>24)]
		t1 = S1[byte(ia)] ^ S2[byte(ia>>8)] ^ S3[byte(ia>>16)] ^ S4[byte(ia>>24)] + t2
		ic = rol(ic, 1) ^ (t1 + k[0])
		id = ror(id^(t2+t1+k[1]), 1)
	}

	// Undo pre-whitening
	ia ^= c.k[0]
	ib ^= c.k[1]
	ic ^= c.k[2]
	id ^= c.k[3]

	store32l(dst[0:4], ia)
	store32l(dst[4:8], ib)
	store32l(dst[8:12], ic)
	store32l(dst[12:16], id)
}
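Since Encrypt and Decrypt operate on single 16-byte blocks, larger messages need a mode of operation, as the Encrypt comment above notes. A hedged sketch (key and IV below are zero-valued placeholders; real code must use random values) wrapping the Cipher in CTR mode via the standard crypto/cipher package, which accepts any cipher.Block implementation:

package main

import (
	"crypto/cipher"
	"fmt"

	"golang.org/x/crypto/twofish"
)

func main() {
	key := make([]byte, 32)               // placeholder key; use a random key in practice
	iv := make([]byte, twofish.BlockSize) // placeholder IV; must be unique per message

	// *twofish.Cipher satisfies the cipher.Block interface.
	block, err := twofish.NewCipher(key)
	if err != nil {
		panic(err)
	}

	plaintext := []byte("more than one block of data to encrypt")
	ciphertext := make([]byte, len(plaintext))

	// CTR turns the block cipher into a stream cipher, so no padding is needed.
	cipher.NewCTR(block, iv).XORKeyStream(ciphertext, plaintext)

	// Decryption with CTR is the same operation with the same key and IV.
	recovered := make([]byte, len(ciphertext))
	cipher.NewCTR(block, iv).XORKeyStream(recovered, ciphertext)
	fmt.Printf("%s\n", recovered)
}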